metadata | text
---|---
{
"source": "jonaskoelker/entomophagous",
"score": 2
} |
#### File: tools/dd/sbt-test-order-dependency-finder.py
```python
import dd, os, random, sys
def main():
result = find_minimal_set()
    if result is None:
sys.exit("No minimal failing set of test cases found")
for test_case in result: print(test_case)
def find_minimal_set():
test_cases = extract_test_cases()
evaluate = partial_evaluate(test_cases)
all_of_them = list(range(len(test_cases)))
for _ in range(100):
# Try each pair in both orderings in guaranteed O(1) runs.
        if _ == 1:
            test_cases.reverse()
        else:
            random.shuffle(test_cases)
has_bug = evaluate(all_of_them)
if not has_bug: continue
minimal_set = dd.ddmin(len(test_cases), evaluate)
case_combination = [test_cases[i] for i in minimal_set]
return case_combination
# Trying all triples, quadruples, k-tuples, in a guaranteed
# rather than expected k! runs is impossible for k = 3, n = 5.
# Greedily committing to untried k-tuples until the next run
# is uniquely determined is too slow for non-trivial n (and k).
def partial_evaluate(test_cases):
def evaluate(indices):
cases = [test_cases[i] for i in indices]
command = "sbt 'testOnly %s'" % ' '.join(cases)
command_result = os.system(command)
        return bool(command_result)
return evaluate
def extract_test_cases():
test_cases = []
pattern = '[info] * '
output = os.popen("sbt 'show test:definedTestNames'")
for line in output:
if line.startswith(pattern):
test_cases.append(line[len(pattern):-1])
return sorted(test_cases)
if __name__ == '__main__': main()
``` |
{
"source": "JonasKoenig/CodeOnMyMind",
"score": 3
} |
#### File: machine-learning/kmeans/1-cluster.py
```python
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
np.random.seed(0)
data = pd.read_csv('animals.csv') # import data
k = 3
# normalize data to the range [0, 1]
raw_weights = np.interp(data.values[:,0], (data.values[:,0].min(), data.values[:,0].max()), (0, 1))
raw_heights = np.interp(data.values[:,1], (data.values[:,1].min(), data.values[:,1].max()), (0, 1))
normalized_data = np.array(list(zip(raw_weights, raw_heights)))
def assignData(centers, x):
distances = [sum((c - x)**2) for c in centers]
return np.argmin(distances)
def adjustCenter(assignments, i):
members = normalized_data[assignments == i]
return np.mean(members[:,0]), np.mean(members[:,1])
def KMeans(X, k):
iterations = 0
centers = np.zeros((k,2))
newCenters = X[np.random.randint(len(X), size=k)] # initial random centers from data
while(np.any(centers != newCenters)): # continue if centers changed
centers = newCenters
assignments = np.array([assignData(centers, x) for x in X]) # determine assignments
newCenters = [adjustCenter(assignments, i) for i in range(k)] # adjust centers
iterations += 1
assignments = np.array([assignData(centers, x) for x in X])
print('K-Means terminated after ' + str(iterations) + ' iterations.')
return assignments, centers
# execute k-means algorithm
assignments, centers = KMeans(normalized_data, k)
# plot result
for i in range(k):
plt.scatter(normalized_data[assignments == i,0], normalized_data[assignments == i,1], marker='o', label='cluster '+str(i+1))
plt.scatter([c[0] for c in centers], [c[1] for c in centers], marker='+', s=50, color='black', label='centers')
plt.xlabel('Normalized Weight')
plt.ylabel('Normalized Height')
plt.legend()
plt.show()
``` |
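The script above assumes a two-column `animals.csv` with weight in column 0 and height in column 1 (inferred from the axis labels). A minimal sketch, under that assumption, for generating a hypothetical input file so the clustering can be tried without the original data; the column names and value ranges are illustrative only:

```python
import numpy as np
import pandas as pd

# Hypothetical stand-in for animals.csv: two numeric columns (weight, height),
# drawn from three loose groups so k = 3 clustering has something to find.
rng = np.random.default_rng(0)
weights = np.concatenate([rng.normal(5, 1, 20), rng.normal(50, 5, 20), rng.normal(500, 50, 20)])
heights = np.concatenate([rng.normal(0.3, 0.05, 20), rng.normal(1.0, 0.1, 20), rng.normal(2.5, 0.3, 20)])
pd.DataFrame({'weight': weights, 'height': heights}).to_csv('animals.csv', index=False)
```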
{
"source": "JonasKorte/PyGameEngine",
"score": 3
} |
#### File: JonasKorte/PyGameEngine/main.py
```python
import pygame
# Set window display constants
window = None
resolution = (1280, 720)
title = "PyGameEngine"
# Set frame timing constants
fps = 144
clock = None
# Main game function
def main():
# Set window resolution & title
window = pygame.display.set_mode(resolution)
pygame.display.set_caption(title)
# Initialize game clock
clock = pygame.time.Clock()
# Game loop
run = True
while run:
clock.tick(fps)
# Event loop
for event in pygame.event.get():
            # Quit event
if event.type == pygame.QUIT:
run = False
# Only run game when executed directly
if __name__ == "__main__":
pygame.init()
main()
pygame.quit()
```
#### File: PyGameEngine/nodes/node.py
```python
class Node():
# Constructor
def __init__(self, name, ID, nodeTree, parent=None):
self.name = name
self.ID = ID
self.nodeTree = nodeTree
self.parent = parent
# Awake event (Initialization)
def awake(self):
pass
# Start event (After initialization)
def start(self):
pass
# Update event (For input handling)
def update(self, deltaTime):
pass
# Render event (After update, for rendering)
def render(self, deltaTime):
pass
# Destroy event (Once node is destroyed)
def destroy(self):
pass
```
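A short sketch (hypothetical, not part of the repository) of how a concrete node might subclass `Node` and override its lifecycle hooks; `PlayerNode` and the driver calls are assumptions for illustration, and the `Node` class above is assumed to be in scope:

```python
# Hypothetical subclass illustrating the Node lifecycle hooks defined above.
class PlayerNode(Node):
    def awake(self):
        self.position = [0.0, 0.0]  # initialise state

    def update(self, deltaTime):
        self.position[0] += 10.0 * deltaTime  # move right over time

    def render(self, deltaTime):
        print("player at", self.position)

# Assumed driver code: in the engine, the game loop would call these each frame.
player = PlayerNode(name="player", ID=0, nodeTree=None)
player.awake()
player.start()
player.update(1 / 60)
player.render(1 / 60)
```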
#### File: PyGameEngine/nodes/scene.py
```python
import json
# Scene class
class Scene():
# Constructor
def __init__(self, name, ID, rootNode, settings, nodeTree, parent=None):
self.name = name
self.ID = ID
self.rootNode = rootNode
self.settings = settings
self.nodeTree = nodeTree
self.parent = parent
# Getter for root node
def getRoot(self):
return self.rootNode
# Load scene from file
    @staticmethod
    def loadScene(sceneFile):
        # Open & read the specified file; the with-block closes it automatically
        with open(sceneFile, 'r') as sceneHandle:
            sceneData = sceneHandle.read()
# Return JSON object
sceneObject = json.loads(sceneData)
return sceneObject
``` |
{
"source": "jonaskpb/CameraPiTelegramBot",
"score": 2
} |
#### File: jonaskpb/CameraPiTelegramBot/camerapibot.py
```python
import telepot
import time
import picamera
import sqlite3
import sys
import os
import logging
from subprocess import check_call, CalledProcessError
""" Class based implementation of the telepot bot """
class PiCameraBot(telepot.Bot):
def __init__(self, access_token):
telepot.Bot.__init__(self, access_token)
self.camera = picamera.PiCamera()
self.listening_since = time.time()
self.request_count = 0
self.invalid_users_count = 0
# Set some camera options if needed
# self.camera.vflip = True
# self.camera.hflip = True
""" Handles incoming messages """
def handle(self, msg):
chat_id = msg['from']['id']
user_name = "%s %s" % (msg['from']['first_name'], msg['from']['last_name'])
command = msg['text']
self.request_count += 1
conn = sqlite3.connect('camerapibot_auth.db')
c = conn.cursor()
c.execute('SELECT id FROM allowed_user_ids')
query = c.fetchall()
valid_ids = []
for x in query:
s = str(x[0])
valid_ids.append(s)
if not str(chat_id) in valid_ids:
logging.warning("[Access] Failed authentication attempt! user_id: %s" % chat_id)
self.invalid_users_count += 1
else:
if command == '/get_status':
logging.info("Received /get_status from %s (username: %s )" % (chat_id, user_name))
                self.sendMessage(chat_id, 'The bot has been listening since: %s' % self.listening_since)
self.sendMessage(chat_id, 'Total request count: %s' % self.request_count)
self.sendMessage(chat_id, 'Total unauthorized access attempts: %s' % self.invalid_users_count)
logging.info("Sent the current status to %s (username: %s )" % (chat_id, user_name))
elif command == '/get_image':
logging.info("Received /get_image from %s (username: %s )" % (chat_id, user_name))
self.camera.resolution = (1920, 1080)
self.camera.capture('image.jpg')
                with open('image.jpg', 'rb') as f:
                    self.sendPhoto(chat_id, f)
logging.info("Sent a picture to %s (username: %s )" % (chat_id, user_name))
elif command == '/get_video':
logging.info("Received /get_video from %s (username: %s )" % (chat_id, user_name))
try:
os.remove('video.h264')
except OSError:
pass
try:
os.remove('video.mp4')
except OSError:
pass
self.camera.resolution = (1280, 720)
self.camera.start_recording('video.h264')
self.camera.wait_recording(10)
self.camera.stop_recording()
cmd = ['MP4Box', '-add', 'video.h264', 'video.mp4']
try:
check_call(cmd)
f = open('video.mp4', 'rb')
self.sendVideo(chat_id, f)
logging.info("Sent a video to %s (username: %s )" % (chat_id, user_name))
except CalledProcessError:
logging.info("A problem occured while encoding the video!")
self.sendMessage(chat_id, 'A problem occured!')
conn.close()
# Reading the token from the command line is safer than hard-coding it in the script, since it is meant to be kept secret
TOKEN = sys.argv[1]
# Set up the logging format
logging.basicConfig(format='[%(asctime)s] %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p', filename='camerapi_bot.log', level=logging.INFO)
logging.info("Initializing CameraPiBot")
# Initialize the bot
bot = PiCameraBot(TOKEN)
bot.notifyOnMessage()
logging.info("Listening...")
# Main loop catching KeyboardInterrupt: when one is detected, the bot shuts down and the produced files are cleaned up, except for the log file
while True:
try:
time.sleep(5)
except KeyboardInterrupt:
try:
os.remove("image.jpg")
except OSError:
pass
try:
os.remove("video.h264")
except OSError:
pass
try:
os.remove("video.mp4")
except OSError:
pass
logging.info("KeyboardInterrupt detected, shutting down the bot!")
sys.exit("\nShutting down the Telegram Bot!")
``` |
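The bot authenticates users against an `allowed_user_ids` table in `camerapibot_auth.db` (see the `handle` method above). A minimal one-off setup sketch, assuming the table has a single `id` column (the column name is taken from the query above, the schema itself is an assumption); the chat id value is hypothetical:

```python
import sqlite3

# Setup sketch for the auth database queried by the bot above.
conn = sqlite3.connect('camerapibot_auth.db')
c = conn.cursor()
c.execute('CREATE TABLE IF NOT EXISTS allowed_user_ids (id INTEGER)')
c.execute('INSERT INTO allowed_user_ids (id) VALUES (?)', (123456789,))  # hypothetical Telegram chat id
conn.commit()
conn.close()
```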
{
"source": "JonasKs/aioify",
"score": 2
} |
#### File: aioify/aioify/__init__.py
```python
from functools import wraps, partial
import asyncio
import functools
import inspect
import module_wrapper
__all__ = ['aioify']
wrapper_assignments = tuple(x for x in functools.WRAPPER_ASSIGNMENTS if x != '__annotations__')
def wrap(func):
@wraps(func, assigned=wrapper_assignments)
async def run(*args, loop=None, executor=None, **kwargs):
if loop is None:
loop = asyncio.get_event_loop()
pfunc = partial(func, *args, **kwargs)
return await loop.run_in_executor(executor, pfunc)
@wraps(func, assigned=wrapper_assignments)
async def coroutine_run(*args, **kwargs):
_, _ = args, kwargs
return await func
@wraps(func, assigned=wrapper_assignments)
async def coroutine_function_run(*args, **kwargs):
return await func(*args, **kwargs)
if inspect.iscoroutine(object=func):
result = coroutine_run
elif inspect.iscoroutinefunction(object=func):
result = coroutine_function_run
else:
result = run
return result
def default_create_name_function(cls):
_ = cls
return 'create'
def aioify(obj, name=None, create_name_function=None, skip=(), wrap_return_values=False):
create_name_function = create_name_function or default_create_name_function
def create(cls):
func = wrap(func=cls) if inspect.isclass(object=cls) else None
return create_name_function(cls=cls), func
return module_wrapper.wrap(obj=obj,
wrapper=wrap,
methods_to_add={create},
name=name,
skip=skip,
wrap_return_values=wrap_return_values)
``` |
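A minimal usage sketch, assuming the `wrap` helper above is importable from the package; `slow_add` is a hypothetical blocking function used only for illustration:

```python
import asyncio
import time

# Assumes `wrap` from the module above is in scope (e.g. `from aioify import wrap`).

def slow_add(a, b):
    """Hypothetical blocking function."""
    time.sleep(0.1)
    return a + b

async def main():
    async_slow_add = wrap(slow_add)  # plain function -> runs in the default executor
    print(await async_slow_add(1, 2))  # -> 3

asyncio.run(main())
```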
{
"source": "JonasKs/arq",
"score": 2
} |
#### File: arq/tests/conftest.py
```python
import asyncio
import functools
import msgpack
import pytest
from aioredis import create_redis_pool
from arq.connections import ArqRedis, create_pool
from arq.worker import Worker
@pytest.fixture(name='loop')
def _fix_loop(event_loop):
asyncio.set_event_loop(event_loop)
return event_loop
@pytest.fixture
async def arq_redis(loop):
redis_ = await create_redis_pool(
('localhost', 6379), encoding='utf8', loop=loop, commands_factory=ArqRedis, minsize=5
)
await redis_.flushall()
yield redis_
redis_.close()
await redis_.wait_closed()
@pytest.fixture
async def arq_redis_msgpack(loop):
redis_ = await create_redis_pool(
('localhost', 6379),
encoding='utf8',
loop=loop,
commands_factory=functools.partial(
ArqRedis, job_serializer=msgpack.packb, job_deserializer=functools.partial(msgpack.unpackb, raw=False)
),
)
await redis_.flushall()
yield redis_
redis_.close()
await redis_.wait_closed()
@pytest.fixture
async def worker(arq_redis):
worker_: Worker = None
def create(functions=[], burst=True, poll_delay=0, max_jobs=10, arq_redis=arq_redis, **kwargs):
nonlocal worker_
worker_ = Worker(
functions=functions, redis_pool=arq_redis, burst=burst, poll_delay=poll_delay, max_jobs=max_jobs, **kwargs
)
return worker_
yield create
if worker_:
await worker_.close()
@pytest.fixture(name='create_pool')
async def fix_create_pool(loop):
pools = []
async def create_pool_(settings, *args, **kwargs):
pool = await create_pool(settings, *args, **kwargs)
pools.append(pool)
return pool
yield create_pool_
for p in pools:
p.close()
await asyncio.gather(*[p.wait_closed() for p in pools])
@pytest.fixture(name='cancel_remaining_task')
def fix_cancel_remaining_task(loop):
async def cancel_remaining_task():
tasks = asyncio.all_tasks(loop)
cancelled = []
for task in tasks:
            # checking the repr works on Python 3.7, where Task.get_coro() is not available
if 'cancel_remaining_task()' not in repr(task):
cancelled.append(task)
task.cancel()
await asyncio.gather(*cancelled, return_exceptions=True)
yield
loop.run_until_complete(cancel_remaining_task())
``` |
{
"source": "JonasKs/django-swagger-tester",
"score": 2
} |
#### File: django_swagger_tester/dynamic/get_schema.py
```python
import logging
from json import dumps, loads
from typing import Union
from django_swagger_tester.exceptions import OpenAPISchemaError
logger = logging.getLogger('django_swagger_tester')
def fetch_generated_schema(url: str, method: str, status_code: Union[int, str]) -> dict:
"""
Fetches a dynamically generated OpenAPI schema.
:param url: API endpoint URL, str
:param method: HTTP method, str
:param status_code: HTTP response code
:return: The section of the schema relevant for testing, dict
"""
logger.debug('Fetching generated dynamic schema')
from drf_yasg.openapi import Info
from drf_yasg.generators import OpenAPISchemaGenerator
# Fetch schema and convert to dict
schema = OpenAPISchemaGenerator(info=Info(title='', default_version='')).get_schema()
schema = loads(dumps(schema.as_odict()['paths'])) # Converts OrderedDict to dict
try:
schema = schema[url]
except KeyError:
raise OpenAPISchemaError(
f'No path found for url `{url}`. Valid urls include {", ".join([key for key in schema.keys()])}')
try:
schema = schema[method.lower()]['responses']
except KeyError:
raise OpenAPISchemaError(
f'No schema found for method {method.upper()}. Available methods include '
f'{", ".join([method.upper() for method in schema.keys() if method.upper() != "PARAMETERS"])}.'
)
try:
schema = schema[f'{status_code}']['schema']
except KeyError:
raise OpenAPISchemaError(
f'No schema found for response code {status_code}. Documented responses include '
f'{", ".join([code for code in schema.keys()])}.'
)
return schema
```
#### File: tests/unit/test_exceptions.py
```python
import pytest
from django.core.exceptions import ImproperlyConfigured
from django_swagger_tester.exceptions import OpenAPISchemaError
def test_specification_error():
with pytest.raises(OpenAPISchemaError, match='test'):
raise OpenAPISchemaError('test')
def test_improperly_configured_error():
with pytest.raises(ImproperlyConfigured, match='test'):
raise ImproperlyConfigured('test')
```
#### File: tests/unit/test_kebab.py
```python
import pytest
from django_swagger_tester.case_checks import is_kebab_case
from django_swagger_tester.exceptions import OpenAPISchemaError
kebab_case_test_data = [
{'incorrect': 'snake_case', 'correct': 'snake-case'},
{'incorrect': 'PascalCase', 'correct': 'pascal-case'},
{'incorrect': 'camelCase', 'correct': 'camel-case'},
{'incorrect': 'l ower', 'correct': 'lower'},
{'incorrect': 'UPPER', 'correct': 'u-p-p-e-r'},
]
def test_kebab_cased_words():
"""
Verifies that our kebab case verification function actually works as expected.
"""
for item in kebab_case_test_data:
is_kebab_case(item['correct'])
with pytest.raises(OpenAPISchemaError):
is_kebab_case(item['incorrect'])
def test_less_than_two_chars():
"""
    When the length of an input is less than 2, our regex logic breaks down.
"""
is_kebab_case('')
with pytest.raises(OpenAPISchemaError):
is_kebab_case(' ')
is_kebab_case('-')
is_kebab_case('_')
is_kebab_case(None)
is_kebab_case('%')
is_kebab_case('R')
is_kebab_case('s')
``` |
{
"source": "JonasKs/starlite",
"score": 3
} |
#### File: starlite/tests/test_app.py
```python
import json
from starlette.exceptions import HTTPException as StarletteHTTPException
from starlette.status import HTTP_500_INTERNAL_SERVER_ERROR
from starlite import HTTPException, Starlite
def test_handle_http_exception():
response = Starlite.handle_http_exception("", HTTPException(detail="starlite_exception", extra={"key": "value"}))
assert response.status_code == HTTP_500_INTERNAL_SERVER_ERROR
assert json.loads(response.body) == {
"detail": "starlite_exception",
"extra": {"key": "value"},
}
response = Starlite.handle_http_exception(
"", StarletteHTTPException(detail="starlite_exception", status_code=HTTP_500_INTERNAL_SERVER_ERROR)
)
assert response.status_code == HTTP_500_INTERNAL_SERVER_ERROR
assert json.loads(response.body) == {
"detail": "starlite_exception",
}
response = Starlite.handle_http_exception("", AttributeError("oops"))
assert response.status_code == HTTP_500_INTERNAL_SERVER_ERROR
assert json.loads(response.body) == {
"detail": repr(AttributeError("oops")),
}
```
#### File: starlite/tests/test_params.py
```python
from datetime import datetime
from typing import Any, Dict, List, Optional
from urllib.parse import urlencode
from uuid import uuid1, uuid4
import pytest
from pydantic import UUID4, BaseModel
from pydantic.fields import FieldInfo
from starlette.datastructures import UploadFile
from starlette.status import HTTP_200_OK, HTTP_201_CREATED, HTTP_400_BAD_REQUEST
from typing_extensions import Type
from starlite import (
Body,
ImproperlyConfiguredException,
Parameter,
RequestEncodingType,
Starlite,
create_test_client,
get,
post,
)
@pytest.mark.parametrize(
"params_dict,should_raise",
[
(
{
"page": 1,
"pageSize": 1,
"brands": ["Nike", "Adidas"],
},
False,
),
(
{
"page": 1,
"pageSize": 1,
"brands": ["Nike", "Adidas", "Rebok"],
},
False,
),
(
{
"page": 1,
"pageSize": 1,
},
True,
),
(
{
"page": 1,
"pageSize": 1,
"brands": ["Nike", "Adidas", "Rebok", "Polgat"],
},
True,
),
(
{
"page": 1,
"pageSize": 101,
"brands": ["Nike", "Adidas", "Rebok"],
},
True,
),
(
{
"page": 1,
"pageSize": 1,
"brands": [],
},
True,
),
(
{
"page": 1,
"pageSize": 1,
"brands": ["Nike", "Adidas", "Rebok"],
"from_date": datetime.now().timestamp(),
},
False,
),
(
{
"page": 1,
"pageSize": 1,
"brands": ["Nike", "Adidas", "Rebok"],
"from_date": datetime.now().timestamp(),
"to_date": datetime.now().timestamp(),
},
False,
),
],
)
def test_query_params(params_dict: dict, should_raise: bool):
test_path = "/test"
@get(path=test_path)
def test_method(
page: int,
page_size: int = Parameter(query="pageSize", gt=0, le=100),
brands: List[str] = Parameter(min_items=1, max_items=3),
from_date: Optional[datetime] = None,
to_date: Optional[datetime] = None,
) -> None:
assert page
assert page_size
assert brands
assert from_date or from_date is None
assert to_date or to_date is None
with create_test_client(test_method) as client:
response = client.get(f"{test_path}?{urlencode(params_dict, doseq=True)}")
if should_raise:
assert response.status_code == HTTP_400_BAD_REQUEST
else:
assert response.status_code == HTTP_200_OK
@pytest.mark.parametrize(
"params_dict,should_raise",
[
(
{
"version": 1.0,
"service_id": 1,
"user_id": "abc",
"order_id": str(uuid4()),
},
False,
),
(
{
"version": 4.1,
"service_id": 1,
"user_id": "abc",
"order_id": str(uuid4()),
},
True,
),
(
{
"version": 0.2,
"service_id": 101,
"user_id": "abc",
"order_id": str(uuid4()),
},
True,
),
(
{
"version": 0.2,
"service_id": 1,
"user_id": "abcdefghijklm",
"order_id": str(uuid4()),
},
True,
),
(
{
"version": 0.2,
"service_id": 1,
"user_id": "abc",
"order_id": str(uuid1()),
},
True,
),
],
)
def test_path_params(params_dict: dict, should_raise: bool):
test_path = "{version:float}/{service_id:int}/{user_id:str}/{order_id:uuid}"
@get(path=test_path)
def test_method(
order_id: UUID4,
version: float = Parameter(gt=0.1, le=4.0),
service_id: int = Parameter(gt=0, le=100),
user_id: str = Parameter(min_length=1, max_length=10),
) -> None:
assert version
assert service_id
assert user_id
assert order_id
with create_test_client(test_method) as client:
response = client.get(
f"{params_dict['version']}/{params_dict['service_id']}/{params_dict['user_id']}/{params_dict['order_id']}"
)
if should_raise:
assert response.status_code == HTTP_400_BAD_REQUEST
else:
assert response.status_code == HTTP_200_OK
def test_path_param_validation():
@get(path="/{param}")
def test_method() -> None:
pass
with pytest.raises(ImproperlyConfiguredException):
Starlite(route_handlers=[test_method])
@pytest.mark.parametrize(
"t_type,param_dict, param, should_raise",
[
(str, {"special-header": "123"}, Parameter(header="special-header", min_length=1, max_length=3), False),
(str, {"special-header": "123"}, Parameter(header="special-header", min_length=1, max_length=2), True),
(str, {}, Parameter(header="special-header", min_length=1, max_length=2), True),
(Optional[str], {}, Parameter(header="special-header", min_length=1, max_length=2, required=False), False),
(int, {"special-header": "123"}, Parameter(header="special-header", ge=100, le=201), False),
(int, {"special-header": "123"}, Parameter(header="special-header", ge=100, le=120), True),
(int, {}, Parameter(header="special-header", ge=100, le=120), True),
(Optional[int], {}, Parameter(header="special-header", ge=100, le=120, required=False), False),
],
)
def test_header_params(t_type: Type, param_dict: dict, param: FieldInfo, should_raise: bool):
test_path = "/test"
@get(path=test_path)
def test_method(special_header: t_type = param) -> None:
if special_header:
assert special_header in [param_dict.get("special-header"), int(param_dict.get("special-header"))]
with create_test_client(test_method) as client:
response = client.get(test_path, headers=param_dict)
if should_raise:
assert response.status_code == HTTP_400_BAD_REQUEST
else:
assert response.status_code == HTTP_200_OK
@pytest.mark.parametrize(
"t_type,param_dict, param, should_raise",
[
(str, {"special-cookie": "123"}, Parameter(cookie="special-cookie", min_length=1, max_length=3), False),
(str, {"special-cookie": "123"}, Parameter(cookie="special-cookie", min_length=1, max_length=2), True),
(str, {}, Parameter(cookie="special-cookie", min_length=1, max_length=2), True),
(Optional[str], {}, Parameter(cookie="special-cookie", min_length=1, max_length=2, required=False), False),
(int, {"special-cookie": "123"}, Parameter(cookie="special-cookie", ge=100, le=201), False),
(int, {"special-cookie": "123"}, Parameter(cookie="special-cookie", ge=100, le=120), True),
(int, {}, Parameter(cookie="special-cookie", ge=100, le=120), True),
(Optional[int], {}, Parameter(cookie="special-cookie", ge=100, le=120, required=False), False),
],
)
def test_cookie_params(t_type: Type, param_dict: dict, param: FieldInfo, should_raise: bool):
test_path = "/test"
@get(path=test_path)
def test_method(special_cookie: t_type = param) -> None:
if special_cookie:
assert special_cookie in [param_dict.get("special-cookie"), int(param_dict.get("special-cookie"))]
with create_test_client(test_method) as client:
response = client.get(test_path, cookies=param_dict)
if should_raise:
assert response.status_code == HTTP_400_BAD_REQUEST
else:
assert response.status_code == HTTP_200_OK
class Form(BaseModel):
name: str
age: int
programmer: bool
def test_request_body_json():
body = Body(media_type=RequestEncodingType.JSON)
test_path = "/test"
data = Form(name="<NAME>", age=30, programmer=True).dict()
@post(path=test_path)
def test_method(data: Form = body) -> None:
assert isinstance(data, Form)
with create_test_client(test_method) as client:
response = client.post(test_path, json=data)
assert response.status_code == HTTP_201_CREATED
def test_request_body_url_encoded():
body = Body(media_type=RequestEncodingType.URL_ENCODED)
test_path = "/test"
data = Form(name="<NAME>", age=30, programmer=True).dict()
@post(path=test_path)
def test_method(data: Form = body) -> None:
assert isinstance(data, Form)
with create_test_client(test_method) as client:
response = client.post(test_path, data=data)
assert response.status_code == HTTP_201_CREATED
class FormData(BaseModel):
name: UploadFile
age: UploadFile
programmer: UploadFile
class Config:
arbitrary_types_allowed = True
@pytest.mark.parametrize("t_type", [FormData, Dict[str, UploadFile], List[UploadFile], UploadFile])
def test_request_body_multi_part(t_type: Type[Any]):
body = Body(media_type=RequestEncodingType.MULTI_PART)
test_path = "/test"
data = Form(name="<NAME>", age=30, programmer=True).dict()
@post(path=test_path)
def test_method(data: t_type = body) -> None:
assert data
with create_test_client(test_method) as client:
response = client.post(test_path, files=data)
assert response.status_code == HTTP_201_CREATED
@pytest.mark.xfail
def test_request_body_multi_part_mixed_field_content_types() -> None:
class MultiPartFormWithMixedFields(BaseModel):
# TODO: define an API for declaring the fields
image: UploadFile
tags: List[str]
@post(path="/")
async def test_method(data: MultiPartFormWithMixedFields = Body(media_type=RequestEncodingType.MULTI_PART)) -> None:
assert await data.file.read() == b"data"
assert data.tags == ["1", "2"]
with create_test_client(test_method) as client:
response = client.post(
"/",
files=[
("image", ("image.png", b"data")),
],
data=[
("tags", "1"),
("tags", "2"),
],
)
assert response.status_code == HTTP_201_CREATED
``` |
{
"source": "jonaskulhanek/mdp-python",
"score": 3
} |
#### File: mdp-python/codes/test_ZUI_MDP.py
```python
from ZUI_MDP_solution import *
from unittest import TestCase
import itertools as it
import numpy as np
import numpy.testing as nptest
import warnings
# Taken from http://www.neuraldump.net/2017/06/how-to-suppress-python-unittest-warnings/.
def ignore_warnings(test_func):
def do_test(self, *args, **kwargs):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
test_func(self, *args, **kwargs)
return do_test
class TestGridWorld2x2(TestCase):
rtol = 1e-4 # relative tolerance for comparing two floats
def setUp(self):
self.gw = GridWorld.get_world('2x2')
def test_is_obstacle_at(self):
self.assertFalse(self.gw._is_obstacle([0, 0]))
self.assertFalse(self.gw._is_obstacle([0, 1]))
self.assertFalse(self.gw._is_obstacle([1, 0]))
self.assertFalse(self.gw._is_obstacle([1, 1]))
def test_is_on_grid_true(self):
self.assertTrue(self.gw._is_on_grid([0, 0]),msg='The point [{},{}] should be on the grid.'.format(0, 0))
self.assertTrue(self.gw._is_on_grid([1, 1]), msg='The point [{},{}] should be on the grid.'.format(1, 1))
def test_is_on_grid_false(self):
for point in ([-1,0], [-2,-2], [2,0], [0,2], [5,5], [0,-1]):
self.assertFalse(self.gw._is_on_grid(point),msg='The point [{}] should not be on the grid.'.format(point))
def test_transition_proba(self):
true_transition_proba = np.load('./test_data/test_gw_2x2_transition_proba.npy')
nptest.assert_allclose(self.gw.transition_proba,true_transition_proba,rtol=self.rtol)
def test_Q_from_V_zeros(self):
V = np.zeros((self.gw.n_states + 1,))
desired_Q = np.array([[-0.04, -0.04, -0.04, -0.04],
[1., 1., 1., 1.],
[-0.04, -0.04, -0.04, -0.04],
[-1., -1., -1., -1.],
[0., 0., 0., 0.]])
nptest.assert_allclose(self.gw.Q_from_V(V=V), desired_Q, rtol=self.rtol)
def test_Q_from_V_ones(self):
V = np.ones((self.gw.n_states + 1,))
desired_Q = np.array([[0.96, 0.96, 0.96, 0.96],
[2., 2., 2., 2.],
[0.96, 0.96, 0.96, 0.96],
[0., 0., 0., 0.],
[1., 1., 1., 1.]])
nptest.assert_allclose(self.gw.Q_from_V(V=V), desired_Q, rtol=self.rtol)
def test_Q_from_V_init(self):
V = np.max(self.gw.rewards,axis=1)
desired_Q = np.array([[0.024, 0.752, 0.024, -0.08],
[1., 1., 1., 1.],
[-0.176, -0.848, -0.176, -0.08],
[-1., -1., -1., -1.],
[0., 0., 0., 0.]])
nptest.assert_allclose(self.gw.Q_from_V(V=V), desired_Q, rtol=self.rtol)
def test_Q2V_single(self):
desired_V = np.array([0.752, 1., -0.08, -1., 0.])
Q = np.array([[0.024, 0.752, 0.024, -0.08],
[1., 1., 1., 1.],
[-0.176, -0.848, -0.176, -0.08],
[-1., -1., -1., -1.],
[0., 0., 0., 0.]])
actual_V = self.gw.Q2V(Q)
self.assertEqual(actual_V.shape, (self.gw.n_states + 1,), msg='Q2V should return array V of'
' shape {} but has returned V with shape {}.'.format(
self.gw.n_states + 1,
actual_V.shape))
actual_V = self.gw.Q2V(Q)
nptest.assert_allclose(actual_V, desired_V, rtol=self.rtol)
def test_Q2V(self):
desired_V = np.array([0.752, 1., -0.08, -1., 0.])
Q = np.array([[0.024, 0.752, 0.024, -0.08],
[1., 1., 1., 1.],
[-0.176, -0.848, -0.176, -0.08],
[-1., -1., -1., -1.],
[0., 0., 0., 0.]])
actual_V = self.gw.Q2V(Q)
self.assertEqual(actual_V.shape, (self.gw.n_states + 1,), msg='Q2V should return array V of'
' shape {} but has returned V with shape {}.'.format(
self.gw.n_states + 1,
actual_V.shape))
actual_V = self.gw.Q2V(Q)
nptest.assert_allclose(actual_V, desired_V, rtol=self.rtol)
def test_Q2Vbypolicy(self):
desired_V = np.array([0.9178081, 1., 0.66027364, -1., 0., ])
Q = np.array([[0.88602712, 0.9178081, 0.67999927, 0.85205443],
[1., 1., 1., 1.],
[0.66027364, -0.6821919, 0.45424578, 0.64602658],
[-1., -1., -1., -1.],
[0., 0., 0., 0.]])
policy = np.array([1, 0, 0, 0, 0], dtype=int)
actual_V = self.gw.Q2Vbypolicy(Q,policy)
self.assertEqual(actual_V.shape, (self.gw.n_states + 1,), msg='Q2Vbypolicy should return array V of'
' shape {} but has returned V with shape {}.'.format(
self.gw.n_states + 1,
actual_V.shape))
nptest.assert_allclose(actual_V, desired_V, rtol=self.rtol)
def test_Q2policy(self):
Q = np.array([[0.88602712, 0.9178081, 0.67999927, 0.85205443],
[1., 1., 1., 1.],
[0.66027364, -0.6821919, 0.45424578, 0.64602658],
[-1., -1., -1., -1.],
[0., 0., 0., 0.]])
desired_policy = np.array([1, 0, 0, 0, 0], dtype=int)
actual_policy = self.gw.Q2policy(Q)
self.assertEqual(actual_policy.shape, (self.gw.n_states + 1,), msg='Q2policy should return array policy of'
' shape {} but has returned V with shape {}.'.format(
self.gw.n_states + 1,
actual_policy.shape))
nptest.assert_allclose(actual_policy, desired_policy, rtol=self.rtol)
@ignore_warnings
def test_value_iteration_single_iter(self):
actual_Q = self.gw.value_iteration(max_iter=1)
desired_Q = np.array([[0.024, 0.752, 0.024, -0.08],
[1., 1., 1., 1.],
[-0.176, -0.848, -0.176, -0.08],
[-1., -1., -1., -1.],
[0., 0., 0., 0.]])
desired_Q_shape = (self.gw.n_states + 1,self.gw.n_actions)
self.assertEqual(actual_Q.shape, desired_Q_shape,
msg='Value_iteration should return array Q of'
' shape {} but has returned V with shape {}.'.format(
desired_Q_shape,
actual_Q.shape))
nptest.assert_allclose(actual_Q, desired_Q, rtol=self.rtol)
def test_value_iteration(self):
desired_Q = np.array([[0.88602712, 0.9178081, 0.67999927, 0.85205443],
[1., 1., 1., 1.],
[0.66027364, -0.6821919, 0.45424578, 0.64602658],
[-1., -1., -1., -1.],
[0., 0., 0., 0.]])
actual_Q = self.gw.value_iteration()
nptest.assert_allclose(actual_Q, desired_Q, rtol=self.rtol)
def test_policy_iteration_policy_only(self):
actual_policy = self.gw.Q2policy(self.gw.policy_iteration())
desired_Q = np.array([[0.88602712, 0.9178081, 0.67999927, 0.85205443],
[1., 1., 1., 1.],
[0.66027364, -0.6821919, 0.45424578, 0.64602658],
[-1., -1., -1., -1.],
[0., 0., 0., 0.]])
desired_policy = self.gw.Q2policy(desired_Q)
nptest.assert_allclose(actual_policy, desired_policy, rtol=self.rtol)
def test_policy_iteration(self):
actual_Q = self.gw.policy_iteration()
desired_Q = np.array([[0.88602712, 0.9178081, 0.67999927, 0.85205443],
[1., 1., 1., 1.],
[0.66027364, -0.6821919, 0.45424578, 0.64602658],
[-1., -1., -1., -1.],
[0., 0., 0., 0.]])
desired_Q_shape = (self.gw.n_states + 1, self.gw.n_actions)
self.assertEqual(actual_Q.shape, desired_Q_shape,
msg='Policy_iteration should return array Q of'
' shape {} but has returned V with shape {}.'.format(
desired_Q_shape,
actual_Q.shape))
nptest.assert_allclose(actual_Q, desired_Q, rtol=self.rtol)
class TestGridWorld3x3(TestCase):
rtol = 1e-3 # relative tolerance for comparing two floats
def setUp(self):
self.gw = GridWorld.get_world('3x3')
def test_is_obstacle_at(self):
for i,j in it.product(range(self.gw.n_rows),range(self.gw.n_columns)):
self.assertFalse(self.gw._is_obstacle([i, j]), msg='No obstacle should be at [{},{}].'.format(i,j))
def test_is_on_grid_true(self):
for i,j in it.product(range(self.gw.n_rows),range(self.gw.n_columns)):
self.assertTrue(self.gw._is_on_grid([i, j]),msg='The point [{},{}] should be on the grid.'.format(i, j))
def test_is_on_grid_false(self):
for point in ([-1,0], [-2,-2], [3,0], [0,3], [5,5], [0,-1]):
self.assertFalse(self.gw._is_on_grid(point),msg='The point [{}] should not be on the grid.'.format(point))
def test_transition_proba(self):
true_transition_proba = np.load('./test_data/test_gw_3x3_transition_proba.npy')
nptest.assert_allclose(self.gw.transition_proba,true_transition_proba,rtol=self.rtol)
def test_Q2V_single(self):
desired_V = np.load('./test_data/test_gw_3x3_V_single_iter.npy')
Q = np.load('./test_data/test_gw_3x3_Q_single_iter.npy')
actual_V = self.gw.Q2V(Q)
self.assertEqual(actual_V.shape, (self.gw.n_states + 1,), msg='Q2V should return array V of'
' shape {} but has returned V with shape {}.'.format(
self.gw.n_states + 1,
actual_V.shape))
actual_V = self.gw.Q2V(Q)
nptest.assert_allclose(actual_V, desired_V, rtol=self.rtol)
def test_Q2V(self):
desired_V = np.load('./test_data/test_gw_3x3_V.npy')
Q = np.load('./test_data/test_gw_3x3_Q.npy')
actual_V = self.gw.Q2V(Q)
self.assertEqual(actual_V.shape, (self.gw.n_states + 1,), msg='Q2V should return array V of'
' shape {} but has returned V with shape {}.'.format(
self.gw.n_states + 1,
actual_V.shape))
actual_V = self.gw.Q2V(Q)
nptest.assert_allclose(actual_V, desired_V, rtol=self.rtol)
def test_Q2Vbypolicy(self):
desired_V = np.load('./test_data/test_gw_3x3_V.npy')
Q = np.load('./test_data/test_gw_3x3_Q.npy')
policy = np.array([1, 1, 0, 0, 3, 0, 0, 3, 2, 0],dtype=int)
actual_V = self.gw.Q2Vbypolicy(Q, policy)
self.assertEqual(actual_V.shape, (self.gw.n_states + 1,), msg='Q2Vbypolicy should return array V of'
' shape {} but has returned V with shape {}.'.format(
self.gw.n_states + 1,
actual_V.shape))
nptest.assert_allclose(actual_V, desired_V, rtol=self.rtol)
def test_Q2policy(self):
Q = np.load('./test_data/test_gw_3x3_Q.npy')
desired_policy = np.array([1, 1, 0, 0, 3, 0, 0, 3, 2, 0],dtype=int)
actual_policy = self.gw.Q2policy(Q)
self.assertEqual(actual_policy.shape, (self.gw.n_states + 1,), msg='Q2policy should return array policy of'
' shape {} but has returned V with shape {}.'.format(
self.gw.n_states + 1,
actual_policy.shape))
nptest.assert_allclose(actual_policy, desired_policy, rtol=self.rtol)
@ignore_warnings
def test_value_iteration_single_iter(self):
actual_Q = self.gw.value_iteration(max_iter=1)
desired_Q = np.load('./test_data/test_gw_3x3_Q_single_iter.npy')
desired_Q_shape = (self.gw.n_states + 1,self.gw.n_actions)
self.assertEqual(actual_Q.shape, desired_Q_shape,
msg='Value_iteration should return array Q of'
' shape {} but has returned V with shape {}.'.format(
desired_Q_shape,
actual_Q.shape))
nptest.assert_allclose(actual_Q, desired_Q, rtol=self.rtol)
def test_value_iteration(self):
desired_Q = np.load('./test_data/test_gw_3x3_Q.npy')
desired_Q_shape = (self.gw.n_states + 1, self.gw.n_actions)
actual_Q = self.gw.value_iteration()
nptest.assert_allclose(actual_Q, desired_Q, rtol=self.rtol)
def test_policy_iteration_policy_only(self):
actual_policy = self.gw.Q2policy(self.gw.policy_iteration())
desired_Q = np.load('./test_data/test_gw_3x3_Q.npy')
desired_policy = self.gw.Q2policy(desired_Q)
#actual_policy = self.gw.Q2policy(actual_Q)
nptest.assert_allclose(actual_policy, desired_policy, rtol=self.rtol)
def test_policy_iteration(self):
actual_Q = self.gw.policy_iteration()
desired_Q = np.load('./test_data/test_gw_3x3_Q.npy')
desired_Q_shape = (self.gw.n_states + 1, self.gw.n_actions)
self.assertEqual(actual_Q.shape, desired_Q_shape,
msg='Policy_iteration should return array Q of'
' shape {} but has returned V with shape {}.'.format(
desired_Q_shape,
actual_Q.shape))
nptest.assert_allclose(actual_Q, desired_Q, rtol=self.rtol)
class TestGridWorld3x4(TestCase):
rtol = 1e-3 # relative tolerance for comparing two floats
def setUp(self):
self.gw = GridWorld.get_world('3x4')
def test_is_obstacle_at(self):
for i,j in it.product(range(self.gw.n_rows),range(self.gw.n_columns)):
if i == 1 and j == 1:
continue
self.assertFalse(self.gw._is_obstacle([i, j]), msg='No obstacle should be at [{},{}].'.format(i,j))
self.assertTrue(self.gw._is_obstacle([1, 1]), msg='An obstacle should be at [{},{}].'.format(1, 1))
def test_is_on_grid_true(self):
for i,j in it.product(range(self.gw.n_rows),range(self.gw.n_columns)):
self.assertTrue(self.gw._is_on_grid([i, j]),msg='The point [{},{}] should be on the grid.'.format(i, j))
def test_is_on_grid_false(self):
for point in ([-1,0], [-2,-2], [3,0], [0,4], [5,5], [0,-1]):
self.assertFalse(self.gw._is_on_grid(point),msg='The point [{}] should not be on the grid.'.format(point))
def test_transition_proba(self):
true_transition_proba = np.load('./test_data/test_gw_3x4_transition_proba.npy')
nptest.assert_allclose(self.gw.transition_proba,true_transition_proba,rtol=self.rtol)
def test_Q2V_single(self):
desired_V = np.load('./test_data/test_gw_3x4_V_single_iter.npy')
Q = np.load('./test_data/test_gw_3x4_Q_single_iter.npy')
actual_V = self.gw.Q2V(Q)
self.assertEqual(actual_V.shape, (self.gw.n_states + 1,), msg='Q2V should return array V of'
' shape {} but has returned V with shape {}.'.format(
self.gw.n_states + 1,
actual_V.shape))
actual_V = self.gw.Q2V(Q)
nptest.assert_allclose(actual_V, desired_V, rtol=self.rtol)
def test_Q2V(self):
desired_V = np.load('./test_data/test_gw_3x4_V.npy')
Q = np.load('./test_data/test_gw_3x4_Q.npy')
actual_V = self.gw.Q2V(Q)
self.assertEqual(actual_V.shape, (self.gw.n_states + 1,), msg='Q2V should return array V of'
' shape {} but has returned V with shape {}.'.format(
self.gw.n_states + 1,
actual_V.shape))
actual_V = self.gw.Q2V(Q)
nptest.assert_allclose(actual_V, desired_V, rtol=self.rtol)
def test_Q2policy(self):
Q = np.load('./test_data/test_gw_3x4_Q.npy')
desired_policy = np.array([1, 1, 1, 0, 0, 0, 0, 0, 0, 3, 3, 3, 0],dtype=int)
actual_policy = self.gw.Q2policy(Q)
self.assertEqual(actual_policy.shape, (self.gw.n_states + 1,), msg='Q2policy should return array policy of'
' shape {} but has returned V with shape {}.'.format(
self.gw.n_states + 1,
actual_policy.shape))
nptest.assert_allclose(actual_policy, desired_policy, rtol=self.rtol)
def test_Q2Vbypolicy(self):
desired_V = np.load('./test_data/test_gw_3x4_V.npy')
Q = np.load('./test_data/test_gw_3x4_Q.npy')
policy = np.array([1, 1, 1, 0, 0, 0, 0, 0, 0, 3, 3, 3, 0],dtype=int)
actual_V = self.gw.Q2Vbypolicy(Q, policy)
self.assertEqual(actual_V.shape, (self.gw.n_states + 1,), msg='Q2Vbypolicy should return array V of'
' shape {} but has returned V with shape {}.'.format(
self.gw.n_states + 1,
actual_V.shape))
nptest.assert_allclose(actual_V, desired_V, rtol=self.rtol)
@ignore_warnings
def test_value_iteration_single_iter(self):
actual_Q = self.gw.value_iteration(max_iter=1)
desired_Q = np.load('./test_data/test_gw_3x4_Q_single_iter.npy')
desired_Q_shape = (self.gw.n_states + 1,self.gw.n_actions)
self.assertEqual(actual_Q.shape, desired_Q_shape,
msg='Value_iteration should return array Q of'
' shape {} but has returned V with shape {}.'.format(
desired_Q_shape,
actual_Q.shape))
nptest.assert_allclose(actual_Q, desired_Q, rtol=self.rtol)
def test_value_iteration(self):
desired_Q = np.load('./test_data/test_gw_3x4_Q.npy')
desired_Q_shape = (self.gw.n_states + 1, self.gw.n_actions)
actual_Q = self.gw.value_iteration()
nptest.assert_allclose(actual_Q, desired_Q, rtol=self.rtol)
def test_policy_iteration_policy_only(self):
actual_policy = self.gw.Q2policy(self.gw.policy_iteration())
desired_Q = np.load('./test_data/test_gw_3x4_Q.npy')
desired_policy = self.gw.Q2policy(desired_Q)
#actual_policy = self.gw.Q2policy(actual_Q)
nptest.assert_allclose(actual_policy, desired_policy, rtol=self.rtol)
def test_policy_iteration(self):
actual_Q = self.gw.policy_iteration()
desired_Q = np.load('./test_data/test_gw_3x4_Q.npy')
desired_Q_shape = (self.gw.n_states + 1, self.gw.n_actions)
self.assertEqual(actual_Q.shape, desired_Q_shape,
msg='Policy_iteration should return array Q of'
' shape {} but has returned V with shape {}.'.format(
desired_Q_shape,
actual_Q.shape))
nptest.assert_allclose(actual_Q, desired_Q, rtol=self.rtol)
class TestGridWorld4x4(TestCase):
rtol = 1e-3 # relative tolerance for comparing two floats
atol = 1e-08 # absolute tolerance for comparing two floats
def setUp(self):
self.gw = GridWorld.get_world('4x4')
def test_is_obstacle_at(self):
for i,j in it.product(range(self.gw.n_rows),range(self.gw.n_columns)):
if (i,j) in [(1,1),(2,2)]:
continue
self.assertFalse(self.gw._is_obstacle([i, j]), msg='No obstacle should be at [{},{}].'.format(i,j))
self.assertTrue(self.gw._is_obstacle([1, 1]), msg='An obstacle should be at [{},{}].'.format(1, 1))
def test_is_on_grid_true(self):
for i,j in it.product(range(self.gw.n_rows),range(self.gw.n_columns)):
self.assertTrue(self.gw._is_on_grid([i, j]),msg='The point [{},{}] should be on the grid.'.format(i, j))
def test_is_on_grid_false(self):
for point in ([-1,0], [-2,-2], [4,0], [0,4], [5,5], [0,-1]):
self.assertFalse(self.gw._is_on_grid(point),msg='The point [{}] should not be on the grid.'.format(point))
def test_transition_proba(self):
true_transition_proba = np.load('./test_data/test_gw_4x4_transition_proba.npy')
nptest.assert_allclose(self.gw.transition_proba,true_transition_proba,rtol=self.rtol, atol=self.atol)
def test_Q2V_single(self):
desired_V = np.load('./test_data/test_gw_4x4_V_single_iter.npy')
Q = np.load('./test_data/test_gw_4x4_Q_single_iter.npy')
actual_V = self.gw.Q2V(Q)
self.assertEqual(actual_V.shape, (self.gw.n_states + 1,), msg='Q2V should return array V of'
' shape {} but has returned V with shape {}.'.format(
self.gw.n_states + 1,
actual_V.shape))
actual_V = self.gw.Q2V(Q)
nptest.assert_allclose(actual_V, desired_V, rtol=self.rtol, atol=self.atol)
def test_Q2V(self):
desired_V = np.load('./test_data/test_gw_4x4_V.npy')
Q = np.load('./test_data/test_gw_4x4_Q.npy')
actual_V = self.gw.Q2V(Q)
self.assertEqual(actual_V.shape, (self.gw.n_states + 1,), msg='Q2V should return array V of'
' shape {} but has returned V with shape {}.'.format(
self.gw.n_states + 1,
actual_V.shape))
actual_V = self.gw.Q2V(Q)
nptest.assert_allclose(actual_V, desired_V, rtol=self.rtol, atol=self.atol)
def test_Q2Vbypolicy(self):
desired_V = np.load('./test_data/test_gw_4x4_V.npy')
Q = np.load('./test_data/test_gw_4x4_Q.npy')
policy = np.load('./test_data/test_gw_4x4_policy.npy')
actual_V = self.gw.Q2Vbypolicy(Q, policy)
self.assertEqual(actual_V.shape, (self.gw.n_states + 1,), msg='Q2Vbypolicy should return array V of'
' shape {} but has returned V with shape {}.'.format(
self.gw.n_states + 1,
actual_V.shape))
nptest.assert_allclose(actual_V, desired_V, rtol=self.rtol, atol=self.atol)
def test_Q2policy(self):
Q = np.load('./test_data/test_gw_4x4_Q.npy')
desired_policy = np.load('./test_data/test_gw_4x4_policy.npy')
actual_policy = self.gw.Q2policy(Q)
self.assertEqual(actual_policy.shape, (self.gw.n_states + 1,), msg='Q2policy should return array policy of'
' shape {} but has returned V with shape {}.'.format(
self.gw.n_states + 1,
actual_policy.shape))
nptest.assert_allclose(actual_policy, desired_policy, rtol=self.rtol)
@ignore_warnings
def test_value_iteration_single_iter(self):
actual_Q = self.gw.value_iteration(max_iter=1)
desired_Q = np.load('./test_data/test_gw_4x4_Q_single_iter.npy')
desired_Q_shape = (self.gw.n_states + 1,self.gw.n_actions)
self.assertEqual(actual_Q.shape, desired_Q_shape,
msg='Value_iteration should return array Q of'
' shape {} but has returned V with shape {}.'.format(
desired_Q_shape,
actual_Q.shape))
nptest.assert_allclose(actual_Q, desired_Q, rtol=self.rtol, atol=self.atol)
def test_value_iteration(self):
desired_Q = np.load('./test_data/test_gw_4x4_Q.npy')
desired_Q_shape = (self.gw.n_states + 1, self.gw.n_actions)
actual_Q = self.gw.value_iteration()
nptest.assert_allclose(actual_Q, desired_Q, rtol=self.rtol, atol=self.atol)
def test_policy_iteration_policy_only(self):
actual_policy = self.gw.Q2policy(self.gw.policy_iteration())
desired_Q = np.load('./test_data/test_gw_4x4_Q.npy')
desired_policy = self.gw.Q2policy(desired_Q)
#actual_policy = self.gw.Q2policy(actual_Q)
nptest.assert_allclose(actual_policy, desired_policy, rtol=self.rtol, atol=self.atol)
def test_policy_iteration(self):
actual_Q = self.gw.policy_iteration()
desired_Q = np.load('./test_data/test_gw_4x4_Q.npy')
desired_Q_shape = (self.gw.n_states + 1, self.gw.n_actions)
self.assertEqual(actual_Q.shape, desired_Q_shape,
msg='Policy_iteration should return array Q of'
' shape {} but has returned V with shape {}.'.format(
desired_Q_shape,
actual_Q.shape))
nptest.assert_allclose(actual_Q, desired_Q, rtol=self.rtol, atol=self.atol)
class TestGridWorld5x5(TestCase):
rtol = 1e-4 # relative tolerance for comparing two floats
atol = 1e-08 # absolute tolerance for comparing two floats
def setUp(self):
self.gw = GridWorld.get_world('5x5')
def test_is_obstacle_at(self):
for i,j in it.product(range(self.gw.n_rows),range(self.gw.n_columns)):
if (i,j) in [(1,0), (1,1), (2,2)]:
continue
self.assertFalse(self.gw._is_obstacle([i, j]), msg='No obstacle should be at [{},{}].'.format(i,j))
self.assertTrue(self.gw._is_obstacle([1, 1]), msg='An obstacle should be at [{},{}].'.format(1, 1))
def test_is_on_grid_true(self):
for i,j in it.product(range(self.gw.n_rows),range(self.gw.n_columns)):
self.assertTrue(self.gw._is_on_grid([i, j]),msg='The point [{},{}] should be on the grid.'.format(i, j))
def test_is_on_grid_false(self):
for point in ([-1,0], [-2,-2], [5,0], [0,5], [5,5], [0,-1]):
self.assertFalse(self.gw._is_on_grid(point),msg='The point [{}] should not be on the grid.'.format(point))
def test_transition_proba(self):
true_transition_proba = np.load('./test_data/test_gw_5x5_transition_proba.npy')
nptest.assert_allclose(self.gw.transition_proba,true_transition_proba,rtol=self.rtol, atol=self.atol)
def test_Q2V_single(self):
desired_V = np.load('./test_data/test_gw_5x5_V_single_iter.npy')
Q = np.load('./test_data/test_gw_5x5_Q_single_iter.npy')
actual_V = self.gw.Q2V(Q)
self.assertEqual(actual_V.shape, (self.gw.n_states + 1,), msg='Q2V should return array V of'
' shape {} but has returned V with shape {}.'.format(
self.gw.n_states + 1,
actual_V.shape))
actual_V = self.gw.Q2V(Q)
nptest.assert_allclose(actual_V, desired_V, rtol=self.rtol, atol=self.atol)
def test_Q2V(self):
desired_V = np.load('./test_data/test_gw_5x5_V.npy')
Q = np.load('./test_data/test_gw_5x5_Q.npy')
actual_V = self.gw.Q2V(Q)
self.assertEqual(actual_V.shape, (self.gw.n_states + 1,), msg='Q2V should return array V of'
' shape {} but has returned V with shape {}.'.format(
self.gw.n_states + 1,
actual_V.shape))
actual_V = self.gw.Q2V(Q)
nptest.assert_allclose(actual_V, desired_V, rtol=self.rtol, atol=self.atol)
def test_Q2Vbypolicy(self):
desired_V = np.load('./test_data/test_gw_5x5_V.npy')
Q = np.load('./test_data/test_gw_5x5_Q.npy')
policy = np.load('./test_data/test_gw_5x5_policy.npy')
actual_V = self.gw.Q2Vbypolicy(Q, policy)
self.assertEqual(actual_V.shape, (self.gw.n_states + 1,), msg='Q2Vbypolicy should return array V of'
' shape {} but has returned V with shape {}.'.format(
self.gw.n_states + 1,
actual_V.shape))
nptest.assert_allclose(actual_V, desired_V, rtol=self.rtol, atol=self.atol)
def test_Q2policy(self):
Q = np.load('./test_data/test_gw_5x5_Q.npy')
desired_policy = np.load('./test_data/test_gw_5x5_policy.npy')
actual_policy = self.gw.Q2policy(Q)
self.assertEqual(actual_policy.shape, (self.gw.n_states + 1,), msg='Q2policy should return array policy of'
' shape {} but has returned V with shape {}.'.format(
self.gw.n_states + 1,
actual_policy.shape))
nptest.assert_allclose(actual_policy, desired_policy, rtol=self.rtol)
@ignore_warnings
def test_value_iteration_single_iter(self):
actual_Q = self.gw.value_iteration(max_iter=1)
desired_Q = np.load('./test_data/test_gw_5x5_Q_single_iter.npy')
desired_Q_shape = (self.gw.n_states + 1,self.gw.n_actions)
self.assertEqual(actual_Q.shape, desired_Q_shape,
msg='Value_iteration should return array Q of'
' shape {} but has returned V with shape {}.'.format(
desired_Q_shape,
actual_Q.shape))
nptest.assert_allclose(actual_Q, desired_Q, rtol=self.rtol, atol=self.atol)
def test_value_iteration(self):
desired_Q = np.load('./test_data/test_gw_5x5_Q.npy')
actual_Q = self.gw.value_iteration()
nptest.assert_allclose(actual_Q, desired_Q, rtol=self.rtol, atol=self.atol)
def test_policy_iteration_policy_only(self):
actual_policy = self.gw.Q2policy(self.gw.policy_iteration())
desired_Q = np.load('./test_data/test_gw_5x5_Q.npy')
desired_policy = self.gw.Q2policy(desired_Q)
nptest.assert_allclose(actual_policy, desired_policy, rtol=self.rtol, atol=self.atol)
def test_policy_iteration(self):
actual_Q = self.gw.policy_iteration()
desired_Q = np.load('./test_data/test_gw_5x5_Q.npy')
desired_Q_shape = (self.gw.n_states + 1, self.gw.n_actions)
self.assertEqual(actual_Q.shape, desired_Q_shape,
msg='Policy_iteration should return array Q of'
' shape {} but has returned V with shape {}.'.format(
desired_Q_shape,
actual_Q.shape))
nptest.assert_allclose(actual_Q, desired_Q, rtol=self.rtol, atol=self.atol)
``` |
{
"source": "jonaslalin/label-maker",
"score": 3
} |
#### File: test/integration/test_directory_move.py
```python
import unittest
import json
from os import makedirs, chdir
from shutil import copyfile, rmtree
import subprocess
import numpy as np
class TestOutsideDirectory(unittest.TestCase):
"""Tests for classification label creation outside of the current directory"""
@classmethod
def setUpClass(cls):
makedirs('integration-cl')
copyfile('test/fixtures/integration/portugal-z17.mbtiles', 'integration-cl/portugal-z17.mbtiles')
@classmethod
def tearDownClass(cls):
rmtree('integration-cl')
def test_cli(self):
"""Verify geojson and labels.npz produced by CLI"""
# first move outside the directory
chdir('..')
directory = 'label-maker'
cmd = 'label-maker labels --dest {}/integration-cl --config {}/test/fixtures/integration/config.integration.json'.format(directory, directory)
cmd = cmd.split(' ')
subprocess.run(cmd, universal_newlines=True)
# our labels should look like this
expected_labels = {
'62092-50162-17': np.array([1, 0, 0, 0, 0, 0, 0]),
'62092-50163-17': np.array([0, 0, 0, 0, 0, 0, 1]),
'62092-50164-17': np.array([0, 0, 0, 0, 0, 0, 1]),
'62093-50162-17': np.array([0, 0, 0, 0, 0, 0, 1]),
'62093-50164-17': np.array([0, 0, 0, 0, 0, 0, 1]),
'62094-50162-17': np.array([0, 0, 0, 0, 0, 0, 1]),
'62094-50164-17': np.array([0, 0, 0, 0, 0, 0, 1]),
'62094-50163-17': np.array([0, 1, 1, 0, 0, 0, 1]),
'62093-50163-17': np.array([0, 0, 0, 0, 1, 1, 1])
}
# move back into the directory
chdir(directory)
labels = np.load('integration-cl/labels.npz')
self.assertEqual(len(labels.files), len(expected_labels.keys())) # First check number of tiles
for tile in labels.files:
self.assertTrue(np.array_equal(expected_labels[tile], labels[tile])) # Now, content
# our GeoJSON looks like the fixture
with open('test/fixtures/integration/classification.geojson') as fixture:
with open('integration-cl/classification.geojson') as geojson_file:
expected_geojson = json.load(fixture)
geojson = json.load(geojson_file)
self.assertCountEqual(expected_geojson, geojson)
```
#### File: test/integration/test_ms_bands_package.py
```python
import unittest
from os import makedirs
from shutil import copyfile, copytree, rmtree
import subprocess
import os
import numpy as np
class TestClassificationPackage(unittest.TestCase):
"""Tests for local GeoTIFF package creation"""
@classmethod
def setUpClass(cls):
makedirs('integration-ms')
copyfile('test/fixtures/integration/ms_img.tif', 'integration-ms/ms-img.tif')
copyfile('test/fixtures/integration/roads_ms.geojson', 'integration-ms/ms-roads.geojson')
copyfile('test/fixtures/integration/labels-ms.npz', 'integration-ms/labels.npz')
@classmethod
def tearDownClass(cls):
rmtree('integration-ms')
def test_cli(self):
"""Verify data.npz produced by CLI"""
cmd = 'label-maker images --dest integration-ms --config test/fixtures/integration/config.integration.bands.json'
cmd = cmd.split(' ')
subprocess.run(cmd, universal_newlines=True)
cmd = 'label-maker package --dest integration-ms --config test/fixtures/integration/config.integration.bands.json'
cmd = cmd.split(' ')
subprocess.run(cmd, universal_newlines=True)
print(os.listdir('integration-ms/'))
data = np.load('integration-ms/data.npz')
# validate our label data with exact matches in shape
self.assertEqual(data['x_train'].shape, (8, 256, 256, 2))
self.assertEqual(data['x_test'].shape, (3, 256, 256, 2))
# validate our label data with exact matches in shape
self.assertEqual(data['y_train'].shape, (8, 3))
self.assertEqual(data['y_test'].shape, (3, 3))
#validate img dtype
self.assertEqual(np.uint16, data['x_train'].dtype)
``` |
{
"source": "jonaslalin/tensorflow-skeleton",
"score": 2
} |
#### File: tests/my_package/test_model.py
```python
import pytest
import tensorflow as tf
from my_package.model import create_model
g = tf.random.Generator.from_seed(1)
@pytest.fixture
def inputs():
return g.uniform(shape=(32, 5))
def test_create_model(inputs):
model = create_model()
outputs = model(inputs)
assert outputs.shape == (32, 1)
``` |
{
"source": "jonaslalin/unet-from-scratch",
"score": 3
} |
#### File: tests/unet/test_model.py
```python
import tensorflow as tf
from unet.model import unet
def test_unet_with_valid_padding():
_, _, autoencoder = unet(
shape=(572, 572, 1),
num_classes=2,
padding="valid",
filters=(64, 128, 256, 512, 1024),
)
with tf.device("/device:CPU:0"): # tensorflow-metal fix
g = tf.random.Generator.from_seed(1)
inputs = g.uniform(shape=(2, 572, 572, 1))
outputs = autoencoder(inputs)
assert outputs.shape == (2, 388, 388, 2)
def test_unet_with_same_padding():
_, _, autoencoder = unet(
shape=(512, 512, 3),
num_classes=3,
padding="same",
filters=(64, 128, 256, 512, 1024),
)
with tf.device("/device:CPU:0"): # tensorflow-metal fix
g = tf.random.Generator.from_seed(1)
inputs = g.uniform(shape=(2, 512, 512, 3))
outputs = autoencoder(inputs)
assert outputs.shape == (2, 512, 512, 3)
``` |
{
"source": "jonaslb/signal2html",
"score": 2
} |
#### File: signal2html/signal2html/models.py
```python
from abc import ABCMeta
from dataclasses import dataclass, field
from typing import List
@dataclass
class RecipientId:
_id: str # phone number (?)
def __hash__(self):
return hash(self._id)
@dataclass
class Quote:
_id: int
author: RecipientId
text: str
@dataclass
class Attachment:
contentType: str
fileName: str
voiceNote: bool
width: int
height: int
quote: bool
unique_id: str
@dataclass
class Recipient:
recipientId: RecipientId
name: str
color: str
isgroup: bool
def __hash__(self):
return hash(self.recipientId)
@dataclass
class DisplayRecord(metaclass=ABCMeta):
addressRecipient: Recipient # Recipient corresponding to address field
recipient: Recipient
dateSent: int
dateReceived: int
threadId: int
body: str
_type: int
@dataclass
class MessageRecord(DisplayRecord):
_id: int
@dataclass
class MMSMessageRecord(MessageRecord):
quote: Quote
attachments: List[Attachment]
@dataclass
class SMSMessageRecord(MessageRecord):
pass
@dataclass
class Thread:
_id: int
recipient: Recipient # need to deal with groups later
    mms: List[MMSMessageRecord] = field(default_factory=list)
    sms: List[SMSMessageRecord] = field(default_factory=list)
``` |
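A small sketch, using only the dataclasses above, of how a thread might be assembled; all concrete values (phone number, timestamps, ids) are assumptions for illustration:

```python
# Hypothetical construction of a Thread from the dataclasses above.
alice_id = RecipientId("+46700000000")
alice = Recipient(recipientId=alice_id, name="Alice", color="blue", isgroup=False)

sms = SMSMessageRecord(
    addressRecipient=alice,
    recipient=alice,
    dateSent=1600000000000,
    dateReceived=1600000001000,
    threadId=1,
    body="Hello!",
    _type=0,
    _id=1,
)

thread = Thread(_id=1, recipient=alice)
thread.sms.append(sms)
```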
{
"source": "jonaslindemann/compute-course-public",
"score": 2
} |
#### File: compute-course-public/dataviz/pyvtk_ex2.py
```python
from pyvtk import *
vtk = VtkData(StructuredPoints([3,4,6]),
PointData(Scalars([0,0,0,0,0,0,0,0,0,0,0,0,
0,5,10,15,20,25,25,20,15,10,5,0,
0,10,20,30,40,50,50,40,30,20,10,0,
0,10,20,30,40,50,50,40,30,20,10,0,
0,5,10,15,20,25,25,20,15,10,5,0,
0,0,0,0,0,0,0,0,0,0,0,0
])))
vtk.tofile('example2')
vtk.tofile('example2b','binary')
vtk = VtkData('example2',only_structure = 1)
def f(x,y,z):
return x*y*z
vtk.point_data.append(vtk.structure.Scalars(f,'x*y*z'))
vtk.tofile('example2f_sp')
pp = [(i,j,k) for k in range(6) for j in range(4) for i in range(3)]
vtk = VtkData(StructuredGrid([3,4,6],pp))
vtk.point_data.append(vtk.structure.Scalars(f,'x*y*z'))
vtk.tofile('example2f_sg')
vtk = VtkData(RectilinearGrid(range(3),range(4),range(6)))
vtk.point_data.append(vtk.structure.Scalars(f,'x*y*z'))
vtk.tofile('example2f_rg')
```
#### File: compute-course-public/projects/mymod.py
```python
class Point:
"""This is Point class"""
def __init__(self):
"""This is the Point constructur"""
print("Point created")
def hello(self):
"""Hello method"""
print("Hello")
def hello_module():
"""Hello function"""
print("Hello from mymod.")
if __name__ == "__main__":
print("My module")
```
#### File: compute-course-public/ui/events1.py
```python
import sys
from PyQt5.QtWidgets import *
class MyWindow(QWidget):
"""Main Window class for our application"""
def __init__(self):
"""Class constructor"""
super().__init__()
self.resize(400,200)
self.move(50,50)
self.setWindowTitle("MyWindow")
self.button = QPushButton("Tryck", self)
self.button.move(50,50)
self.button.resize(100,50)
self.button.clicked.connect(self.on_button_clicked)
def on_button_clicked(self):
"""Respond to button click"""
QMessageBox.information(self, "Meddelande", "Ouch!")
if __name__ == '__main__':
app = QApplication(sys.argv)
window = MyWindow()
window.show()
sys.exit(app.exec_())
```
#### File: compute-course-public/ui/ex2.py
```python
import sys
from PyQt5.QtWidgets import QApplication, QWidget
class MyWindow(QWidget):
def __init__(self):
"""MyWindow constructor"""
super().__init__()
# Create the user interface controls
self.init_gui()
def init_gui(self):
"""Initiera gränssnitt"""
self.setGeometry(300, 300, 600, 600)
self.setWindowTitle("MyWindow")
# Show the window
self.show()
if __name__ == "__main__":
app = QApplication(sys.argv)
# Create our MyWindow object
window = MyWindow()
# Start the event loop
sys.exit(app.exec_())
```
#### File: compute-course-public/ui/ex3.py
```python
import sys
from PyQt5.QtWidgets import QApplication, QWidget, QPushButton
from PyQt5.QtCore import QCoreApplication
class MyWindow(QWidget):
def __init__(self):
"""MyWindow konstruktor"""
super().__init__()
# Create the user interface controls
self.init_gui()
def init_gui(self):
"""Initiera gränssnitt"""
# Create a button control
self.button = QPushButton("Press me", self)
self.button.setToolTip("I am a button. Please press me")
self.button.resize(100,50)
self.button.move(50,50)
# Connect a method to the clicked signal
self.button.clicked.connect(self.on_button_clicked)
# Set the window properties
self.setGeometry(300, 300, 300, 300)
self.setWindowTitle("MyWidget")
# Show the window
self.show()
def on_button_clicked(self):
"""Händelsemetod för signalen clicked"""
print("Hello")
if __name__ == "__main__":
app = QApplication(sys.argv)
# Create our MyWindow object
window = MyWindow()
# Start the event loop
sys.exit(app.exec_())
```
#### File: compute-course-public/ui/ex4.py
```python
import sys
from PyQt5.QtWidgets import *
class MyWindow(QWidget):
def __init__(self):
"""MyWidget constructor"""
super().__init__()
self.init_gui()
def init_gui(self):
"""Initialise UI"""
self.button = QPushButton("Press me", self)
self.button.setToolTip("I am a button. Please press me")
self.button.resize(self.button.sizeHint())
self.button.move(50,50)
self.button.clicked.connect(self.on_button_clicked)
self.line_edit = QLineEdit(self)
self.line_edit.move(50,20)
self.line_edit.setText("Text")
self.setGeometry(300, 300, 600, 600)
self.setWindowTitle("MyWidget")
self.show()
def on_button_clicked(self):
"""Event method for button clicked"""
QMessageBox.information(self, "Text", self.line_edit.text())
if __name__ == "__main__":
app = QApplication(sys.argv)
window = MyWindow()
sys.exit(app.exec_())
```
#### File: compute-course-public/ui/form_test.py
```python
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(448, 305)
self.verticalLayout_3 = QtWidgets.QVBoxLayout(MainWindow)
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.verticalLayout_2 = QtWidgets.QVBoxLayout()
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.textBrowser = QtWidgets.QTextBrowser(MainWindow)
self.textBrowser.setObjectName("textBrowser")
self.horizontalLayout_2.addWidget(self.textBrowser)
self.verticalLayout = QtWidgets.QVBoxLayout()
self.verticalLayout.setObjectName("verticalLayout")
self.push_button = QtWidgets.QPushButton(MainWindow)
self.push_button.setObjectName("push_button")
self.verticalLayout.addWidget(self.push_button)
self.pushButton_2 = QtWidgets.QPushButton(MainWindow)
self.pushButton_2.setObjectName("pushButton_2")
self.verticalLayout.addWidget(self.pushButton_2)
self.pushButton_3 = QtWidgets.QPushButton(MainWindow)
self.pushButton_3.setEnabled(False)
self.pushButton_3.setObjectName("pushButton_3")
self.verticalLayout.addWidget(self.pushButton_3)
spacerItem = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.verticalLayout.addItem(spacerItem)
self.horizontalLayout_2.addLayout(self.verticalLayout)
self.verticalLayout_2.addLayout(self.horizontalLayout_2)
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.pushButton_4 = QtWidgets.QPushButton(MainWindow)
self.pushButton_4.setObjectName("pushButton_4")
self.horizontalLayout.addWidget(self.pushButton_4)
self.pushButton_5 = QtWidgets.QPushButton(MainWindow)
self.pushButton_5.setObjectName("pushButton_5")
self.horizontalLayout.addWidget(self.pushButton_5)
self.pushButton_6 = QtWidgets.QPushButton(MainWindow)
self.pushButton_6.setObjectName("pushButton_6")
self.horizontalLayout.addWidget(self.pushButton_6)
self.verticalLayout_2.addLayout(self.horizontalLayout)
self.verticalLayout_3.addLayout(self.verticalLayout_2)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "Form"))
self.push_button.setText(_translate("MainWindow", "PushButton"))
self.pushButton_2.setText(_translate("MainWindow", "PushButton"))
self.pushButton_3.setText(_translate("MainWindow", "PushButton"))
self.pushButton_4.setText(_translate("MainWindow", "PushButton"))
self.pushButton_5.setText(_translate("MainWindow", "PushButton"))
self.pushButton_6.setText(_translate("MainWindow", "PushButton"))
```
#### File: compute-course-public/ui/form_ui.py
```python
import sys
from PyQt5.QtWidgets import *
from form_window import *
class MainWindow(QWidget):
"""Main window class for the Flow application"""
def __init__(self):
"""Class constructor"""
super().__init__()
self.setup_ui()
def setup_ui(self):
"""Initiera gränssnitt"""
# Instantiate a reference to the class generated by pyuic5.
self.ui = Ui_MainWindow()
# Create the object structure.
self.ui.setupUi(self)
# Update the interface. Note that we must use
# self.ui as the prefix.
self.ui.push_button.setText("Press me!")
if __name__ == '__main__':
app = QApplication(sys.argv)
window = MainWindow()
window.show()
sys.exit(app.exec_())
```
#### File: compute-course-public/ui/label1.py
```python
import sys
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
class MyWindow(QWidget):
"""Huvudklass för fönstret"""
def __init__(self):
"""Klass constructor"""
super().__init__()
self.init_gui()
def init_gui(self):
"""Initiera gränssnitt"""
# Configure the window
self.resize(400, 200)
self.move(50, 50)
self.setWindowTitle("MyWindow")
# Create a button
self.button = QPushButton("Tryck", self)
self.button.move(230, 18)
self.button.clicked.connect(self.on_button_clicked)
# Create a text label
self.label = QLabel("Textruta", self)
self.label.move(20, 22)
# Create a line edit control
self.line_edit = QLineEdit(self)
self.line_edit.move(80, 20)
self.line_edit.setText("Text")
# Create a label control with an image.
self.image_label = QLabel("Bild", self)
self.image_label.move(20, 60)
self.image_label.setScaledContents(True)
self.image_label.resize(300, 100)
self.image_label.setPixmap(QPixmap("python_logo.png"))
self.show()
def on_button_clicked(self):
"""Händelsemetod för signalen clicked"""
QMessageBox.information(self, "Text", self.line_edit.text())
if __name__ == "__main__":
app = QApplication(sys.argv)
# Create our MyWindow object
window = MyWindow()
# Start the event loop
sys.exit(app.exec_())
```
#### File: ui/old/uiloading.py
```python
import sys
from PyQt5.QtWidgets import *
from PyQt5 import uic
class MainWindow:
"""Main window class for the Flow application"""
def __init__(self, app):
"""Class constructor"""
# Assign our application instance as a member variable
self.app = app
# Load and show our user interface
self.ui = uic.loadUi('mainwindow.ui')
self.ui.actionFileOpen.triggered.connect(self.onFileOpen)
self.ui.addButton.clicked.connect(self.onAddButtonClicked)
self.ui.nodeList.currentRowChanged.connect(self.onCurrentRowChanged)
self.ui.addTextButton.clicked.connect(self.onAddTextButtonClicked)
self.ui.myButton.clicked.connect(self.onMyButtonClicked)
self.ui.show()
self.ui.raise_()
def onMyButtonClicked(self, event):
print("onMyButtonClicked")
def onFileOpen(self, event):
print("onFileOpen")
def onAddButtonClicked(self, event):
print("onAddButtonClicked")
self.ui.nodeList.addItem("Hej hopp!")
def onCurrentRowChanged(self, event):
print("onCurrentRowChanged")
self.ui.nodeList.addItem("test")
def onAddTextButtonClicked(self, event):
print("onAddTextButtonClicked")
self.ui.nodeList.addItem(self.ui.lineEdit.text())
if __name__ == '__main__':
app = QApplication(sys.argv)
window = MainWindow(app)
sys.exit(app.exec_())
```
#### File: compute-course-public/ui/toolbar1.py
```python
import sys
from PyQt5.QtWidgets import *
class MyWindow(QMainWindow):
"""Main Window class for our application"""
def __init__(self):
"""Class constructor"""
super().__init__()
self.init_gui()
def init_gui(self):
"""Initiera gränssnitt"""
self.resize(200,200)
self.move(50,50)
self.setWindowTitle("MyWindow")
# Define action
self.my_action = QAction("MyAction", self)
self.my_action.setShortcut("Ctrl+T")
self.my_action.triggered.connect(self.on_my_action)
# Connect action to menu
self.file_menu = self.menuBar().addMenu("File")
self.file_menu.addAction(self.my_action)
# Create a toolbar
self.toolbar = self.addToolBar("MyToolbar")
self.toolbar.addAction(self.my_action)
def on_my_action(self):
"""Method for handling MyAction"""
QMessageBox.information(self, "Meddelande", "Ouch!")
if __name__ == '__main__':
app = QApplication(sys.argv)
window = MyWindow()
window.show()
sys.exit(app.exec_())
``` |
{
"source": "jonaslindemann/mxdisplay",
"score": 3
} |
#### File: jonaslindemann/mxdisplay/mx-screen.py
```python
from samplebase import SampleBase
from rgbmatrix import graphics
from datetime import datetime
from math import *
import time
import socket
import zmq
def get_ip():
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
# doesn't even have to be reachable
s.connect(('10.255.255.255', 1))
IP = s.getsockname()[0]
except:
IP = '127.0.0.1'
finally:
s.close()
return IP
class StatusDisplay:
"""Main class implementing the control of the LED display"""
DM_OFF = 0
DM_TIME_LEFT = 1
DM_INFO_TEXT = 2
DM_WARNING_TEXT = 3
DM_TIME = 4
DM_CLOSED = 5
DM_STARTUP = 6
DM_ONE_LAP = 7
DM_TWO_LAP = 8
DM_FINISH = 9
DM_TIME_QUALIFY = 10
DM_TIME_LEFT_20_FULL = 11
DM_TIME_LEFT_20_HALF = 15
DM_TIME_LEFT_25_35_FULL = 13
DM_TIME_LEFT_25_35_HALF = 14
DM_TIMING = 12
MX_VERSION = "1.0.8"
def __init__(self, canvas, graphics):
"""Class constructor"""
self.ip = get_ip()
self.debug_datetime = datetime(2020, 1, 1, 17, 24, 00)
self.debug = False
print(self.ip)
self._display_mode = StatusDisplay.DM_STARTUP
self.default_mode = StatusDisplay.DM_TIME_LEFT
self.canvas = canvas
self.graphics = graphics
self.elapsed_time = 0.0
self.startup_delay = 60
self.startup_finished = False
self.timing_start = datetime.now()
self.font = self.graphics.Font()
self.font.LoadFont("fonts/7x13.bdf")
self.large_font = self.graphics.Font()
self.large_font.LoadFont("fonts/9x18B.bdf")
self.huge_font = self.graphics.Font()
self.huge_font.LoadFont("fonts/Bahnschrift_large.bdf")
self.extra_large_font = self.graphics.Font()
self.extra_large_font.LoadFont("fonts/Bahnschrift.bdf")
self.time_color = graphics.Color(255, 255, 255)
self.time_warning_color = graphics.Color(255, 255, 0)
self.info_color = graphics.Color(255, 255, 255)
self.info_background = graphics.Color(0, 0, 140)
self.warn_color = graphics.Color(0, 0, 0)
self.warn_background = graphics.Color(200, 200, 0)
self.warn_border = graphics.Color(200, 0, 0)
self.time_over_color = graphics.Color(255, 0, 0)
self.training_back = graphics.Color(128, 0, 0)
self.training_bar = graphics.Color(0, 255, 0)
self.training_text = graphics.Color(0, 0, 0)
self.white = graphics.Color(230, 230, 230)
self.white_safe = graphics.Color(230, 230, 230)
self.black = graphics.Color(0,0,0)
self.hour_color = graphics.Color(255,0,0)
self.minute_color = graphics.Color(0,255,0)
self.second_color = graphics.Color(0,0,255)
self.info_text = "Infotext"
self.warning_text = "Varningstext"
def current_time(self):
if self.debug:
return self.debug_datetime
else:
return datetime.now()
def draw_filled_rect(self, x0, y0, x1, y1, color):
"""Draws a filled rectangle in the LED display"""
for y in range(y0,y1+1):
self.graphics.DrawLine(self.canvas, x0, y, x1, y, color)
def draw_rect(self, x0, y0, x1, y1, color):
"""Draw a rectangle in the LED display"""
self.graphics.DrawLine(self.canvas, x0, y0, x1, y0, color)
self.graphics.DrawLine(self.canvas, x0, y1, x1, y1, color)
self.graphics.DrawLine(self.canvas, x0, y0, x0, y1, color)
self.graphics.DrawLine(self.canvas, x1, y0, x1, y1, color)
def draw_time_left(self, text, value):
"""Draw time left as a bar"""
self.draw_filled_rect(64, 0, 127, 31, self.training_back)
self.draw_filled_rect(64, 0, 64+value, 31, self.training_bar)
self.graphics.DrawText(self.canvas, self.font, 64+2, 12, self.training_text, text)
self.graphics.DrawText(self.canvas, self.font, 64+2, 31, self.training_text, str(value))
def draw_time(self):
"""Draw current time in the LED display"""
self.draw_rect(0, 0, 127, 31, self.time_color)
self.draw_rect(1, 1, 126, 30, self.time_color)
now = self.current_time()
time_str = now.strftime("%H:%M:%S")
self.graphics.DrawText(self.canvas, self.extra_large_font, 6, 28, self.time_color, time_str)
def draw_arrow_forward(self, color):
x0 = 32*3+19
al = 8
m = 8
self.graphics.DrawLine(self.canvas, x0-1, m, x0-1, 32-m, color)
self.graphics.DrawLine(self.canvas, x0, m, x0, 32-m, color)
self.graphics.DrawLine(self.canvas, x0+1, m, x0+1, 32-m, color)
self.graphics.DrawLine(self.canvas, x0 - 1, m, x0-1 + al, m + al, color)
self.graphics.DrawLine(self.canvas, x0, m, x0 + al, m + al, color)
self.graphics.DrawLine(self.canvas, x0 + 1, m, x0 + 1 + al, m + al, color)
self.graphics.DrawLine(self.canvas, x0 - 1, m, x0 - 1 - al, m + al, color)
self.graphics.DrawLine(self.canvas, x0, m, x0 - al, m + al, color)
self.graphics.DrawLine(self.canvas, x0 + 1, m, x0 + 1 - al, m + al, color)
def draw_arrow_right(self, color):
x0 = 32*3
y0 = 15
al = 8
m = 8
self.graphics.DrawLine(self.canvas, x0 + m, y0 - 1, x0 + 32 - m, y0 - 1, color)
self.graphics.DrawLine(self.canvas, x0 + m, y0, x0 + 32 - m, y0, color)
self.graphics.DrawLine(self.canvas, x0 + m, y0 + 1, x0 + 32 - m, y0 + 1, color)
self.graphics.DrawLine(self.canvas, x0 + 32 - m, y0 - 1, x0 + 32 - m - al, y0 - 1 - al, color)
self.graphics.DrawLine(self.canvas, x0 + 32 - m, y0, x0 + 32 - m - al, y0 - al, color)
self.graphics.DrawLine(self.canvas, x0 + 32 - m, y0 + 1, x0 + 32 - m - al, y0 + 1 - al, color)
self.graphics.DrawLine(self.canvas, x0 + 32 - m, y0 - 1, x0 + 32 - m - al, y0 - 1 + al, color)
self.graphics.DrawLine(self.canvas, x0 + 32 - m, y0, x0 + 32 - m - al, y0 + al, color)
self.graphics.DrawLine(self.canvas, x0 + 32 - m, y0 + 1, x0 + 32 - m - al, y0 + 1 + al, color)
def draw_half_hour(self):
"""Draw time left in half-hour practice sessions."""
now = self.current_time()
now_modified = datetime(2020, 1, 1, now.hour, now.minute, now.second)
min30 = datetime(2020, 1, 1, now_modified.hour, 29, 59)
min00 = datetime(2020, 1, 1, now_modified.hour, 59, 59)
left30 = min30 - now_modified
left00 = min00 - now_modified
m30, s30 = divmod(left30.seconds, 60)
h30, m30 = divmod(m30, 60)
m00, s00 = divmod(left00.seconds, 60)
h00, m00 = divmod(m00, 60)
if (m30<m00):
time_str = '{:02d}:{:02d}'.format(m30, s30)
left_minutes = m30
else:
time_str = '{:02d}:{:02d}'.format(m00, s00)
left_minutes = m00
if left_minutes > 1:
self.graphics.DrawText(self.canvas, self.huge_font, 0, 32, self.time_color, time_str)
self.draw_arrow_forward(self.time_color)
else:
if now.second % 2 == 0:
self.graphics.DrawText(self.canvas, self.huge_font, 0, 32, self.time_color, time_str)
self.draw_arrow_right(self.time_color)
else:
self.graphics.DrawText(self.canvas, self.huge_font, 0, 32, self.time_over_color, time_str)
self.draw_arrow_right(self.time_over_color)
def draw_25_35_full(self):
"""Draw time left in half-hour practice sessions."""
now = self.current_time()
now_modified = datetime(2020, 1, 1, now.hour, now.minute, now.second)
min25 = datetime(2020, 1, 1, now_modified.hour, 24, 59)
min00 = datetime(2020, 1, 1, now_modified.hour, 59, 59)
left25 = min25 - now_modified
left00 = min00 - now_modified
m25, s25 = divmod(left25.seconds, 60)
h25, m25 = divmod(m25, 60)
m00, s00 = divmod(left00.seconds, 60)
h00, m00 = divmod(m00, 60)
if (m25<m00):
time_str = '{:02d}:{:02d}'.format(m25, s25)
left_minutes = m25
else:
time_str = '{:02d}:{:02d}'.format(m00, s00)
left_minutes = m00
if left_minutes > 1:
self.graphics.DrawText(self.canvas, self.huge_font, 0, 32, self.time_color, time_str)
self.draw_arrow_forward(self.time_color)
else:
if now.second % 2 == 0:
self.graphics.DrawText(self.canvas, self.huge_font, 0, 32, self.time_color, time_str)
self.draw_arrow_right(self.time_color)
else:
self.graphics.DrawText(self.canvas, self.huge_font, 0, 32, self.time_over_color, time_str)
self.draw_arrow_right(self.time_over_color)
def draw_25_35_half(self):
"""Draw time left in half-hour practice sessions."""
now = self.current_time()
now_modified = datetime(2020, 1, 1, now.hour, now.minute, now.second)
min25 = datetime(2020, 1, 1, now_modified.hour, 54, 59)
min00 = datetime(2020, 1, 1, now_modified.hour, 29, 59)
left25 = min25 - now_modified
left00 = min00 - now_modified
m25, s25 = divmod(left25.seconds, 60)
h25, m25 = divmod(m25, 60)
m00, s00 = divmod(left00.seconds, 60)
h00, m00 = divmod(m00, 60)
if (m25<m00):
time_str = '{:02d}:{:02d}'.format(m25, s25)
left_minutes = m25
else:
time_str = '{:02d}:{:02d}'.format(m00, s00)
left_minutes = m00
if left_minutes > 1:
self.graphics.DrawText(self.canvas, self.huge_font, 0, 32, self.time_color, time_str)
self.draw_arrow_forward(self.time_color)
else:
if now.second % 2 == 0:
self.graphics.DrawText(self.canvas, self.huge_font, 0, 32, self.time_color, time_str)
self.draw_arrow_right(self.time_color)
else:
self.graphics.DrawText(self.canvas, self.huge_font, 0, 32, self.time_over_color, time_str)
self.draw_arrow_right(self.time_over_color)
def draw_twenty_minutes_full(self):
"""Draw time left in 20-minute practice sessions."""
now = self.current_time()
now_modified = datetime(2020, 1, 1, now.hour, now.minute, now.second)
min20 = datetime(2020, 1, 1, now_modified.hour, 19, 59)
min40 = datetime(2020, 1, 1, now_modified.hour, 39, 59)
min00 = datetime(2020, 1, 1, now_modified.hour, 59, 59)
left20 = min20 - now_modified
left40 = min40 - now_modified
left00 = min00 - now_modified
m20, s20 = divmod(left20.seconds, 60)
h20, m20 = divmod(m20, 60)
m40, s40 = divmod(left40.seconds, 60)
h40, m40 = divmod(m40, 60)
m00, s00 = divmod(left00.seconds, 60)
h00, m00 = divmod(m00, 60)
m = min(m00, m20, m40)
if (m20 == m):
time_str = '{:02d}:{:02d}'.format(m20, s20)
left_minutes = m20
elif (m40 == m):
time_str = '{:02d}:{:02d}'.format(m40, s40)
left_minutes = m40
else:
time_str = '{:02d}:{:02d}'.format(m00, s00)
left_minutes = m00
if left_minutes > 1:
self.graphics.DrawText(self.canvas, self.huge_font, 0, 32, self.time_color, time_str)
self.draw_arrow_forward(self.time_color)
else:
if now.second % 2 == 0:
self.graphics.DrawText(self.canvas, self.huge_font, 0, 32, self.time_color, time_str)
self.draw_arrow_right(self.time_color)
else:
self.graphics.DrawText(self.canvas, self.huge_font, 0, 32, self.time_over_color, time_str)
self.draw_arrow_right(self.time_over_color)
def draw_twenty_minutes_half(self):
"""Draw time left in 20-minute practice sessions."""
now = self.current_time()
now_modified = datetime(2020, 1, 1, now.hour, now.minute, now.second)
min20 = datetime(2020, 1, 1, now_modified.hour, 49, 59)
min40 = datetime(2020, 1, 1, now_modified.hour, 9, 59)
min00 = datetime(2020, 1, 1, now_modified.hour, 29, 59)
left20 = min20 - now_modified
left40 = min40 - now_modified
left00 = min00 - now_modified
m20, s20 = divmod(left20.seconds, 60)
h20, m20 = divmod(m20, 60)
m40, s40 = divmod(left40.seconds, 60)
h40, m40 = divmod(m40, 60)
m00, s00 = divmod(left00.seconds, 60)
h00, m00 = divmod(m00, 60)
m = min(m00, m20, m40)
if (m20 == m):
time_str = '{:02d}:{:02d}'.format(m20, s20)
left_minutes = m20
elif (m40 == m):
time_str = '{:02d}:{:02d}'.format(m40, s40)
left_minutes = m40
else:
time_str = '{:02d}:{:02d}'.format(m00, s00)
left_minutes = m00
if left_minutes > 1:
self.graphics.DrawText(self.canvas, self.huge_font, 0, 32, self.time_color, time_str)
self.draw_arrow_forward(self.time_color)
else:
if now.second % 2 == 0:
self.graphics.DrawText(self.canvas, self.huge_font, 0, 32, self.time_color, time_str)
self.draw_arrow_right(self.time_color)
else:
self.graphics.DrawText(self.canvas, self.huge_font, 0, 32, self.time_over_color, time_str)
self.draw_arrow_right(self.time_over_color)
def draw_line_angular(self, x0, y0, r, angle, color):
"""Draw angular line in LED display"""
x1 = x0 + r*cos(angle)
y1 = y0 + r*sin(angle)
self.graphics.DrawLine(self.canvas, x0, y0, x1, y1, color)
def draw_clock(self):
"""Draw analog clock in LED display."""
now = self.current_time()
hour = now.hour
minute = now.minute
second = now.second
x0 = 32*3+19
y0 = 12
self.graphics.DrawCircle(self.canvas, x0, y0, 12, self.time_color)
self.graphics.DrawCircle(self.canvas, x0, y0+1, 12, self.time_color)
hour_angle = (hour+minute/60)*2.0*pi/12.0 - 0.5*pi
minute_angle = minute*2.0*pi/60.0 - 0.5*pi
second_angle = second*2.0*pi/60.0 - 0.5*pi
self.draw_line_angular(x0, y0, 10, second_angle, self.second_color)
self.draw_line_angular(x0, y0, 10, minute_angle, self.minute_color)
self.draw_line_angular(x0, y0+1, 10, minute_angle, self.minute_color)
self.draw_line_angular(x0+1, y0, 10, minute_angle, self.minute_color)
self.draw_line_angular(x0, y0, 8, hour_angle, self.hour_color)
self.draw_line_angular(x0, y0+1, 8, hour_angle, self.hour_color)
self.draw_line_angular(x0+1, y0, 8, hour_angle, self.hour_color)
def draw_timing(self):
"""Draw time since start of timing."""
now = self.current_time()
elapsed_time = now - self.timing_start
seconds = elapsed_time.total_seconds()
hours = seconds // 3600
minutes = (seconds % 3600) // 60
seconds = seconds % 60
time_str = "%02i:%02i" % (minutes, seconds)
self.graphics.DrawText(self.canvas, self.huge_font, 0, 32, self.time_color, time_str)
def draw_time_date(self):
"""Draw time and date in the LED display."""
now = self.current_time()
time_str = now.strftime("%H:%M:%S")
date_str = now.strftime("%y-%m-%d")
self.graphics.DrawText(self.canvas, self.font, 0, 12, self.time_color, time_str)
self.graphics.DrawText(self.canvas, self.font, 0, 31, self.time_color, date_str)
def draw_info_text(self):
"""Draw information text."""
self.draw_filled_rect(0, 0, 127, 31, self.info_background)
self.graphics.DrawText(self.canvas, self.large_font, 10, 22, self.info_color, self.info_text)
self.draw_rect(0, 0, 127, 31, self.info_color)
self.draw_rect(1, 1, 126, 30, self.info_color)
def draw_warn_text(self):
"""Draw warning text"""
self.draw_filled_rect(0, 0, 127, 31, self.warn_background)
self.graphics.DrawText(self.canvas, self.large_font, 10, 22, self.warn_color, self.warning_text)
self.draw_rect(0, 0, 127, 31, self.warn_border)
self.draw_rect(1, 1, 126, 30, self.warn_border)
def draw_startup(self):
"""Draw startup screen with ip and version."""
self.ip = get_ip()
self.graphics.DrawText(self.canvas, self.font, 4, 11, self.time_color, self.ip+":5000")
self.graphics.DrawText(self.canvas, self.font, 4, 30, self.time_color, "mxdisplay-"+self.MX_VERSION)
def draw_lap_left(self, laps_left, offset):
"""Draw laps left sign"""
self.draw_filled_rect(0, 0, 127, 31, self.white_safe)
self.graphics.DrawText(self.canvas, self.extra_large_font, 20+offset, 28, self.black, str(laps_left)+" VARV")
def draw_time_qualify(self):
"""Draw time qualify in sign"""
self.graphics.DrawText(self.canvas, self.extra_large_font, 10, 28, self.white, "Tidskval")
def draw_finish(self, invert=False):
"""Draw finish flag"""
if invert:
offset = 8
else:
offset = 0
sq_size = 8
for y in range(32):
if y % sq_size == 0:
if offset == 0:
offset = sq_size
else:
offset = 0
for x in range(0,128,sq_size*2):
self.graphics.DrawLine(self.canvas, x+offset, y, x+(sq_size-1)+offset, y, self.white)
def draw(self):
"""Main draw routine of the display."""
if self._display_mode == StatusDisplay.DM_TIME_LEFT:
self.draw_half_hour()
elif self._display_mode == StatusDisplay.DM_TIME_LEFT_20_FULL:
self.draw_twenty_minutes_full()
elif self._display_mode == StatusDisplay.DM_TIME_LEFT_20_HALF:
self.draw_twenty_minutes_half()
elif self._display_mode == StatusDisplay.DM_TIME_LEFT_25_35_FULL:
self.draw_25_35_full()
elif self._display_mode == StatusDisplay.DM_TIME_LEFT_25_35_HALF:
self.draw_25_35_half()
elif self._display_mode == StatusDisplay.DM_CLOSED:
pass
elif self._display_mode == StatusDisplay.DM_TIME:
self.draw_time()
elif self._display_mode == StatusDisplay.DM_INFO_TEXT:
self.draw_info_text()
elif self._display_mode == StatusDisplay.DM_WARNING_TEXT:
self.draw_warn_text()
elif self._display_mode == StatusDisplay.DM_STARTUP:
self.draw_startup()
elif self._display_mode == StatusDisplay.DM_OFF:
pass
elif self._display_mode == StatusDisplay.DM_ONE_LAP:
self.draw_lap_left(1, 0)
elif self._display_mode == StatusDisplay.DM_TWO_LAP:
self.draw_lap_left(2, -3)
elif self._display_mode == StatusDisplay.DM_FINISH:
now = datetime.now()
if now.second % 2 == 0:
self.draw_finish(True)
else:
self.draw_finish(False)
elif self._display_mode == StatusDisplay.DM_TIME_QUALIFY:
self.draw_time_qualify()
elif self._display_mode == StatusDisplay.DM_TIMING:
self.draw_timing()
def reset_timing(self):
"""Reset timing to zero."""
self.timing_start = self.current_time()
def set_display_mode(self, mode):
"""Display mode setter"""
self._display_mode = mode
def get_display_mode(self):
"""Display mode getter"""
return self._display_mode
display_mode = property(get_display_mode, set_display_mode)
class MxDisplay(SampleBase):
"""Class implementing the display server"""
def __init__(self, *args, **kwargs):
"""Class constructor"""
super(MxDisplay, self).__init__(*args, **kwargs)
self.context = zmq.Context()
self.socket = self.context.socket(zmq.REP)
self.socket.bind("tcp://*:5555")
self.mode_text = ""
def run(self):
"""Main run loop of the server."""
offscreen_canvas = self.matrix.CreateFrameCanvas()
status_display = StatusDisplay(offscreen_canvas, graphics)
status_display.display_mode = StatusDisplay.DM_STARTUP
#status_display.debug = False
#status_display.debug_datetime = datetime(2020, 1, 1, 17, 51, 00)
while True:
try:
message = self.socket.recv_string(flags=zmq.NOBLOCK)
print("Message received: ", message)
status_display.startup_finished = True
if message == "time_left":
print("Switching to DM_TIME_LEFT")
status_display.display_mode = StatusDisplay.DM_TIME_LEFT
elif message == "time_left_twenty":
print("Switching to DM_TIME_LEFT")
status_display.display_mode = StatusDisplay.DM_TIME_LEFT_20_FULL
elif message == "time_left_twenty_half":
print("Switching to DM_TIME_LEFT")
status_display.display_mode = StatusDisplay.DM_TIME_LEFT_20_HALF
elif message == "time_left_25_35_full":
print("Switching to DM_TIME_25_35_FULL")
status_display.display_mode = StatusDisplay.DM_TIME_LEFT_25_35_FULL
elif message == "time_left_25_35_half":
print("Switching to DM_TIME_25_35_HALF")
status_display.display_mode = StatusDisplay.DM_TIME_LEFT_25_35_HALF
elif message == "time":
print("Switching to DM_TIME")
status_display.display_mode = StatusDisplay.DM_TIME
elif message == "off":
print("Switching to DM_OFF")
status_display.display_mode = StatusDisplay.DM_OFF
elif message == "info":
print("Switchiong to DM_INFO_TEXT")
status_display.display_mode = StatusDisplay.DM_INFO_TEXT
elif message == "warn":
print("Switchiong to DM_WARNING_TEXT")
status_display.display_mode = StatusDisplay.DM_WARNING_TEXT
elif message == "one_lap":
print("Switching to DM_ONE_LAP")
status_display.display_mode = StatusDisplay.DM_ONE_LAP
elif message == "two_lap":
print("Switching to DM_TWO_LAP")
status_display.display_mode = StatusDisplay.DM_TWO_LAP
elif message == "finish":
print("Switching to DM_FINISH")
status_display.display_mode = StatusDisplay.DM_FINISH
elif message == "qualify":
print("Switching to DM_TIME_QUALIFY")
status_display.display_mode = StatusDisplay.DM_TIME_QUALIFY
elif message == "startup":
print("Switching to DM_STARTUP")
status_display.display_mode = StatusDisplay.DM_STARTUP
elif message == "set_info_text":
print("set_info_text:")
self.socket.send_string("OK")
text = self.socket.recv_string()
print("Text received: ", text)
status_display.info_text = text
status_display.display_mode = StatusDisplay.DM_INFO_TEXT
elif message == "set_warn_text":
print("Setting warn_text")
self.socket.send_string("OK")
text = self.socket.recv_string()
print("Text received: ", text)
status_display.warning_text = text
status_display.display_mode = StatusDisplay.DM_WARNING_TEXT
elif message == "timing":
print("Switching to DM_TIMING")
status_display.display_mode = StatusDisplay.DM_TIMING
elif message == "reset_timing":
print("Resetting timing")
status_display.reset_timing()
status_display.display_mode = StatusDisplay.DM_TIMING
elif message == "status":
print("Sending status")
if status_display.display_mode == StatusDisplay.DM_TIME_LEFT_20_FULL:
self.mode_text = "20 min / 20 min / 20 min (heltimme)"
elif status_display.display_mode == StatusDisplay.DM_TIME_LEFT_20_HALF:
self.mode_text = "20 min / 20 min / 20 min (halvtimme)"
elif status_display.display_mode == StatusDisplay.DM_TIME_LEFT_25_35_FULL:
self.mode_text = "25 min / 35 min (heltimme)"
elif status_display.display_mode == StatusDisplay.DM_TIME_LEFT_25_35_HALF:
self.mode_text = "25 min / 35 min (halvtimme)"
elif status_display.display_mode == StatusDisplay.DM_TIME_LEFT:
self.mode_text = "30 min / 30 min"
elif status_display.display_mode == StatusDisplay.DM_TIME:
self.mode_text = "Tidvisning"
elif status_display.display_mode == StatusDisplay.DM_OFF:
self.mode_text = "Display avstängd"
elif status_display.display_mode == StatusDisplay.DM_INFO_TEXT:
self.mode_text = "Infotext visad"
elif status_display.display_mode == StatusDisplay.DM_WARNING_TEXT:
self.mode_text = "Varningstext visad"
elif status_display.display_mode == StatusDisplay.DM_ONE_LAP:
self.mode_text = "1-varv"
elif status_display.display_mode == StatusDisplay.DM_TWO_LAP:
self.mode_text = "2-varv"
elif status_display.display_mode == StatusDisplay.DM_FINISH:
self.mode_text = "Målflagg"
elif status_display.display_mode == StatusDisplay.DM_TIME_QUALIFY:
self.mode_text = "Kvalificering"
elif status_display.display_mode == StatusDisplay.DM_STARTUP:
self.mode_text = "Uppstart"
elif status_display.display_mode == StatusDisplay.DM_TIMING:
self.mode_text = "Tidtagning"
self.socket.send_string("OK,%s" % (self.mode_text))
except zmq.Again as e:
pass
offscreen_canvas.Clear()
status_display.canvas = offscreen_canvas
status_display.draw()
# Check if startup delay is completed and switch
# to default mode
if (status_display.elapsed_time > status_display.startup_delay) and not status_display.startup_finished:
status_display.startup_finished = True
now = datetime.now()
if (now.hour>16):
status_display.display_mode = StatusDisplay.DM_TIME_LEFT_25_35_HALF
else:
status_display.display_mode = StatusDisplay.DM_TIME_LEFT_25_35_FULL
time.sleep(0.1)
status_display.elapsed_time += 0.1
offscreen_canvas = self.matrix.SwapOnVSync(offscreen_canvas)
# Main function
if __name__ == "__main__":
mx_display = MxDisplay()
if (not mx_display.process()):
mx_display.print_help()
``` |
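For reference, a minimal client sketch for the display server above. It is an assumption-laden example: it presumes `pyzmq` is installed and that the server is listening on the same machine, and it only uses the `status` request, which is the one command the loop above answers with a reply string.
```python
import zmq

context = zmq.Context()
socket = context.socket(zmq.REQ)
socket.connect("tcp://127.0.0.1:5555")

# The server responds to "status" with "OK,<current mode text>".
socket.send_string("status")
reply = socket.recv_string()
print("Display status:", reply)
```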
{
"source": "jonaslindemann/raspibot",
"score": 3
} |
#### File: jonaslindemann/raspibot/raspicontrol.py
```python
import sys
import zerorpc
import time
from math import *
from socket import *
from PyQt5.QtCore import pyqtSlot, QThread, pyqtSignal
from PyQt5.QtWidgets import QApplication, QDialog, QWidget
from PyQt5.QtGui import QPixmap
from PyQt5.uic import loadUi
from joystick import *
from raspibot import *
class JoystickThread(QThread):
update = pyqtSignal()
def __init__(self, parent, joystick):
super(JoystickThread, self).__init__(parent)
self.joystick = joystick
def run(self):
while True:
self.joystick.poll()
self.update.emit()
time.sleep(0.2)
def stop(self):
self.terminate()
class SensorThread(QThread):
update = pyqtSignal()
def __init__(self, parent, ip):
super(SensorThread, self).__init__(parent)
self.parent = parent
self.ip = ip
def run(self):
sensors = zerorpc.Client()
sensors.connect("tcp://%s:4242" % self.ip)
while True:
self.parent.temperature = sensors.getTemperature()
self.parent.humidity = sensors.getHumidity()
self.parent.pressure = sensors.getPressure()
self.parent.orientation = sensors.getOrientation()
self.update.emit()
self.msleep(500)
def stop(self):
self.terminate()
class RemoteControlWindow(QWidget):
def __init__(self, robot):
"""Constructor"""
super(RemoteControlWindow, self).__init__()
self.robot = robot
# Defined speed limits
self.forwardSpeed = 250
self.backwardSpeed = 250
self.rotateSpeed = 150
self.moving = False
self.temperature = 0.0
self.humidity = 0.0
self.orientation = None
self.sensorThread = None
self.hasJoystick = False
self.x = 0
self.y = 0
# Initalise joystick object
i = 0
self.joystick = Joystick(i)
while not self.joystick.connected:
i = i + 1
self.joystick = Joystick(i)
if i > 4:
break
if self.joystick.connected:
self.joystickThread = JoystickThread(self, self.joystick)
self.joystickThread.setTerminationEnabled(True)
self.joystickThread.update.connect(self.on_joystickUpdate)
# Load user interface from UI-file
loadUi('remote_control.ui', self)
# Initially disable controls
print("Starting sensor thread...")
self.sensorThread = SensorThread(self, self.robot.ip)
self.sensorThread.setTerminationEnabled(True)
self.sensorThread.update.connect(self.on_sensor_update)
self.sensorThread.start()
def disable_controls(self):
"""Disable all controls"""
self.backButton.setEnabled(False)
self.forwardButton.setEnabled(False)
self.turnLeftButton.setEnabled(False)
self.turnRightButton.setEnabled(False)
self.stopButton.setEnabled(False)
self.disconnectButton.setEnabled(False)
self.testButton.setEnabled(False)
self.connectButton.setEnabled(True)
def enable_controls(self):
"""Enable all controls"""
self.backButton.setEnabled(True)
self.forwardButton.setEnabled(True)
self.turnLeftButton.setEnabled(True)
self.turnRightButton.setEnabled(True)
self.stopButton.setEnabled(True)
self.disconnectButton.setEnabled(True)
self.testButton.setEnabled(True)
self.connectButton.setEnabled(False)
def on_joystickUpdate(self):
"""Handle joystick updates"""
x = self.joystick.x
y = self.joystick.y
lt = self.joystick.lt
rx = self.joystick.rx
ry = self.joystick.ry
rt = self.joystick.rt
buttons_list = self.joystick.buttons_text.split()
# print("\r(% .3f % .3f % .3f) (% .3f % .3f % .3f)%s%s" % (x, y, lt,
# rx, ry, rt, buttons_list, " "))
# print(buttons_text.split())
if len(buttons_list) > 0:
if buttons_list[0] == "a":
self.robot.clear(255, 255, 255)
else:
self.robot.clear(0, 0, 0)
self.temperature = self.robot.getTemperature()
self.humidity = self.robot.getHumidity()
d = sqrt(pow(x, 2) + pow(y, 2))
if d > 0.1:
print(-self.forwardSpeed * (y + x), -self.forwardSpeed * (y - x))
self.robot.doMotor(9, -self.forwardSpeed * (y + x))
self.robot.doMotor(10, -self.forwardSpeed * (y - x))
self.moving = True
else:
if self.moving:
self.robot.doMotor(9, 0)
self.robot.doMotor(10, 0)
self.moving = False
def on_sensor_update(self):
self.pitchDial.setValue(self.orientation[b"pitch"])
self.rollDial.setValue(self.orientation[b"roll"])
self.yawDial.setValue(self.orientation[b"yaw"])
self.temperatureSlider.setValue(self.temperature)
self.humiditySlider.setValue(self.humidity)
self.pressureSlider.setValue(self.pressure)
@pyqtSlot()
def on_connectButton_clicked(self):
self.connectToRobot()
@pyqtSlot()
def on_disconnectButton_clicked(self):
print("Close robot")
self.robot.clear(0, 0, 0)
print("Stopping sensor thread")
self.sensorThread.stop()
self.robot.close()
self.disable_controls()
@pyqtSlot()
def on_testButton_clicked(self):
print("Capturing image start...")
image = self.robot.captureImage()
pixmap = QPixmap()
pixmap.loadFromData(image)
self.previewImageLabel.setPixmap(pixmap)
# imageFile = open("image.png", "wb")
# imageFile.write(image)
# imageFile.close()
print("Capture complete.")
@pyqtSlot()
def on_forwardButton_pressed(self):
self.robot.setRotation(90)
self.robot.showLetter("F")
self.robot.doMotor(9, self.forwardSpeed)
self.robot.doMotor(10, self.forwardSpeed)
@pyqtSlot()
def on_forwardButton_released(self):
self.robot.setRotation(90)
self.robot.clear(0, 0, 0)
self.robot.doMotor(9, 0)
self.robot.doMotor(10, 0)
@pyqtSlot()
def on_backButton_pressed(self):
self.robot.setRotation(90)
self.robot.showLetter("B")
self.robot.doMotor(9, -self.backwardSpeed)
self.robot.doMotor(10, -self.backwardSpeed)
@pyqtSlot()
def on_backButton_released(self):
self.robot.setRotation(90)
self.robot.clear(0, 0, 0)
self.robot.doMotor(9, 0)
self.robot.doMotor(10, 0)
@pyqtSlot()
def on_turnLeftButton_pressed(self):
self.robot.setRotation(90)
self.robot.showLetter("L")
self.robot.doMotor(9, self.rotateSpeed)
self.robot.doMotor(10, -self.rotateSpeed)
@pyqtSlot()
def on_turnLeftButton_released(self):
self.robot.setRotation(90)
self.robot.clear(0, 0, 0)
self.robot.doMotor(9, 0)
self.robot.doMotor(10, 0)
@pyqtSlot()
def on_turnRightButton_pressed(self):
self.robot.setRotation(90)
self.robot.showLetter("R")
self.robot.doMotor(9, -self.rotateSpeed)
self.robot.doMotor(10, self.rotateSpeed)
@pyqtSlot()
def on_turnRightButton_released(self):
self.robot.setRotation(90)
self.robot.clear(0, 0, 0)
self.robot.doMotor(9, 0)
self.robot.doMotor(10, 0)
@pyqtSlot()
def on_stopButton_clicked(self):
self.robot.doMotor(9, 0)
self.robot.doMotor(10, 0)
def remoteControl(robot):
app = QApplication(sys.argv)
widget = RemoteControlWindow(robot)
widget.show()
sys.exit(app.exec_())
if __name__ == "__main__":
bot = RaspiBot()
bot.connect()
remoteControl(bot)
``` |
{
"source": "JonasLoos/tw-utils",
"score": 3
} |
#### File: tw-utils/src/create_random_blocks.py
```python
import numpy as np
from map_creator import create_map
import sys
def create_random_blocks(filename=None):
# create the map matrix
game = np.zeros((50,50,4), dtype='B')
tiles = np.zeros((50,50,4), dtype='B')
# add content
game[0,:,0] = 1 # top wall
game[-1,:,0] = 1 # ground wall
game[:,0,0] = 1 # left wall
game[:,-1,0] = 1 # right wall
game[-2,24,0] = 192 # spawn
game[5:-5,5:-5,0] = np.random.rand(40,40) > 0.95 # random blocks
tiles[0,:,0] = 1 # top wall
tiles[-1,:,0] = 1 # ground wall
tiles[:,0,0] = 1 # left wall
tiles[:,-1,0] = 1 # right wall
tiles[5:-5,5:-5,0] = game[5:-5,5:-5,0] * 16
# generate the map file
create_map(game, [('grass_main', tiles)], filename=filename)
# generate a map when the script is called from the command line
if __name__ == "__main__":
if len(sys.argv) > 1:
create_random_blocks(sys.argv[1])
else:
create_random_blocks()
```
#### File: tw-utils/src/create_spiral.py
```python
import numpy as np
from map_creator import create_map
import sys
class CycleArray:
'''cyclic numpy array'''
def __init__(self, *args):
self.arr = np.array(*args)
def __getitem__(self, i):
return self.arr[i % len(self)]
def __len__(self):
return len(self.arr)
def create_spiral(filename=None):
# config
basesize = 200
blocklen = 20
min_wall_thickness = 1 # on each side
max_wall_thickness = 4 # on each side
wall_thickness_change_probability = 0.15
obstacle_size = 5
obstacle_side_switch_probability = 0.8
obstacle_direction_change_probability = 0.4
obstacle_freeze_probability = 0.5
# create the map matrix
# 0: nothing, 1: normal, 3: unhookable, 33: start, 34: finish, 192: spawn
size = np.array([basesize]*2)
game = np.zeros((size[0],size[1],4), dtype='B')
# add content
sidelen = 1
pos = size//2-1 # set position to center
direction = 0
directions = CycleArray([(0,-1),(-1,0),(0,1),(1,0)])
pos += sidelen * blocklen // 2 # start with a bit offset from center
newpos = pos + directions[direction] * sidelen * blocklen
hellofromtheotherside = False # side to grow obstacles from
inner_thickness = 1
outer_thickness = 1
while 0 <= newpos[0]+blocklen*2 <= size[0] and 0 <= newpos[1]+blocklen*2 <= size[1]:
# directions
currdir = directions[direction]
nextdir = directions[direction + 1]
# calculate position
a = np.array([min(pos[0], newpos[0]),min(pos[1], newpos[1])])
b = np.array([max(pos[0], newpos[0]),max(pos[1], newpos[1])]) + 1
# create wall
game[a[0]:b[0],a[1]:b[1],0] = 1
# make wall thick and add freeze
for x in list(range(a[0], b[0], 1))[::currdir[0] or 1]:
for y in list(range(a[1], b[1], 1))[::currdir[1] or 1]:
# set blocks
xa = x + inner_thickness * (nextdir[0] if nextdir[0] < 0 else 0)
xb = x + inner_thickness * (nextdir[0] if nextdir[0] > 0 else 0)
ya = y + inner_thickness * (nextdir[1] if nextdir[1] < 0 else 0)
yb = y + inner_thickness * (nextdir[1] if nextdir[1] > 0 else 0)
game[xa:xb+1,ya:yb+1,0] = 1 # inner
tmpx = x+inner_thickness*nextdir[0]
tmpy = y+inner_thickness*nextdir[1]
tmp = game[tmpx-1:tmpx+2,tmpy-1:tmpy+2]
game[tmpx-1:tmpx+2,tmpy-1:tmpy+2] = np.where(tmp > 0, tmp, 9) # put inner freeze around block, without overwriting
xa = x - outer_thickness * (nextdir[0] if nextdir[0] > 0 else 0)
xb = x - outer_thickness * (nextdir[0] if nextdir[0] < 0 else 0)
ya = y - outer_thickness * (nextdir[1] if nextdir[1] > 0 else 0)
yb = y - outer_thickness * (nextdir[1] if nextdir[1] < 0 else 0)
game[xa:xb+1,ya:yb+1,0] = 1 # outer
tmpx = x-outer_thickness*nextdir[0]
tmpy = y-outer_thickness*nextdir[1]
tmp = game[tmpx-1:tmpx+2,tmpy-1:tmpy+2]
game[tmpx-1:tmpx+2,tmpy-1:tmpy+2] = np.where(tmp > 0, tmp, 9) # put outer freeze around block, without overwriting
# set next thickness
p = wall_thickness_change_probability
inner_thickness += np.random.choice([-1,0,1], 1, p=[p/2,1-p,p/2])[0]
if inner_thickness > max_wall_thickness: inner_thickness = max_wall_thickness
elif inner_thickness < min_wall_thickness: inner_thickness = min_wall_thickness
outer_thickness += np.random.choice([-1,0,1], 1, p=[p/2,1-p,p/2])[0]
if outer_thickness > max_wall_thickness: outer_thickness = max_wall_thickness
elif outer_thickness < min_wall_thickness: outer_thickness = min_wall_thickness
for i in range(1,outer_thickness+1):
start = newpos - nextdir * i
end = newpos - nextdir * i + currdir * (outer_thickness-i)
game[min(start[0],end[0]):max(start[0],end[0])+1, min(start[1],end[1]):max(start[1],end[1])+1, 0] = 1 # outer corners
tmp = game[end[0]-1:end[0]+2,end[1]-1:end[1]+2]
game[end[0]-1:end[0]+2,end[1]-1:end[1]+2] = np.where(tmp > 0, tmp, 9) # put outer freeze around block, without overwriting
# create obstacles
growlen = int(blocklen*0.6) # has be be less than sqrt(0.5) ~= 0,7
first = True
for startx in list(range(a[0], b[0], blocklen))[::currdir[0] or 1]:
for starty in list(range(a[1], b[1], blocklen))[::currdir[1] or 1]:
# skip first to avoid obstacle collisions
if first:
first = False
continue
putfreeze = np.random.choice([True, False], 1, p=[obstacle_freeze_probability,1-obstacle_freeze_probability])[0]
# grow multiple obstacles at the same place to increase size
for _ in range(obstacle_size):
# set start position and grow direction
if hellofromtheotherside:
grow_direction = direction + 1
pos = np.array([startx,starty]) - nextdir * blocklen
else:
grow_direction = direction + 3
pos = np.array([startx,starty]).copy()
start = pos.copy()
initial_grow_dir = directions[grow_direction]
pos += directions[grow_direction]
# grow obstacle
while sum((pos-start)**2) < growlen**2 and ((pos-start+directions[grow_direction]) * initial_grow_dir >= 0).all(): # stop when going too far or when hitting a wall
game[pos[0],pos[1],0] = 1 # grow obstacle block
if putfreeze:
tmp = game[pos[0]-1:pos[0]+2,pos[1]-1:pos[1]+2]
game[pos[0]-1:pos[0]+2,pos[1]-1:pos[1]+2] = np.where(tmp > 0, tmp, 9) # put freeze around block, without overwriting
grow_direction += np.random.choice([-1,0,1], 1, p=[obstacle_direction_change_probability/2,1-obstacle_direction_change_probability,obstacle_direction_change_probability/2])[0] # select random new grow direction
pos += directions[grow_direction]
# randomly switch side
hellofromtheotherside ^= np.random.choice([True, False], 1, p=[obstacle_side_switch_probability,1-obstacle_side_switch_probability])[0]
# update variables for next run
direction = (direction + 1) % len(directions) # % is only needed to keep the variable small for performance reasons
if direction in [0,2]:
sidelen += 1
pos = newpos
newpos = pos + directions[direction] * sidelen * blocklen
# generate last spiral round
while 0 <= newpos[0]+blocklen <= size[0] and 0 <= newpos[1]+blocklen <= size[1]:
# calculate position
a = np.array([min(pos[0], newpos[0]),min(pos[1], newpos[1])])
b = np.array([max(pos[0], newpos[0]),max(pos[1], newpos[1])]) + 1
# create freeze
currdir = directions[direction]
nextdir = directions[direction + 1]
a_ = a + nextdir + np.absolute(currdir)
b_ = b + nextdir - np.absolute(currdir)
game[a_[0]:b_[0],a_[1]:b_[1],0] = np.where(game[a_[0]:b_[0],a_[1]:b_[1],0] > 0, game[a_[0]:b_[0],a_[1]:b_[1],0], 9) # inner freeze (dont overwrite obstacles)
# create wall
game[a[0]:b[0],a[1]:b[1],0] = 1
# make wall thick and add freeze
for x in list(range(a[0], b[0], 1))[::currdir[0] or 1]:
for y in list(range(a[1], b[1], 1))[::currdir[1] or 1]:
# set blocks
xa = x + inner_thickness * (nextdir[0] if nextdir[0] < 0 else 0)
xb = x + inner_thickness * (nextdir[0] if nextdir[0] > 0 else 0)
ya = y + inner_thickness * (nextdir[1] if nextdir[1] < 0 else 0)
yb = y + inner_thickness * (nextdir[1] if nextdir[1] > 0 else 0)
game[xa:xb+1,ya:yb+1,0] = 1 # inner
tmpx = x+inner_thickness*nextdir[0]
tmpy = y+inner_thickness*nextdir[1]
tmp = game[tmpx-1:tmpx+2,tmpy-1:tmpy+2]
game[tmpx-1:tmpx+2,tmpy-1:tmpy+2] = np.where(tmp > 0, tmp, 9) # put inner freeze around block, without overwriting
xa = x - outer_thickness * (nextdir[0] if nextdir[0] > 0 else 0)
xb = x - outer_thickness * (nextdir[0] if nextdir[0] < 0 else 0)
ya = y - outer_thickness * (nextdir[1] if nextdir[1] > 0 else 0)
yb = y - outer_thickness * (nextdir[1] if nextdir[1] < 0 else 0)
game[xa:xb+1,ya:yb+1,0] = 1 # outer
tmpx = x-outer_thickness*nextdir[0]
tmpy = y-outer_thickness*nextdir[1]
tmp = game[tmpx-1:tmpx+2,tmpy-1:tmpy+2]
game[tmpx-1:tmpx+2,tmpy-1:tmpy+2] = np.where(tmp > 0, tmp, 9) # put outer freeze around block, without overwriting
# set next thickness
p = wall_thickness_change_probability
inner_thickness += np.random.choice([-1,0,1], 1, p=[p/2,1-p,p/2])[0]
if inner_thickness > max_wall_thickness: inner_thickness = max_wall_thickness
elif inner_thickness < min_wall_thickness: inner_thickness = min_wall_thickness
outer_thickness += np.random.choice([-1,0,1], 1, p=[p/2,1-p,p/2])[0]
if outer_thickness > max_wall_thickness: outer_thickness = max_wall_thickness
elif outer_thickness < min_wall_thickness: outer_thickness = min_wall_thickness
for i in range(1,outer_thickness+1):
start = newpos - nextdir * i
end = newpos - nextdir * i + currdir * (outer_thickness-i)
game[min(start[0],end[0]):max(start[0],end[0])+1, min(start[1],end[1]):max(start[1],end[1])+1, 0] = 1 # outer corners
tmp = game[end[0]-1:end[0]+2,end[1]-1:end[1]+2]
game[end[0]-1:end[0]+2,end[1]-1:end[1]+2] = np.where(tmp > 0, tmp, 9) # put outer freeze around block, without overwriting
# update variables for next run
direction = (direction + 1) % len(directions) # `%` is only needed to keep the variable small for performance reasons
if direction in [0,2]:
sidelen += 1
pos = newpos
newpos = pos + directions[direction] * sidelen * blocklen
# create freeze free spawn with start
mid = size//2-1
a = mid - blocklen//2 + 1
b = mid + blocklen//2
tmp = game[a[0]:b[0],a[1]:b[1],0]
game[a[0]:b[0],a[1]:b[1],0] = np.where(np.isin(tmp, [1,3]), tmp, 0) # remove freeze
game[mid[0],mid[1],0] = 192 # create spawn
tmp = game[mid[0]-blocklen//2:mid[0]+blocklen//2+1,mid[1]+blocklen//2+1,0]
game[mid[0]-blocklen//2:mid[0]+blocklen//2+1,mid[1]+blocklen//2,0] = np.where(np.isin(tmp, [1,3]), tmp, 33) # create start line without overwriting blocks
finish_line_start = pos - directions[direction-1]*blocklen
finish_line_end = finish_line_start + directions[direction]*blocklen
a = np.array([min(finish_line_start[0], finish_line_end[0]),min(finish_line_start[1], finish_line_end[1])])
b = np.array([max(finish_line_start[0], finish_line_end[0]),max(finish_line_start[1], finish_line_end[1])]) + 1
tmp = game[a[0]:b[0],a[1]:b[1],0]
game[a[0]:b[0],a[1]:b[1],0] = np.where(np.isin(tmp, [1,3]), tmp, 34) # create finish line without overwriting blocks
# generate outer walls/nothing
game[blocklen,:,0] = 0 # top wall
game[-blocklen-1,:,0] = 0 # ground wall
game[:,blocklen,0] = 0 # left wall
game[:,-blocklen-1,0] = 0 # right wall
# generate visual tile layers
layer_unhookable = np.zeros(game.shape, dtype='B')
layer_unhookable[:,:,0] = np.array(np.where(game[:,:,0] == 3, 8, 0), dtype='B') # walls
layer_desert = np.zeros(game.shape, dtype='B')
layer_desert[:,:,0] += np.array(np.where(game[:,:,0] == 1, np.random.choice(np.array([7,64,65], dtype='B'), game[:,:,0].shape), 0), dtype='B') # obstacles
layer_desert[:,:,0] += np.array(np.where(game[:,:,0] == 9, 126, 0), dtype='B') # freeze
layer_desert[:,:,0] += np.array(np.where(game[:,:,0] == 33, 94, 0), dtype='B') # start line
layer_desert[:,:,0] += np.array(np.where(game[:,:,0] == 34, 94, 0), dtype='B') # finish line
# generate the map file
create_map(game, [('generic_unhookable', layer_unhookable), ('desert_main', layer_desert)], filename=filename)
# generate a map when the script is called from the command line
if __name__ == "__main__":
if len(sys.argv) > 1:
create_spiral(sys.argv[1])
else:
create_spiral()
```
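A short illustration of the `CycleArray` helper defined in `create_spiral.py` above, purely to show the wrap-around indexing (it assumes the module is importable):
```python
from create_spiral import CycleArray

directions = CycleArray([(0, -1), (-1, 0), (0, 1), (1, 0)])
print(directions[1])    # [-1  0]
print(directions[5])    # wraps around: 5 % 4 == 1, so also [-1  0]
print(len(directions))  # 4
```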
#### File: tw-utils/src/tw-mapgen.py
```python
import tkinter as tk
from create_layered import create_layered
from pathlib import Path
config = [
('filename', str(Path.cwd().joinpath('newmap.map')), str),
('base map size', '300', int),
('block length (max tunnel size)', '20', int),
('min wall thickness (per side)', '1', int), # on each side
('max wall thickness (per side)', '5', int), # on each side
('wall thickness change probability', '0.15', float),
('obstacle grow length', '11', int), # has to be less than sqrt(0.5) * blocklen - 2
('obstacle size', '5', int),
('obstacle side switch probability', '0.8', float),
('obstacle direction change probability', '0.4', float),
('obstacle freeze probability', '0.8', float),
('block wall (game layer)', '1', int),
('block corner (game layer)', '1', int),
('block obstacle (game layer)', '1', int),
('block freeze (game layer)', '9', int),
('directions (0:left, 1:up, 2:right, 3:down)', '2,2,2,3,3,3,2,1,1,1,2,2,3,3,3,2,1,1,1,2,2,2,2', lambda x: list(map(int, x.split(','))) if x.strip() else None) # directions to build along
]
# window
window = tk.Tk()
window.title('Teeworlds Map Generator')
# window.geometry("1400x1200")
# header
tk.Label(text='enter settings below and hit generate').pack()
# inputs
frame = tk.Frame()
entries = []
for i, (text, default, t) in enumerate(config):
label = tk.Label(text=text, master=frame)
label.grid(row=i, column=0, padx=10, pady=10, sticky='e')
entry = tk.Entry(master=frame)
entry.insert(tk.END, default)
entry.grid(row=i, column=1, padx=10, pady=10, sticky='ew')
entries.append(entry)
frame.columnconfigure(0,weight=0)
frame.columnconfigure(1,weight=1)
frame.pack(fill=tk.BOTH, expand=True)
# generate
status_label = None
def generate(*args):
try:
create_layered(*[t(x.get()) for x, (text, default, t) in zip(entries, config)])
result = 'success!'
except Exception as e:
result = f'error: {e}'
print(result)
status_label['text'] = result
button = tk.Button(text="generate", command=generate)
button.pack()
status_label = tk.Label()
status_label.pack()
# mainloop
window.mainloop()
``` |
{
"source": "JonasLueg/geomstats",
"score": 2
} |
#### File: tests/data/full_rank_correlation_matrices_data.py
```python
import random
from geomstats.geometry.full_rank_correlation_matrices import (
CorrelationMatricesBundle,
FullRankCorrelationMatrices,
)
from geomstats.geometry.symmetric_matrices import SymmetricMatrices
from tests.data_generation import TestData, _LevelSetTestData
class RankFullRankCorrelationMatricesTestData(_LevelSetTestData):
n_list = random.sample(range(2, 4), 2)
space_args_list = [(n,) for n in n_list]
shape_list = [(n, n) for n in n_list]
n_points_list = random.sample(range(2, 5), 2)
n_vecs_list = random.sample(range(2, 5), 2)
def random_point_belongs_test_data(self):
smoke_space_args_list = [(2,), (3,)]
smoke_n_points_list = [1, 2]
return self._random_point_belongs_test_data(
smoke_space_args_list,
smoke_n_points_list,
self.space_args_list,
self.n_points_list,
)
def projection_belongs_test_data(self):
return self._projection_belongs_test_data(
self.space_args_list, self.shape_list, self.n_points_list
)
def to_tangent_is_tangent_test_data(self):
return self._to_tangent_is_tangent_test_data(
FullRankCorrelationMatrices,
self.space_args_list,
self.shape_list,
self.n_vecs_list,
)
def random_tangent_vec_is_tangent_test_data(self):
return self._random_tangent_vec_is_tangent_test_data(
FullRankCorrelationMatrices, self.space_args_list, self.n_vecs_list
)
class CorrelationMatricesBundleTestData(TestData):
n_list = random.sample(range(2, 3), 1)
n_samples_list = random.sample(range(1, 3), 1)
def riemannian_submersion_belongs_to_base_test_data(self):
random_data = []
for n, n_samples in zip(self.n_list, self.n_samples_list):
bundle = CorrelationMatricesBundle(n)
point = bundle.base.random_point(n_samples)
random_data.append(dict(n=n, point=point))
return self.generate_tests([], random_data)
def lift_riemannian_submersion_composition_test_data(self):
random_data = []
for n, n_samples in zip(self.n_list, self.n_samples_list):
bundle = CorrelationMatricesBundle(n)
point = bundle.base.random_point(n_samples)
random_data.append(dict(n=n, point=point))
return self.generate_tests([], random_data)
def tangent_riemannian_submersion_test_data(self):
random_data = []
for n, n_samples in zip(self.n_list, self.n_samples_list):
bundle = CorrelationMatricesBundle(n)
mat = bundle.random_point()
point = bundle.riemannian_submersion(mat)
vec = bundle.random_point(n_samples)
random_data.append(dict(n=n, vec=vec, point=point))
return self.generate_tests([], random_data)
def vertical_projection_tangent_submersion_test_data(self):
random_data = []
for n in self.n_list:
bundle = CorrelationMatricesBundle(n)
mat = bundle.random_point(2)
vec = SymmetricMatrices(n).random_point(2)
random_data.append(dict(n=n, vec=vec, mat=mat))
return self.generate_tests([], random_data)
def horizontal_projection_test_data(self):
random_data = []
for n in self.n_list:
bundle = CorrelationMatricesBundle(n)
mat = bundle.random_point()
vec = bundle.random_point()
random_data.append(dict(n=n, vec=vec, mat=mat))
return self.generate_tests([], random_data)
def horizontal_lift_is_horizontal_test_data(self):
random_data = []
for n, n_samples in zip(self.n_list, self.n_samples_list):
bundle = CorrelationMatricesBundle(n)
mat = bundle.base.random_point()
vec = bundle.base.random_point(n_samples)
tangent_vec = bundle.base.to_tangent(vec, mat)
random_data.append(dict(n=n, tangent_vec=tangent_vec, mat=mat))
return self.generate_tests([], random_data)
def vertical_projection_is_vertical_test_data(self):
random_data = []
for n, n_samples in zip(self.n_list, self.n_samples_list):
bundle = CorrelationMatricesBundle(n)
mat = bundle.random_point()
vec = bundle.random_point(n_samples)
tangent_vec = bundle.base.to_tangent(vec, mat)
random_data.append(dict(n=n, tangent_vec=tangent_vec, mat=mat))
return self.generate_tests([], random_data)
def horizontal_lift_and_tangent_riemannian_submersion_test_data(self):
random_data = []
for n, n_samples in zip(self.n_list, self.n_samples_list):
bundle = CorrelationMatricesBundle(n)
mat = bundle.base.random_point()
vec = bundle.base.random_point(n_samples)
tangent_vec = bundle.base.to_tangent(vec, mat)
random_data.append(dict(n=n, tangent_vec=tangent_vec, mat=mat))
return self.generate_tests([], random_data)
def log_after_align_is_horizontal_test_data(self):
n_list = [2, 3]
random_data = []
for n in n_list:
bundle = CorrelationMatricesBundle(n)
point = bundle.random_point(2)
random_data.append(dict(n=n, point_a=point[0], point_b=point[1]))
return self.generate_tests([], random_data)
class FullRankcorrelationAffineQuotientMetricTestData(TestData):
def exp_log_composition_test_data(self):
bundle = CorrelationMatricesBundle(3)
point = bundle.riemannian_submersion(bundle.random_point(2))
random_data = [dict(dim=3, point=point)]
return self.generate_tests([], random_data)
def exp_belongs_test_data(self):
bundle = CorrelationMatricesBundle(3)
base_point = bundle.base.random_point()
tangent_vec = bundle.base.to_tangent(bundle.random_point(), base_point)
smoke_data = [dict(dim=3, tangent_vec=tangent_vec, base_point=base_point)]
return self.generate_tests(smoke_data)
``` |
{
"source": "Jonas-Luetolf/System-Monitor",
"score": 2
} |
#### File: System-Monitor/src/backend.py
```python
import os
import yaml
import psutil
import platform
import socket
DEFAULTCONFIG="""
update_time: 2
loop_objects: ["general","cpu","ram","disks"]"""
class InvalidSettings(Exception):
def __init__(self) -> None:
super().__init__()
def __str__(self) -> str:
return "InvalidSettings: config file and backup file are invalid"
class SettingsHandler:
def __init__(self,config_path:str)->None:
self.config_path=config_path
def get_settings(self)->dict:
load_data=self.open_config(self.config_path)
        if not self.valid_settings(load_data):
            # config file is missing or invalid: write the default config and reload it
            self.set_backup_config(self.config_path)
            load_data=self.open_config(self.config_path)
return load_data
@staticmethod
def open_config(path:str)->dict:
try:
with open(path,"r") as f:
data=yaml.load(f.read(),Loader=yaml.FullLoader)
load_time=sum(i in ["cpu","ram"] for i in data["loop_objects"])
data.update({"data_load_time":load_time})
        except Exception:
            # unreadable or malformed config -> treat the settings as invalid
            data=None
return data
@staticmethod
def set_backup_config(config_path:str)->None:
try:
            os.mkdir(os.path.dirname(config_path))
except FileExistsError:
pass
        with open(config_path,'w') as f:
            f.write(DEFAULTCONFIG)
def set_settings(self,data:dict)->None:
if self.valid_settings(data):
with open(self.config_path,"w") as f:
f.write(yaml.dump(data))
else:
raise InvalidSettings
@staticmethod
def valid_settings(data:dict)->bool:
try:
#assert os.path.isfile(data["help_text_path"])
assert type(data["update_time"])==int
assert sum(i in ["cpu","ram","general","disks"] for i in data["loop_objects"])==len(data["loop_objects"])
return True
        except Exception:
return False
class Handler:
def __init__(self,config_path:str)->None:
self.settings_handler=SettingsHandler(config_path)
self.cpu_handler=CPU_Data_Handler()
self.ram_handler=RAM_Data_Handler()
self.disks_handler=Disks_Data_Handler()
self.get_general_data()
def get_general_data(self)->None:
self.os=platform.platform()
self.hostname=socket.gethostname()
def get_config(self)->dict:
return self.settings_handler.get_settings()
def set_config_by_file(self,path:str)->None:
with open(path,"r") as f:
data=yaml.load(f.read(), Loader=yaml.FullLoader)
self.settings_handler.set_settings(data)
def get_data(self,objects:list)->dict:
ret={}
if "general" in objects:
ret.update({"general":{"os":self.os,"hostname":self.hostname}})
if "cpu" in objects:
ret.update({"cpu":self.cpu_handler.get_data()})
if "ram" in objects:
ret.update({"ram":self.ram_handler.get_data()})
if "disks" in objects:
ret.update({"disks":self.disks_handler.get_data()})
return ret
def get_help_data(self)->str:
pass
class CPU_Data_Handler:
def __init__(self)->None:
self.num_cores=psutil.cpu_count(logical=True)
self.max_freq=psutil.cpu_freq(percpu=False).max
def get_current_data(self)->dict:
general_usage=psutil.cpu_percent(percpu=False,interval=1)
usage_percpu=psutil.cpu_percent(percpu=True,interval=1)
return {"general_usage":general_usage,"usage_percpu":usage_percpu}
def get_data(self)->dict:
ret={"num_cores":self.num_cores,"max_freq":self.max_freq}
ret.update(self.get_current_data())
return ret
class RAM_Data_Handler:
def __init__(self)->None:
self.virtual_memory=psutil.virtual_memory()
self.total_size=self.virtual_memory.total
def get_current_data(self)->dict:
self.virtual_memory=psutil.virtual_memory()
return {"used":self.virtual_memory.used,"available":self.virtual_memory.available,"usage":self.virtual_memory.percent}
def get_data(self)->dict:
ret={"total":self.total_size}
ret.update(self.get_current_data())
return ret
class Disks_Data_Handler:
def __init__(self)->None:
self.disks=[]
raw_disks_data=psutil.disk_partitions(all=False)
for disk in raw_disks_data:
self.disks.append(Disk_Data_Handler(disk.device,disk.mountpoint,disk.fstype))
def get_data(self)->dict:
ret={}
for i in self.disks:
ret.update({i.device:i.get_data()})
return ret
class Disk_Data_Handler:
def __init__(self,device:str,mountpoint:str,fstype:str)->None:
self.device=device
self.mountpoint=mountpoint
self.fstype=fstype
self.total_size=psutil.disk_usage(self.mountpoint).total
def get_current_data(self)->dict:
usage=psutil.disk_usage(self.mountpoint)
free=usage.free
used=usage.used
return {"free":free,"used":used}
def get_data(self)->dict:
ret={"mountpoint":self.mountpoint,"fstype":self.fstype,"totalsize":self.total_size}
ret.update(self.get_current_data())
return ret
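# Illustrative usage sketch (not part of the original module); the config path
# below is an assumption -- the real path is supplied by the caller/frontend:
if __name__ == "__main__":
    handler = Handler(os.path.expanduser("~/.config/system-monitor/config.yaml"))
    settings = handler.get_config()
    print(handler.get_data(settings["loop_objects"]))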
```
#### File: src/layout/widget.py
```python
import math
from dataclasses import dataclass
@dataclass
class Edge:
LEFTTOP="╔"
RIGHTTOP="╗"
LEFTBOTTOM="╚"
RIGHTBOTTOM="╝"
BOTTOMTOP="═"
LEFTRIGHT="║"
class Widget:
def __init__(self,name:str)->None:
self.name = name
self.lines=[]
self.edge=Edge()
def clear(self)->None:
self.lines=[]
def __iter__(self):
string=str(self)
ret_list=string.split("\n")
for i in ret_list:
yield i
    def __setitem__(self,index:int,content:str)->None:
        if index>=len(self.lines):
            # pad with empty lines until the requested index exists
            self.lines+=["" for _ in range(index+1-len(self.lines))]
        self.lines[index]=content
    def get_x_len(self)->int:
        # width of the widest line, including the title
        return max(len(i) for i in self.lines+[self.name])
def __str__(self)->str:
x_len=self.get_x_len()
ret=f"{self.edge.LEFTTOP}{self.edge.BOTTOMTOP*math.ceil((x_len-1-len(self.name))/2)}{self.name}{self.edge.BOTTOMTOP*(math.ceil((x_len-len(self.name))/2))}{self.edge.RIGHTTOP}\n"
for i in self.lines:
ret+=f"{self.edge.LEFTRIGHT}{i}{' '*(x_len-len(i))}{self.edge.LEFTRIGHT}\n"
return ret+self.edge.LEFTBOTTOM+self.edge.BOTTOMTOP*(x_len)+self.edge.RIGHTBOTTOM
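# Illustrative usage sketch (assumption: the file is run directly as a demo; this
# block is not part of the original widget module):
if __name__ == "__main__":
    w = Widget("demo")
    w[0] = "cpu: 12%"
    w[1] = "ram: 48%"
    print(w)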
``` |
{
"source": "Jonas-Luetolf/todo-list",
"score": 3
} |
#### File: todo-list/table/column.py
```python
import math
from table.edge import Edge
class Column:
def __init__(self,name:str)->None:
self.name = name
self.lines=[]
def clear(self)->None:
self.lines=[]
def __iter__(self):
string=str(self)
ret_list=string.split("\n")
for i in ret_list:
yield i
    def __setitem__(self,index:int,content:str)->None:
        if index>=len(self.lines):
            # pad with empty lines until the requested index exists
            self.lines+=["" for _ in range(index+1-len(self.lines))]
        self.lines[index]=content
    def get_x_len(self)->int:
        # width of the widest line, including the column name
        return max(len(i) for i in self.lines+[self.name])
def __str__(self)->str:
x_len=self.get_x_len()
ret=f"{Edge.LEFTTOP}{Edge.BOTTOMTOP*math.ceil((x_len-len(self.name))/2)}{self.name}{Edge.BOTTOMTOP*(math.floor((x_len-len(self.name))/2))}{Edge.RIGHTTOP}\n"
for i in self.lines:
ret+=f"{Edge.LEFTRIGHT}{i}{' '*(x_len-len(i))}{Edge.LEFTRIGHT}\n"
return ret+f"{Edge.LEFTBOTTOM}{Edge.BOTTOMTOP*(x_len)}{Edge.RIGHTBOTTOM}"
```
#### File: todo-list/todo_list/listhandler.py
```python
import json
from os import mkdir
from os.path import expanduser, isfile, isdir
from todo_list.task import Task
class TaskNotFound(Exception):
def __init__(self,mes:str="")->None:
super().__init__()
self.mes=mes
    def __str__(self)->str:
return f"TaskNotFound: {self.mes}"
class ListHandler:
def __init__(self,name:str,folder:str)->None:
self.name=name
self.folder=folder
self.open()
def open(self)->None:
if isfile(f"{self.folder}{self.name}.json"):
with open(f"{self.folder}{self.name}.json","r") as f:
self.data = json.load(f)
else:
if not isdir(self.folder):
mkdir(self.folder)
with open(f"{self.folder}{self.name}.json","w") as f:
f.write('{"tasks":{}}')
self.data=json.loads('{"tasks":{}}')
self.tasks=[]
for task in self.data["tasks"]:
self.tasks.append(Task(task,self.data["tasks"][task][0],self.data["tasks"][task][1]))
def get_tasks(self,state:int=None)->list:
if type(state)!=int and state!= None:
raise TypeError
elif state==None:
return self.tasks
else:
tasks_list=[]
for task in self.tasks:
if task.state==state:
tasks_list.append(task)
return tasks_list
def write(self)->None:
task_data={}
for task in self.tasks:
task_data.update(task.get_raw())
raw_str=json.dumps({"name":self.name,"tasks":task_data})
with open(f"{self.folder}{self.name}.json","w") as file:
file.write(raw_str)
def add_task(self,name:str,description:str,state:int)->None:
self.tasks.append(Task(name,description,state))
def delete_task(self,name:str)->None:
for index,task in enumerate(self.tasks):
if task.name == name:
del self.tasks[index]
return None
raise TaskNotFound(f"Task {name} not found")
def change_state(self,name:str,state:int=1)->None:
for task in self.tasks:
if task.name == name:
task.state=state
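# Illustrative usage sketch (assumptions: the todo_list package is importable and
# ~/.todo/ is an acceptable storage folder; not part of the original module):
if __name__ == "__main__":
    todo = ListHandler("inbox", expanduser("~/.todo/"))
    todo.add_task("write docs", "document the list handler", 0)
    todo.change_state("write docs", state=1)
    todo.write()
    print([task.name for task in todo.get_tasks(state=1)])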
``` |
{
"source": "JonasLukasczyk/workbench",
"score": 3
} |
#### File: workbench/cinemasci/Core.py
```python
class Port():
def __init__(self, type, value, parent, isInput = False):
self.type = type
self.parent = parent
self.isInput = isInput
self._listeners = []
self._value = value
def getValue(self):
if isinstance(self._value, Port):
return self._value.getValue()
        return self._value
    def setValue(self, value, update = True):
        # if old value is a port, stop listening for push events
if isinstance(self._value, Port):
self._value._listeners.remove(self)
# replace old value with new value
self._value = value
# if new value is a port listen for push events
if isinstance(self._value, Port):
self._value._listeners.append(self)
# if value of an input port was changed trigger update of outputs
if update and self.isInput:
self.parent.update()
# if value of an output port was changed trigger update of listeners
if update and not self.isInput:
for listener in self._listeners:
listener.parent.update()
class Filter():
def __init__(self):
self.inputs = {}
self.outputs = {}
def addInputPort(self, name, type, value):
self.inputs[name] = Port(type, value, self, True)
def addOutputPort(self, name, type, value):
self.outputs[name] = Port(type, value, self)
def update(self):
print("-> "+type(self).__name__)
```
#### File: workbench/cinemasci/__init__.py
```python
__all__ = ["smoke"]
from .Core import *
from .DatabaseReader import *
from .DatabaseQuery import *
from .ImageReader import *
from .ImageRenderer import *
#
# new factory function
#
# creates new objects for a consistent high level interface
#
def new( vtype, args ):
result = None
if vtype == "cdb":
if "path" in args:
from . import cdb
result = cdb.cdb(args["path"])
else:
print("ERROR: unsupported viewer type: {}".format(vtype))
return result
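# Illustrative usage (assumption: "data/example.cdb" stands in for a real Cinema
# database directory on disk):
#
#   import cinemasci
#   db = cinemasci.new("cdb", {"path": "data/example.cdb"})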
``` |
{
"source": "jonaslu/thatswhatsup",
"score": 2
} |
#### File: c/bytecode/run_bytecode.py
```python
import bytecode_ext as vm
def defineInnerFunction():
codeBlock = vm.VMCodeBlock()
codeBlock.setCode([vm.PUSH_CONST, 0,
vm.PUSH_CONST, 1,
vm.ADD,
vm.RETURN])
localAddIntArgument = vm.VMIntObject()
localAddIntArgument.value = 2
codeBlock.setConstPool([localAddIntArgument])
codeBlock.numberArguments = 1
return codeBlock
def makeOuterFunction(innerCodeBlock):
rootCodeBlock = vm.VMRootCodeBlock()
innerFunctionArgument = vm.VMIntObject()
innerFunctionArgument.value = 2
localAddArgument = vm.VMIntObject()
localAddArgument.value = 1
# PUSH_CONST 0
rootCodeBlock.setCode([vm.PUSH_CONST, 0,
vm.CALL, 1,
vm.PUSH_CONST, 2,
vm.ADD,
vm.PRINT,
vm.RETURN])
rootCodeBlock.setConstPool(
[innerFunctionArgument, innerCodeBlock, localAddArgument])
return rootCodeBlock
innerFunctionCodeBlock = defineInnerFunction()
rootCodeBlock = makeOuterFunction(innerFunctionCodeBlock)
vm.vm_run(rootCodeBlock)
```
#### File: python/mymal/step0_repl.py
```python
import sys
def prn(output):
sys.stdout.write(output)
sys.stdout.flush()
def READ(program):
return program
def EVAL(ast):
return ast
def PRINT(result):
prn(result)
def rep(program):
ast = READ(program)
result = EVAL(ast)
PRINT(result)
if __name__ == "__main__":
while True:
prn("user> ")
program = sys.stdin.readline()
if program:
rep(program)
else:
print("\nBye!")
sys.exit(0)
``` |
{
"source": "jonasluz/mia-cana",
"score": 4
} |
#### File: mia-cana/Playground/merge_sort.py
```python
import sys
import numpy as np
INF = float('Inf')
def merge(a: list, p, q, r, verbose):
"""
    Performs the ordered merge of the two subvectors.
"""
global INF
    ## Split off the partial vectors
#
left, right = a[p:q+1], a[q+1:r+1]
if verbose:
        print('Merging the vectors {} and {}'.
              format(left, right))
    left.append(INF)  # append an infinity sentinel to the end of each vector
    right.append(INF) #
    ## Do the ordered merge
#
i = j = 0
for idx in range(p, r+1):
if (left[i] < right[j]):
a[idx] = left[i]
i+=1
else:
a[idx] = right[j]
j+=1
if verbose:
        print('Merge result: {}'.format(a[p:r+1]))
def mergeSort(values: list, p=None, r=None, verbose=False):
"""
    Recursive routine of the merge-sort technique.
"""
    ## Set default values for the sorting bounds.
#
    if p == None: # no list bounds given - take the whole list.
assert r == None
        p, r = 0, len(values)-1 # mark the bounds of the full list.
if verbose:
        print('Sorting the list {}'.format(values[p:r+1]))
    ## Recursive calls of the sort.
#
if p < r:
        q = int((p+r)/2) # compute the midpoint of the list.
if verbose:
            print('Midpoint between {} and {} computed: {}'.
                  format(p, r, q))
        mergeSort(values, p, q, verbose) # sort the left half.
        mergeSort(values, q+1, r, verbose) # sort the right half.
        merge(values, p, q, r, verbose) # do the merge.
return(values)
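# Illustrative test call, analogous to the test routines in the other sorting
# scripts of this folder:
if __name__ == "__main__":
    print(mergeSort([5, 2, 4, 7, 1, 3, 2, 6], verbose=True))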
```
#### File: mia-cana/Playground/quick_sort.py
```python
import random
def partition(A, p, r):
"""
    Partitions the vector.
"""
x = A[r]
i = p - 1
    for j in range(p, r): # from p to r-1
if A[j] <= x:
i += 1
            A[i], A[j] = A[j], A[i] # swap values
    A[i+1], A[r] = A[r], A[i+1] # swap values
return i + 1
def randomPartition(A, p, r):
"""
    Randomized version of the partitioning.
"""
i = random.randint(p, r)
A[r], A[i] = A[i], A[r]
return partition(A, p, r)
def quickSort(A, p=0, r=None):
"""
    Sorts the subvector of A from p to r.
"""
if r == None:
r = len(A) - 1
if p < r:
q = partition(A, p, r)
quickSort(A, p, q-1)
quickSort(A, q+1, r)
def randomQuickSort(A, p=0, r=None):
"""
    Randomized version of quicksort.
"""
if r == None:
r = len(A) - 1
if p < r:
q = randomPartition(A, p, r)
randomQuickSort(A, p, q-1)
randomQuickSort(A, q+1, r)
"""
Test routines
"""
test = [2, 8, 7, 1, 3, 5, 6, 4]
#quickSort(test)
randomQuickSort(test)
print(test)
```
#### File: mia-cana/Playground/selection_sort.py
```python
def selectionSort(values: list) -> list:
"""
    Selection sort, searching for the minimum.
"""
for i in range(0, len(values)):
k = i
for j in range(i+1, len(values)):
if values[j] < values[k]:
k = j
values[i], values[k] = values[k], values[i]
return values
def selectionSort2(values: list) -> list:
"""
    Selection sort, searching for both the minimum and the maximum.
"""
for i in range(0, int(len(values)/2)):
limit = len(values)-1-i
kmin, kmax = i, limit
if values[kmin] > values[kmax]:
values[kmin], values[kmax] = values[kmax], values[kmin]
for j in range(i+1, limit):
if values[j] < values[kmin]:
kmin = j
elif values[j] > values[kmax]:
kmax = j
values[i], values[kmin] = values[kmin], values[i]
values[limit], values[kmax] = values[kmax], values[limit]
return values
# Test
#print(selectionSort2([7, 8, 2, 6, 5, 4, 1, 2, 3, 7, 9]))
``` |
{
"source": "jonasluz/mia-cg",
"score": 3
} |
#### File: mia-cg/Playground/common.py
```python
from typing import List
Vector = List[float]
import numpy as np
def dcos(v: Vector, verbose=False):
"""
    Computes the direction cosines of the vector v.
    For each component c1, c2, ... cn of the vector v, the direction cosine is given by:
    dcos_k = ck / ||v||
"""
result = []
    norm = np.linalg.norm(v) # norm of the given vector ||v||
if verbose:
print("Norm of vector {} is {}".format(v, norm))
for component in v:
result.append(component / norm)
return result
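# Worked example (values rounded): for v = [3, 6], ||v|| = sqrt(45) ~ 6.708, so
# dcos([3, 6]) ~ [0.4472, 0.8944]; this is the vector used in exercises.q2().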
def energy(photons=1):
"""
    Energy emitted by the indicated number of photons.
"""
    # Planck constant
h = 6.6e-34 # J s
# Speed of light
c = 3e8 # m/s
# Photon wavelength
photon_wl = 650 # nm
# Nanometer factor
nm = 1e-9
    # Photon energy: E = h*c / wavelength (wavelength converted to meters below)
sigma = photon_wl * nm
e = (h * c) / sigma
return(photons * e)
```
#### File: mia-cg/Playground/exercises.py
```python
import common as c
def q2():
"""
    Question 2
    """
    print ("Direction cosines:")
print (c.dcos([3, 6], True))
``` |
{
"source": "jonasluz/mia-estatistica",
"score": 2
} |
#### File: mia-estatistica/Exercicios/main.py
```python
import exerc01 as ex01
## Main routine
#
def main():
"""
    Main routine
"""
ex01.q01()
ex01.q02()
ex01.q03()
ex01.q04()
ex01.q05()
ex01.q06()
if __name__ == "__main__":
main()
``` |
{
"source": "jonasmalm/lakritsrot",
"score": 2
} |
#### File: lakritsrot/server/__init__.py
```python
from flask import Flask
app = Flask(__name__, static_folder='../client', static_url_path='/')
#app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///database.db'
#app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
from server.optimize import bp as optimize_bp
app.register_blueprint(optimize_bp, url_prefix='/optimize')
@app.route("/")
def client():
return app.send_static_file("birka.html")
``` |
{
"source": "JonasMarma/PyAudio-Real-Time-Filter",
"score": 3
} |
#### File: JonasMarma/PyAudio-Real-Time-Filter/filter.py
```python
import pyaudio
import _thread
import os
from scipy.signal import butter, lfilter
import numpy as np
FORMAT = pyaudio.paFloat32
CHANNELS = 1
RATE = 8000
CHUNK = 1024
RECORD_SECONDS = 30
p = pyaudio.PyAudio()
# Menu thread
class menu(object):
def __init__(self):
self.audioON = False
self.filterON = False
self.finished = False
def selectMenu(self):
while(True):
os.system("clear")
os.system("cls")
print(f'AUDIO OUTPUT: {self.audioON}')
print(f'FILTERING: {self.filterON}')
print("\nEnter a command:\n<A> Toggle audio output\n<F> Toggle filtering\n<Q> Exit\n")
sel = input('command: ')
if sel.lower() == 'a':
self.audioON = not self.audioON
elif sel.lower() == 'f':
self.filterON = not self.filterON
elif sel.lower() == 'q':
self.finished = True
else:
pass
if self.finished:
break
# Start an output stream on specified output_device_id
def start_out_stream(outDevice):
    stream = p.open(format=FORMAT, channels=CHANNELS, rate=RATE, output_device_index=outDevice, output=True)
    return stream
# Start an input stream on specified input_device_id
def start_input_stream(inDevice):
    stream = p.open(format=FORMAT, channels=CHANNELS, rate=RATE, input_device_index=inDevice, input=True)
return stream
# Make a list of the connected audio devices
def list_devices():
info = p.get_host_api_info_by_index(0)
num_devices = info.get('deviceCount')
print('The following audio devices were found:')
print('INPUT')
for i in range(0, num_devices):
if (p.get_device_info_by_host_api_device_index(0, i).get('maxInputChannels')) > 0:
print("ID: ", i, " : ", p.get_device_info_by_host_api_device_index(0, i).get('name'))
print('OUTPUT')
for i in range(0, num_devices):
if (p.get_device_info_by_host_api_device_index(0, i).get('maxOutputChannels')) > 0:
print("ID: ", i, " : ", p.get_device_info_by_host_api_device_index(0, i).get('name'))
# Butterworth bandstop filter
def butter_bandstop(lowcut, highcut, fs, order=5):
nyq = 0.5 * fs
low = lowcut / nyq
high = highcut / nyq
b, a = butter(order, [low, high], btype='bandstop')
return b, a
def butter_bandstop_filter(data, lowcut, highcut, fs, order=5):
b, a = butter_bandstop(lowcut, highcut, fs, order=order)
y = lfilter(b, a, data)
return y
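# Note: the main loop below applies this as a 500-2000 Hz band-stop at RATE = 8000 Hz,
# i.e. frequencies between 500 Hz and 2000 Hz are attenuated while the rest passes.
# Offline usage sketch (illustrative, not executed here):
#   filtered = butter_bandstop_filter(samples, 500, 2000, 8000)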
list_devices()
input_ID = int(input("Select an input device ID:\n"))
output_ID = int(input("Select an output device ID:\n"))
# Start menu thread
menu = menu()
_thread.start_new_thread(menu.selectMenu,())
# Initialize input stream
in_stream = start_input_stream(input_ID)
# Initialize output stream
out_stream = start_out_stream(output_ID)
while(True):
# Read a chunk of data from input
data = in_stream.read(CHUNK)
# If output stream is enabled, write on output
if menu.audioON:
# If filter is enabled, filter the signal before writing
if menu.filterON:
# Decode input signal
decoded = np.frombuffer(data, 'float32')
# Process input signal
            filtered_signal = butter_bandstop_filter(decoded, 500, 2000, RATE)
# Encode the signal again and write on output stream
out = np.array(filtered_signal, dtype='<f4').tobytes()
out_stream.write(out)
else:
# Write signal without processing
out_stream.write(data)
if menu.finished:
break
print("END")
# Close streams
out_stream.stop_stream()
out_stream.close()
in_stream.stop_stream()
in_stream.close()
p.terminate()
``` |
{
"source": "Jonas-Meier/FrustratinglySimpleFsDet",
"score": 3
} |
#### File: FrustratinglySimpleFsDet/datasets/prepare_coco_few_shot.py
```python
import argparse
import json
import os
import random
import time
import sys
sys.path.append('..') # TODO: ugly but works for now
print("Path: {}".format(sys.path))
from class_splits import CLASS_SPLITS
from fsdet.config import get_cfg
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--dataset", type=str, choices=["coco", "isaid"], required=True,
help="Dataset name")
parser.add_argument("--class-split", type=str, required=True, dest="class_split",
help="Split of classes into base classes and novel classes")
parser.add_argument("--shots", type=int, nargs="+", default=[1, 2, 3, 5, 10, 30],
help="Amount of annotations per class for fine tuning")
parser.add_argument("--seeds", type=int, nargs="+", default=[0, 9],
help="Range of seeds to run. Just a single seed or two seeds representing a range with 2nd "
"argument being inclusive as well!")
parser.add_argument("--no-shuffle", action="store_false", default=True, dest="shuffle",
help="Shuffle images prior to sampling of annotations.")
args = parser.parse_args()
return args
def get_data_path(): # get path to training data annotations
# probably use cfg.DATA_DIR[args.dataset] if necessary
if args.dataset == "coco":
return os.path.join(cfg.ROOT_DIR, cfg.TRAIN_ANNOS['coco'])
elif args.dataset == "isaid":
return os.path.join(cfg.ROOT_DIR, cfg.TRAIN_ANNOS['isaid'])
else:
raise ValueError("Dataset {} is not supported!".format(args.dataset))
def generate_seeds(args):
start = time.time()
data_path = get_data_path()
data = json.load(open(data_path))
new_all_cats = [] # category "objects"
for cat in data['categories']:
new_all_cats.append(cat)
id2img = {}
for i in data['images']:
id2img[i['id']] = i
# same but shorter: id2img = {i['id']: i for i in data['images']}
# tuples of category names
# TODO: base- and novel classes do not matter when sampling few-shot data, but may be important when saving them!
base_classes = tuple(CLASS_SPLITS[args.dataset][args.class_split]['base'])
novel_classes = tuple(CLASS_SPLITS[args.dataset][args.class_split]['novel'])
all_classes = tuple(base_classes + novel_classes)
coco_cat_id_to_name = {c['id']: c['name'] for c in new_all_cats}
# Need make sure, 'all_classes' are all contained in 'coco_cat_id_to_name'
assert len(all_classes) <= len(coco_cat_id_to_name) \
and len(set(all_classes + tuple(coco_cat_id_to_name.values()))) == len(coco_cat_id_to_name), \
"Error, inconsistency with categories defined in the dataset and in the class split: {} and {}".\
format(coco_cat_id_to_name.values(), all_classes)
cat_name_to_annos = {i: [] for i in all_classes}
for anno in data['annotations']:
if anno['iscrowd'] == 1:
continue
cat_name = coco_cat_id_to_name[anno['category_id']]
if cat_name not in cat_name_to_annos: # if base and novel classes do not sum up to all classes in the dataset
continue
else:
cat_name_to_annos[cat_name].append(anno)
if len(args.seeds) == 1:
seeds = [args.seeds[0]]
else:
assert len(args.seeds) == 2
seeds = range(args.seeds[0], args.seeds[1] + 1)
for i in seeds:
print("Generating seed {}".format(i))
for cat_name in all_classes:
print("Generating data for class {}".format(cat_name))
img_id_to_annos = {}
for anno in cat_name_to_annos[cat_name]:
if anno['image_id'] in img_id_to_annos:
img_id_to_annos[anno['image_id']].append(anno)
else:
img_id_to_annos[anno['image_id']] = [anno]
for shots in args.shots:
sample_annos = [] # annotations
sample_imgs = [] # images
sample_img_ids = [] # ids of sampled images, just used for duplicate checks
if cat_name in base_classes:
assert cat_name not in novel_classes
if cfg.BASE_SHOT_MULTIPLIER == -1:
target_shots = len(cat_name_to_annos[cat_name]) # should be all available annos
print("Using all available {} annotations for base class {}!"
.format(target_shots, cat_name))
else:
assert cfg.BASE_SHOT_MULTIPLIER > 0
target_shots = cfg.BASE_SHOT_MULTIPLIER * shots
print("Generating {}x{} shot data for base class {}"
.format(cfg.BASE_SHOT_MULTIPLIER, shots, cat_name))
else:
assert cat_name in novel_classes
target_shots = shots
print("Generating {} shot data for novel class {}"
.format(shots, cat_name))
img_ids = list(img_id_to_annos.keys())
# while True:
# img_ids = random.sample(list(img_id_to_annos.keys()), shots)
# TODO: probably use random.sample(img_ids, 1) in a 'while True'-loop?
if args.shuffle:
shuffle_seed = i # Same order for same seeds, but should not matter...
random.seed(shuffle_seed)
print("shuffling images")
random.shuffle(img_ids)
else:
print("not shuffling images prior to sampling!")
for img_id in img_ids:
if img_id in sample_img_ids: # only necessary if we iterate multiple times through all images
continue
if len(img_id_to_annos[img_id]) + len(sample_annos) > target_shots:
# TODO: This condition may lead to following:
# 1. For k=5 shots and if each image had exactly 2 annotations per class we finally only
# have four annotations for that class -> probably too few annotations
# 2. In contrast to other approaches, they allow for taking multiple annotations from the
# same image (even more: they only want ALL annotations from an image (for a certain class)
# or none at all) (as support data) -> unknown consequences
continue
sample_annos.extend(img_id_to_annos[img_id]) # add all annotations of image with id 'img_id' with class 'c'
sample_imgs.append(id2img[img_id]) # add the image with id 'img_id'
sample_img_ids.append(img_id)
assert len(sample_imgs) <= len(sample_annos), \
"Error, got {} images but only {} annotations!".format(len(sample_imgs), len(sample_annos))
if len(sample_annos) == target_shots:
break
# TODO: Probably convert assertion to a warning.
assert len(sample_annos) == target_shots, "Wanted {} shots, but only found {} annotations!"\
.format(target_shots, len(sample_annos))
new_data = data.copy()
new_data['images'] = sample_imgs
new_data['annotations'] = sample_annos
new_data['categories'] = new_all_cats
# Note: even if we sample more annotations for base classes we use the original 'shots' in the file
# name for clarity!
save_path = get_save_path_seeds(data_path, cat_name, shots, i)
with open(save_path, 'w') as f:
# json.dump(new_data, f)
json.dump(new_data, f, indent=2) # Easier to check files manually
end = time.time()
m, s = divmod(int(end-start), 60)
print("Created few-shot data for {} shots and {} seeds in {}m {}s"
.format(len(args.shots), len(seeds), m, s))
def get_save_path_seeds(path, cls, shots, seed):
s = path.split('/')
train_name = cfg.TRAIN_SPLIT[args.dataset]
prefix = 'full_box_{}shot_{}_{}'.format(shots, cls, train_name)
save_dir = os.path.join(cfg.DATA_SAVE_PATH_PATTERN[args.dataset].format(args.class_split), 'seed{}'.format(seed))
os.makedirs(save_dir, exist_ok=True)
save_path = os.path.join(save_dir, prefix + '.json')
return save_path
if __name__ == '__main__':
args = parse_args()
print('Called with args:')
print(args)
cfg = get_cfg() # get default config to obtain the correct load- and save paths for the created data
generate_seeds(args)
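# Example invocation (illustrative; the class-split name must match an entry in
# class_splits.CLASS_SPLITS for the chosen dataset):
#   python prepare_coco_few_shot.py --dataset coco --class-split <split_name> --shots 10 30 --seeds 0 9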
```
#### File: modeling/meta_arch/rcnn.py
```python
import torch
from torch import nn
from fsdet.modeling.roi_heads import build_roi_heads
import logging
from detectron2.modeling.backbone import build_backbone
from detectron2.modeling.postprocessing import detector_postprocess
from detectron2.modeling.proposal_generator import build_proposal_generator
from detectron2.structures import ImageList
from detectron2.utils.logger import log_first_n
# avoid conflicting with the existing GeneralizedRCNN module in Detectron2
from .build import META_ARCH_REGISTRY
__all__ = ["GeneralizedRCNN", "ProposalNetwork"]
@META_ARCH_REGISTRY.register()
class GeneralizedRCNN(nn.Module):
"""
Generalized R-CNN. Any models that contains the following three components:
1. Per-image feature extraction (aka backbone)
2. Region proposal generation
3. Per-region feature extraction and prediction
"""
def __init__(self, cfg):
super().__init__()
self.device = torch.device(cfg.MODEL.DEVICE)
self.backbone = build_backbone(cfg)
self.proposal_generator = build_proposal_generator(
cfg, self.backbone.output_shape()
)
self.roi_heads = build_roi_heads(cfg, self.backbone.output_shape())
assert len(cfg.MODEL.PIXEL_MEAN) == len(cfg.MODEL.PIXEL_STD)
num_channels = len(cfg.MODEL.PIXEL_MEAN)
pixel_mean = (
torch.Tensor(cfg.MODEL.PIXEL_MEAN)
.to(self.device)
.view(num_channels, 1, 1)
)
pixel_std = (
torch.Tensor(cfg.MODEL.PIXEL_STD)
.to(self.device)
.view(num_channels, 1, 1)
)
self.normalizer = lambda x: (x - pixel_mean) / pixel_std
self.to(self.device)
if cfg.MODEL.BACKBONE.FREEZE:
for p in self.backbone.parameters():
p.requires_grad = False
print("froze backbone parameters")
if cfg.MODEL.PROPOSAL_GENERATOR.FREEZE:
for p in self.proposal_generator.parameters():
p.requires_grad = False
print("froze proposal generator parameters")
if cfg.MODEL.ROI_HEADS.FREEZE_FEAT:
# keep this case for backwards-compatibility:
# In old version, 'ROI_BOX_HEAD.FREEZE_*'-configs did not exist:
# base-training leaves all freeze configs at default values, which is no freezing at all
# fine-tuning always uses MODEL.ROI_HEADS.FREEZE_FEAT=True which leads to freezing of all
# roi box heads parameters
# In new version, 'ROI_HEADS.FREEZE_FEAT' is never used:
# base-training also leaves freeze configs at default values which defaults to no freezing
# fine-tuning sets 'ROI_BOX_HEAD'-configs and leaves 'ROI_HEADS.FREEZE_FEAT' at False
for p in self.roi_heads.box_head.parameters():
p.requires_grad = False
print("froze roi_box_head parameters")
elif cfg.MODEL.ROI_HEADS.NAME == 'StandardROIDoubleHeads' and \
cfg.MODEL.ROI_BOX_HEAD.NAME == 'FastRCNNConvFCMultiHead':
# Custom freezing options for fine-tuning of a model with two heads (first head for base-classes and second
# head for novel classes), where the first head and its fc layers will be completely frozen (even
# classification and bbox regression!) and for the second head, the last fc-layer will remain unfrozen.
# This setting should allow for maintaining the base class performance while allowing the novel classes to
# be learned well.
# freeze all 'conv', 'fc1' and 'fc2:1' parameters of the box head
for k, v in self.roi_heads.box_head.named_modules():
# We hard-code freezing of 'fc1' and 'fc2:1' because the class 'StandardROIDoubleHeads' ensures that
# we have exactly two heads and that we split the head always at index 2!
if ('conv' in k) or ('fc1' in k) or ('fc2:1' in k):
for p in v.parameters():
p.requires_grad = False
print("Froze parameters of roi_box_head {} module".format(k))
# additionally freeze the first predictor completely!
for k, v in self.roi_heads.box_predictors[0].named_modules():
# We explicitly name the modules for more informative messages
if (k == 'cls_score') or (k == 'bbox_pred'):
for p in v.parameters():
p.requires_grad = False
print("Froze parameters of roi_box_predictor_1 {} module".format(k))
else:
# Freeze ROI BBOX Head Parameters
name_to_module = {k: v for k, v in self.roi_heads.box_head.named_modules()}
# could also use self.roi_heads.box_head.conv_norm_relus but we think of this solution as being more secure
for conv_id in cfg.MODEL.ROI_BOX_HEAD.FREEZE_CONVS:
assert 0 < conv_id <= len(cfg.MODEL.ROI_BOX_HEAD.FREEZE_CONVS)
assert len(cfg.MODEL.ROI_BOX_HEAD.FREEZE_CONVS) <= cfg.MODEL.ROI_BOX_HEAD.NUM_CONV
conv_name = 'conv{}'.format(conv_id)
assert conv_name in name_to_module
for p in name_to_module[conv_name].parameters():
p.requires_grad = False
print("froze roi_box_head {} parameters".format(conv_name))
for fc_id in cfg.MODEL.ROI_BOX_HEAD.FREEZE_FCS:
assert 0 < fc_id <= len(cfg.MODEL.ROI_BOX_HEAD.FREEZE_FCS)
assert len(cfg.MODEL.ROI_BOX_HEAD.FREEZE_FCS) <= cfg.MODEL.ROI_BOX_HEAD.NUM_FC
fc_name = 'fc{}'.format(fc_id)
assert fc_name in name_to_module
for p in name_to_module[fc_name].parameters():
p.requires_grad = False
print("froze roi_box_head {} parameters".format(fc_name))
def forward(self, batched_inputs):
"""
Args:
batched_inputs: a list, batched outputs of :class:`DatasetMapper` .
Each item in the list contains the inputs for one image.
For now, each item in the list is a dict that contains:
* image: Tensor, image in (C, H, W) format.
* instances (optional): groundtruth :class:`Instances`
* proposals (optional): :class:`Instances`, precomputed proposals.
Other information that's included in the original dicts, such as:
* "height", "width" (int): the output resolution of the model, used in inference.
See :meth:`postprocess` for details.
Returns:
list[dict]:
Each dict is the output for one input image.
The dict contains one key "instances" whose value is a :class:`Instances`.
The :class:`Instances` object has the following keys:
"pred_boxes", "pred_classes", "scores"
"""
if not self.training:
return self.inference(batched_inputs)
images = self.preprocess_image(batched_inputs)
if "instances" in batched_inputs[0]:
gt_instances = [
x["instances"].to(self.device) for x in batched_inputs
]
elif "targets" in batched_inputs[0]:
log_first_n(
logging.WARN,
"'targets' in the model inputs is now renamed to 'instances'!",
n=10,
)
gt_instances = [
x["targets"].to(self.device) for x in batched_inputs
]
else:
gt_instances = None
features = self.backbone(images.tensor)
if self.proposal_generator:
proposals, proposal_losses = self.proposal_generator(
images, features, gt_instances
)
else:
assert "proposals" in batched_inputs[0]
proposals = [
x["proposals"].to(self.device) for x in batched_inputs
]
proposal_losses = {}
_, detector_losses = self.roi_heads(
images, features, proposals, gt_instances
)
losses = {}
losses.update(detector_losses)
losses.update(proposal_losses)
return losses
def inference(
self, batched_inputs, detected_instances=None, do_postprocess=True
):
"""
Run inference on the given inputs.
Args:
batched_inputs (list[dict]): same as in :meth:`forward`
detected_instances (None or list[Instances]): if not None, it
contains an `Instances` object per image. The `Instances`
object contains "pred_boxes" and "pred_classes" which are
known boxes in the image.
The inference will then skip the detection of bounding boxes,
and only predict other per-ROI outputs.
do_postprocess (bool): whether to apply post-processing on the outputs.
Returns:
same as in :meth:`forward`.
"""
assert not self.training
images = self.preprocess_image(batched_inputs)
features = self.backbone(images.tensor)
if detected_instances is None:
if self.proposal_generator:
proposals, _ = self.proposal_generator(images, features, None)
else:
assert "proposals" in batched_inputs[0]
proposals = [
x["proposals"].to(self.device) for x in batched_inputs
]
results, _ = self.roi_heads(images, features, proposals, None)
else:
detected_instances = [
x.to(self.device) for x in detected_instances
]
results = self.roi_heads.forward_with_given_boxes(
features, detected_instances
)
if do_postprocess:
processed_results = []
for results_per_image, input_per_image, image_size in zip(
results, batched_inputs, images.image_sizes
):
height = input_per_image.get("height", image_size[0])
width = input_per_image.get("width", image_size[1])
r = detector_postprocess(results_per_image, height, width)
processed_results.append({"instances": r})
return processed_results
else:
return results
def preprocess_image(self, batched_inputs):
"""
Normalize, pad and batch the input images.
"""
images = [x["image"].to(self.device) for x in batched_inputs]
images = [self.normalizer(x) for x in images]
images = ImageList.from_tensors(
images, self.backbone.size_divisibility
)
return images
@META_ARCH_REGISTRY.register()
class ProposalNetwork(nn.Module):
def __init__(self, cfg):
super().__init__()
self.device = torch.device(cfg.MODEL.DEVICE)
self.backbone = build_backbone(cfg)
self.proposal_generator = build_proposal_generator(
cfg, self.backbone.output_shape()
)
pixel_mean = (
torch.Tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(-1, 1, 1)
)
pixel_std = (
torch.Tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(-1, 1, 1)
)
self.normalizer = lambda x: (x - pixel_mean) / pixel_std
self.to(self.device)
def forward(self, batched_inputs):
"""
Args:
Same as in :class:`GeneralizedRCNN.forward`
Returns:
list[dict]: Each dict is the output for one input image.
The dict contains one key "proposals" whose value is a
:class:`Instances` with keys "proposal_boxes" and "objectness_logits".
"""
images = [x["image"].to(self.device) for x in batched_inputs]
images = [self.normalizer(x) for x in images]
images = ImageList.from_tensors(
images, self.backbone.size_divisibility
)
features = self.backbone(images.tensor)
if "instances" in batched_inputs[0]:
gt_instances = [
x["instances"].to(self.device) for x in batched_inputs
]
elif "targets" in batched_inputs[0]:
log_first_n(
logging.WARN,
"'targets' in the model inputs is now renamed to 'instances'!",
n=10,
)
gt_instances = [
x["targets"].to(self.device) for x in batched_inputs
]
else:
gt_instances = None
proposals, proposal_losses = self.proposal_generator(
images, features, gt_instances
)
# In training, the proposals are not useful at all but we generate them anyway.
# This makes RPN-only models about 5% slower.
if self.training:
return proposal_losses
processed_results = []
for results_per_image, input_per_image, image_size in zip(
proposals, batched_inputs, images.image_sizes
):
height = input_per_image.get("height", image_size[0])
width = input_per_image.get("width", image_size[1])
r = detector_postprocess(results_per_image, height, width)
processed_results.append({"proposals": r})
return processed_results
```
#### File: modeling/roi_heads/box_head.py
```python
import numpy as np
import fvcore.nn.weight_init as weight_init
import torch
from torch import nn
from torch.nn import functional as F
from detectron2.layers import Conv2d, ShapeSpec, get_norm
from detectron2.utils.registry import Registry
ROI_BOX_HEAD_REGISTRY = Registry("ROI_BOX_HEAD")
ROI_BOX_HEAD_REGISTRY.__doc__ = """
Registry for box heads, which make box predictions from per-region features.
The registered object will be called with `obj(cfg, input_shape)`.
"""
@ROI_BOX_HEAD_REGISTRY.register()
class FastRCNNConvFCHead(nn.Module):
"""
A head with several 3x3 conv layers (each followed by norm & relu) and
several fc layers (each followed by relu).
"""
def __init__(self, cfg, input_shape: ShapeSpec):
"""
The following attributes are parsed from config:
num_conv, num_fc: the number of conv/fc layers
conv_dim/fc_dim: the dimension of the conv/fc layers
norm: normalization for the conv layers
"""
super().__init__()
# fmt: off
num_conv = cfg.MODEL.ROI_BOX_HEAD.NUM_CONV
conv_dim = cfg.MODEL.ROI_BOX_HEAD.CONV_DIM
num_fc = cfg.MODEL.ROI_BOX_HEAD.NUM_FC
fc_dim = cfg.MODEL.ROI_BOX_HEAD.FC_DIM
norm = cfg.MODEL.ROI_BOX_HEAD.NORM
# fmt: on
assert num_conv + num_fc > 0
self._output_size = (
input_shape.channels,
input_shape.height,
input_shape.width,
)
self.conv_norm_relus = []
for k in range(num_conv):
conv = Conv2d(
self._output_size[0],
conv_dim,
kernel_size=3,
padding=1,
bias=not norm,
norm=get_norm(norm, conv_dim),
activation=F.relu,
)
self.add_module("conv{}".format(k + 1), conv)
self.conv_norm_relus.append(conv)
self._output_size = (
conv_dim,
self._output_size[1],
self._output_size[2],
)
self.fcs = []
for k in range(num_fc):
fc = nn.Linear(np.prod(self._output_size), fc_dim)
self.add_module("fc{}".format(k + 1), fc)
self.fcs.append(fc)
self._output_size = fc_dim
for layer in self.conv_norm_relus:
weight_init.c2_msra_fill(layer)
for layer in self.fcs:
weight_init.c2_xavier_fill(layer)
def forward(self, x):
for layer in self.conv_norm_relus:
x = layer(x)
if len(self.fcs):
if x.dim() > 2:
x = torch.flatten(x, start_dim=1)
for layer in self.fcs:
x = F.relu(layer(x))
return x
@property
def output_size(self):
return self._output_size
@ROI_BOX_HEAD_REGISTRY.register()
class FastRCNNConvFCMultiHead(nn.Module):
"""
A multi-head with several 3x3 conv layers (each followed by norm & relu) and
several fc layers (each followed by relu). Some fc layers may be redundant, depending on split_at_fc and
num_heads.
"""
def __init__(self, cfg, input_shape: ShapeSpec):
"""
We make following assumptions:
1. All conv layers are placed before the first fc layer
2. We don't allow splitting the head at conv layers, for simplicity
Arguments:
cfg:
input_shape:
split_at_fc: id of fc layer to start splitting the head. Ids start by 1. Per default, we use two fc layers,
starting to split at fc2 (which is the second fc layer)
num_heads: number of parallel roi heads
The following attributes are parsed from config:
num_conv, num_fc: the number of conv/fc layers
conv_dim/fc_dim: the dimension of the conv/fc layers
norm: normalization for the conv layers
"""
super().__init__()
# fmt: off
num_conv = cfg.MODEL.ROI_BOX_HEAD.NUM_CONV
conv_dim = cfg.MODEL.ROI_BOX_HEAD.CONV_DIM
num_fc = cfg.MODEL.ROI_BOX_HEAD.NUM_FC
fc_dim = cfg.MODEL.ROI_BOX_HEAD.FC_DIM
norm = cfg.MODEL.ROI_BOX_HEAD.NORM
num_heads = cfg.MODEL.ROI_BOX_HEAD.NUM_HEADS # number of parallel roi heads
# id of fc layer to start splitting the head. Ids start by 1. Per default, we use two fc layers, starting to
# split at fc2 (which is the second fc layer)
self.split_at_fc = cfg.MODEL.ROI_BOX_HEAD.SPLIT_AT_FC
# fmt: on
assert num_heads > 0
assert num_heads == 1 or num_fc > 0 # multi-head without fcs is not allowed!
assert num_fc + num_conv > 0
assert self.split_at_fc <= num_fc
self._output_size = (
input_shape.channels,
input_shape.height,
input_shape.width,
)
self.conv_norm_relus = []
for k in range(num_conv):
conv = Conv2d(
self._output_size[0],
conv_dim,
kernel_size=3,
padding=1,
bias=not norm,
norm=get_norm(norm, conv_dim),
activation=F.relu,
)
self.add_module("conv{}".format(k + 1), conv)
self.conv_norm_relus.append(conv)
self._output_size = (
conv_dim,
self._output_size[1],
self._output_size[2],
)
self.fcs = []
for k in range(1, num_fc + 1):
if k >= self.split_at_fc and num_heads > 1:
tmp_fcs = []
for i in range(1, num_heads + 1):
fc = nn.Linear(np.prod(self._output_size), fc_dim)
self.add_module("fc{}:{}".format(k, i), fc) # '.' is not allowed!
tmp_fcs.append(fc)
self.fcs.append(tmp_fcs)
self._output_size = num_heads * fc_dim
else:
fc = nn.Linear(np.prod(self._output_size), fc_dim)
self.add_module("fc{}".format(k), fc)
self.fcs.append([fc])
self._output_size = fc_dim
for layer in self.conv_norm_relus:
weight_init.c2_msra_fill(layer)
for layers in self.fcs:
for layer in layers:
weight_init.c2_xavier_fill(layer)
def forward(self, x):
for layer in self.conv_norm_relus:
x = layer(x)
if len(self.fcs):
if x.dim() > 2:
x = torch.flatten(x, start_dim=1)
inputs = [x] # inputs for first fc layers.
for layers in self.fcs:
assert len(layers) # need at least one fc layer in the list
if len(layers) > len(inputs):
assert len(layers) % len(inputs) == 0
inputs = (len(layers) // len(inputs)) * inputs
outputs = []
for i, layer in enumerate(layers):
# TODO: sequential forward of parallelisable branch could be slow!
outputs.append(F.relu(layer(inputs[i])))
inputs = outputs
return outputs
else:
return x
@property
def output_size(self):
return self._output_size
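# Naming sketch for FastRCNNConvFCMultiHead (illustrative): with NUM_FC=2,
# SPLIT_AT_FC=2 and NUM_HEADS=2, the registered sub-modules are 'fc1' (shared)
# plus 'fc2:1' and 'fc2:2' (one per head), which is what the 'fc1'/'fc2:1'
# freezing logic in rcnn.py relies on.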
def build_box_head(cfg, input_shape):
"""
Build a box head defined by `cfg.MODEL.ROI_BOX_HEAD.NAME`.
"""
name = cfg.MODEL.ROI_BOX_HEAD.NAME
return ROI_BOX_HEAD_REGISTRY.get(name)(cfg, input_shape)
```
#### File: modeling/roi_heads/roi_heads.py
```python
import copy
import numpy as np
import torch
from torch import nn
import logging
from detectron2.data import MetadataCatalog
from detectron2.layers import ShapeSpec
from detectron2.modeling.backbone.resnet import BottleneckBlock, make_stage
from detectron2.modeling.box_regression import Box2BoxTransform
from detectron2.modeling.matcher import Matcher
from detectron2.modeling.poolers import ROIPooler
from detectron2.modeling.proposal_generator.proposal_utils import add_ground_truth_to_proposals
from detectron2.modeling.sampling import subsample_labels
from detectron2.structures import Boxes, Instances, pairwise_iou
from detectron2.utils.events import get_event_storage
from detectron2.utils.registry import Registry
from typing import Dict, List
from .box_head import build_box_head
from .fast_rcnn import ROI_HEADS_OUTPUT_REGISTRY, FastRCNNOutputLayers, FastRCNNOutputs
ROI_HEADS_REGISTRY = Registry("ROI_HEADS")
ROI_HEADS_REGISTRY.__doc__ = """
Registry for ROI heads in a generalized R-CNN model.
ROIHeads take feature maps and region proposals, and
perform per-region computation.
The registered object will be called with `obj(cfg, input_shape)`.
The call is expected to return an :class:`ROIHeads`.
"""
logger = logging.getLogger(__name__)
def build_roi_heads(cfg, input_shape):
"""
Build ROIHeads defined by `cfg.MODEL.ROI_HEADS.NAME`.
"""
name = cfg.MODEL.ROI_HEADS.NAME
return ROI_HEADS_REGISTRY.get(name)(cfg, input_shape)
def select_foreground_proposals(proposals, bg_label):
"""
Given a list of N Instances (for N images), each containing a `gt_classes` field,
return a list of Instances that contain only instances with `gt_classes != -1 &&
gt_classes != bg_label`.
Args:
proposals (list[Instances]): A list of N Instances, where N is the number of
images in the batch.
bg_label: label index of background class.
Returns:
list[Instances]: N Instances, each contains only the selected foreground instances.
list[Tensor]: N boolean vector, correspond to the selection mask of
each Instances object. True for selected instances.
"""
assert isinstance(proposals, (list, tuple))
assert isinstance(proposals[0], Instances)
assert proposals[0].has("gt_classes")
fg_proposals = []
fg_selection_masks = []
for proposals_per_image in proposals:
gt_classes = proposals_per_image.gt_classes
fg_selection_mask = (gt_classes != -1) & (gt_classes != bg_label)
fg_idxs = fg_selection_mask.nonzero().squeeze(1)
fg_proposals.append(proposals_per_image[fg_idxs])
fg_selection_masks.append(fg_selection_mask)
return fg_proposals, fg_selection_masks
class ROIHeads(torch.nn.Module):
"""
ROIHeads perform all per-region computation in an R-CNN.
It contains logic of cropping the regions, extract per-region features,
and make per-region predictions.
It can have many variants, implemented as subclasses of this class.
"""
def __init__(self, cfg, input_shape: Dict[str, ShapeSpec]):
super(ROIHeads, self).__init__()
# fmt: off
self.batch_size_per_image = cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE
self.positive_sample_fraction = cfg.MODEL.ROI_HEADS.POSITIVE_FRACTION
self.test_score_thresh = cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST
self.test_nms_thresh = cfg.MODEL.ROI_HEADS.NMS_THRESH_TEST
self.test_detections_per_img = cfg.TEST.DETECTIONS_PER_IMAGE
self.in_features = cfg.MODEL.ROI_HEADS.IN_FEATURES
self.num_classes = cfg.MODEL.ROI_HEADS.NUM_CLASSES
self.proposal_append_gt = cfg.MODEL.ROI_HEADS.PROPOSAL_APPEND_GT
self.feature_strides = {k: v.stride for k, v in input_shape.items()}
self.feature_channels = {k: v.channels for k, v in input_shape.items()}
self.cls_agnostic_bbox_reg = cfg.MODEL.ROI_BOX_HEAD.CLS_AGNOSTIC_BBOX_REG
self.smooth_l1_beta = cfg.MODEL.ROI_BOX_HEAD.SMOOTH_L1_BETA
# fmt: on
# Matcher to assign box proposals to gt boxes
self.proposal_matcher = Matcher(
cfg.MODEL.ROI_HEADS.IOU_THRESHOLDS,
cfg.MODEL.ROI_HEADS.IOU_LABELS,
allow_low_quality_matches=False,
)
# Box2BoxTransform for bounding box regression
self.box2box_transform = Box2BoxTransform(
weights=cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_WEIGHTS
)
def _sample_proposals(self, matched_idxs, matched_labels, gt_classes):
"""
Based on the matching between N proposals and M groundtruth,
sample the proposals and set their classification labels.
Args:
matched_idxs (Tensor): a vector of length N, each is the best-matched
gt index in [0, M) for each proposal.
matched_labels (Tensor): a vector of length N, the matcher's label
(one of cfg.MODEL.ROI_HEADS.IOU_LABELS) for each proposal.
gt_classes (Tensor): a vector of length M.
Returns:
Tensor: a vector of indices of sampled proposals. Each is in [0, N).
Tensor: a vector of the same length, the classification label for
each sampled proposal. Each sample is labeled as either a category in
[0, num_classes) or the background (num_classes).
"""
has_gt = gt_classes.numel() > 0
# Get the corresponding GT for each proposal
if has_gt:
gt_classes = gt_classes[matched_idxs]
# Label unmatched proposals (0 label from matcher) as background (label=num_classes)
gt_classes[matched_labels == 0] = self.num_classes
# Label ignore proposals (-1 label)
gt_classes[matched_labels == -1] = -1
else:
gt_classes = torch.zeros_like(matched_idxs) + self.num_classes
sampled_fg_idxs, sampled_bg_idxs = subsample_labels(
gt_classes,
self.batch_size_per_image,
self.positive_sample_fraction,
self.num_classes,
)
sampled_idxs = torch.cat([sampled_fg_idxs, sampled_bg_idxs], dim=0)
return sampled_idxs, gt_classes[sampled_idxs]
@torch.no_grad()
def label_and_sample_proposals(self, proposals, targets):
"""
Prepare some proposals to be used to train the ROI heads.
It performs box matching between `proposals` and `targets`, and assigns
training labels to the proposals.
It returns `self.batch_size_per_image` random samples from proposals and groundtruth boxes,
with a fraction of positives that is no larger than `self.positive_sample_fraction.
Args:
See :meth:`ROIHeads.forward`
Returns:
list[Instances]:
length `N` list of `Instances`s containing the proposals
sampled for training. Each `Instances` has the following fields:
- proposal_boxes: the proposal boxes
- gt_boxes: the ground-truth box that the proposal is assigned to
(this is only meaningful if the proposal has a label > 0; if label = 0
then the ground-truth box is random)
Other fields such as "gt_classes" that's included in `targets`.
"""
gt_boxes = [x.gt_boxes for x in targets]
# Augment proposals with ground-truth boxes.
# In the case of learned proposals (e.g., RPN), when training starts
# the proposals will be low quality due to random initialization.
# It's possible that none of these initial
# proposals have high enough overlap with the gt objects to be used
# as positive examples for the second stage components (box head,
# cls head). Adding the gt boxes to the set of proposals
# ensures that the second stage components will have some positive
# examples from the start of training. For RPN, this augmentation improves
# convergence and empirically improves box AP on COCO by about 0.5
# points (under one tested configuration).
if self.proposal_append_gt:
proposals = add_ground_truth_to_proposals(gt_boxes, proposals)
proposals_with_gt = []
num_fg_samples = []
num_bg_samples = []
for proposals_per_image, targets_per_image in zip(proposals, targets):
has_gt = len(targets_per_image) > 0
match_quality_matrix = pairwise_iou(
targets_per_image.gt_boxes, proposals_per_image.proposal_boxes
)
matched_idxs, matched_labels = self.proposal_matcher(
match_quality_matrix
)
sampled_idxs, gt_classes = self._sample_proposals(
matched_idxs, matched_labels, targets_per_image.gt_classes
)
# Set target attributes of the sampled proposals:
proposals_per_image = proposals_per_image[sampled_idxs]
proposals_per_image.gt_classes = gt_classes
# We index all the attributes of targets that start with "gt_"
# and have not been added to proposals yet (="gt_classes").
if has_gt:
sampled_targets = matched_idxs[sampled_idxs]
# NOTE: here the indexing waste some compute, because heads
# will filter the proposals again (by foreground/background,
# etc), so we essentially index the data twice.
for (
trg_name,
trg_value,
) in targets_per_image.get_fields().items():
if trg_name.startswith(
"gt_"
) and not proposals_per_image.has(trg_name):
proposals_per_image.set(
trg_name, trg_value[sampled_targets]
)
else:
gt_boxes = Boxes(
targets_per_image.gt_boxes.tensor.new_zeros(
(len(sampled_idxs), 4)
)
)
proposals_per_image.gt_boxes = gt_boxes
num_bg_samples.append(
(gt_classes == self.num_classes).sum().item()
)
num_fg_samples.append(gt_classes.numel() - num_bg_samples[-1])
proposals_with_gt.append(proposals_per_image)
# Log the number of fg/bg samples that are selected for training ROI heads
storage = get_event_storage()
storage.put_scalar("roi_head/num_fg_samples", np.mean(num_fg_samples))
storage.put_scalar("roi_head/num_bg_samples", np.mean(num_bg_samples))
return proposals_with_gt
def forward(self, images, features, proposals, targets=None):
"""
Args:
images (ImageList):
features (dict[str: Tensor]): input data as a mapping from feature
map name to tensor. Axis 0 represents the number of images `N` in
the input data; axes 1-3 are channels, height, and width, which may
vary between feature maps (e.g., if a feature pyramid is used).
proposals (list[Instances]): length `N` list of `Instances`s. The i-th
`Instances` contains object proposals for the i-th input image,
with fields "proposal_boxes" and "objectness_logits".
targets (list[Instances], optional): length `N` list of `Instances`s. The i-th
`Instances` contains the ground-truth per-instance annotations
for the i-th input image. Specify `targets` during training only.
It may have the following fields:
- gt_boxes: the bounding box of each instance.
- gt_classes: the label for each instance with a category ranging in [0, #class].
Returns:
results (list[Instances]): length `N` list of `Instances`s containing the
detected instances. Returned during inference only; may be []
during training.
losses (dict[str: Tensor]): mapping from a named loss to a tensor
storing the loss. Used during training only.
"""
raise NotImplementedError()
@ROI_HEADS_REGISTRY.register()
class Res5ROIHeads(ROIHeads):
"""
The ROIHeads in a typical "C4" R-CNN model, where the heads share the
cropping and the per-region feature computation by a Res5 block.
"""
def __init__(self, cfg, input_shape):
super().__init__(cfg, input_shape)
assert len(self.in_features) == 1
# fmt: off
pooler_resolution = cfg.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION
pooler_type = cfg.MODEL.ROI_BOX_HEAD.POOLER_TYPE
pooler_scales = (1.0 / self.feature_strides[self.in_features[0]], )
sampling_ratio = cfg.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO
# fmt: on
assert not cfg.MODEL.KEYPOINT_ON
self.pooler = ROIPooler(
output_size=pooler_resolution,
scales=pooler_scales,
sampling_ratio=sampling_ratio,
pooler_type=pooler_type,
)
self.res5, out_channels = self._build_res5_block(cfg)
output_layer = cfg.MODEL.ROI_HEADS.OUTPUT_LAYER
self.box_predictor = ROI_HEADS_OUTPUT_REGISTRY.get(output_layer)(
cfg, out_channels, self.num_classes, self.cls_agnostic_bbox_reg
)
def _build_res5_block(self, cfg):
# fmt: off
stage_channel_factor = 2 ** 3 # res5 is 8x res2
num_groups = cfg.MODEL.RESNETS.NUM_GROUPS
width_per_group = cfg.MODEL.RESNETS.WIDTH_PER_GROUP
bottleneck_channels = num_groups * width_per_group * stage_channel_factor
out_channels = cfg.MODEL.RESNETS.RES2_OUT_CHANNELS * stage_channel_factor
stride_in_1x1 = cfg.MODEL.RESNETS.STRIDE_IN_1X1
norm = cfg.MODEL.RESNETS.NORM
assert not cfg.MODEL.RESNETS.DEFORM_ON_PER_STAGE[-1], \
"Deformable conv is not yet supported in res5 head."
# fmt: on
blocks = make_stage(
BottleneckBlock,
3,
first_stride=2,
in_channels=out_channels // 2,
bottleneck_channels=bottleneck_channels,
out_channels=out_channels,
num_groups=num_groups,
norm=norm,
stride_in_1x1=stride_in_1x1,
)
return nn.Sequential(*blocks), out_channels
def _shared_roi_transform(self, features, boxes):
x = self.pooler(features, boxes)
return self.res5(x)
def forward(self, images, features, proposals, targets=None):
"""
See :class:`ROIHeads.forward`.
"""
del images
if self.training:
proposals = self.label_and_sample_proposals(proposals, targets)
del targets
proposal_boxes = [x.proposal_boxes for x in proposals]
box_features = self._shared_roi_transform(
[features[f] for f in self.in_features], proposal_boxes
)
feature_pooled = box_features.mean(dim=[2, 3]) # pooled to 1x1
pred_class_logits, pred_proposal_deltas = self.box_predictor(
feature_pooled
)
del feature_pooled
outputs = FastRCNNOutputs(
self.box2box_transform,
pred_class_logits,
pred_proposal_deltas,
proposals,
self.smooth_l1_beta,
)
if self.training:
del features
losses = outputs.losses()
return [], losses
else:
pred_instances, _ = outputs.inference(
self.test_score_thresh,
self.test_nms_thresh,
self.test_detections_per_img,
)
return pred_instances, {}
@ROI_HEADS_REGISTRY.register()
class StandardROIHeads(ROIHeads):
"""
It's "standard" in a sense that there is no ROI transform sharing
or feature sharing between tasks.
The cropped rois go to separate branches directly.
This way, it is easier to make separate abstractions for different branches.
This class is used by most models, such as FPN and C5.
To implement more models, you can subclass it and implement a different
:meth:`forward()` or a head.
"""
def __init__(self, cfg, input_shape):
super(StandardROIHeads, self).__init__(cfg, input_shape)
self._init_box_head(cfg)
def _init_box_head(self, cfg):
# fmt: off
pooler_resolution = cfg.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION
pooler_scales = tuple(1.0 / self.feature_strides[k] for k in self.in_features)
sampling_ratio = cfg.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO
pooler_type = cfg.MODEL.ROI_BOX_HEAD.POOLER_TYPE
# fmt: on
# If StandardROIHeads is applied on multiple feature maps (as in FPN),
# then we share the same predictors and therefore the channel counts must be the same
in_channels = [self.feature_channels[f] for f in self.in_features]
# Check all channel counts are equal
assert len(set(in_channels)) == 1, in_channels
in_channels = in_channels[0]
self.box_pooler = ROIPooler(
output_size=pooler_resolution,
scales=pooler_scales,
sampling_ratio=sampling_ratio,
pooler_type=pooler_type,
)
# Here we split "box head" and "box predictor", which is mainly due to historical reasons.
# They are used together so the "box predictor" layers should be part of the "box head".
# New subclasses of ROIHeads do not need "box predictor"s.
self.box_head = build_box_head(
cfg,
ShapeSpec(
channels=in_channels,
height=pooler_resolution,
width=pooler_resolution,
),
)
output_layer = cfg.MODEL.ROI_HEADS.OUTPUT_LAYER
self.box_predictor = ROI_HEADS_OUTPUT_REGISTRY.get(output_layer)(
cfg,
self.box_head.output_size,
self.num_classes,
self.cls_agnostic_bbox_reg,
)
def forward(self, images, features, proposals, targets=None):
"""
See :class:`ROIHeads.forward`.
"""
del images
if self.training:
proposals = self.label_and_sample_proposals(proposals, targets)
del targets
features_list = [features[f] for f in self.in_features]
if self.training:
losses = self._forward_box(features_list, proposals)
return proposals, losses
else:
pred_instances = self._forward_box(features_list, proposals)
return pred_instances, {}
def _forward_box(self, features, proposals):
"""
Forward logic of the box prediction branch.
Args:
features (list[Tensor]): #level input features for box prediction
proposals (list[Instances]): the per-image object proposals with
their matching ground truth.
Each has fields "proposal_boxes", and "objectness_logits",
"gt_classes", "gt_boxes".
Returns:
In training, a dict of losses.
In inference, a list of `Instances`, the predicted instances.
"""
box_features = self.box_pooler(
features, [x.proposal_boxes for x in proposals]
)
box_features = self.box_head(box_features)
pred_class_logits, pred_proposal_deltas = self.box_predictor(
box_features
)
del box_features
outputs = FastRCNNOutputs(
self.box2box_transform,
pred_class_logits,
pred_proposal_deltas,
proposals,
self.smooth_l1_beta,
)
if self.training:
return outputs.losses()
else:
pred_instances, _ = outputs.inference(
self.test_score_thresh,
self.test_nms_thresh,
self.test_detections_per_img,
)
return pred_instances
@ROI_HEADS_REGISTRY.register()
class StandardROIMultiHeads(StandardROIHeads):
"""
Same as StandardROIHeads but allows for using multiple heads (e.g. different heads for base classes and novel
classes)
"""
def __init__(self, cfg, input_shape):
super(StandardROIMultiHeads, self).__init__(cfg, input_shape)
def _init_box_head(self, cfg):
# fmt: off
self.cpu_device = torch.device("cpu")
self.device = torch.device(cfg.MODEL.DEVICE)
pooler_resolution = cfg.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION
pooler_scales = tuple(1.0 / self.feature_strides[k] for k in self.in_features)
sampling_ratio = cfg.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO
pooler_type = cfg.MODEL.ROI_BOX_HEAD.POOLER_TYPE
self.num_head_classes = cfg.MODEL.ROI_HEADS.MULTIHEAD_NUM_CLASSES # classes per head
self.num_heads = cfg.MODEL.ROI_BOX_HEAD.NUM_HEADS
# Dataset names because we need the appropriate metadata to obtain the correct class indices for each head!
self.train_dataset_name = cfg.DATASETS.TRAIN[0]
self.test_dataset_name = cfg.DATASETS.TEST[0]
# fmt: on
assert self.num_classes == sum(self.num_head_classes)
# If StandardROIHeads is applied on multiple feature maps (as in FPN),
# then we share the same predictors and therefore the channel counts must be the same
in_channels = [self.feature_channels[f] for f in self.in_features]
# Check all channel counts are equal
assert len(set(in_channels)) == 1, in_channels
in_channels = in_channels[0]
self.box_pooler = ROIPooler(
output_size=pooler_resolution,
scales=pooler_scales,
sampling_ratio=sampling_ratio,
pooler_type=pooler_type,
)
# Here we split "box head" and "box predictor", which is mainly due to historical reasons.
# They are used together so the "box predictor" layers should be part of the "box head".
# New subclasses of ROIHeads do not need "box predictor"s.
self.box_head = build_box_head( # TODO: probably force 'FastRCNNConvFCMultiHead'?
cfg,
ShapeSpec(
channels=in_channels,
height=pooler_resolution,
width=pooler_resolution,
),
)
output_layer = cfg.MODEL.ROI_HEADS.OUTPUT_LAYER
self.box_predictors = []
bbox_head_output_size = self.box_head.output_size
if self.num_heads > 1:
bbox_head_output_size //= self.num_heads
for i in range(self.num_heads):
box_predictor = ROI_HEADS_OUTPUT_REGISTRY.get(output_layer)(
cfg,
bbox_head_output_size,
self.num_head_classes[i],
self.cls_agnostic_bbox_reg,
)
self.add_module("box_predictor{}".format(i+1), box_predictor)
self.box_predictors.append(box_predictor)
def _get_ind_mappings(self) -> List[Dict]:
# Target indices range from 0 to 'cfg.MODEL.ROI_HEADS.NUM_CLASSES', but here we need, for each head i:
# a mapping from the old index into the range 0 to 'cfg.MODEL.ROI_HEADS.MULTIHEAD_NUM_CLASSES[i]'
# Expected output: List(dict(int:int)), with one dict per head. Each dict maps the global index of a
# class (as used by a single large head) to the index used by this smaller head
# Note: don't forget (for each head!) to map the background class (last index, not index 0!) to the last index
# of this head's classes! (use self.num_head_classes[i] to access the amount of classes for head i)
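# Hedged sketch (numbers are illustrative, not from any config): with two heads covering 60 base and
# 20 novel classes out of 80 classes in total, the returned list could look like
#   [{0: 0, 1: 1, ..., 59: 59, 80: 60},    # head 1: base classes -> 0..59, background 80 -> 60
#    {60: 0, 61: 1, ..., 79: 19, 80: 20}]  # head 2: novel classes -> 0..19, background 80 -> 20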
raise NotImplementedError
def _forward_box(self, features, proposals):
"""
Forward logic of the box prediction branch.
Args:
features (list[Tensor]): #level input features for box prediction
proposals (list[Instances]): the per-image object proposals with
their matching ground truth.
Each has fields "proposal_boxes", and "objectness_logits",
"gt_classes", "gt_boxes".
Returns:
In training, a dict of losses.
In inference, a list of `Instances`, the predicted instances.
"""
# pooled features, result size is (e.g. [512, 256, 7, 7])
# [MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE,
# MODEL.FPN.OUT_CHANNELS?,
# MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION,
# MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION]
box_features = self.box_pooler(
features, [x.proposal_boxes for x in proposals]
)
# class-agnostic per-roi feature vectors, same size for each head
# result is a list with '#heads' elements each of size
# [ROI_HEADS.BATCH_SIZE_PER_IMAGE * SOLVER.IMS_PER_BATCH, MODEL.ROI_BOX_HEAD.FC_DIM], e.g. [8192, 1024]
box_features = self.box_head(box_features)
assert len(box_features) == len(self.box_predictors) == self.num_heads, \
"box_features output should match the amount of box predictors: {}, {}"\
.format(len(box_features), len(self.box_predictors))
# class-dependent logits and bbox deltas
class_logits, proposal_deltas = [], []
for i, box_predictor in enumerate(self.box_predictors):
# pred_class_logits = [ROI_HEADS.BATCH_SIZE_PER_IMAGE * SOLVER.IMS_PER_BATCH, num_classes + 1]
# pred_proposal_deltas =
# class-agnostic: [ROI_HEADS.BATCH_SIZE_PER_IMAGE * SOLVER.IMS_PER_BATCH, 4]
# non class-agnostic: [ROI_HEADS.BATCH_SIZE_PER_IMAGE * SOLVER.IMS_PER_BATCH, 4 x num_classes] Note: not num_classes + 1!
pred_class_logits, pred_proposal_deltas = box_predictor(box_features[i])
class_logits.append(pred_class_logits)
proposal_deltas.append(pred_proposal_deltas)
del box_features
# Assumptions:
# - the 'box_features' output from box_head is class-agnostic (and same-sized for each head!), we can't do anything
# there!
# - we use those features to obtain class-dependent activations (the correct output size is ensured by
# each 'predictor head')
# - for softmax calculation, we have to compare those activations against targets, which we obtain from
# the variable 'proposals', which contains objectness score (from RPN) and gt-class (and gt-boxes)
# - those gt-data from the variable 'proposals' uses all available classes (and thus indices from
# 0 to num_classes), we then need to transform those indices into appropriate indices for each head (and need
# to remember which number we mapped to which number at which head, because each single head expects
# indices starting at 0, so our mapping destroys the unique numbers!)
# - we now have multiple possibilities what to do with our proposals: first of all, we decide to merge classes
# after softmax and not to merge the activations before the softmax. This would allow us to skip the
# index-mapping but would also cause another problem: since each head produces its own background logits,
# applying softmax on the activations of all classes together expects just a single background class,
# so which of the background activations do we choose and which do we discard? This is a non-trivial problem and
# because of that, we choose to first apply softmax to each head and then merge the resulting class
# probabilities. We now assume w.l.o.g. (without loss of generality) that we have batch-size 16 and
# 512 rois per image, yielding 8192 rois per batch (after roi pooling)
# - we could now take the Proposals and split them, depending on the target classes. In addition to this
# technique, we would probably want to use the background class activations for each head. If we think this
# idea through a bit further, we note that splitting the proposals into different heads does not make sense.
# We first note that each head i itself produces [8192, num_classes[i] + 1] classification logits because
# each head obtains 8192 rois as input (because the classification head splits after roi pooling, therefore
# each head encounters the same amount of input). For that matter, we either have to remove objects
# belonging to non-target classes at both sides, at feature side (class and box logits from the predictor)
# and at proposal-side (proposals from the RPN where GT-class is known), while keeping background class
# logits at EACH head.
# - another, simpler approach would be to use all proposals for
# each head with only a small modification: at each head, change the target-class (gt-class) of
# proposals belonging to non-target classes of this head (not counting the background class!) to the
# background class. This means non-target classes are treated as background.
# (Note: at Detectron2, the Background class is not the class with first index (0), but the class with
# last index (|num_classes|)!)
# Note: For training, we don't have to transform the indices back to the original indices because we're
# just interested in the loss which is automatically calculated correctly since the produced logits are
# yet in the correct shape and the adjusted class indices are automatically transferred into one-hot
# vectors for the classification loss (e.g. Cross Entropy). Therefore, we do not need back-transformation
# because we're done after calculating the loss.
# - Inference: In contrast to training, we (of course) have no gt-annotations, therefore we cannot prepare or
# adjust the class of proposals. We don't even have to because we don't want to calculate losses. In contrast
# to the training however, we now need postprocessing of predicted classes after having calculated softmax
# probabilities because we need to know which class belongs to the highest probability for each proposal.
# In contrast to single-heads, we now have #heads predictions for each proposal because we input ALL
# proposals to each head. This could be problematic if we think of a case where for a single proposal one
# head produces a medium high confidence for an actual class (not background) and another head outputs high
# background confidence for that proposal (because it learned the target classes of a different head as
# its own background class). This is probably not an actual issue because the "Fast-RCNN"-Head
# won't output bbox predictions for Background class which would leave us with just a single valid prediction
# for that proposal (with medium confidence).
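# Hedged toy example of the remapping (made-up numbers): with 3 base classes {0,1,2}, 2 novel classes {3,4}
# and global background index 5, a gt_classes tensor [1, 4, 5] becomes [1, 3, 3] for the base head
# (class 4 and background 5 both map to the base head's background index 3) and [2, 1, 2] for the
# novel head (class 4 -> 1, class 1 and background 5 -> novel background index 2).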
# Proposals: contains 'SOLVER.IMS_PER_BATCH' elements of type detectron2.structures.Instances
# Access elements of list 'proposals' with indices.
# Access the elements of 'Instances' with '.get_fields()', or directly with '.tensor'
# Access the tensor (which 'Boxes' wraps) with boxes.tensor
# Note: Boxes supports __getitem__ using slices, indices, boolean arrays, etc.)
# Each Instance contains following fields of following sizes:
# 'proposal_boxes': detectron2.structures.Boxes(tensor) of size [MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE, 4]
# 'objectness_logits': tensor of size [MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE]
# 'gt_classes': tensor of size [MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE]
# 'gt_boxes': detectron2.structures.Boxes(tensor) of size [MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE, 4]
# 'Proposals': (objectness logits + gt classes)
# 'box_features': (pooled roi feature put forward though the net)
# Algorithm:
# (Synopsis: we use ALL proposals for each head and just need to set non-target classes to the background class)
# 1. For Training, for each head i
heads_proposals = []
if self.training:
all_inds_to_head_inds_list = self._get_ind_mappings()
# 1.1 For each head i
for i in range(len(self.box_predictors)):
# 1.1.1 Take a copy of all Proposals, take the target categories of head i
# list of #ROI_HEADS.BATCH_SIZE_PER_IMAGE Proposal-objects, each comprising
# ROI_HEADS.BATCH_SIZE_PER_IMAGE proposals
tmp_proposals = copy.deepcopy(proposals)
all_inds_to_head_inds = all_inds_to_head_inds_list[i]
all_bg_cls = self.num_classes
head_bg_cls = self.num_head_classes[i]
assert all_bg_cls in all_inds_to_head_inds and all_inds_to_head_inds[all_bg_cls] == head_bg_cls
head_targets = list(all_inds_to_head_inds.keys())
# Note: as of 'fast_rcnn'-doc, [0, num_cls) are foreground and |num_cls| is background!
for instances in tmp_proposals:
gt_classes = instances.gt_classes # ==instances.get_fields()['gt_classes']
# 1.1.2 Set the class of the j-th proposal to background class if its not a target class
# TODO: not sure about copying the tensor to host memory but torch currently does not support
# the 'isin' function on its own...
bg_indices = np.isin(gt_classes.to(self.cpu_device), head_targets, invert=True).nonzero()
# using "all classes" background class, which is later transformed to appropriate background
# class for this head
gt_classes[bg_indices] = all_bg_cls
# 1.1.3 If proposal j is a proposal for a target class, transform its class to range
# [0, num_classes[i]]
# Note: apply_ may only be used for cpu-tensors!, so we have move it to cpu temporarily
# TODO: 'apply_' might be slow since it's not easily parallelisable
gt_classes = gt_classes.to(self.cpu_device) # move to CPU temporarily
gt_classes.apply_(lambda x: all_inds_to_head_inds[x]) # apply_ works inplace!
instances.gt_classes = gt_classes.to(self.device) # move back to GPU and override object attribute
heads_proposals.append(tmp_proposals)
else:
# 2.1 Pass all proposals to all heads
for i in range(len(self.box_predictors)):
heads_proposals.append(proposals)
# Initialize 'FastRCNNOutputs'-object, nothing more!
heads_outputs = []
for i in range(len(self.box_predictors)):
heads_outputs.append(
FastRCNNOutputs(
self.box2box_transform,
class_logits[i],
proposal_deltas[i],
heads_proposals[i],
self.smooth_l1_beta,
)
)
if self.training:
# calculate losses e.g.
# 'softmax cross entropy' on pred_class_logits ("loss_cls": self.softmax_cross_entropy_loss())
# 'smooth L1 loss' on pred_proposal_deltas ("loss_box_reg": self.smooth_l1_loss())
# Note: we don't need to transform any classes back to previous range because we're just interested in the
# loss. The gt-class (index in range of each head's target classes) will be automatically transformed to a
# one-hot vector which is sufficient to calculate the loss at each output neuron for each target class.
# We would only need to transform the categories back if we were interested in the name of each detection's
# class (as we are for inference).
losses_dicts = {}
for i, outputs in enumerate(heads_outputs):
losses_dict = outputs.losses()
for k, v in losses_dict.items():
losses_dicts[str(k) + "_" + str(i+1)] = v
del losses_dict
return losses_dicts
else:
pred_instances = []
all_inds_to_head_inds_list = self._get_ind_mappings()
for i, outputs in enumerate(heads_outputs):
tmp_pred_instances, _ = outputs.inference(
self.test_score_thresh,
self.test_nms_thresh,
self.test_detections_per_img, # TODO: problem in multi-head: detections_per_image_per_head?
)
# 2.2 After softmax, transform class of proposals back to range [0, all_classes]
all_inds_to_head_inds = all_inds_to_head_inds_list[i]
head_ind_to_ind = {v: k for k, v in all_inds_to_head_inds.items()}
# 'tmp_pred_instances' is a list of 'Instances'-objects, one object for each image
for instances in tmp_pred_instances:
# Note: at inference, this method is called once for each image, thus, |proposals| == 1
# it would probably be problematic to add one 'Instances'-object per head, since the returned list would have
# twice the expected size and the extra objects in the list would likely be ignored!
# slow but ok for inference.
pred_classes = instances.pred_classes.to(self.cpu_device) # move to cpu because of method 'apply_'
pred_classes.apply_(lambda x: head_ind_to_ind[x]) # element-wise inplace transformation
instances.pred_classes = pred_classes.to(self.device) # move back to gpu and set object attribute
pred_instances.append(tmp_pred_instances)
# num images == len(proposals), where 'proposals' is the same in the list 'heads_proposals'
# pred_instances = [num_heads, num_images], but we need [num images]
# [num_heads, num_images] -> [num_images, num_heads], then concatenate all 'Instances'-objects for a single
# image
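# e.g. (illustrative): with 2 heads and 3 images, pred_instances == [[a0, a1, a2], [b0, b1, b2]];
# zip(*pred_instances) yields (a0, b0), (a1, b1), (a2, b2), and each pair is concatenated into a
# single 'Instances' object per image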
return [Instances.cat(list(x)) for x in zip(*pred_instances)]
@ROI_HEADS_REGISTRY.register()
class StandardROIDoubleHeads(StandardROIMultiHeads):
"""
Same as StandardROIMultiHeads but using exactly two heads (for base classes and novel classes)
"""
def __init__(self, cfg, input_shape):
super(StandardROIDoubleHeads, self).__init__(cfg, input_shape)
assert self.num_heads == 2, "To use Double-Head set num_heads to 2!"
assert self.box_head.split_at_fc == 2, \
"Current ckpt_surgery requires a fixed amount of fc layers as well as a firm split index of 2!"
def _get_ind_mappings(self):
dataset = self.train_dataset_name if self.training else self.test_dataset_name # classes should normally be the same...
metadata = MetadataCatalog.get(dataset)
# For now, we use this kind of head solely for fine-tuning
assert hasattr(metadata, 'novel_dataset_id_to_contiguous_id')
assert hasattr(metadata, 'base_dataset_id_to_contiguous_id')
all_id_to_inds = metadata.thing_dataset_id_to_contiguous_id
base_id_to_inds = metadata.base_dataset_id_to_contiguous_id
novel_id_to_inds = metadata.novel_dataset_id_to_contiguous_id
all_inds_to_base_inds = {v: base_id_to_inds[k] for k, v in all_id_to_inds.items() if k in base_id_to_inds.keys()}
all_inds_to_novel_inds = {v: novel_id_to_inds[k] for k, v in all_id_to_inds.items() if k in novel_id_to_inds.keys()}
# For each head, add a mapping from old background class index to each head's background class index
all_bg_ind = len(all_id_to_inds)
base_bg_ind = len(base_id_to_inds)
novel_bg_ind = len(novel_id_to_inds)
assert all_bg_ind not in all_id_to_inds.values()
assert base_bg_ind not in base_id_to_inds.values()
assert novel_bg_ind not in novel_id_to_inds.values()
all_inds_to_base_inds[all_bg_ind] = base_bg_ind
all_inds_to_novel_inds[all_bg_ind] = novel_bg_ind
return [all_inds_to_base_inds, all_inds_to_novel_inds]
```
#### File: Jonas-Meier/FrustratinglySimpleFsDet/wrapper_base_training.py
```python
import os
def main():
dataset = "coco" # coco, isaid
coco_class_split = "voc_nonvoc" # voc_nonvoc, none_all
isaid_class_split = "vehicle_nonvehicle" # vehicle_nonvehicle, none_all, experiment1, experiment2, experiment3
gpu_ids = [0]
num_threads = 2 # two threads seem to be a bit faster than just one, but four threads are as fast as two threads!
bs = 16
lr = 0.02 # 0.02 for bs=16. Set to -1 for automatic linear scaling!
layers = 50 # 50, 101
override_config = True
if dataset == "coco":
class_split = coco_class_split
elif dataset == "isaid":
class_split = isaid_class_split
else:
raise ValueError("Unknown dataset: {}".format(dataset))
run_base_training(dataset, class_split, gpu_ids, num_threads, layers, bs, lr, override_config)
def run_base_training(dataset, class_split, gpu_ids, num_threads, layers, bs, lr=-1.0, override_config=False):
base_cmd = "python3 -m tools.run_base_training"
override_config_str = ' --override-config' if override_config else ''
cmd = "{} --dataset {} --class-split {} --gpu-ids {} --num-threads {} --layers {} --bs {} --lr {}{}"\
.format(base_cmd, dataset, class_split, separate(gpu_ids, ' '), num_threads, layers, bs, lr, override_config_str)
os.system(cmd)
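# Illustration (values taken from main() above, not defaults of the tool): with dataset="coco",
# class_split="voc_nonvoc", gpu_ids=[0], num_threads=2, layers=50, bs=16, lr=0.02 and
# override_config=True, the assembled command is:
#   python3 -m tools.run_base_training --dataset coco --class-split voc_nonvoc --gpu-ids 0 \
#     --num-threads 2 --layers 50 --bs 16 --lr 0.02 --override-config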
def separate(elements, separator):
res = ''
if not isinstance(elements, (list, tuple)):
return str(elements)
assert len(elements) > 0, "need at least one element in the collection {}!".format(elements)
if len(elements) == 1:
return str(elements[0])
for element in elements:
res += '{}{}'.format(str(element), separator)
return res[:-1] # remove trailing separator
if __name__ == '__main__':
main()
``` |
{
"source": "jonas-meng/EthereumMiddleware",
"score": 3
} |
#### File: jonas-meng/EthereumMiddleware/mockchain.py
```python
import collections
import random
import json
import hashlib
def hexhash(x):
return '0x' + hashlib.sha224(str(x).encode()).hexdigest()[:6]
TransferEvent = collections.namedtuple('TransferEvent', 'sender, receiver, amount')
class Accounts(object):
initial_supply = 0
def __init__(self, num_accounts=0, copy_from=None):
self.balances = dict()
if copy_from:
self.balances = copy_from.balances.copy()
else:
self._gen_accounts(num_accounts)
self.initial_supply = self.supply
def _gen_accounts(self, num):
for i in range(num):
k = hexhash(i)
v = random.randint(1, 100)
self.balances[k] = v
@property
def supply(self):
return sum(self.balances.values())
def median(self):
return sorted(self.balances.values())[len(self.balances) // 2]
def transfer(self, sender, receiver, amount):
self.balances[sender] -= amount
self.balances[receiver] += amount
assert self.supply == self.initial_supply
def random_transfer(self):
"generates a valid random transfer"
while True:
sender = random.choice(list(self.balances.keys()))
if not self.balances[sender]:
continue
receiver = random.choice(list(self.balances.keys()))
if sender == receiver:
continue
amount = random.randint(1, self.balances[sender])
self.transfer(sender, receiver, amount)
return TransferEvent(sender, receiver, amount)
class Block(object):
def __init__(self, prevblock=None, num_accounts=0):
if not prevblock: # genesis block
self.accounts = Accounts(num_accounts=num_accounts)
self.prevhash = hexhash(-1)
self.number = 0
else:
self.accounts = Accounts(copy_from=prevblock.accounts)
self.number = prevblock.number + 1
self.prevhash = prevblock.hash
self.transfers = []
self.prevblock = prevblock
def copy_transfers(self, other, fraction=0.5):
assert isinstance(other, Block)
for t in other.transfers[:int(len(other.transfers) * fraction)]:
self.transfers.append(t)
self.accounts.transfer(t.sender, t.receiver, t.amount)
@property
def hash(self):
return hexhash(repr(self.__dict__))
def random_transfers(self, num):
for i in range(num):
self.transfers.append(self.accounts.random_transfer())
def serialize(self, include_balances=False):
s = dict(number=self.number,
hash=self.hash,
prevhash=self.prevhash,
transfers=[dict(x._asdict()) for x in self.transfers]
)
if include_balances or self.number == 0:
s['balances'] = self.accounts.balances
return s
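# Illustration only (hashes are made up): a serialized non-genesis block looks roughly like
#   {"number": 3, "hash": "0xabc123", "prevhash": "0xdef456",
#    "transfers": [{"sender": "0x1a2b3c", "receiver": "0x4d5e6f", "amount": 7}]}
# the genesis block (number 0) additionally carries the full "balances" mapping.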
def gen_chain(height, p_revert, num_accounts, max_transfers):
head = Block(num_accounts=num_accounts)
chain = [head]
while head.number < height:
if head.number > 0 and random.random() < p_revert:
head = head.prevblock
else:
head = Block(prevblock=head)
# check if there is a sibling (same block height)
if len(chain) > 2 and chain[-2].number == head.number:
sibling = chain[-2]
# include some of its txs
head.copy_transfers(sibling, 0.5)
head.random_transfers(random.randint(0, max_transfers // 2))
else:
head.random_transfers(random.randint(0, max_transfers))
chain.append(head)
return chain
def longest_revert(chain):
highest = 0
longest = 0
for block in chain:
highest = max(highest, block.number)
longest = max(longest, highest - block.number)
return longest
random.seed(43)
chain = gen_chain(height=10, p_revert=0.5, num_accounts=100, max_transfers=10)
serialized_blocks = [b.serialize(include_balances=False) for b in chain]
# random.shuffle(serialized_blocks)
print(json.dumps(serialized_blocks, indent=4, sort_keys=True))
print('blocks: {} max reverted: {}'.format(len(chain), longest_revert(chain)))
txs = []
for block in set(chain):
txs.extend(block.transfers)
print('total transfers: {} unique transfers: {}'.format(len(txs), len(set(txs))))
``` |
{
"source": "JonasMht/Sensha-Game-2019",
"score": 3
} |
#### File: Sensha-Game-2019/sensha_uncompiled_version05-06-2019/hangar.py
```python
from sprites import *
"""
constructor function
- How it works: used in the Vehicle and Base classes, it defines the structure of the different tanks by adding components to the frame dictionary
"""
def constructor(self, name):
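# Hedged usage sketch (Vehicle/Base live in sprites.py, so the exact initialisation is an assumption):
# a vehicle would typically create an empty frame and then call constructor with the tank's name, e.g.
#   self.frame = {'body': [], 'turret': []}
#   constructor(self, "Rover_1")
# after which self.frame['body'] and self.frame['turret'] hold the configured components.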
# Turret: glob, image_file, fire_range, fire_rate, rot_speed, pos_adj, layer, projectile_image, projectile_speed, projectile_damage, fire_sound, trail_anim, explosion_anim, explosion_sound
if name == "Rover_1":
self.frame['body'].append(Body(self, self.glob.sprite["obj"][2], 3))
self.frame['turret'].append(Turret(self, self.glob.sprite["obj"][41], 3))
self.frame['turret'][0].fire_range = 0.3 * self.glob.data["screen_width"]
self.frame['turret'][0].fire_rate = 2.3
self.frame['turret'][0].rot_speed = 2
self.frame['turret'][0].salvo = 3
self.frame['turret'][0].salvo_interval = 0.2
self.frame['turret'][0].pos_adj = vec(-0.002,0) * self.glob.data["screen_width"]
self.frame['turret'][0].projectile_image = self.glob.sprite["obj"][59]
self.frame['turret'][0].projectile_speed = 0.01 * self.glob.data["screen_width"]
self.frame['turret'][0].projectile_damage = 10
self.frame['turret'][0].fire_sound = self.glob.sounds[2]
self.frame['turret'][0].trail_anim = "None"
self.frame['turret'][0].explosion_anim = self.glob.sprite['anim_spark']
self.frame['turret'][0].explosion_sound = self.glob.sounds[1]
self.hp = 150
self.max_acc = (0.25)
self.detection_range = (0.5) * self.glob.data["screen_width"]
self.hold_range = (0.2) * self.glob.data["screen_width"]
self.rot_speed = 1
if name == "Rover_2":
self.frame['body'].append(Body(self, self.glob.sprite["obj"][3], 3))
self.frame['turret'].append(Turret(self, self.glob.sprite["obj"][42], 3))
self.frame['turret'][0].fire_range = 0.35 * self.glob.data["screen_width"]
self.frame['turret'][0].fire_rate = 2
self.frame['turret'][0].rot_speed = 2
self.frame['turret'][0].salvo = 6
self.frame['turret'][0].salvo_interval = 0.2
self.frame['turret'][0].pos_adj = vec(-0.004,0) * self.glob.data["screen_width"]
self.frame['turret'][0].projectile_image = self.glob.sprite["obj"][59]
self.frame['turret'][0].projectile_speed = 0.01 * self.glob.data["screen_width"]
self.frame['turret'][0].projectile_damage = 15
self.frame['turret'][0].fire_sound = self.glob.sounds[2]
self.frame['turret'][0].trail_anim = "None"
self.frame['turret'][0].explosion_anim = self.glob.sprite['anim_spark']
self.frame['turret'][0].explosion_sound = self.glob.sounds[1]
self.hp = 200
self.max_acc = (0.27)
self.detection_range = (0.5) * self.glob.data["screen_width"]
self.hold_range = (0.25) * self.glob.data["screen_width"]
self.rot_speed = 1
if name == "Rover_3":
self.frame['body'].append(Body(self, self.glob.sprite["obj"][4], 3))
self.frame['turret'].append(Turret(self, self.glob.sprite["obj"][43], 3))
self.frame['turret'][0].fire_range = 0.4 * self.glob.data["screen_width"]
self.frame['turret'][0].fire_rate = 2.2
self.frame['turret'][0].rot_speed = 2
self.frame['turret'][0].salvo = 4
self.frame['turret'][0].salvo_interval = 0.2
self.frame['turret'][0].pos_adj = vec(-0.004,0) * self.glob.data["screen_width"]
self.frame['turret'][0].projectile_image = self.glob.sprite["obj"][59]
self.frame['turret'][0].projectile_speed = 0.01 * self.glob.data["screen_width"]
self.frame['turret'][0].projectile_damage = 20
self.frame['turret'][0].fire_sound = self.glob.sounds[2]
self.frame['turret'][0].trail_anim = "None"
self.frame['turret'][0].explosion_anim = self.glob.sprite['anim_spark']
self.frame['turret'][0].explosion_sound = self.glob.sounds[1]
self.hp = 400
self.max_acc = (0.29)
self.detection_range = (0.5) * self.glob.data["screen_width"]
self.hold_range = (0.3) * self.glob.data["screen_width"]
self.rot_speed = 1
if name == "Rover_4":
self.frame['body'].append(Body(self, self.glob.sprite["obj"][5], 3))
self.frame['turret'].append(Turret(self, self.glob.sprite["obj"][39], 3))
self.frame['turret'][0].fire_range = 0.4 * self.glob.data["screen_width"]
self.frame['turret'][0].fire_rate = 2
self.frame['turret'][0].rot_speed = 2
self.frame['turret'][0].salvo = 8
self.frame['turret'][0].salvo_interval = 0.1
self.frame['turret'][0].pos_adj = vec(0.015,0) * self.glob.data["screen_width"]
self.frame['turret'][0].projectile_image = self.glob.sprite["obj"][60]
self.frame['turret'][0].projectile_speed = 0.01 * self.glob.data["screen_width"]
self.frame['turret'][0].projectile_damage = 10
self.frame['turret'][0].fire_sound = self.glob.sounds[2]
self.frame['turret'][0].trail_anim = "None"
self.frame['turret'][0].explosion_anim = self.glob.sprite['anim_spark']
self.frame['turret'][0].explosion_sound = self.glob.sounds[1]
self.frame['turret'].append(Turret(self, self.glob.sprite["obj"][44], 3))
self.frame['turret'][1].fire_range = 0.5 * self.glob.data["screen_width"]
self.frame['turret'][1].fire_rate = 2
self.frame['turret'][1].rot_speed = 3
self.frame['turret'][1].salvo = 6
self.frame['turret'][1].salvo_interval = 0.2
self.frame['turret'][1].pos_adj = vec(-0.005,0) * self.glob.data["screen_width"]
self.frame['turret'][1].projectile_image = self.glob.sprite["obj"][59]
self.frame['turret'][1].projectile_speed = 0.01 * self.glob.data["screen_width"]
self.frame['turret'][1].projectile_damage = 25
self.frame['turret'][1].fire_sound = self.glob.sounds[2]
self.frame['turret'][1].trail_anim = "None"
self.frame['turret'][1].explosion_anim = self.glob.sprite['anim_spark']
self.frame['turret'][1].explosion_sound = self.glob.sounds[1]
self.hp = 750
self.max_acc = (0.31)
self.detection_range = (0.6) * self.glob.data["screen_width"]
self.hold_range = (0.4) * self.glob.data["screen_width"]
self.rot_speed = 1
if name == "Rover_5":
self.frame['body'].append(Body(self, self.glob.sprite["obj"][6], 3))
self.frame['turret'].append(Launcher(self, self.glob.sprite["obj"][40], 3))
self.frame['turret'][0].fire_range = 0.4 * self.glob.data["screen_width"]
self.frame['turret'][0].fire_rate = 2.5
self.frame['turret'][0].salvo = 1
self.frame['turret'][0].salvo_interval = 0.2
self.frame['turret'][0].rot_speed = 2
self.frame['turret'][0].pos_adj = vec(0.01,0) * self.glob.data["screen_width"]
self.frame['turret'][0].projectile_image = self.glob.sprite["obj"][68]
self.frame['turret'][0].projectile_speed = 0.005 * self.glob.data["screen_width"]
self.frame['turret'][0].missile_rot_speed = 4
self.frame['turret'][0].projectile_damage = 15
self.frame['turret'][0].fire_sound = self.glob.sounds[3]
self.frame['turret'][0].trail_anim = self.glob.sprite['anim_vapour_trail']
self.frame['turret'][0].explosion_anim = self.glob.sprite['anim_yellow_exp']
self.frame['turret'][0].explosion_sound = self.glob.sounds[1]
self.frame['turret'].append(Turret(self, self.glob.sprite["obj"][45], 3))
self.frame['turret'][1].fire_range = 0.5 * self.glob.data["screen_width"]
self.frame['turret'][1].fire_rate = 3
self.frame['turret'][1].rot_speed = 3
self.frame['turret'][1].salvo = 6
self.frame['turret'][1].salvo_interval = 0.2
self.frame['turret'][1].pos_adj = vec(-0.01,0) * self.glob.data["screen_width"]
self.frame['turret'][1].projectile_image = self.glob.sprite["obj"][59]
self.frame['turret'][1].projectile_speed = 0.01 * self.glob.data["screen_width"]
self.frame['turret'][1].projectile_damage = 30
self.frame['turret'][1].fire_sound = self.glob.sounds[2]
self.frame['turret'][1].trail_anim = "None"
self.frame['turret'][1].explosion_anim = self.glob.sprite['anim_spark']
self.frame['turret'][1].explosion_sound = self.glob.sounds[1]
self.hp = 1000
self.max_acc = (0.33)
self.detection_range = (0.6) * self.glob.data["screen_width"]
self.hold_range = (0.4) * self.glob.data["screen_width"]
self.rot_speed = 2
if name == "Rocket_1":
self.frame['body'].append(Body(self, self.glob.sprite["obj"][7], 3))
self.frame['turret'].append(Turret(self, self.glob.sprite["obj"][39], 3))
self.frame['turret'][0].fire_range = 0.4 * self.glob.data["screen_width"]
self.frame['turret'][0].fire_rate = 2
self.frame['turret'][0].rot_speed = 2
self.frame['turret'][0].salvo = 3
self.frame['turret'][0].salvo_interval = 0.4
self.frame['turret'][0].pos_adj = vec(0.003,0) * self.glob.data["screen_width"]
self.frame['turret'][0].projectile_image = self.glob.sprite["obj"][60]
self.frame['turret'][0].projectile_speed = 0.01 * self.glob.data["screen_width"]
self.frame['turret'][0].projectile_damage = 15
self.frame['turret'][0].fire_sound = self.glob.sounds[2]
self.frame['turret'][0].trail_anim = "None"
self.frame['turret'][0].explosion_anim = self.glob.sprite['anim_spark']
self.frame['turret'][0].explosion_sound = self.glob.sounds[1]
self.frame['turret'].append(Launcher(self, self.glob.sprite["obj"][46], 3))
self.frame['turret'][1].fire_range = 0.5 * self.glob.data["screen_width"]
self.frame['turret'][1].fire_rate = 3.5
self.frame['turret'][1].salvo = 2
self.frame['turret'][1].salvo_interval = 0.2
self.frame['turret'][1].rot_speed = 2
self.frame['turret'][1].pos_adj = vec(-0.0085,0) * self.glob.data["screen_width"]
self.frame['turret'][1].projectile_image = self.glob.sprite["obj"][68]
self.frame['turret'][1].projectile_speed = 0.005 * self.glob.data["screen_width"]
self.frame['turret'][1].missile_rot_speed = 2.5
self.frame['turret'][1].projectile_damage = 35
self.frame['turret'][1].fire_sound = self.glob.sounds[3]
self.frame['turret'][1].trail_anim = self.glob.sprite['anim_vapour_trail']
self.frame['turret'][1].explosion_anim = self.glob.sprite['anim_yellow_exp']
self.frame['turret'][1].explosion_sound = self.glob.sounds[1]
self.hp = 100
self.max_acc = (0.2)
self.detection_range = (0.6) * self.glob.data["screen_width"]
self.hold_range = (0.4) * self.glob.data["screen_width"]
self.rot_speed = 2
if name == "Rocket_2":
self.frame['body'].append(Body(self, self.glob.sprite["obj"][8], 3))
self.frame['turret'].append(Turret(self, self.glob.sprite["obj"][39], 3))
self.frame['turret'][0].fire_range = 0.4 * self.glob.data["screen_width"]
self.frame['turret'][0].fire_rate = 2
self.frame['turret'][0].rot_speed = 2
self.frame['turret'][0].salvo = 3
self.frame['turret'][0].salvo_interval = 0.3
self.frame['turret'][0].pos_adj = vec(0.003,0) * self.glob.data["screen_width"]
self.frame['turret'][0].projectile_image = self.glob.sprite["obj"][60]
self.frame['turret'][0].projectile_speed = 0.01 * self.glob.data["screen_width"]
self.frame['turret'][0].projectile_damage = 25
self.frame['turret'][0].fire_sound = self.glob.sounds[2]
self.frame['turret'][0].trail_anim = "None"
self.frame['turret'][0].explosion_anim = self.glob.sprite['anim_spark']
self.frame['turret'][0].explosion_sound = self.glob.sounds[1]
self.frame['turret'].append(Launcher(self, self.glob.sprite["obj"][47], 3))
self.frame['turret'][1].fire_range = 0.6 * self.glob.data["screen_width"]
self.frame['turret'][1].fire_rate = 4.0
self.frame['turret'][1].salvo = 3
self.frame['turret'][1].salvo_interval = 0.2
self.frame['turret'][1].rot_speed = 2
self.frame['turret'][1].pos_adj = vec(-0.0085,0) * self.glob.data["screen_width"]
self.frame['turret'][1].projectile_image = self.glob.sprite["obj"][68]
self.frame['turret'][1].projectile_speed = 0.005 * self.glob.data["screen_width"]
self.frame['turret'][1].missile_rot_speed = 3
self.frame['turret'][1].projectile_damage = 45
self.frame['turret'][1].fire_sound = self.glob.sounds[3]
self.frame['turret'][1].trail_anim = self.glob.sprite['anim_vapour_trail']
self.frame['turret'][1].explosion_anim = self.glob.sprite['anim_yellow_exp']
self.frame['turret'][1].explosion_sound = self.glob.sounds[1]
self.hp = 150
self.max_acc = (0.2)
self.detection_range = (0.7) * self.glob.data["screen_width"]
self.hold_range = (0.5) * self.glob.data["screen_width"]
self.rot_speed = 2
if name == "Rocket_3":
self.frame['body'].append(Body(self, self.glob.sprite["obj"][9], 3))
self.frame['turret'].append(Turret(self, self.glob.sprite["obj"][39], 3))
self.frame['turret'][0].fire_range = 0.5 * self.glob.data["screen_width"]
self.frame['turret'][0].fire_rate = 2
self.frame['turret'][0].rot_speed = 2
self.frame['turret'][0].salvo = 3
self.frame['turret'][0].salvo_interval = 0.2
self.frame['turret'][0].pos_adj = vec(0.003,0) * self.glob.data["screen_width"]
self.frame['turret'][0].projectile_image = self.glob.sprite["obj"][60]
self.frame['turret'][0].projectile_speed = 0.01 * self.glob.data["screen_width"]
self.frame['turret'][0].projectile_damage = 30
self.frame['turret'][0].fire_sound = self.glob.sounds[2]
self.frame['turret'][0].trail_anim = "None"
self.frame['turret'][0].explosion_anim = self.glob.sprite['anim_spark']
self.frame['turret'][0].explosion_sound = self.glob.sounds[1]
self.frame['turret'].append(Launcher(self, self.glob.sprite["obj"][48], 3))
self.frame['turret'][1].fire_range = 0.7 * self.glob.data["screen_width"]
self.frame['turret'][1].fire_rate = 5.0
self.frame['turret'][1].salvo = 4
self.frame['turret'][1].salvo_interval = 0.2
self.frame['turret'][1].rot_speed = 2
self.frame['turret'][1].pos_adj = vec(-0.0085,0) * self.glob.data["screen_width"]
self.frame['turret'][1].projectile_image = self.glob.sprite["obj"][68]
self.frame['turret'][1].projectile_speed = 0.005 * self.glob.data["screen_width"]
self.frame['turret'][1].missile_rot_speed = 3.5
self.frame['turret'][1].projectile_damage = 50
self.frame['turret'][1].fire_sound = self.glob.sounds[3]
self.frame['turret'][1].trail_anim = self.glob.sprite['anim_vapour_trail']
self.frame['turret'][1].explosion_anim = self.glob.sprite['anim_yellow_exp']
self.frame['turret'][1].explosion_sound = self.glob.sounds[1]
self.hp = 200
self.max_acc = (0.2)
self.detection_range = (0.8) * self.glob.data["screen_width"]
self.hold_range = (0.65) * self.glob.data["screen_width"]
self.rot_speed = 2
if name == "Rocket_4":
self.frame['body'].append(Body(self, self.glob.sprite["obj"][10], 3))
self.frame['turret'].append(Turret(self, self.glob.sprite["obj"][39], 3))
self.frame['turret'][0].fire_range = 0.5 * self.glob.data["screen_width"]
self.frame['turret'][0].fire_rate = 2
self.frame['turret'][0].rot_speed = 2
self.frame['turret'][0].salvo = 4
self.frame['turret'][0].salvo_interval = 0.2
self.frame['turret'][0].pos_adj = vec(0.003,0) * self.glob.data["screen_width"]
self.frame['turret'][0].projectile_image = self.glob.sprite["obj"][60]
self.frame['turret'][0].projectile_speed = 0.01 * self.glob.data["screen_width"]
self.frame['turret'][0].projectile_damage = 35
self.frame['turret'][0].fire_sound = self.glob.sounds[2]
self.frame['turret'][0].trail_anim = "None"
self.frame['turret'][0].explosion_anim = self.glob.sprite['anim_spark']
self.frame['turret'][0].explosion_sound = self.glob.sounds[1]
self.frame['turret'].append(Launcher(self, self.glob.sprite["obj"][49], 3))
self.frame['turret'][1].fire_range = 0.75 * self.glob.data["screen_width"]
self.frame['turret'][1].fire_rate = 6.0
self.frame['turret'][1].salvo = 5
self.frame['turret'][1].salvo_interval = 0.2
self.frame['turret'][1].rot_speed = 2
self.frame['turret'][1].pos_adj = vec(-0.0085,0) * self.glob.data["screen_width"]
self.frame['turret'][1].projectile_image = self.glob.sprite["obj"][68]
self.frame['turret'][1].projectile_speed = 0.005 * self.glob.data["screen_width"]
self.frame['turret'][1].missile_rot_speed = 4
self.frame['turret'][1].projectile_damage = 50
self.frame['turret'][1].fire_sound = self.glob.sounds[3]
self.frame['turret'][1].trail_anim = self.glob.sprite['anim_vapour_trail']
self.frame['turret'][1].explosion_anim = self.glob.sprite['anim_yellow_exp']
self.frame['turret'][1].explosion_sound = self.glob.sounds[1]
self.hp = 250
self.max_acc = (0.2)
self.detection_range = (0.8) * self.glob.data["screen_width"]
self.hold_range = (0.7) * self.glob.data["screen_width"]
self.rot_speed = 2
if name == "Panther_1":
self.frame['body'].append(Body(self, self.glob.sprite["obj"][11], 3))
self.frame['turret'].append(Turret(self, self.glob.sprite["obj"][50], 3))
self.frame['turret'][0].fire_range = 0.3 * self.glob.data["screen_width"]
self.frame['turret'][0].fire_rate = 2
self.frame['turret'][0].rot_speed = 2
self.frame['turret'][0].salvo = 1
self.frame['turret'][0].salvo_interval = 0.4
self.frame['turret'][0].pos_adj = vec(0.0,0) * self.glob.data["screen_width"]
self.frame['turret'][0].projectile_image = self.glob.sprite["obj"][61]
self.frame['turret'][0].projectile_speed = 0.005 * self.glob.data["screen_width"]
self.frame['turret'][0].projectile_damage = 30
self.frame['turret'][0].fire_sound = self.glob.sounds[2]
self.frame['turret'][0].trail_anim = self.glob.sprite['anim_bullet_flame']
self.frame['turret'][0].explosion_anim = self.glob.sprite['anim_energy_leak_exp']
self.frame['turret'][0].explosion_sound = self.glob.sounds[1]
self.hp = 350
self.max_acc = (0.2)
self.detection_range = (0.5) * self.glob.data["screen_width"]
self.hold_range = (0.25) * self.glob.data["screen_width"]
self.rot_speed = 2
if name == "Panther_2":
self.frame['body'].append(Body(self, self.glob.sprite["obj"][12], 3))
self.frame['turret'].append(Turret(self, self.glob.sprite["obj"][51], 3))
self.frame['turret'][0].fire_range = 0.35 * self.glob.data["screen_width"]
self.frame['turret'][0].fire_rate = 2
self.frame['turret'][0].rot_speed = 2
self.frame['turret'][0].salvo = 1
self.frame['turret'][0].salvo_interval = 0.4
self.frame['turret'][0].pos_adj = vec(0.0,0) * self.glob.data["screen_width"]
self.frame['turret'][0].projectile_image = self.glob.sprite["obj"][61]
self.frame['turret'][0].projectile_speed = 0.005 * self.glob.data["screen_width"]
self.frame['turret'][0].projectile_damage = 45
self.frame['turret'][0].fire_sound = self.glob.sounds[2]
self.frame['turret'][0].trail_anim = self.glob.sprite['anim_bullet_flame']
self.frame['turret'][0].explosion_anim = self.glob.sprite['anim_energy_leak_exp']
self.frame['turret'][0].explosion_sound = self.glob.sounds[1]
self.hp = 500
self.max_acc = (0.21)
self.detection_range = (0.5) * self.glob.data["screen_width"]
self.hold_range = (0.3) * self.glob.data["screen_width"]
self.rot_speed = 2
if name == "Panther_3":
self.frame['body'].append(Body(self, self.glob.sprite["obj"][13], 3))
self.frame['turret'].append(Turret(self, self.glob.sprite["obj"][52], 3))
self.frame['turret'][0].fire_range = 0.4 * self.glob.data["screen_width"]
self.frame['turret'][0].fire_rate = 2
self.frame['turret'][0].rot_speed = 2
self.frame['turret'][0].salvo = 1
self.frame['turret'][0].salvo_interval = 0.4
self.frame['turret'][0].pos_adj = vec(0.0,0) * self.glob.data["screen_width"]
self.frame['turret'][0].projectile_image = self.glob.sprite["obj"][61]
self.frame['turret'][0].projectile_speed = 0.005 * self.glob.data["screen_width"]
self.frame['turret'][0].projectile_damage = 65
self.frame['turret'][0].fire_sound = self.glob.sounds[2]
self.frame['turret'][0].trail_anim = self.glob.sprite['anim_bullet_flame']
self.frame['turret'][0].explosion_anim = self.glob.sprite['anim_energy_leak_exp']
self.frame['turret'][0].explosion_sound = self.glob.sounds[1]
self.hp = 750
self.max_acc = (0.22)
self.detection_range = (0.6) * self.glob.data["screen_width"]
self.hold_range = (0.35) * self.glob.data["screen_width"]
self.rot_speed = 2
if name == "Panther_4":
self.frame['body'].append(Body(self, self.glob.sprite["obj"][14], 3))
self.frame['turret'].append(Turret(self, self.glob.sprite["obj"][53], 3))
self.frame['turret'][0].fire_range = 0.45 * self.glob.data["screen_width"]
self.frame['turret'][0].fire_rate = 2
self.frame['turret'][0].rot_speed = 2
self.frame['turret'][0].salvo = 2
self.frame['turret'][0].salvo_interval = 0.4
self.frame['turret'][0].pos_adj = vec(0.0,0) * self.glob.data["screen_width"]
self.frame['turret'][0].projectile_image = self.glob.sprite["obj"][61]
self.frame['turret'][0].projectile_speed = 0.005 * self.glob.data["screen_width"]
self.frame['turret'][0].projectile_damage = 85
self.frame['turret'][0].fire_sound = self.glob.sounds[2]
self.frame['turret'][0].trail_anim = self.glob.sprite['anim_bullet_flame']
self.frame['turret'][0].explosion_anim = self.glob.sprite['anim_energy_leak_exp']
self.frame['turret'][0].explosion_sound = self.glob.sounds[1]
self.hp = 1500
self.max_acc = (0.23)
self.detection_range = (0.6) * self.glob.data["screen_width"]
self.hold_range = (0.4) * self.glob.data["screen_width"]
self.rot_speed = 2
if name == "Flak_1":
self.frame['body'].append(Body(self, self.glob.sprite["obj"][11], 3))
self.frame['turret'].append(Turret(self, self.glob.sprite["obj"][54], 3))
self.frame['turret'][0].fire_range = 0.3 * self.glob.data["screen_width"]
self.frame['turret'][0].fire_rate = 2
self.frame['turret'][0].rot_speed = 2
self.frame['turret'][0].salvo = 2
self.frame['turret'][0].salvo_interval = 0.2
self.frame['turret'][0].pos_adj = vec(0.0,0) * self.glob.data["screen_width"]
self.frame['turret'][0].projectile_image = self.glob.sprite["obj"][61]
self.frame['turret'][0].projectile_speed = 0.01 * self.glob.data["screen_width"]
self.frame['turret'][0].projectile_damage = 10
self.frame['turret'][0].fire_sound = self.glob.sounds[2]
self.frame['turret'][0].trail_anim = self.glob.sprite['anim_bullet_flame']
self.frame['turret'][0].explosion_anim = self.glob.sprite['anim_spark']
self.frame['turret'][0].explosion_sound = self.glob.sounds[1]
self.hp = 300
self.max_acc = (0.22)
self.detection_range = (0.5) * self.glob.data["screen_width"]
self.hold_range = (0.25) * self.glob.data["screen_width"]
self.rot_speed = 2
if name == "Flak_2":
self.frame['body'].append(Body(self, self.glob.sprite["obj"][12], 3))
self.frame['turret'].append(Turret(self, self.glob.sprite["obj"][55], 3))
self.frame['turret'][0].fire_range = 0.35 * self.glob.data["screen_width"]
self.frame['turret'][0].fire_rate = 2
self.frame['turret'][0].rot_speed = 2
self.frame['turret'][0].salvo = 3
self.frame['turret'][0].salvo_interval = 0.2
self.frame['turret'][0].pos_adj = vec(0.0,0) * self.glob.data["screen_width"]
self.frame['turret'][0].projectile_image = self.glob.sprite["obj"][61]
self.frame['turret'][0].projectile_speed = 0.01 * self.glob.data["screen_width"]
self.frame['turret'][0].projectile_damage = 15
self.frame['turret'][0].fire_sound = self.glob.sounds[2]
self.frame['turret'][0].trail_anim = self.glob.sprite['anim_bullet_flame']
self.frame['turret'][0].explosion_anim = self.glob.sprite['anim_spark']
self.frame['turret'][0].explosion_sound = self.glob.sounds[1]
self.hp = 400
self.max_acc = (0.24)
self.detection_range = (0.5) * self.glob.data["screen_width"]
self.hold_range = (0.3) * self.glob.data["screen_width"]
self.rot_speed = 2
if name == "Flak_3":
self.frame['body'].append(Body(self, self.glob.sprite["obj"][13], 3))
self.frame['turret'].append(Turret(self, self.glob.sprite["obj"][56], 3))
self.frame['turret'][0].fire_range = 0.4 * self.glob.data["screen_width"]
self.frame['turret'][0].fire_rate = 2
self.frame['turret'][0].rot_speed = 2
self.frame['turret'][0].salvo = 3
self.frame['turret'][0].salvo_interval = 0.1
self.frame['turret'][0].pos_adj = vec(0.0,0) * self.glob.data["screen_width"]
self.frame['turret'][0].projectile_image = self.glob.sprite["obj"][61]
self.frame['turret'][0].projectile_speed = 0.01 * self.glob.data["screen_width"]
self.frame['turret'][0].projectile_damage = 20
self.frame['turret'][0].fire_sound = self.glob.sounds[2]
self.frame['turret'][0].trail_anim = self.glob.sprite['anim_bullet_flame']
self.frame['turret'][0].explosion_anim = self.glob.sprite['anim_spark']
self.frame['turret'][0].explosion_sound = self.glob.sounds[1]
self.hp = 600
self.max_acc = (0.26)
self.detection_range = (0.5) * self.glob.data["screen_width"]
self.hold_range = (0.35) * self.glob.data["screen_width"]
self.rot_speed = 2
if name == "Flak_4":
self.frame['body'].append(Body(self, self.glob.sprite["obj"][14], 3))
self.frame['turret'].append(Turret(self, self.glob.sprite["obj"][57], 3))
self.frame['turret'][0].fire_range = 0.45 * self.glob.data["screen_width"]
self.frame['turret'][0].fire_rate = 2
self.frame['turret'][0].rot_speed = 2
self.frame['turret'][0].salvo = 4
self.frame['turret'][0].salvo_interval = 0.2
self.frame['turret'][0].pos_adj = vec(0.0,0) * self.glob.data["screen_width"]
self.frame['turret'][0].projectile_image = self.glob.sprite["obj"][61]
self.frame['turret'][0].projectile_speed = 0.01 * self.glob.data["screen_width"]
self.frame['turret'][0].projectile_damage = 30
self.frame['turret'][0].fire_sound = self.glob.sounds[2]
self.frame['turret'][0].trail_anim = self.glob.sprite['anim_bullet_flame']
self.frame['turret'][0].explosion_anim = self.glob.sprite['anim_spark']
self.frame['turret'][0].explosion_sound = self.glob.sounds[1]
self.hp = 900
self.max_acc = (0.28)
self.detection_range = (0.6) * self.glob.data["screen_width"]
self.hold_range = (0.4) * self.glob.data["screen_width"]
self.rot_speed = 2
if name == "Flak_5":
self.frame['body'].append(Body(self, self.glob.sprite["obj"][14], 3))
self.frame['turret'].append(Turret(self, self.glob.sprite["obj"][58], 3))
self.frame['turret'][0].fire_range = 0.5 * self.glob.data["screen_width"]
self.frame['turret'][0].fire_rate = 3
self.frame['turret'][0].rot_speed = 2
self.frame['turret'][0].salvo = 5
self.frame['turret'][0].salvo_interval = 0.4
self.frame['turret'][0].pos_adj = vec(0.0,0) * self.glob.data["screen_width"]
self.frame['turret'][0].projectile_image = self.glob.sprite["obj"][61]
self.frame['turret'][0].projectile_speed = 0.01 * self.glob.data["screen_width"]
self.frame['turret'][0].projectile_damage = 40
self.frame['turret'][0].fire_sound = self.glob.sounds[2]
self.frame['turret'][0].trail_anim = self.glob.sprite['anim_bullet_flame']
self.frame['turret'][0].explosion_anim = self.glob.sprite['anim_spark']
self.frame['turret'][0].explosion_sound = self.glob.sounds[1]
self.hp = 1250
self.max_acc = (0.30)
self.detection_range = (0.6) * self.glob.data["screen_width"]
self.hold_range = (0.45) * self.glob.data["screen_width"]
self.rot_speed = 2
if name == "Tanker_1":
self.frame['body'].append(Body(self, self.glob.sprite["obj"][15], 3))
self.frame['turret'].append(Turret(self, self.glob.sprite["obj"][39], 3))
self.frame['turret'][0].fire_range = 0.2 * self.glob.data["screen_width"]
self.frame['turret'][0].fire_rate = 4
self.frame['turret'][0].rot_speed = 2
self.frame['turret'][0].salvo = 3
self.frame['turret'][0].salvo_interval = 0.2
self.frame['turret'][0].pos_adj = vec(0.0,0) * self.glob.data["screen_width"]
self.frame['turret'][0].projectile_image = self.glob.sprite["obj"][60]
self.frame['turret'][0].projectile_speed = 0.01 * self.glob.data["screen_width"]
self.frame['turret'][0].projectile_damage = 10
self.frame['turret'][0].fire_sound = self.glob.sounds[2]
self.frame['turret'][0].trail_anim = "None"
self.frame['turret'][0].explosion_anim = self.glob.sprite['anim_spark']
self.frame['turret'][0].explosion_sound = self.glob.sounds[1]
self.hp = 600
self.max_acc = (0.35)
self.detection_range = (0.5) * self.glob.data["screen_width"]
self.hold_range = (0.1) * self.glob.data["screen_width"]
self.rot_speed = 2
if name == "Tanker_2":
self.frame['body'].append(Body(self, self.glob.sprite["obj"][16], 3))
self.frame['turret'].append(Turret(self, self.glob.sprite["obj"][39], 3))
self.frame['turret'][0].fire_range = 0.2 * self.glob.data["screen_width"]
self.frame['turret'][0].fire_rate = 4
self.frame['turret'][0].rot_speed = 2
self.frame['turret'][0].salvo = 3
self.frame['turret'][0].salvo_interval = 0.2
self.frame['turret'][0].pos_adj = vec(0.0,0) * self.glob.data["screen_width"]
self.frame['turret'][0].projectile_image = self.glob.sprite["obj"][60]
self.frame['turret'][0].projectile_speed = 0.01 * self.glob.data["screen_width"]
self.frame['turret'][0].projectile_damage = 15
self.frame['turret'][0].fire_sound = self.glob.sounds[2]
self.frame['turret'][0].trail_anim = "None"
self.frame['turret'][0].explosion_anim = self.glob.sprite['anim_spark']
self.frame['turret'][0].explosion_sound = self.glob.sounds[1]
self.hp = 1000
self.max_acc = (0.35)
self.detection_range = (0.5) * self.glob.data["screen_width"]
self.hold_range = (0.1) * self.glob.data["screen_width"]
self.rot_speed = 2
if name == "Tanker_3":
self.frame['body'].append(Body(self, self.glob.sprite["obj"][17], 3))
self.frame['turret'].append(Turret(self, self.glob.sprite["obj"][39], 3))
self.frame['turret'][0].fire_range = 0.2 * self.glob.data["screen_width"]
self.frame['turret'][0].fire_rate = 4
self.frame['turret'][0].rot_speed = 2
self.frame['turret'][0].salvo = 3
self.frame['turret'][0].salvo_interval = 0.2
self.frame['turret'][0].pos_adj = vec(0.0,0) * self.glob.data["screen_width"]
self.frame['turret'][0].projectile_image = self.glob.sprite["obj"][60]
self.frame['turret'][0].projectile_speed = 0.01 * self.glob.data["screen_width"]
self.frame['turret'][0].projectile_damage = 20
self.frame['turret'][0].fire_sound = self.glob.sounds[2]
self.frame['turret'][0].trail_anim = "None"
self.frame['turret'][0].explosion_anim = self.glob.sprite['anim_spark']
self.frame['turret'][0].explosion_sound = self.glob.sounds[1]
self.hp = 1500
self.max_acc = (0.35)
self.detection_range = (0.5) * self.glob.data["screen_width"]
self.hold_range = (0.1) * self.glob.data["screen_width"]
self.rot_speed = 2
if name == "Tanker_4":
self.frame['body'].append(Body(self, self.glob.sprite["obj"][18], 3))
self.frame['turret'].append(Turret(self, self.glob.sprite["obj"][39], 3))
self.frame['turret'][0].fire_range = 0.2 * self.glob.data["screen_width"]
self.frame['turret'][0].fire_rate = 4
self.frame['turret'][0].rot_speed = 2
self.frame['turret'][0].salvo = 3
self.frame['turret'][0].salvo_interval = 0.2
self.frame['turret'][0].pos_adj = vec(0.0,0) * self.glob.data["screen_width"]
self.frame['turret'][0].projectile_image = self.glob.sprite["obj"][60]
self.frame['turret'][0].projectile_speed = 0.01 * self.glob.data["screen_width"]
self.frame['turret'][0].projectile_damage = 30
self.frame['turret'][0].fire_sound = self.glob.sounds[2]
self.frame['turret'][0].trail_anim = "None"
self.frame['turret'][0].explosion_anim = self.glob.sprite['anim_spark']
self.frame['turret'][0].explosion_sound = self.glob.sounds[1]
self.hp = 2200
self.max_acc = (0.35)
self.detection_range = (0.5) * self.glob.data["screen_width"]
self.hold_range = (0.1) * self.glob.data["screen_width"]
self.rot_speed = 2
if name == "E_1":
self.frame['body'].append(Body(self, self.glob.sprite["obj"][19], 3))
self.frame['turret'].append(Turret(self, self.glob.sprite["obj"][33], 3))
self.frame['turret'][0].fire_range = 0.3 * self.glob.data["screen_width"]
self.frame['turret'][0].fire_rate = 4 - int(2 * (self.glob.game_lvl / 99))
self.frame['turret'][0].rot_speed = 1 + int(2 * (self.glob.game_lvl / 99))
self.frame['turret'][0].salvo = 3 + int(3 * (self.glob.game_lvl / 99))
self.frame['turret'][0].salvo_interval = 0.2
self.frame['turret'][0].pos_adj = vec(0.008,0) * self.glob.data["screen_width"]
self.frame['turret'][0].projectile_image = self.glob.sprite["obj"][59]
self.frame['turret'][0].projectile_speed = 0.01 * self.glob.data["screen_width"]
self.frame['turret'][0].projectile_damage = 10 + int(10 * (self.glob.game_lvl / 99))
self.frame['turret'][0].fire_sound = self.glob.sounds[2]
self.frame['turret'][0].trail_anim = "None"
self.frame['turret'][0].explosion_anim = self.glob.sprite['anim_spark']
self.frame['turret'][0].explosion_sound = self.glob.sounds[1]
self.hp = 100 + int(100 * (self.glob.game_lvl / 99))
self.max_acc = (0.2) + (0.1 * (self.glob.game_lvl / 99))
self.detection_range = (0.5) * self.glob.data["screen_width"]
self.hold_range = (0.2) * self.glob.data["screen_width"]
self.rot_speed = 3
if name == "E_2":
self.frame['body'].append(Body(self, self.glob.sprite["obj"][20], 3))
self.frame['turret'].append(Turret(self, self.glob.sprite["obj"][34], 3))
self.frame['turret'][0].fire_range = 0.5 * self.glob.data["screen_width"]
self.frame['turret'][0].fire_rate = 5 - int(2 * (self.glob.game_lvl / 99))
self.frame['turret'][0].rot_speed = 3 - int(1 * (self.glob.game_lvl / 99))
self.frame['turret'][0].salvo = 1 + int(1 * (self.glob.game_lvl / 99))
self.frame['turret'][0].salvo_interval = 0.4
self.frame['turret'][0].pos_adj = vec(0.0,0) * self.glob.data["screen_width"]
self.frame['turret'][0].projectile_image = self.glob.sprite["obj"][67]
self.frame['turret'][0].projectile_speed = 0.005 * self.glob.data["screen_width"]
self.frame['turret'][0].projectile_damage = 85 + int(100 * (self.glob.game_lvl / 99))
self.frame['turret'][0].fire_sound = self.glob.sounds[2]
self.frame['turret'][0].trail_anim = self.glob.sprite['anim_bullet_flame']
self.frame['turret'][0].explosion_anim = self.glob.sprite['anim_energy_leak_exp']
self.frame['turret'][0].explosion_sound = self.glob.sounds[1]
self.hp = 200 + int(100 * (self.glob.game_lvl / 99))
self.max_acc = (0.15) + (0.1 * (self.glob.game_lvl / 99))
self.detection_range = (0.7) * self.glob.data["screen_width"]
self.hold_range = (0.5) * self.glob.data["screen_width"]
self.rot_speed = 3
if name == "E_3":
self.frame['body'].append(Body(self, self.glob.sprite["obj"][21], 3))
self.frame['turret'].append(Turret(self, self.glob.sprite["obj"][32], 3))
self.frame['turret'][0].fire_range = 0.4 * self.glob.data["screen_width"]
self.frame['turret'][0].fire_rate = 2 + int(1 * (self.glob.game_lvl / 99))
self.frame['turret'][0].rot_speed = 1
self.frame['turret'][0].salvo = 2 + int(2 * (self.glob.game_lvl / 99))
self.frame['turret'][0].salvo_interval = 0.5
self.frame['turret'][0].pos_adj = vec(0.01,0) * self.glob.data["screen_width"]
self.frame['turret'][0].projectile_image = self.glob.sprite["obj"][61]
self.frame['turret'][0].projectile_speed = 0.01 * self.glob.data["screen_width"]
self.frame['turret'][0].projectile_damage = 15 + int(25 * (self.glob.game_lvl / 99))
self.frame['turret'][0].fire_sound = self.glob.sounds[2]
self.frame['turret'][0].trail_anim = "None"
self.frame['turret'][0].explosion_anim = self.glob.sprite['anim_energy_leak_exp']
self.frame['turret'][0].explosion_sound = self.glob.sounds[1]
self.frame['turret'].append(Launcher(self, self.glob.sprite["obj"][30], 3))
self.frame['turret'][1].fire_range = (0.9) * self.glob.data["screen_width"]
self.frame['turret'][1].fire_rate = 10 - int(5 * (self.glob.game_lvl / 99))
self.frame['turret'][1].salvo = 3 + int(1 * (self.glob.game_lvl / 99))
self.frame['turret'][1].salvo_interval = 0.2
self.frame['turret'][1].rot_speed = 1 + int(1 * (self.glob.game_lvl / 99))
self.frame['turret'][1].pos_adj = vec(-0.02,0) * self.glob.data["screen_width"]
self.frame['turret'][1].projectile_image = self.glob.sprite["obj"][68]
self.frame['turret'][1].projectile_speed = 0.005 * self.glob.data["screen_width"]
self.frame['turret'][1].missile_rot_speed = 4
self.frame['turret'][1].projectile_damage = 45 + int(150 * (self.glob.game_lvl / 99))
self.frame['turret'][1].fire_sound = self.glob.sounds[3]
self.frame['turret'][1].trail_anim = self.glob.sprite['anim_vapour_trail']
self.frame['turret'][1].explosion_anim = self.glob.sprite['anim_yellow_exp']
self.frame['turret'][1].explosion_sound = self.glob.sounds[1]
self.hp = 75 + int(100 * (self.glob.game_lvl / 99))
self.max_acc = (0.1)
self.detection_range = (1.1) * self.glob.data["screen_width"]
self.hold_range = (0.9) * self.glob.data["screen_width"]
self.rot_speed = 3
if name == "E_4":
self.frame['body'].append(Body(self, self.glob.sprite["obj"][22], 3))
self.frame['turret'].append(Turret(self, self.glob.sprite["obj"][57], 3))
self.frame['turret'][0].fire_range = 0.5 * self.glob.data["screen_width"]
self.frame['turret'][0].fire_rate = 4 - int(1 * (self.glob.game_lvl / 99))
self.frame['turret'][0].rot_speed = 2 + int(1 * (self.glob.game_lvl / 99))
self.frame['turret'][0].salvo = 5 + int(5 * (self.glob.game_lvl / 99))
self.frame['turret'][0].salvo_interval = 0.2
self.frame['turret'][0].pos_adj = vec(0.0,0) * self.glob.data["screen_width"]
self.frame['turret'][0].projectile_image = self.glob.sprite["obj"][60]
self.frame['turret'][0].projectile_speed = 0.01 * self.glob.data["screen_width"]
self.frame['turret'][0].projectile_damage = 25 + int(10 * (self.glob.game_lvl / 99))
self.frame['turret'][0].fire_sound = self.glob.sounds[2]
self.frame['turret'][0].trail_anim = "None"
self.frame['turret'][0].explosion_anim = self.glob.sprite['anim_spark']
self.frame['turret'][0].explosion_sound = self.glob.sounds[1]
self.hp = 200 + int(100 * (self.glob.game_lvl / 99))
self.max_acc = (0.17)
self.detection_range = (0.7) * self.glob.data["screen_width"]
self.hold_range = (0.5) * self.glob.data["screen_width"]
self.rot_speed = 3
if name == "E_5":
self.frame['body'].append(Body(self, self.glob.sprite["obj"][23], 3))
self.frame['turret'].append(Turret(self, self.glob.sprite["obj"][33], 3))
self.frame['turret'][0].fire_range = 0.3 * self.glob.data["screen_width"]
self.frame['turret'][0].fire_rate = 3 - int(1 * (self.glob.game_lvl / 99))
self.frame['turret'][0].rot_speed = 2 + int(1 * (self.glob.game_lvl / 99))
self.frame['turret'][0].salvo = 3 + int(2 * (self.glob.game_lvl / 99))
self.frame['turret'][0].salvo_interval = 0.2
self.frame['turret'][0].pos_adj = vec(0.012,0.019) * self.glob.data["screen_width"]
self.frame['turret'][0].projectile_image = self.glob.sprite["obj"][59]
self.frame['turret'][0].projectile_speed = 0.01 * self.glob.data["screen_width"]
self.frame['turret'][0].projectile_damage = 10 + int(5 * (self.glob.game_lvl / 99))
self.frame['turret'][0].fire_sound = self.glob.sounds[2]
self.frame['turret'][0].trail_anim = "None"
self.frame['turret'][0].explosion_anim = self.glob.sprite['anim_spark']
self.frame['turret'][0].explosion_sound = self.glob.sounds[1]
self.frame['turret'].append(Turret(self, self.glob.sprite["obj"][33], 3))
self.frame['turret'][1].fire_range = 0.3 * self.glob.data["screen_width"]
self.frame['turret'][1].fire_rate = 3 - int(1 * (self.glob.game_lvl / 99))
self.frame['turret'][1].rot_speed = 2 + int(1 * (self.glob.game_lvl / 99))
self.frame['turret'][1].salvo = 3 + int(2 * (self.glob.game_lvl / 99))
self.frame['turret'][1].salvo_interval = 0.2
self.frame['turret'][1].pos_adj = vec(0.012,-0.019) * self.glob.data["screen_width"]
self.frame['turret'][1].projectile_image = self.glob.sprite["obj"][59]
self.frame['turret'][1].projectile_speed = 0.01 * self.glob.data["screen_width"]
self.frame['turret'][1].projectile_damage = 15 + int(5 * (self.glob.game_lvl / 99))
self.frame['turret'][1].fire_sound = self.glob.sounds[2]
self.frame['turret'][1].trail_anim = "None"
self.frame['turret'][1].explosion_anim = self.glob.sprite['anim_spark']
self.frame['turret'][1].explosion_sound = self.glob.sounds[1]
self.frame['turret'].append(Turret(self, self.glob.sprite["obj"][38], 3))
self.frame['turret'][2].fire_range = 0.5 * self.glob.data["screen_width"]
self.frame['turret'][2].fire_rate = 5 - int(1 * (self.glob.game_lvl / 99))
self.frame['turret'][2].rot_speed = 1 + int(1 * (self.glob.game_lvl / 99))
self.frame['turret'][2].salvo = 1 + int(1 * (self.glob.game_lvl / 99))
self.frame['turret'][2].salvo_interval = 0.4
self.frame['turret'][2].pos_adj = vec(-0.01,0) * self.glob.data["screen_width"]
self.frame['turret'][2].projectile_image = self.glob.sprite["obj"][67]
self.frame['turret'][2].projectile_speed = 0.005 * self.glob.data["screen_width"]
self.frame['turret'][2].projectile_damage = 85 + int(100 * (self.glob.game_lvl / 99))
self.frame['turret'][2].fire_sound = self.glob.sounds[2]
self.frame['turret'][2].trail_anim = self.glob.sprite['anim_bullet_flame']
self.frame['turret'][2].explosion_anim = self.glob.sprite['anim_energy_leak_exp']
self.frame['turret'][2].explosion_sound = self.glob.sounds[1]
self.hp = 250 + int(100 * (self.glob.game_lvl / 99))
self.max_acc = (0.15)
self.detection_range = (0.7) * self.glob.data["screen_width"]
self.hold_range = (0.5) * self.glob.data["screen_width"]
self.rot_speed = 3
if name == "E_6":
self.frame['body'].append(Body(self, self.glob.sprite["obj"][24], 3))
self.frame['turret'].append(Turret(self, self.glob.sprite["obj"][31], 3))
self.frame['turret'][0].fire_range = 0.5 * self.glob.data["screen_width"]
self.frame['turret'][0].fire_rate = 3 - int(1 * (self.glob.game_lvl / 99))
self.frame['turret'][0].rot_speed = 2 + int(1 * (self.glob.game_lvl / 99))
self.frame['turret'][0].salvo = 3 + int(2 * (self.glob.game_lvl / 99))
self.frame['turret'][0].salvo_interval = 0.2
self.frame['turret'][0].pos_adj = vec(0.013,0.0175) * self.glob.data["screen_width"]
self.frame['turret'][0].projectile_image = self.glob.sprite["obj"][59]
self.frame['turret'][0].projectile_speed = 0.01 * self.glob.data["screen_width"]
self.frame['turret'][0].projectile_damage = 10 + int(5 * (self.glob.game_lvl / 99))
self.frame['turret'][0].fire_sound = self.glob.sounds[2]
self.frame['turret'][0].trail_anim = "None"
self.frame['turret'][0].explosion_anim = self.glob.sprite['anim_spark']
self.frame['turret'][0].explosion_sound = self.glob.sounds[1]
self.frame['turret'].append(Turret(self, self.glob.sprite["obj"][31], 3))
self.frame['turret'][1].fire_range = 0.5 * self.glob.data["screen_width"]
self.frame['turret'][1].fire_rate = 3 - int(1 * (self.glob.game_lvl / 99))
self.frame['turret'][1].rot_speed = 2 + int(1 * (self.glob.game_lvl / 99))
self.frame['turret'][1].salvo = 3 + int(2 * (self.glob.game_lvl / 99))
self.frame['turret'][1].salvo_interval = 0.2
self.frame['turret'][1].pos_adj = vec(0.013,-0.0175) * self.glob.data["screen_width"]
self.frame['turret'][1].projectile_image = self.glob.sprite["obj"][59]
self.frame['turret'][1].projectile_speed = 0.01 * self.glob.data["screen_width"]
self.frame['turret'][1].projectile_damage = 10 + int(5 * (self.glob.game_lvl / 99))
self.frame['turret'][1].fire_sound = self.glob.sounds[2]
self.frame['turret'][1].trail_anim = "None"
self.frame['turret'][1].explosion_anim = self.glob.sprite['anim_spark']
self.frame['turret'][1].explosion_sound = self.glob.sounds[1]
self.frame['turret'].append(Turret(self, self.glob.sprite["obj"][56], 3))
self.frame['turret'][2].fire_range = 0.5 * self.glob.data["screen_width"]
self.frame['turret'][2].fire_rate = 4 - int(1 * (self.glob.game_lvl / 99))
self.frame['turret'][2].rot_speed = 2
self.frame['turret'][2].salvo = 4 + int(4 * (self.glob.game_lvl / 99))
self.frame['turret'][2].salvo_interval = 0.2
self.frame['turret'][2].pos_adj = vec(-0.005,0) * self.glob.data["screen_width"]
self.frame['turret'][2].projectile_image = self.glob.sprite["obj"][67]
self.frame['turret'][2].projectile_speed = 0.005 * self.glob.data["screen_width"]
self.frame['turret'][2].projectile_damage = 25 + int(25 * (self.glob.game_lvl / 99))
self.frame['turret'][2].fire_sound = self.glob.sounds[2]
self.frame['turret'][2].trail_anim = "None"
self.frame['turret'][2].explosion_anim = self.glob.sprite['anim_energy_leak_exp']
self.frame['turret'][2].explosion_sound = self.glob.sounds[1]
self.hp = 250 + int(100 * (self.glob.game_lvl / 99))
self.max_acc = (0.14)
self.detection_range = (0.7) * self.glob.data["screen_width"]
self.hold_range = (0.5) * self.glob.data["screen_width"]
self.rot_speed = 3
if name == "E_7":
self.frame['body'].append(Body(self, self.glob.sprite["obj"][25], 3))
self.frame['turret'].append(Turret(self, self.glob.sprite["obj"][39], 3))
self.frame['turret'][0].fire_range = 0.5 * self.glob.data["screen_width"]
self.frame['turret'][0].fire_rate = 3 - int(1 * (self.glob.game_lvl / 99))
self.frame['turret'][0].rot_speed = 2 + int(1 * (self.glob.game_lvl / 99))
self.frame['turret'][0].salvo = 3 + int(2 * (self.glob.game_lvl / 99))
self.frame['turret'][0].salvo_interval = 0.2
self.frame['turret'][0].pos_adj = vec(0.014,0.019) * self.glob.data["screen_width"]
self.frame['turret'][0].projectile_image = self.glob.sprite["obj"][59]
self.frame['turret'][0].projectile_speed = 0.01 * self.glob.data["screen_width"]
self.frame['turret'][0].projectile_damage = 10 + int(5 * (self.glob.game_lvl / 99))
self.frame['turret'][0].fire_sound = self.glob.sounds[2]
self.frame['turret'][0].trail_anim = "None"
self.frame['turret'][0].explosion_anim = self.glob.sprite['anim_spark']
self.frame['turret'][0].explosion_sound = self.glob.sounds[1]
self.frame['turret'].append(Turret(self, self.glob.sprite["obj"][39], 3))
self.frame['turret'][1].fire_range = 0.5 * self.glob.data["screen_width"]
self.frame['turret'][1].fire_rate = 3 - int(1 * (self.glob.game_lvl / 99))
self.frame['turret'][1].rot_speed = 2 + int(1 * (self.glob.game_lvl / 99))
self.frame['turret'][1].salvo = 3 + int(2 * (self.glob.game_lvl / 99))
self.frame['turret'][1].salvo_interval = 0.2
self.frame['turret'][1].pos_adj = vec(0.014,-0.019) * self.glob.data["screen_width"]
self.frame['turret'][1].projectile_image = self.glob.sprite["obj"][59]
self.frame['turret'][1].projectile_speed = 0.01 * self.glob.data["screen_width"]
self.frame['turret'][1].projectile_damage = 10 + int(5 * (self.glob.game_lvl / 99))
self.frame['turret'][1].fire_sound = self.glob.sounds[2]
self.frame['turret'][1].trail_anim = "None"
self.frame['turret'][1].explosion_anim = self.glob.sprite['anim_spark']
self.frame['turret'][1].explosion_sound = self.glob.sounds[1]
self.frame['turret'].append(Turret(self, self.glob.sprite["obj"][39], 3))
self.frame['turret'][2].fire_range = 0.5 * self.glob.data["screen_width"]
self.frame['turret'][2].fire_rate = 3 - int(1 * (self.glob.game_lvl / 99))
self.frame['turret'][2].rot_speed = 2 + int(1 * (self.glob.game_lvl / 99))
self.frame['turret'][2].salvo = 3 + int(2 * (self.glob.game_lvl / 99))
self.frame['turret'][2].salvo_interval = 0.2
self.frame['turret'][2].pos_adj = vec(-0.022,0.0185) * self.glob.data["screen_width"]
self.frame['turret'][2].projectile_image = self.glob.sprite["obj"][59]
self.frame['turret'][2].projectile_speed = 0.01 * self.glob.data["screen_width"]
self.frame['turret'][2].projectile_damage = 10 + int(5 * (self.glob.game_lvl / 99))
self.frame['turret'][2].fire_sound = self.glob.sounds[2]
self.frame['turret'][2].trail_anim = "None"
self.frame['turret'][2].explosion_anim = self.glob.sprite['anim_spark']
self.frame['turret'][2].explosion_sound = self.glob.sounds[1]
self.frame['turret'].append(Turret(self, self.glob.sprite["obj"][39], 3))
self.frame['turret'][3].fire_range = 0.5 * self.glob.data["screen_width"]
self.frame['turret'][3].fire_rate = 3 - int(1 * (self.glob.game_lvl / 99))
self.frame['turret'][3].rot_speed = 2 + int(1 * (self.glob.game_lvl / 99))
self.frame['turret'][3].salvo = 3 + int(2 * (self.glob.game_lvl / 99))
self.frame['turret'][3].salvo_interval = 0.2
self.frame['turret'][3].pos_adj = vec(-0.022,-0.0185) * self.glob.data["screen_width"]
self.frame['turret'][3].projectile_image = self.glob.sprite["obj"][59]
self.frame['turret'][3].projectile_speed = 0.01 * self.glob.data["screen_width"]
self.frame['turret'][3].projectile_damage = 10 + int(5 * (self.glob.game_lvl / 99))
self.frame['turret'][3].fire_sound = self.glob.sounds[2]
self.frame['turret'][3].trail_anim = "None"
self.frame['turret'][3].explosion_anim = self.glob.sprite['anim_spark']
self.frame['turret'][3].explosion_sound = self.glob.sounds[1]
self.frame['turret'].append(Turret(self, self.glob.sprite["obj"][51], 3))
self.frame['turret'][4].fire_range = 0.5 * self.glob.data["screen_width"]
self.frame['turret'][4].fire_rate = 5 - int(1 * (self.glob.game_lvl / 99))
self.frame['turret'][4].rot_speed = 1 + int(1 * (self.glob.game_lvl / 99))
self.frame['turret'][4].salvo = 1 + int(1 * (self.glob.game_lvl / 99))
self.frame['turret'][4].salvo_interval = 0.4
self.frame['turret'][4].pos_adj = vec(0,0) * self.glob.data["screen_width"]
self.frame['turret'][4].projectile_image = self.glob.sprite["obj"][67]
self.frame['turret'][4].projectile_speed = 0.005 * self.glob.data["screen_width"]
self.frame['turret'][4].projectile_damage = 85 + int(100 * (self.glob.game_lvl / 99))
self.frame['turret'][4].fire_sound = self.glob.sounds[2]
self.frame['turret'][4].trail_anim = "None"
self.frame['turret'][4].explosion_anim = self.glob.sprite['anim_energy_leak_exp']
self.frame['turret'][4].explosion_sound = self.glob.sounds[1]
self.hp = 300 + int(100 * (self.glob.game_lvl / 99))
self.max_acc = (0.12)
self.detection_range = (0.7) * self.glob.data["screen_width"]
self.hold_range = (0.5) * self.glob.data["screen_width"]
self.rot_speed = 3
if name == "E_8":
self.frame['body'].append(Body(self, self.glob.sprite["obj"][26], 3))
self.frame['turret'].append(Turret(self, self.glob.sprite["obj"][39], 3))
self.frame['turret'][0].fire_range = 0.5 * self.glob.data["screen_width"]
self.frame['turret'][0].fire_rate = 3 - int(1 * (self.glob.game_lvl / 99))
self.frame['turret'][0].rot_speed = 2 + int(1 * (self.glob.game_lvl / 99))
self.frame['turret'][0].salvo = 3 + int(2 * (self.glob.game_lvl / 99))
self.frame['turret'][0].salvo_interval = 0.2
self.frame['turret'][0].pos_adj = vec(0.04,0.01) * self.glob.data["screen_width"]
self.frame['turret'][0].projectile_image = self.glob.sprite["obj"][59]
self.frame['turret'][0].projectile_speed = 0.01 * self.glob.data["screen_width"]
self.frame['turret'][0].projectile_damage = 10 + int(5 * (self.glob.game_lvl / 99))
self.frame['turret'][0].fire_sound = self.glob.sounds[2]
self.frame['turret'][0].trail_anim = "None"
self.frame['turret'][0].explosion_anim = self.glob.sprite['anim_spark']
self.frame['turret'][0].explosion_sound = self.glob.sounds[1]
self.frame['turret'].append(Turret(self, self.glob.sprite["obj"][39], 3))
self.frame['turret'][1].fire_range = 0.5 * self.glob.data["screen_width"]
self.frame['turret'][1].fire_rate = 3 - int(1 * (self.glob.game_lvl / 99))
self.frame['turret'][1].rot_speed = 2 + int(1 * (self.glob.game_lvl / 99))
self.frame['turret'][1].salvo = 3 + int(2 * (self.glob.game_lvl / 99))
self.frame['turret'][1].salvo_interval = 0.2
self.frame['turret'][1].pos_adj = vec(0.04,-0.01) * self.glob.data["screen_width"]
self.frame['turret'][1].projectile_image = self.glob.sprite["obj"][59]
self.frame['turret'][1].projectile_speed = 0.01 * self.glob.data["screen_width"]
self.frame['turret'][1].projectile_damage = 10 + int(5 * (self.glob.game_lvl / 99))
self.frame['turret'][1].fire_sound = self.glob.sounds[2]
self.frame['turret'][1].trail_anim = "None"
self.frame['turret'][1].explosion_anim = self.glob.sprite['anim_spark']
self.frame['turret'][1].explosion_sound = self.glob.sounds[1]
self.frame['turret'].append(Turret(self, self.glob.sprite["obj"][33], 3))
self.frame['turret'][2].fire_range = 0.5 * self.glob.data["screen_width"]
self.frame['turret'][2].fire_rate = 3 - int(1 * (self.glob.game_lvl / 99))
self.frame['turret'][2].rot_speed = 2 + int(1 * (self.glob.game_lvl / 99))
self.frame['turret'][2].salvo = 3 + int(2 * (self.glob.game_lvl / 99))
self.frame['turret'][2].salvo_interval = 0.2
self.frame['turret'][2].pos_adj = vec(0.0135,0.0185) * self.glob.data["screen_width"]
self.frame['turret'][2].projectile_image = self.glob.sprite["obj"][59]
self.frame['turret'][2].projectile_speed = 0.01 * self.glob.data["screen_width"]
self.frame['turret'][2].projectile_damage = 10 + int(5 * (self.glob.game_lvl / 99))
self.frame['turret'][2].fire_sound = self.glob.sounds[2]
self.frame['turret'][2].trail_anim = "None"
self.frame['turret'][2].explosion_anim = self.glob.sprite['anim_spark']
self.frame['turret'][2].explosion_sound = self.glob.sounds[1]
self.frame['turret'].append(Turret(self, self.glob.sprite["obj"][33], 3))
self.frame['turret'][3].fire_range = 0.5 * self.glob.data["screen_width"]
self.frame['turret'][3].fire_rate = 3 - int(1 * (self.glob.game_lvl / 99))
self.frame['turret'][3].rot_speed = 2 + int(1 * (self.glob.game_lvl / 99))
self.frame['turret'][3].salvo = 3 + int(2 * (self.glob.game_lvl / 99))
self.frame['turret'][3].salvo_interval = 0.2
self.frame['turret'][3].pos_adj = vec(0.0135,-0.0185) * self.glob.data["screen_width"]
self.frame['turret'][3].projectile_image = self.glob.sprite["obj"][59]
self.frame['turret'][3].projectile_speed = 0.01 * self.glob.data["screen_width"]
self.frame['turret'][3].projectile_damage = 10 + int(5 * (self.glob.game_lvl / 99))
self.frame['turret'][3].fire_sound = self.glob.sounds[2]
self.frame['turret'][3].trail_anim = "None"
self.frame['turret'][3].explosion_anim = self.glob.sprite['anim_spark']
self.frame['turret'][3].explosion_sound = self.glob.sounds[1]
self.frame['turret'].append(Turret(self, self.glob.sprite["obj"][39], 3))
self.frame['turret'][4].fire_range = 0.5 * self.glob.data["screen_width"]
self.frame['turret'][4].fire_rate = 3 - int(1 * (self.glob.game_lvl / 99))
self.frame['turret'][4].rot_speed = 2 + int(1 * (self.glob.game_lvl / 99))
self.frame['turret'][4].salvo = 3 + int(2 * (self.glob.game_lvl / 99))
self.frame['turret'][4].salvo_interval = 0.2
self.frame['turret'][4].pos_adj = vec(-0.0225,0.0185) * self.glob.data["screen_width"]
self.frame['turret'][4].projectile_image = self.glob.sprite["obj"][59]
self.frame['turret'][4].projectile_speed = 0.01 * self.glob.data["screen_width"]
self.frame['turret'][4].projectile_damage = 10 + int(5 * (self.glob.game_lvl / 99))
self.frame['turret'][4].fire_sound = self.glob.sounds[2]
self.frame['turret'][4].trail_anim = "None"
self.frame['turret'][4].explosion_anim = self.glob.sprite['anim_spark']
self.frame['turret'][4].explosion_sound = self.glob.sounds[1]
self.frame['turret'].append(Turret(self, self.glob.sprite["obj"][39], 3))
self.frame['turret'][5].fire_range = 0.5 * self.glob.data["screen_width"]
self.frame['turret'][5].fire_rate = 3 - int(1 * (self.glob.game_lvl / 99))
self.frame['turret'][5].rot_speed = 2 + int(1 * (self.glob.game_lvl / 99))
self.frame['turret'][5].salvo = 3 + int(2 * (self.glob.game_lvl / 99))
self.frame['turret'][5].salvo_interval = 0.2
self.frame['turret'][5].pos_adj = vec(-0.0225,-0.0185) * self.glob.data["screen_width"]
self.frame['turret'][5].projectile_image = self.glob.sprite["obj"][59]
self.frame['turret'][5].projectile_speed = 0.01 * self.glob.data["screen_width"]
self.frame['turret'][5].projectile_damage = 10 + int(5 * (self.glob.game_lvl / 99))
self.frame['turret'][5].fire_sound = self.glob.sounds[2]
self.frame['turret'][5].trail_anim = "None"
self.frame['turret'][5].explosion_anim = self.glob.sprite['anim_spark']
self.frame['turret'][5].explosion_sound = self.glob.sounds[1]
self.frame['turret'].append(Turret(self, self.glob.sprite["obj"][53], 3))
self.frame['turret'][6].fire_range = 0.5 * self.glob.data["screen_width"]
self.frame['turret'][6].fire_rate = 10 - int(5 * (self.glob.game_lvl / 99))
self.frame['turret'][6].rot_speed = 1 + int(1 * (self.glob.game_lvl / 99))
self.frame['turret'][6].salvo = 2
self.frame['turret'][6].salvo_interval = 0.4
self.frame['turret'][6].pos_adj = vec(-0.002,0) * self.glob.data["screen_width"]
self.frame['turret'][6].projectile_image = self.glob.sprite["obj"][67]
self.frame['turret'][6].projectile_speed = 0.005 * self.glob.data["screen_width"]
self.frame['turret'][6].projectile_damage = 85 + int(100 * (self.glob.game_lvl / 99))
self.frame['turret'][6].fire_sound = self.glob.sounds[2]
self.frame['turret'][6].trail_anim = self.glob.sprite['anim_bullet_flame']
self.frame['turret'][6].explosion_anim = self.glob.sprite['anim_energy_leak_exp']
self.frame['turret'][6].explosion_sound = self.glob.sounds[1]
self.hp = 400 + int(100 * (self.glob.game_lvl / 99))
self.max_acc = (0.11)
self.detection_range = (0.7) * self.glob.data["screen_width"]
self.hold_range = (0.5) * self.glob.data["screen_width"]
self.rot_speed = 3
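    # The player base below is assembled from the saved cannon_N_lvl upgrade values:
    # each turret only exists once its level is above 0, and its stats scale with
    # cannon_N_lvl / 9 just as the enemies above scale with game_lvl / 99.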
if name == "Base":
self.frame['body'][0] = Body(self, self.glob.sprite["obj"][0], 4)
if self.glob.data["cannon_1_lvl"] > 0:
self.frame['turret'][0] = Turret(self, self.glob.sprite["obj"][29], 4)
self.frame['turret'][0].fire_range = (0.6 + (0.4 * (self.glob.data["cannon_1_lvl"] / 9)) ) * self.glob.data["screen_width"]
self.frame['turret'][0].fire_rate = 5 - int(1 * (self.glob.data["cannon_1_lvl"] / 9))
self.frame['turret'][0].salvo = 5 + int(3 * (self.glob.data["cannon_1_lvl"] / 9))
self.frame['turret'][0].salvo_interval = 0.2
self.frame['turret'][0].rot_speed = 2 + int(1 * (self.glob.data["cannon_1_lvl"] / 9))
self.frame['turret'][0].pos_adj = vec(0.01,-0.19) * self.glob.data["screen_width"]
self.frame['turret'][0].projectile_image = self.glob.sprite["obj"][61]
self.frame['turret'][0].projectile_speed = 0.01 * self.glob.data["screen_width"]
self.frame['turret'][0].projectile_damage = 5 + int(25 * (self.glob.data["cannon_1_lvl"] / 9))
self.frame['turret'][0].fire_sound = self.glob.sounds[2]
self.frame['turret'][0].trail_anim = "None"
self.frame['turret'][0].explosion_anim = self.glob.sprite['anim_spark']
self.frame['turret'][0].explosion_sound = self.glob.sounds[1]
if self.glob.data["cannon_2_lvl"] > 0:
self.frame['turret'][1] = Turret(self, self.glob.sprite["obj"][29], 4)
self.frame['turret'][1].fire_range = (0.6 + (0.4 * (self.glob.data["cannon_2_lvl"] / 9)) ) * self.glob.data["screen_width"]
self.frame['turret'][1].fire_rate = 5 - int(1 * (self.glob.data["cannon_2_lvl"] / 9))
self.frame['turret'][1].salvo = 5 + int(3 * (self.glob.data["cannon_2_lvl"] / 9))
self.frame['turret'][1].salvo_interval = 0.2
self.frame['turret'][1].rot_speed = 2 + int(1 * (self.glob.data["cannon_2_lvl"] / 9))
self.frame['turret'][1].pos_adj = vec(0.01,-0.105) * self.glob.data["screen_width"]
self.frame['turret'][1].projectile_image = self.glob.sprite["obj"][61]
self.frame['turret'][1].projectile_speed = 0.01 * self.glob.data["screen_width"]
self.frame['turret'][1].projectile_damage = 5 + int(25 * (self.glob.data["cannon_2_lvl"] / 9))
self.frame['turret'][1].fire_sound = self.glob.sounds[2]
self.frame['turret'][1].trail_anim = "None"
self.frame['turret'][1].explosion_anim = self.glob.sprite['anim_spark']
self.frame['turret'][1].explosion_sound = self.glob.sounds[1]
if self.glob.data["cannon_3_lvl"] > 0:
self.frame['turret'][2] = Turret(self, self.glob.sprite["obj"][28], 4)
self.frame['turret'][2].fire_range = (0.6 + (0.4 * (self.glob.data["cannon_3_lvl"] / 9)) ) * self.glob.data["screen_width"]
self.frame['turret'][2].fire_rate = 10 - int(5 * (self.glob.data["cannon_3_lvl"] / 9))
self.frame['turret'][2].salvo = 1 + int(1 * (self.glob.data["cannon_3_lvl"] / 9))
self.frame['turret'][2].salvo_interval = 2
self.frame['turret'][2].rot_speed = 1 + int(1 * (self.glob.data["cannon_3_lvl"] / 9))
self.frame['turret'][2].pos_adj = vec(0.01,0.105) * self.glob.data["screen_width"]
self.frame['turret'][2].projectile_image = self.glob.sprite["obj"][67]
self.frame['turret'][2].projectile_speed = 0.005 * self.glob.data["screen_width"]
self.frame['turret'][2].projectile_damage = 100 + int(500 * (self.glob.data["cannon_3_lvl"] / 9))
self.frame['turret'][2].fire_sound = self.glob.sounds[0]
self.frame['turret'][2].trail_anim = self.glob.sprite['anim_bullet_flame']
self.frame['turret'][2].explosion_anim = self.glob.sprite['anim_energy_leak_exp']
self.frame['turret'][2].explosion_sound = self.glob.sounds[1]
if self.glob.data["cannon_4_lvl"] > 0:
self.frame['turret'][3] = Launcher(self, self.glob.sprite["obj"][30], 4)
self.frame['turret'][3].fire_range = (0.6 + (0.4 * (self.glob.data["cannon_4_lvl"] / 9)) ) * self.glob.data["screen_width"]
self.frame['turret'][3].fire_rate = 10 - int(5 * (self.glob.data["cannon_4_lvl"] / 9))
self.frame['turret'][3].salvo = 3 + int(5 * (self.glob.data["cannon_4_lvl"] / 9))
self.frame['turret'][3].salvo_interval = 0.2
self.frame['turret'][3].rot_speed = 1 + int(1 * (self.glob.data["cannon_4_lvl"] / 9))
self.frame['turret'][3].pos_adj = vec(0.01,0.19) * self.glob.data["screen_width"]
self.frame['turret'][3].projectile_image = self.glob.sprite["obj"][69]
self.frame['turret'][3].projectile_speed = 0.005 * self.glob.data["screen_width"]
self.frame['turret'][3].missile_rot_speed = 2 + int(0.3 * self.glob.data["cannon_4_lvl"])
self.frame['turret'][3].projectile_damage = 25 + int(100 * (self.glob.data["cannon_4_lvl"] / 9))
self.frame['turret'][3].fire_sound = self.glob.sounds[3]
self.frame['turret'][3].trail_anim = self.glob.sprite['anim_vapour_trail']
self.frame['turret'][3].explosion_anim = self.glob.sprite['anim_yellow_exp']
self.frame['turret'][3].explosion_sound = self.glob.sounds[1]
self.hp = 2000 + 500 * self.glob.data["base_shielding_lvl"]
self.hp_max = self.hp
self.detection_range = (1.1) * self.glob.data["screen_width"]
if name == "E_Base":
self.frame['body'][0] = Body(self, self.glob.sprite["obj"][1], 4)
if self.glob.game_lvl > 5:
self.frame['turret'][0] = Turret(self, self.glob.sprite["obj"][29], 4)
self.frame['turret'][0].fire_range = 0.9 * self.glob.data["screen_width"]
self.frame['turret'][0].fire_rate = 7 - int(2 * (self.glob.game_lvl / 99))
self.frame['turret'][0].salvo = 3 + int(6 * (self.glob.game_lvl / 99))
self.frame['turret'][0].salvo_interval = 0.2
self.frame['turret'][0].rot_speed = 1 + int(2 * (self.glob.game_lvl / 99))
self.frame['turret'][0].pos_adj = vec(0.01,-0.105) * self.glob.data["screen_width"]
self.frame['turret'][0].projectile_image = self.glob.sprite["obj"][61]
self.frame['turret'][0].projectile_speed = 0.01 * self.glob.data["screen_width"]
self.frame['turret'][0].projectile_damage = 5 + int(20 * (self.glob.game_lvl / 99))
self.frame['turret'][0].fire_sound = self.glob.sounds[2]
self.frame['turret'][0].trail_anim = "None"
self.frame['turret'][0].explosion_anim = self.glob.sprite['anim_spark']
self.frame['turret'][0].explosion_sound = self.glob.sounds[1]
if self.glob.game_lvl > 20:
self.frame['turret'][1] = Turret(self, self.glob.sprite["obj"][29], 4)
self.frame['turret'][1].fire_range = 0.9 * self.glob.data["screen_width"]
self.frame['turret'][1].fire_rate = 7 - int(2 * (self.glob.game_lvl / 99))
self.frame['turret'][1].salvo = 3 + int(6 * (self.glob.game_lvl / 99))
self.frame['turret'][1].salvo_interval = 0.2
self.frame['turret'][1].rot_speed = 1 + int(2 * (self.glob.game_lvl / 99))
self.frame['turret'][1].pos_adj = vec(0.01,0.105) * self.glob.data["screen_width"]
self.frame['turret'][1].projectile_image = self.glob.sprite["obj"][61]
self.frame['turret'][1].projectile_speed = 0.01 * self.glob.data["screen_width"]
self.frame['turret'][1].projectile_damage = 5 + int(20 * (self.glob.game_lvl / 99))
self.frame['turret'][1].fire_sound = self.glob.sounds[2]
self.frame['turret'][1].trail_anim = "None"
self.frame['turret'][1].explosion_anim = self.glob.sprite['anim_spark']
self.frame['turret'][1].explosion_sound = self.glob.sounds[1]
if self.glob.game_lvl > 40:
self.frame['turret'][2] = Turret(self, self.glob.sprite["obj"][28], 4)
self.frame['turret'][2].fire_range = 0.9 * self.glob.data["screen_width"]
self.frame['turret'][2].fire_rate = 10 - int(5 * (self.glob.game_lvl / 99))
self.frame['turret'][2].salvo = 1
self.frame['turret'][2].salvo_interval = 0.2
self.frame['turret'][2].rot_speed = 1 + int(1 * (self.glob.game_lvl / 99))
self.frame['turret'][2].pos_adj = vec(0.01,0.19) * self.glob.data["screen_width"]
self.frame['turret'][2].projectile_image = self.glob.sprite["obj"][67]
self.frame['turret'][2].projectile_speed = 0.005 * self.glob.data["screen_width"]
self.frame['turret'][2].projectile_damage = 100 + int(150 * (self.glob.game_lvl / 99))
self.frame['turret'][2].fire_sound = self.glob.sounds[0]
self.frame['turret'][2].trail_anim = self.glob.sprite['anim_bullet_flame']
self.frame['turret'][2].explosion_anim = self.glob.sprite['anim_energy_leak_exp']
self.frame['turret'][2].explosion_sound = self.glob.sounds[1]
if self.glob.game_lvl > 60:
self.frame['turret'][3] = Launcher(self, self.glob.sprite["obj"][30], 4)
self.frame['turret'][3].fire_range = 0.9 * self.glob.data["screen_width"]
self.frame['turret'][3].fire_rate = 5 - int(2 * (self.glob.game_lvl / 99))
self.frame['turret'][3].salvo = 2 + int(3 * (self.glob.game_lvl / 99))
self.frame['turret'][3].salvo_interval = 0.2
self.frame['turret'][3].rot_speed = 1 + int(1 * (self.glob.game_lvl / 99))
self.frame['turret'][3].pos_adj = vec(0.01,-0.19) * self.glob.data["screen_width"]
self.frame['turret'][3].projectile_image = self.glob.sprite["obj"][69]
self.frame['turret'][3].projectile_speed = 0.005 * self.glob.data["screen_width"]
self.frame['turret'][3].missile_rot_speed = 2 + int(5 * (self.glob.game_lvl / 99))
self.frame['turret'][3].projectile_damage = 10 + int(100 * (self.glob.game_lvl / 99))
self.frame['turret'][3].fire_sound = self.glob.sounds[3]
self.frame['turret'][3].trail_anim = self.glob.sprite['anim_vapour_trail']
self.frame['turret'][3].explosion_anim = self.glob.sprite['anim_yellow_exp']
self.frame['turret'][3].explosion_sound = self.glob.sounds[1]
self.hp = 2000 + int(5500 * (self.glob.game_lvl / 99))
self.hp_max = self.hp
self.detection_range = (1.1) * self.glob.data["screen_width"]
"""
Classe Vehicle
- But : assembler des composants frame du tank
- Fonctionnement : partage des variables et des vecteurs avec les classes contenues dans self.frame
- Utilisation : lorsqu'elle est appellee, la classe fait apparaitre le vehicule souhaite
"""
class Vehicle():
def __init__(self, glob, vehicle, allegiance):
self.glob = glob
self._layer = 2
self._type = "func_prime"
self.banner = allegiance
self.vehicle = vehicle
self.hp = 0
self.frame = {}
self.frame['body'] = []
self.frame['turret'] = []
self.pos = vec(0,0)
self.spd = vec(0, 0)
self.acc = vec(0, 0)
self.death_timer = Timer()
self.remove_turret = False
self.rot_target = 0
self.rot_tracker = 0
self.target = False
self.traget_dist = 0
self.pin = vec(0,0)
if self.banner == "Ally":
self.rot = 0
else:
self.rot = 180
        # Build the selected vehicle's components:
constructor(self, self.vehicle)
# Collision radius
self.radius = int(self.frame['body'][0].rect.w/2)
        # Spawn position
if self.banner == "Ally":
self.pos.x = self.glob.map_instance.pos.x - self.radius * 2
else:
self.pos.x = self.glob.map_instance.pos.x + self.radius * 2 + self.glob.map_instance.rect.w
        self.pos.y = random.randint(int(self.glob.map_instance.pos.y), int(self.glob.map_instance.pos.y + 0.825 * self.glob.data["screen_height"]))
self.glob.all_virtuals.add(Framer(self))
"""
Fonction update
- Fonctionnement : mets a jour la position du vehicule a chaque boucle et verifie son etat (toujours en vie?) / IA (conduit le tank, gestion de la collision, detection ennemi)
"""
def update(self):
self.pos.x += self.glob.map_instance.map_offset
self.acc = vec(0, 0)
Troop_AI(self)
if self.hp <= 0:
if self.remove_turret == False:
pos = vec(self.pos.x/self.glob.data["screen_width"], self.pos.y/self.glob.data["screen_height"])
Animation_Player(self.glob, self.glob.sprite['anim_fire'], 0.01, pos, self.rot, 0.002 * self.radius, 0.00355 * self.radius, 6)
self.glob.all_virtuals.remove(self.frame["turret"])
self.remove_turret = True
if self.death_timer.chrono(2):
self.glob.all_virtuals.remove(self.frame["body"])
self.glob.all_virtuals.remove([self])
self.acc += (self.spd * -0.2)
self.spd += self.acc
self.pos += self.spd.rotate(-self.rot) * self.glob.fps_stab * (self.glob.data["screen_height"]/720)
"""
Fonction prime_update
- Fonctionnement : sera lue quand la classe est gelee / change la position du tank si la resolution de l'ecran est modifiee
"""
def prime_update(self):
if self.glob.Resol_Check.change:
if self.glob.data["screen_height"] == 720:
self.pos.x = self.pos.x * (2/3)
self.pos.y = self.pos.y * (2/3)
else:
self.pos.x = self.pos.x * (3/2)
self.pos.y = self.pos.y * (3/2)
"""
Classe Base
- But : assembler des composants frame de la base
- Fonctionnement : partage des variables et des vecteurs avec les classes contenues dans self.frame
- Utilisation : lorsqu'elle est appellee, la classe fait apparaitre la souhaitee (base ennemie ou alliee)
"""
class Base():
def __init__(self, glob, base, allegiance):
self.glob = glob
self._layer = 1
self._type = "func_prime"
self.banner = allegiance
self.base = base
self.hp = 0
self.hp_max = 0
self.frame = {}
self.frame['body'] = ["None"]
self.frame['turret'] = ["None","None","None","None"]
self.death_timer = Timer()
self.timer = Timer()
self.pos = vec(0,0)
self.target = False
self.traget_dist = 0
self.pin = vec(0,0)
if self.banner == "Ally":
self.rot = 0
else:
self.rot = 180
        # Build the selected base's components:
constructor(self, self.base)
if self.banner == "Ally":
self.turrets = []
for i in range(1,4+1):
self.turrets.append(self.glob.data["cannon_{}_lvl".format(i)])
# Collision radius
self.radius = int(self.frame['body'][0].rect.w/2)
        # Spawn position
if self.banner == "Ally":
self.pos.x = self.glob.map_instance.pos.x + self.radius
else:
self.pos.x = self.glob.map_instance.pos.x + self.glob.map_instance.rect.w - self.radius
self.pos.y = self.glob.map_instance.pos.y + ( 0.825 * self.glob.data["screen_height"]/2)
if self.banner == "Ally":
self.life_bar = Progression_Bar(self.glob, vec(0.25 ,0.085), 0.5, 0.01, GREEN, "Left", 7)
else:
self.life_bar = Progression_Bar(self.glob, vec(0.75 ,0.085), 0.5, 0.01, RED, "Right", 7)
self.glob.all_virtuals.add(Framer(self))
"""
Fonction update
- Fonctionnement : sera lue a chaque boucle et change la position de la base si la carte bouge / gere la barre de vie des bases
"""
def update(self):
self.pos.x += self.glob.map_instance.map_offset
Base_AI(self)
if self.hp/self.hp_max <= 0.25:
if self.timer.chrono(1):
Animation_Player(self.glob, self.glob.sprite['anim_fire'], 0.01, vec(random.uniform(self.pos.x/self.glob.data["screen_width"]- 0.01 ,self.pos.x/self.glob.data["screen_width"] + 0.01) ,random.uniform(0.075,0.9)), self.rot, 0.002 * self.radius, 0.00355 * self.radius, 6)
self.life_bar.progression = self.hp/self.hp_max
self.life_bar.change()
"""
Fonction prime_update
- Fonctionnement : sera lue quand la classe est gelee / change la position de la base si la carte bouge et gere l'apparition de nouvelles tourelles si la base est amelioree.
"""
def prime_update(self):
if self.glob.Resol_Check.change:
if self.glob.data["screen_height"] == 720:
self.pos.x = int(self.pos.x * (2/3))
self.pos.y = int(self.pos.y * (2/3))
else:
self.pos.x = int(self.pos.x * (3/2))
self.pos.y = int(self.pos.y * (3/2))
if self.banner == "Ally":
if self.hp_max != 2000 + 500 * self.glob.data["base_shielding_lvl"]:
self.hp_max = 2000 + 500 * self.glob.data["base_shielding_lvl"]
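            # Rebuild the base whenever one of its cannons has been upgraded: the old
            # frame components are discarded, constructor() re-creates them and the
            # new cannon levels are remembered for the next check.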
for i in range(1,4 + 1):
if self.glob.data["cannon_{}_lvl".format(i)] != self.turrets[i - 1]:
                    ls = []
                    for k in self.frame:
                        for j, part in enumerate(self.frame[k]):
                            if part != "None":
                                ls.append(part)
                                self.frame[k][j] = "None"
print("Bin: ",ls)
self.glob.all_virtuals.remove(ls)
constructor(self, self.base)
self.glob.all_virtuals.add(Framer2(self))
all_frames = Framer2(self)
for frame in all_frames:
frame.update()
self.turrets[i - 1] = self.glob.data["cannon_{}_lvl".format(i)]
``` |
{
"source": "JonasMie/taskflow",
"score": 3
} |
#### File: conductors/backends/impl_nonblocking.py
```python
import futurist
import six
from taskflow.conductors.backends import impl_executor
from taskflow.utils import threading_utils as tu
class NonBlockingConductor(impl_executor.ExecutorConductor):
"""Non-blocking conductor that processes job(s) using a thread executor.
NOTE(harlowja): A custom executor factory can be provided via keyword
argument ``executor_factory``, if provided it will be
invoked at
:py:meth:`~taskflow.conductors.base.Conductor.run` time
with one positional argument (this conductor) and it must
return a compatible `executor`_ which can be used
                    to submit jobs to. If ``None`` is provided a thread pool
                    backed executor is selected by default (it will have
                    an equivalent number of workers as this conductor's
simultaneous job count).
.. _executor: https://docs.python.org/dev/library/\
concurrent.futures.html#executor-objects
"""
MAX_SIMULTANEOUS_JOBS = tu.get_optimal_thread_count()
"""
Default maximum number of jobs that can be in progress at the same time.
"""
def _default_executor_factory(self):
max_simultaneous_jobs = self._max_simultaneous_jobs
if max_simultaneous_jobs <= 0:
max_workers = tu.get_optimal_thread_count()
else:
max_workers = max_simultaneous_jobs
return futurist.ThreadPoolExecutor(max_workers=max_workers)
def __init__(self, name, jobboard,
persistence=None, engine=None,
engine_options=None, wait_timeout=None,
log=None, max_simultaneous_jobs=MAX_SIMULTANEOUS_JOBS,
executor_factory=None):
super(NonBlockingConductor, self).__init__(
name, jobboard,
persistence=persistence, engine=engine,
engine_options=engine_options, wait_timeout=wait_timeout,
log=log, max_simultaneous_jobs=max_simultaneous_jobs)
if executor_factory is None:
self._executor_factory = self._default_executor_factory
else:
if not six.callable(executor_factory):
raise ValueError("Provided keyword argument 'executor_factory'"
" must be callable")
self._executor_factory = executor_factory
```
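A minimal sketch of the ``executor_factory`` hook described in the docstring above, reusing the zake fake-client pattern from the jobboard example later in this document (the board name, path and worker cap are illustrative, not part of the library):
```python
import futurist
from zake import fake_client

from taskflow.conductors.backends import impl_nonblocking
from taskflow.jobs import backends as job_backends


def tiny_executor_factory(conductor):
    # Called at run() time with the conductor as its only positional argument;
    # it must return an executor that claimed jobs can be submitted to.
    return futurist.ThreadPoolExecutor(max_workers=2)


conf = {'board': 'zookeeper', 'path': '/taskflow/jobs'}
client = fake_client.FakeClient()
with job_backends.backend('my-board', conf, client=client) as board:
    conductor = impl_nonblocking.NonBlockingConductor(
        'conductor-1', board,
        max_simultaneous_jobs=2,
        executor_factory=tiny_executor_factory)
    # conductor.run() would now claim and dispatch jobs posted to this board.
```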
#### File: action_engine/actions/retry.py
```python
from taskflow.engines.action_engine.actions import base
from taskflow import retry as retry_atom
from taskflow import states
from taskflow.types import failure
class RetryAction(base.Action):
"""An action that handles executing, state changes, ... of retry atoms."""
def __init__(self, storage, notifier, retry_executor):
super(RetryAction, self).__init__(storage, notifier)
self._retry_executor = retry_executor
def _get_retry_args(self, retry, revert=False, addons=None):
if revert:
arguments = self._storage.fetch_mapped_args(
retry.revert_rebind,
atom_name=retry.name,
optional_args=retry.revert_optional
)
else:
arguments = self._storage.fetch_mapped_args(
retry.rebind,
atom_name=retry.name,
optional_args=retry.optional
)
history = self._storage.get_retry_history(retry.name)
arguments[retry_atom.EXECUTE_REVERT_HISTORY] = history
if addons:
arguments.update(addons)
return arguments
def change_state(self, retry, state, result=base.Action.NO_RESULT):
old_state = self._storage.get_atom_state(retry.name)
if state in self.SAVE_RESULT_STATES:
save_result = None
if result is not self.NO_RESULT:
save_result = result
self._storage.save(retry.name, save_result, state)
# TODO(harlowja): combine this with the save to avoid a call
# back into the persistence layer...
if state == states.REVERTED:
self._storage.cleanup_retry_history(retry.name, state)
else:
if state == old_state:
# NOTE(imelnikov): nothing really changed, so we should not
# write anything to storage and run notifications.
return
self._storage.set_atom_state(retry.name, state)
retry_uuid = self._storage.get_atom_uuid(retry.name)
details = {
'retry_name': retry.name,
'retry_uuid': retry_uuid,
'old_state': old_state,
}
if result is not self.NO_RESULT:
details['result'] = result
self._notifier.notify(state, details)
def schedule_execution(self, retry):
self.change_state(retry, states.RUNNING)
return self._retry_executor.execute_retry(
retry, self._get_retry_args(retry))
def complete_reversion(self, retry, result):
if isinstance(result, failure.Failure):
self.change_state(retry, states.REVERT_FAILURE, result=result)
else:
self.change_state(retry, states.REVERTED, result=result)
def complete_execution(self, retry, result):
if isinstance(result, failure.Failure):
self.change_state(retry, states.FAILURE, result=result)
else:
self.change_state(retry, states.SUCCESS, result=result)
def schedule_reversion(self, retry):
self.change_state(retry, states.REVERTING)
arg_addons = {
retry_atom.REVERT_FLOW_FAILURES: self._storage.get_failures(),
}
return self._retry_executor.revert_retry(
retry, self._get_retry_args(retry, addons=arg_addons, revert=True))
def on_failure(self, retry, atom, last_failure):
self._storage.save_retry_failure(retry.name, atom.name, last_failure)
arguments = self._get_retry_args(retry)
return retry.on_failure(**arguments)
```
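For context, retry atoms like the ones this action drives are declared on a flow; a small sketch using the stock ``Times`` strategy (the flow and task names are illustrative):
```python
from taskflow.patterns import linear_flow as lf
from taskflow import retry
from taskflow import task


class CallService(task.Task):
    def execute(self):
        # Imagine a call that can fail transiently and raise.
        print("calling service")


# Times re-runs the flow's tasks up to `attempts` times when one of them fails;
# scheduling its execution/reversion and keeping its history is what the
# RetryAction above takes care of inside the action engine.
flow = lf.Flow('flaky-call', retry=retry.Times(attempts=3))
flow.add(CallService())
```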
#### File: engines/action_engine/traversal.py
```python
import collections
import enum
from taskflow.engines.action_engine import compiler as co
class Direction(enum.Enum):
"""Traversal direction enum."""
#: Go through successors.
FORWARD = 1
#: Go through predecessors.
BACKWARD = 2
def _extract_connectors(execution_graph, starting_node, direction,
through_flows=True, through_retries=True,
through_tasks=True):
if direction == Direction.FORWARD:
connected_iter = execution_graph.successors
else:
connected_iter = execution_graph.predecessors
connected_to_functors = {}
if through_flows:
connected_to_functors[co.FLOW] = connected_iter
connected_to_functors[co.FLOW_END] = connected_iter
if through_retries:
connected_to_functors[co.RETRY] = connected_iter
if through_tasks:
connected_to_functors[co.TASK] = connected_iter
return connected_iter(starting_node), connected_to_functors
def breadth_first_iterate(execution_graph, starting_node, direction,
through_flows=True, through_retries=True,
through_tasks=True):
"""Iterates connected nodes in execution graph (from starting node).
Does so in a breadth first manner.
Jumps over nodes with ``noop`` attribute (does not yield them back).
"""
initial_nodes_iter, connected_to_functors = _extract_connectors(
execution_graph, starting_node, direction,
through_flows=through_flows, through_retries=through_retries,
through_tasks=through_tasks)
q = collections.deque(initial_nodes_iter)
while q:
node = q.popleft()
node_attrs = execution_graph.nodes[node]
if not node_attrs.get('noop'):
yield node
try:
node_kind = node_attrs['kind']
connected_to_functor = connected_to_functors[node_kind]
except KeyError:
pass
else:
q.extend(connected_to_functor(node))
def depth_first_iterate(execution_graph, starting_node, direction,
through_flows=True, through_retries=True,
through_tasks=True):
"""Iterates connected nodes in execution graph (from starting node).
Does so in a depth first manner.
Jumps over nodes with ``noop`` attribute (does not yield them back).
"""
initial_nodes_iter, connected_to_functors = _extract_connectors(
execution_graph, starting_node, direction,
through_flows=through_flows, through_retries=through_retries,
through_tasks=through_tasks)
stack = list(initial_nodes_iter)
while stack:
node = stack.pop()
node_attrs = execution_graph.nodes[node]
if not node_attrs.get('noop'):
yield node
try:
node_kind = node_attrs['kind']
connected_to_functor = connected_to_functors[node_kind]
except KeyError:
pass
else:
stack.extend(connected_to_functor(node))
def depth_first_reverse_iterate(node, start_from_idx=-1):
"""Iterates connected (in reverse) **tree** nodes (from starting node).
Jumps through nodes with ``noop`` attribute (does not yield them back).
"""
# Always go left to right, since right to left is the pattern order
# and we want to go backwards and not forwards through that ordering...
if start_from_idx == -1:
# All of them...
children_iter = node.reverse_iter()
else:
children_iter = reversed(node[0:start_from_idx])
for child in children_iter:
if child.metadata.get('noop'):
# Jump through these...
for grand_child in child.dfs_iter(right_to_left=False):
if grand_child.metadata['kind'] in co.ATOMS:
yield grand_child.item
else:
yield child.item
```
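These iterators only need a graph exposing ``successors``/``predecessors`` plus per-node ``kind`` and optional ``noop`` attributes, so a tiny hand-built graph is enough to see the skipping behaviour (a sketch; real execution graphs come from the compiler):
```python
import networkx as nx

from taskflow.engines.action_engine import compiler as co
from taskflow.engines.action_engine import traversal

# task-a -> flow-end (marked noop) -> task-b
g = nx.DiGraph()
g.add_node('task-a', kind=co.TASK)
g.add_node('flow-end', kind=co.FLOW_END, noop=True)
g.add_node('task-b', kind=co.TASK)
g.add_edge('task-a', 'flow-end')
g.add_edge('flow-end', 'task-b')

# The starting node itself is never yielded and the noop node is jumped over,
# so only 'task-b' comes back.
print(list(traversal.breadth_first_iterate(g, 'task-a', traversal.Direction.FORWARD)))
```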
#### File: taskflow/engines/helpers.py
```python
import contextlib
from oslo_utils import importutils
from oslo_utils import reflection
import six
import stevedore.driver
from taskflow import exceptions as exc
from taskflow import logging
from taskflow.persistence import backends as p_backends
from taskflow.utils import misc
from taskflow.utils import persistence_utils as p_utils
LOG = logging.getLogger(__name__)
# NOTE(imelnikov): this is the entrypoint namespace, not the module namespace.
ENGINES_NAMESPACE = 'taskflow.engines'
# The default entrypoint engine type looked for when it is not provided.
ENGINE_DEFAULT = 'default'
def _extract_engine(engine, **kwargs):
"""Extracts the engine kind and any associated options."""
kind = engine
if not kind:
kind = ENGINE_DEFAULT
# See if it's a URI and if so, extract any further options...
options = {}
try:
uri = misc.parse_uri(kind)
except (TypeError, ValueError):
pass
else:
kind = uri.scheme
options = misc.merge_uri(uri, options.copy())
# Merge in any leftover **kwargs into the options, this makes it so
# that the provided **kwargs override any URI/engine specific
# options.
options.update(kwargs)
return (kind, options)
def _fetch_factory(factory_name):
try:
return importutils.import_class(factory_name)
except (ImportError, ValueError) as e:
raise ImportError("Could not import factory %r: %s"
% (factory_name, e))
def _fetch_validate_factory(flow_factory):
if isinstance(flow_factory, six.string_types):
factory_fun = _fetch_factory(flow_factory)
factory_name = flow_factory
else:
factory_fun = flow_factory
factory_name = reflection.get_callable_name(flow_factory)
try:
reimported = _fetch_factory(factory_name)
assert reimported == factory_fun
except (ImportError, AssertionError):
raise ValueError('Flow factory %r is not reimportable by name %s'
% (factory_fun, factory_name))
return (factory_name, factory_fun)
def load(flow, store=None, flow_detail=None, book=None,
backend=None, namespace=ENGINES_NAMESPACE,
engine=ENGINE_DEFAULT, **kwargs):
"""Load a flow into an engine.
This function creates and prepares an engine to run the provided flow. All
that is left after this returns is to run the engine with the
engines :py:meth:`~taskflow.engines.base.Engine.run` method.
Which engine to load is specified via the ``engine`` parameter. It
can be a string that names the engine type to use, or a string that
is a URI with a scheme that names the engine type to use and further
options contained in the URI's host, port, and query parameters...
Which storage backend to use is defined by the backend parameter. It
can be backend itself, or a dictionary that is passed to
:py:func:`~taskflow.persistence.backends.fetch` to obtain a
viable backend.
:param flow: flow to load
:param store: dict -- data to put to storage to satisfy flow requirements
:param flow_detail: FlowDetail that holds the state of the flow (if one is
not provided then one will be created for you in the provided backend)
:param book: LogBook to create flow detail in if flow_detail is None
:param backend: storage backend to use or configuration that defines it
:param namespace: driver namespace for stevedore (or empty for default)
:param engine: string engine type or URI string with scheme that contains
the engine type and any URI specific components that will
become part of the engine options.
:param kwargs: arbitrary keyword arguments passed as options (merged with
any extracted ``engine``), typically used for any engine
specific options that do not fit as any of the
existing arguments.
:returns: engine
"""
kind, options = _extract_engine(engine, **kwargs)
if isinstance(backend, dict):
backend = p_backends.fetch(backend)
if flow_detail is None:
flow_detail = p_utils.create_flow_detail(flow, book=book,
backend=backend)
LOG.debug('Looking for %r engine driver in %r', kind, namespace)
try:
mgr = stevedore.driver.DriverManager(
namespace, kind,
invoke_on_load=True,
invoke_args=(flow, flow_detail, backend, options))
engine = mgr.driver
except RuntimeError as e:
raise exc.NotFound("Could not find engine '%s'" % (kind), e)
else:
if store:
engine.storage.inject(store)
return engine
def run(flow, store=None, flow_detail=None, book=None,
backend=None, namespace=ENGINES_NAMESPACE,
engine=ENGINE_DEFAULT, **kwargs):
"""Run the flow.
This function loads the flow into an engine (with the :func:`load() <load>`
function) and runs the engine.
The arguments are interpreted as for :func:`load() <load>`.
:returns: dictionary of all named
results (see :py:meth:`~.taskflow.storage.Storage.fetch_all`)
"""
engine = load(flow, store=store, flow_detail=flow_detail, book=book,
backend=backend, namespace=namespace,
engine=engine, **kwargs)
engine.run()
return engine.storage.fetch_all()
def save_factory_details(flow_detail,
flow_factory, factory_args, factory_kwargs,
backend=None):
"""Saves the given factories reimportable attributes into the flow detail.
This function saves the factory name, arguments, and keyword arguments
into the given flow details object and if a backend is provided it will
also ensure that the backend saves the flow details after being updated.
:param flow_detail: FlowDetail that holds state of the flow to load
:param flow_factory: function or string: function that creates the flow
:param factory_args: list or tuple of factory positional arguments
:param factory_kwargs: dict of factory keyword arguments
:param backend: storage backend to use or configuration
"""
if not factory_args:
factory_args = []
if not factory_kwargs:
factory_kwargs = {}
factory_name, _factory_fun = _fetch_validate_factory(flow_factory)
factory_data = {
'factory': {
'name': factory_name,
'args': factory_args,
'kwargs': factory_kwargs,
},
}
if not flow_detail.meta:
flow_detail.meta = factory_data
else:
flow_detail.meta.update(factory_data)
if backend is not None:
if isinstance(backend, dict):
backend = p_backends.fetch(backend)
with contextlib.closing(backend.get_connection()) as conn:
conn.update_flow_details(flow_detail)
def load_from_factory(flow_factory, factory_args=None, factory_kwargs=None,
store=None, book=None, backend=None,
namespace=ENGINES_NAMESPACE, engine=ENGINE_DEFAULT,
**kwargs):
"""Loads a flow from a factory function into an engine.
Gets flow factory function (or name of it) and creates flow with
it. Then, the flow is loaded into an engine with the :func:`load() <load>`
function, and the factory function fully qualified name is saved to flow
metadata so that it can be later resumed.
:param flow_factory: function or string: function that creates the flow
:param factory_args: list or tuple of factory positional arguments
:param factory_kwargs: dict of factory keyword arguments
Further arguments are interpreted as for :func:`load() <load>`.
:returns: engine
"""
_factory_name, factory_fun = _fetch_validate_factory(flow_factory)
if not factory_args:
factory_args = []
if not factory_kwargs:
factory_kwargs = {}
flow = factory_fun(*factory_args, **factory_kwargs)
if isinstance(backend, dict):
backend = p_backends.fetch(backend)
flow_detail = p_utils.create_flow_detail(flow, book=book, backend=backend)
save_factory_details(flow_detail,
flow_factory, factory_args, factory_kwargs,
backend=backend)
return load(flow=flow, store=store, flow_detail=flow_detail, book=book,
backend=backend, namespace=namespace,
engine=engine, **kwargs)
def flow_from_detail(flow_detail):
"""Reloads a flow previously saved.
Gets the flow factories name and any arguments and keyword arguments from
the flow details metadata, and then calls that factory to recreate the
flow.
:param flow_detail: FlowDetail that holds state of the flow to load
"""
try:
factory_data = flow_detail.meta['factory']
except (KeyError, AttributeError, TypeError):
raise ValueError('Cannot reconstruct flow %s %s: '
'no factory information saved.'
% (flow_detail.name, flow_detail.uuid))
try:
factory_fun = _fetch_factory(factory_data['name'])
except (KeyError, ImportError):
raise ImportError('Could not import factory for flow %s %s'
% (flow_detail.name, flow_detail.uuid))
args = factory_data.get('args', ())
kwargs = factory_data.get('kwargs', {})
return factory_fun(*args, **kwargs)
def load_from_detail(flow_detail, store=None, backend=None,
namespace=ENGINES_NAMESPACE, engine=ENGINE_DEFAULT,
**kwargs):
"""Reloads an engine previously saved.
This reloads the flow using the
:func:`flow_from_detail() <flow_from_detail>` function and then calls
into the :func:`load() <load>` function to create an engine from that flow.
:param flow_detail: FlowDetail that holds state of the flow to load
Further arguments are interpreted as for :func:`load() <load>`.
:returns: engine
"""
flow = flow_from_detail(flow_detail)
return load(flow, flow_detail=flow_detail,
store=store, backend=backend,
namespace=namespace, engine=engine, **kwargs)
```
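A quick illustration of the ``run()`` helper documented above, with ``store`` supplying the one input the task requires (the flow and task are illustrative):
```python
from taskflow import engines
from taskflow.patterns import linear_flow as lf
from taskflow import task


class Greet(task.Task):
    default_provides = 'greeting'

    def execute(self, who):
        return "hello %s" % who


flow = lf.Flow('hello-flow')
flow.add(Greet())
# store satisfies the task's `who` requirement; run() loads the flow into the
# default engine, runs it and returns every named result from storage.
print(engines.run(flow, store={'who': 'world'}))
```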
#### File: taskflow/examples/dump_memory_backend.py
```python
import logging
import os
import sys
logging.basicConfig(level=logging.ERROR)
self_dir = os.path.abspath(os.path.dirname(__file__))
top_dir = os.path.abspath(os.path.join(os.path.dirname(__file__),
os.pardir,
os.pardir))
sys.path.insert(0, top_dir)
sys.path.insert(0, self_dir)
from taskflow import engines
from taskflow.patterns import linear_flow as lf
from taskflow import task
# INTRO: in this example we create a dummy flow with a dummy task, and run
# it using a in-memory backend and pre/post run we dump out the contents
# of the in-memory backends tree structure (which can be quite useful to
# look at for debugging or other analysis).
class PrintTask(task.Task):
def execute(self):
print("Running '%s'" % self.name)
# Make a little flow and run it...
f = lf.Flow('root')
for alpha in ['a', 'b', 'c']:
f.add(PrintTask(alpha))
e = engines.load(f)
e.compile()
e.prepare()
# After prepare the storage layer + backend can now be accessed safely...
backend = e.storage.backend
print("----------")
print("Before run")
print("----------")
print(backend.memory.pformat())
print("----------")
e.run()
print("---------")
print("After run")
print("---------")
for path in backend.memory.ls_r(backend.memory.root_path, absolute=True):
value = backend.memory[path]
if value:
print("%s -> %s" % (path, value))
else:
print("%s" % (path))
```
#### File: taskflow/examples/echo_listener.py
```python
import logging
import os
import sys
logging.basicConfig(level=logging.DEBUG)
top_dir = os.path.abspath(os.path.join(os.path.dirname(__file__),
os.pardir,
os.pardir))
sys.path.insert(0, top_dir)
from taskflow import engines
from taskflow.listeners import logging as logging_listener
from taskflow.patterns import linear_flow as lf
from taskflow import task
# INTRO: This example walks through a miniature workflow which will do a
# simple echo operation; during this execution a listener is associated with
# the engine to receive all notifications about what the flow has performed,
# this example dumps that output to the stdout for viewing (at debug level
# to show all the information which is possible).
class Echo(task.Task):
def execute(self):
print(self.name)
# Generate the work to be done (but don't do it yet).
wf = lf.Flow('abc')
wf.add(Echo('a'))
wf.add(Echo('b'))
wf.add(Echo('c'))
# This will associate the listener with the engine (the listener
# will automatically register for notifications with the engine and deregister
# when the context is exited).
e = engines.load(wf)
with logging_listener.DynamicLoggingListener(e):
e.run()
```
#### File: taskflow/examples/jobboard_produce_consume_colors.py
```python
import collections
import contextlib
import logging
import os
import random
import sys
import threading
import time
logging.basicConfig(level=logging.ERROR)
top_dir = os.path.abspath(os.path.join(os.path.dirname(__file__),
os.pardir,
os.pardir))
sys.path.insert(0, top_dir)
import six
from six.moves import range as compat_range
from zake import fake_client
from taskflow import exceptions as excp
from taskflow.jobs import backends
from taskflow.utils import threading_utils
# In this example we show how a jobboard can be used to post work for other
# entities to work on. This example creates a set of jobs using one producer
# thread (typically this would be split across many machines) and then has
# other worker threads with their own jobboards select work using a given
# filter [red/blue] and then perform that work (consuming or abandoning
# the job after it has been completed or failed).
# Things to note:
# - No persistence layer is used (or logbook), just the job details are used
# to determine if a job should be selected by a worker or not.
# - This example runs in a single process (this is expected to be atypical
# but this example shows that it can be done if needed, for testing...)
# - The iterjobs(), claim(), consume()/abandon() worker workflow.
# - The post() producer workflow.
SHARED_CONF = {
'path': "/taskflow/jobs",
'board': 'zookeeper',
}
# How many workers and producers of work will be created (as threads).
PRODUCERS = 3
WORKERS = 5
# How many units of work each producer will create.
PRODUCER_UNITS = 10
# How many units of work are expected to be produced (used so workers can
# know when to stop running and shutdown; typically this would not be a
# known value, but we have to limit this example's execution time to be less than
# infinity).
EXPECTED_UNITS = PRODUCER_UNITS * PRODUCERS
# Delay between producing/consuming more work.
WORKER_DELAY, PRODUCER_DELAY = (0.5, 0.5)
# To ensure threads don't trample other threads' output.
STDOUT_LOCK = threading.Lock()
def dispatch_work(job):
# This is where the jobs contained work *would* be done
time.sleep(1.0)
def safe_print(name, message, prefix=""):
with STDOUT_LOCK:
if prefix:
print("%s %s: %s" % (prefix, name, message))
else:
print("%s: %s" % (name, message))
def worker(ident, client, consumed):
# Create a personal board (using the same client so that it works in
# the same process) and start looking for jobs on the board that we want
# to perform.
name = "W-%s" % (ident)
safe_print(name, "started")
claimed_jobs = 0
consumed_jobs = 0
abandoned_jobs = 0
with backends.backend(name, SHARED_CONF.copy(), client=client) as board:
while len(consumed) != EXPECTED_UNITS:
favorite_color = random.choice(['blue', 'red'])
for job in board.iterjobs(ensure_fresh=True, only_unclaimed=True):
# See if we should even bother with it...
if job.details.get('color') != favorite_color:
continue
safe_print(name, "'%s' [attempting claim]" % (job))
try:
board.claim(job, name)
claimed_jobs += 1
safe_print(name, "'%s' [claimed]" % (job))
except (excp.NotFound, excp.UnclaimableJob):
safe_print(name, "'%s' [claim unsuccessful]" % (job))
else:
try:
dispatch_work(job)
board.consume(job, name)
safe_print(name, "'%s' [consumed]" % (job))
consumed_jobs += 1
consumed.append(job)
except Exception:
board.abandon(job, name)
abandoned_jobs += 1
safe_print(name, "'%s' [abandoned]" % (job))
time.sleep(WORKER_DELAY)
safe_print(name,
"finished (claimed %s jobs, consumed %s jobs,"
" abandoned %s jobs)" % (claimed_jobs, consumed_jobs,
abandoned_jobs), prefix=">>>")
def producer(ident, client):
# Create a personal board (using the same client so that it works in
# the same process) and start posting jobs on the board that we want
# some entity to perform.
name = "P-%s" % (ident)
safe_print(name, "started")
with backends.backend(name, SHARED_CONF.copy(), client=client) as board:
for i in compat_range(0, PRODUCER_UNITS):
job_name = "%s-%s" % (name, i)
details = {
'color': random.choice(['red', 'blue']),
}
job = board.post(job_name, book=None, details=details)
safe_print(name, "'%s' [posted]" % (job))
time.sleep(PRODUCER_DELAY)
safe_print(name, "finished", prefix=">>>")
def main():
if six.PY3:
# TODO(harlowja): Hack to make eventlet work right, remove when the
# following is fixed: https://github.com/eventlet/eventlet/issues/230
from taskflow.utils import eventlet_utils as _eu # noqa
try:
import eventlet as _eventlet # noqa
except ImportError:
pass
with contextlib.closing(fake_client.FakeClient()) as c:
created = []
for i in compat_range(0, PRODUCERS):
p = threading_utils.daemon_thread(producer, i + 1, c)
created.append(p)
p.start()
consumed = collections.deque()
for i in compat_range(0, WORKERS):
w = threading_utils.daemon_thread(worker, i + 1, c, consumed)
created.append(w)
w.start()
while created:
t = created.pop()
t.join()
# At the end there should be nothing leftover, let's verify that.
board = backends.fetch('verifier', SHARED_CONF.copy(), client=c)
board.connect()
with contextlib.closing(board):
if board.job_count != 0 or len(consumed) != EXPECTED_UNITS:
return 1
return 0
if __name__ == "__main__":
sys.exit(main())
```
#### File: taskflow/examples/wbe_event_sender.py
```python
import logging
import os
import string
import sys
import time
top_dir = os.path.abspath(os.path.join(os.path.dirname(__file__),
os.pardir,
os.pardir))
sys.path.insert(0, top_dir)
from six.moves import range as compat_range
from taskflow import engines
from taskflow.engines.worker_based import worker
from taskflow.patterns import linear_flow as lf
from taskflow import task
from taskflow.types import notifier
from taskflow.utils import threading_utils
ANY = notifier.Notifier.ANY
# INTRO: These examples show how to use a remote worker's event notification
# attribute to proxy back task event notifications to the controlling process.
#
# In this case a simple set of events is triggered by a worker running a
# task (simulated to be remote by using a kombu memory transport and threads).
# Those events that the 'remote worker' produces will then be proxied back to
# the task that the engine is running 'remotely', and then they will be emitted
# back to the original callbacks that exist in the originating engine
# process/thread. This creates a one-way *notification* channel that can
# transparently be used in-process, outside-of-process using remote workers and
# so-on that allows a task to signal to its controlling process some sort of
# action that has occurred that the task may need to tell others about (for
# example to trigger some type of response when the task reaches 50% done...).
def event_receiver(event_type, details):
"""This is the callback that (in this example) doesn't do much..."""
print("Recieved event '%s'" % event_type)
print("Details = %s" % details)
class EventReporter(task.Task):
"""This is the task that will be running 'remotely' (not really remote)."""
EVENTS = tuple(string.ascii_uppercase)
EVENT_DELAY = 0.1
def execute(self):
for i, e in enumerate(self.EVENTS):
details = {
'leftover': self.EVENTS[i:],
}
self.notifier.notify(e, details)
time.sleep(self.EVENT_DELAY)
BASE_SHARED_CONF = {
'exchange': 'taskflow',
'transport': 'memory',
'transport_options': {
'polling_interval': 0.1,
},
}
# Until https://github.com/celery/kombu/issues/398 is resolved it is not
# recommended to run many worker threads in this example due to the types
# of errors mentioned in that issue.
MEMORY_WORKERS = 1
WORKER_CONF = {
'tasks': [
# Used to locate which tasks we can run (we don't want to allow
        # arbitrary code/tasks to be run by any worker since that would
# open up a variety of vulnerabilities).
'%s:EventReporter' % (__name__),
],
}
def run(engine_options):
reporter = EventReporter()
reporter.notifier.register(ANY, event_receiver)
flow = lf.Flow('event-reporter').add(reporter)
eng = engines.load(flow, engine='worker-based', **engine_options)
eng.run()
if __name__ == "__main__":
logging.basicConfig(level=logging.ERROR)
# Setup our transport configuration and merge it into the worker and
# engine configuration so that both of those objects use it correctly.
worker_conf = dict(WORKER_CONF)
worker_conf.update(BASE_SHARED_CONF)
engine_options = dict(BASE_SHARED_CONF)
workers = []
# These topics will be used to request worker information on; those
# workers will respond with their capabilities which the executing engine
# will use to match pending tasks to a matched worker, this will cause
# the task to be sent for execution, and the engine will wait until it
# is finished (a response is received) and then the engine will either
# continue with other tasks, do some retry/failure resolution logic or
# stop (and potentially re-raise the remote workers failure)...
worker_topics = []
try:
# Create a set of worker threads to simulate actual remote workers...
print('Running %s workers.' % (MEMORY_WORKERS))
for i in compat_range(0, MEMORY_WORKERS):
# Give each one its own unique topic name so that they can
# correctly communicate with the engine (they will all share the
# same exchange).
worker_conf['topic'] = 'worker-%s' % (i + 1)
worker_topics.append(worker_conf['topic'])
w = worker.Worker(**worker_conf)
runner = threading_utils.daemon_thread(w.run)
runner.start()
w.wait()
workers.append((runner, w.stop))
# Now use those workers to do something.
print('Executing some work.')
engine_options['topics'] = worker_topics
result = run(engine_options)
print('Execution finished.')
finally:
# And cleanup.
print('Stopping workers.')
while workers:
r, stopper = workers.pop()
stopper()
r.join()
```
#### File: taskflow/examples/wbe_mandelbrot.py
```python
import logging
import math
import os
import sys
top_dir = os.path.abspath(os.path.join(os.path.dirname(__file__),
os.pardir,
os.pardir))
sys.path.insert(0, top_dir)
from six.moves import range as compat_range
from taskflow import engines
from taskflow.engines.worker_based import worker
from taskflow.patterns import unordered_flow as uf
from taskflow import task
from taskflow.utils import threading_utils
# INTRO: This example walks through a workflow that will in parallel compute
# a mandelbrot result set (using X 'remote' workers) and then combine their
# results together to form a final mandelbrot fractal image. It shows a usage
# of taskflow to perform a well-known embarrassingly parallel problem that has
# the added benefit of also being an elegant visualization.
#
# NOTE(harlowja): this example simulates the expected larger number of workers
# by using a set of threads (which in this example simulate the remote workers
# that would typically be running on other external machines).
#
# NOTE(harlowja): to have it produce an image run (after installing pillow):
#
# $ python taskflow/examples/wbe_mandelbrot.py output.png
BASE_SHARED_CONF = {
'exchange': 'taskflow',
}
WORKERS = 2
WORKER_CONF = {
    # These are the tasks the worker can execute; they *must* be importable,
# typically this list is used to restrict what workers may execute to
# a smaller set of *allowed* tasks that are known to be safe (one would
# not want to allow all python code to be executed).
'tasks': [
'%s:MandelCalculator' % (__name__),
],
}
ENGINE_CONF = {
'engine': 'worker-based',
}
# Mandelbrot & image settings...
IMAGE_SIZE = (512, 512)
CHUNK_COUNT = 8
MAX_ITERATIONS = 25
class MandelCalculator(task.Task):
def execute(self, image_config, mandelbrot_config, chunk):
"""Returns the number of iterations before the computation "escapes".
Given the real and imaginary parts of a complex number, determine if it
is a candidate for membership in the mandelbrot set given a fixed
number of iterations.
"""
# Parts borrowed from (credit to <NAME> and <NAME>).
#
# http://nbviewer.ipython.org/gist/harrism/f5707335f40af9463c43
def mandelbrot(x, y, max_iters):
c = complex(x, y)
z = 0.0j
for i in compat_range(max_iters):
z = z * z + c
if (z.real * z.real + z.imag * z.imag) >= 4:
return i
return max_iters
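        # Illustrative check (added, not in the original): mandelbrot(0, 0, 25)
        # never escapes and returns max_iters (25), while mandelbrot(2, 2, 25)
        # escapes on the very first iteration and returns 0.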
min_x, max_x, min_y, max_y, max_iters = mandelbrot_config
height, width = image_config['size']
pixel_size_x = (max_x - min_x) / width
pixel_size_y = (max_y - min_y) / height
block = []
for y in compat_range(chunk[0], chunk[1]):
row = []
imag = min_y + y * pixel_size_y
for x in compat_range(0, width):
real = min_x + x * pixel_size_x
row.append(mandelbrot(real, imag, max_iters))
block.append(row)
return block
def calculate(engine_conf):
# Subdivide the work into X pieces, then request each worker to calculate
# one of those chunks and then later we will write these chunks out to
# an image bitmap file.
    # An unordered flow is used here since the mandelbrot calculation is an
# example of an embarrassingly parallel computation that we can scatter
# across as many workers as possible.
flow = uf.Flow("mandelbrot")
# These symbols will be automatically given to tasks as input to their
    # execute method; in this case these are constants used in the mandelbrot
# calculation.
store = {
'mandelbrot_config': [-2.0, 1.0, -1.0, 1.0, MAX_ITERATIONS],
'image_config': {
'size': IMAGE_SIZE,
}
}
# We need the task names to be in the right order so that we can extract
# the final results in the right order (we don't care about the order when
# executing).
task_names = []
# Compose our workflow.
height, _width = IMAGE_SIZE
chunk_size = int(math.ceil(height / float(CHUNK_COUNT)))
for i in compat_range(0, CHUNK_COUNT):
chunk_name = 'chunk_%s' % i
task_name = "calculation_%s" % i
# Break the calculation up into chunk size pieces.
rows = [i * chunk_size, i * chunk_size + chunk_size]
flow.add(
MandelCalculator(task_name,
# This ensures the storage symbol with name
# 'chunk_name' is sent into the tasks local
# symbol 'chunk'. This is how we give each
# calculator its own correct sequence of rows
# to work on.
rebind={'chunk': chunk_name}))
store[chunk_name] = rows
task_names.append(task_name)
# Now execute it.
eng = engines.load(flow, store=store, engine_conf=engine_conf)
eng.run()
# Gather all the results and order them for further processing.
gather = []
for name in task_names:
gather.extend(eng.storage.get(name))
points = []
for y, row in enumerate(gather):
for x, color in enumerate(row):
points.append(((x, y), color))
return points
def write_image(results, output_filename=None):
print("Gathered %s results that represents a mandelbrot"
" image (using %s chunks that are computed jointly"
" by %s workers)." % (len(results), CHUNK_COUNT, WORKERS))
if not output_filename:
return
# Pillow (the PIL fork) saves us from writing our own image writer...
try:
from PIL import Image
except ImportError as e:
# To currently get this (may change in the future),
# $ pip install Pillow
raise RuntimeError("Pillow is required to write image files: %s" % e)
# Limit to 255, find the max and normalize to that...
color_max = 0
for _point, color in results:
color_max = max(color, color_max)
# Use gray scale since we don't really have other colors.
img = Image.new('L', IMAGE_SIZE, "black")
pixels = img.load()
for (x, y), color in results:
if color_max == 0:
color = 0
else:
color = int((float(color) / color_max) * 255.0)
pixels[x, y] = color
img.save(output_filename)
def create_fractal():
logging.basicConfig(level=logging.ERROR)
# Setup our transport configuration and merge it into the worker and
# engine configuration so that both of those use it correctly.
shared_conf = dict(BASE_SHARED_CONF)
shared_conf.update({
'transport': 'memory',
'transport_options': {
'polling_interval': 0.1,
},
})
if len(sys.argv) >= 2:
output_filename = sys.argv[1]
else:
output_filename = None
worker_conf = dict(WORKER_CONF)
worker_conf.update(shared_conf)
engine_conf = dict(ENGINE_CONF)
engine_conf.update(shared_conf)
workers = []
worker_topics = []
print('Calculating your mandelbrot fractal of size %sx%s.' % IMAGE_SIZE)
try:
# Create a set of workers to simulate actual remote workers.
print('Running %s workers.' % (WORKERS))
for i in compat_range(0, WORKERS):
worker_conf['topic'] = 'calculator_%s' % (i + 1)
worker_topics.append(worker_conf['topic'])
w = worker.Worker(**worker_conf)
runner = threading_utils.daemon_thread(w.run)
runner.start()
w.wait()
workers.append((runner, w.stop))
# Now use those workers to do something.
engine_conf['topics'] = worker_topics
results = calculate(engine_conf)
print('Execution finished.')
finally:
# And cleanup.
print('Stopping workers.')
while workers:
r, stopper = workers.pop()
stopper()
r.join()
print("Writing image...")
write_image(results, output_filename=output_filename)
if __name__ == "__main__":
create_fractal()
```
#### File: taskflow/patterns/linear_flow.py
```python
from taskflow import flow
from taskflow.types import graph as gr
class Flow(flow.Flow):
"""Linear flow pattern.
A linear (potentially nested) flow of *tasks/flows* that can be
applied in order as one unit and rolled back as one unit using
the reverse order that the *tasks/flows* have been applied in.
"""
_no_last_item = object()
"""Sentinel object used to denote no last item has been assigned.
This is used to track no last item being added, since at creation there
is no last item, but since the :meth:`.add` routine can take any object
including none, we have to use a different object to be able to
distinguish the lack of any last item...
"""
def __init__(self, name, retry=None):
super(Flow, self).__init__(name, retry)
self._graph = gr.OrderedDiGraph(name=name)
self._last_item = self._no_last_item
def add(self, *items):
"""Adds a given task/tasks/flow/flows to this flow."""
for item in items:
if not self._graph.has_node(item):
self._graph.add_node(item)
if self._last_item is not self._no_last_item:
self._graph.add_edge(self._last_item, item,
attr_dict={flow.LINK_INVARIANT: True})
self._last_item = item
return self
def __len__(self):
return len(self._graph)
def __iter__(self):
for item in self._graph.nodes:
yield item
@property
def requires(self):
requires = set()
prior_provides = set()
if self._retry is not None:
requires.update(self._retry.requires)
prior_provides.update(self._retry.provides)
for item in self:
requires.update(item.requires - prior_provides)
prior_provides.update(item.provides)
return frozenset(requires)
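    # Illustrative note (added, not in the original): if task A provides 'x' and
    # task B requires 'x' and 'y', then Flow('f').add(A, B).requires evaluates to
    # frozenset({'y'}), since 'x' is already provided earlier in the linear order.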
def iter_nodes(self):
for (n, n_data) in self._graph.nodes(data=True):
yield (n, n_data)
def iter_links(self):
for (u, v, e_data) in self._graph.edges(data=True):
yield (u, v, e_data)
```
#### File: unit/jobs/test_redis_job.py
```python
import time
from oslo_utils import uuidutils
import six
import testtools
from taskflow import exceptions as excp
from taskflow.jobs.backends import impl_redis
from taskflow import states
from taskflow import test
from taskflow.tests.unit.jobs import base
from taskflow.tests import utils as test_utils
from taskflow.utils import persistence_utils as p_utils
from taskflow.utils import redis_utils as ru
REDIS_AVAILABLE = test_utils.redis_available(
impl_redis.RedisJobBoard.MIN_REDIS_VERSION)
@testtools.skipIf(not REDIS_AVAILABLE, 'redis is not available')
class RedisJobboardTest(test.TestCase, base.BoardTestMixin):
def close_client(self, client):
client.close()
def create_board(self, persistence=None):
namespace = uuidutils.generate_uuid()
client = ru.RedisClient()
config = {
'namespace': six.b("taskflow-%s" % namespace),
}
kwargs = {
'client': client,
'persistence': persistence,
}
board = impl_redis.RedisJobBoard('test-board', config, **kwargs)
self.addCleanup(board.close)
self.addCleanup(self.close_client, client)
return (client, board)
def test_posting_claim_expiry(self):
with base.connect_close(self.board):
with self.flush(self.client):
self.board.post('test', p_utils.temporary_log_book())
self.assertEqual(1, self.board.job_count)
possible_jobs = list(self.board.iterjobs(only_unclaimed=True))
self.assertEqual(1, len(possible_jobs))
j = possible_jobs[0]
self.assertEqual(states.UNCLAIMED, j.state)
with self.flush(self.client):
self.board.claim(j, self.board.name, expiry=0.5)
self.assertEqual(self.board.name, self.board.find_owner(j))
self.assertEqual(states.CLAIMED, j.state)
time.sleep(0.6)
self.assertEqual(states.UNCLAIMED, j.state)
possible_jobs = list(self.board.iterjobs(only_unclaimed=True))
self.assertEqual(1, len(possible_jobs))
def test_posting_claim_same_owner(self):
with base.connect_close(self.board):
with self.flush(self.client):
self.board.post('test', p_utils.temporary_log_book())
self.assertEqual(1, self.board.job_count)
possible_jobs = list(self.board.iterjobs(only_unclaimed=True))
self.assertEqual(1, len(possible_jobs))
j = possible_jobs[0]
self.assertEqual(states.UNCLAIMED, j.state)
with self.flush(self.client):
self.board.claim(j, self.board.name)
possible_jobs = list(self.board.iterjobs())
self.assertEqual(1, len(possible_jobs))
with self.flush(self.client):
self.assertRaises(excp.UnclaimableJob, self.board.claim,
possible_jobs[0], self.board.name)
possible_jobs = list(self.board.iterjobs(only_unclaimed=True))
self.assertEqual(0, len(possible_jobs))
def setUp(self):
super(RedisJobboardTest, self).setUp()
self.client, self.board = self.create_board()
```
#### File: tests/unit/test_formatters.py
```python
from taskflow import engines
from taskflow import formatters
from taskflow.listeners import logging as logging_listener
from taskflow.patterns import linear_flow
from taskflow import states
from taskflow import test
from taskflow.test import mock
from taskflow.test import utils as test_utils
class FormattersTest(test.TestCase):
@staticmethod
def _broken_atom_matcher(node):
return node.item.name == 'Broken'
def _make_test_flow(self):
b = test_utils.TaskWithFailure("Broken")
h_1 = test_utils.ProgressingTask("Happy-1")
h_2 = test_utils.ProgressingTask("Happy-2")
flo = linear_flow.Flow("test")
flo.add(h_1, h_2, b)
return flo
def test_exc_info_format(self):
flo = self._make_test_flow()
e = engines.load(flo)
self.assertRaises(RuntimeError, e.run)
fails = e.storage.get_execute_failures()
self.assertEqual(1, len(fails))
self.assertIn('Broken', fails)
fail = fails['Broken']
f = formatters.FailureFormatter(e)
(exc_info, details) = f.format(fail, self._broken_atom_matcher)
self.assertEqual(3, len(exc_info))
self.assertEqual("", details)
@mock.patch('taskflow.formatters.FailureFormatter._format_node')
def test_exc_info_with_details_format(self, mock_format_node):
mock_format_node.return_value = 'A node'
flo = self._make_test_flow()
e = engines.load(flo)
self.assertRaises(RuntimeError, e.run)
fails = e.storage.get_execute_failures()
self.assertEqual(1, len(fails))
self.assertIn('Broken', fails)
fail = fails['Broken']
# Doing this allows the details to be shown...
e.storage.set_atom_intention("Broken", states.EXECUTE)
f = formatters.FailureFormatter(e)
(exc_info, details) = f.format(fail, self._broken_atom_matcher)
self.assertEqual(3, len(exc_info))
self.assertTrue(mock_format_node.called)
@mock.patch('taskflow.storage.Storage.get_execute_result')
def test_exc_info_with_details_format_hidden(self, mock_get_execute):
flo = self._make_test_flow()
e = engines.load(flo)
self.assertRaises(RuntimeError, e.run)
fails = e.storage.get_execute_failures()
self.assertEqual(1, len(fails))
self.assertIn('Broken', fails)
fail = fails['Broken']
# Doing this allows the details to be shown...
e.storage.set_atom_intention("Broken", states.EXECUTE)
hide_inputs_outputs_of = ['Broken', "Happy-1", "Happy-2"]
f = formatters.FailureFormatter(
e, hide_inputs_outputs_of=hide_inputs_outputs_of)
(exc_info, details) = f.format(fail, self._broken_atom_matcher)
self.assertEqual(3, len(exc_info))
self.assertFalse(mock_get_execute.called)
@mock.patch('taskflow.formatters.FailureFormatter._format_node')
def test_formatted_via_listener(self, mock_format_node):
mock_format_node.return_value = 'A node'
flo = self._make_test_flow()
e = engines.load(flo)
with logging_listener.DynamicLoggingListener(e):
self.assertRaises(RuntimeError, e.run)
self.assertTrue(mock_format_node.called)
```
#### File: taskflow/types/graph.py
```python
import collections
import os
import networkx as nx
from networkx.drawing import nx_pydot
import six
def _common_format(g, edge_notation):
lines = []
lines.append("Name: %s" % g.name)
lines.append("Type: %s" % type(g).__name__)
lines.append("Frozen: %s" % nx.is_frozen(g))
lines.append("Density: %0.3f" % nx.density(g))
lines.append("Nodes: %s" % g.number_of_nodes())
for n, n_data in g.nodes(data=True):
if n_data:
lines.append(" - %s (%s)" % (n, n_data))
else:
lines.append(" - %s" % n)
lines.append("Edges: %s" % g.number_of_edges())
for (u, v, e_data) in g.edges(data=True):
if e_data:
lines.append(" %s %s %s (%s)" % (u, edge_notation, v, e_data))
else:
lines.append(" %s %s %s" % (u, edge_notation, v))
return lines
class Graph(nx.Graph):
"""A graph subclass with useful utility functions."""
def __init__(self, incoming_graph_data=None, name=''):
super(Graph, self).__init__(incoming_graph_data=incoming_graph_data,
name=name)
self.frozen = False
def freeze(self):
"""Freezes the graph so that no more mutations can occur."""
if not self.frozen:
nx.freeze(self)
return self
def export_to_dot(self):
"""Exports the graph to a dot format (requires pydot library)."""
return nx_pydot.to_pydot(self).to_string()
def pformat(self):
"""Pretty formats your graph into a string."""
return os.linesep.join(_common_format(self, "<->"))
def add_edge(self, u, v, attr_dict=None, **attr):
"""Add an edge between u and v."""
if attr_dict is not None:
return super(Graph, self).add_edge(u, v, **attr_dict)
return super(Graph, self).add_edge(u, v, **attr)
def add_node(self, n, attr_dict=None, **attr):
"""Add a single node n and update node attributes."""
if attr_dict is not None:
return super(Graph, self).add_node(n, **attr_dict)
return super(Graph, self).add_node(n, **attr)
def fresh_copy(self):
"""Return a fresh copy graph with the same data structure.
A fresh copy has no nodes, edges or graph attributes. It is
the same data structure as the current graph. This method is
typically used to create an empty version of the graph.
"""
return Graph()
class DiGraph(nx.DiGraph):
"""A directed graph subclass with useful utility functions."""
def __init__(self, incoming_graph_data=None, name=''):
super(DiGraph, self).__init__(incoming_graph_data=incoming_graph_data,
name=name)
self.frozen = False
def freeze(self):
"""Freezes the graph so that no more mutations can occur."""
if not self.frozen:
nx.freeze(self)
return self
def get_edge_data(self, u, v, default=None):
"""Returns a *copy* of the edge attribute dictionary between (u, v).
NOTE(harlowja): this differs from the networkx get_edge_data() as that
function does not return a copy (but returns a reference to the actual
edge data).
"""
try:
return dict(self.adj[u][v])
except KeyError:
return default
def topological_sort(self):
"""Return a list of nodes in this graph in topological sort order."""
return nx.topological_sort(self)
def pformat(self):
"""Pretty formats your graph into a string.
This pretty formatted string representation includes many useful
        details about your graph, including: name, type, frozenness, node count,
nodes, edge count, edges, graph density and graph cycles (if any).
"""
lines = _common_format(self, "->")
cycles = list(nx.cycles.recursive_simple_cycles(self))
lines.append("Cycles: %s" % len(cycles))
for cycle in cycles:
buf = six.StringIO()
buf.write("%s" % (cycle[0]))
for i in range(1, len(cycle)):
buf.write(" --> %s" % (cycle[i]))
buf.write(" --> %s" % (cycle[0]))
lines.append(" %s" % buf.getvalue())
return os.linesep.join(lines)
def export_to_dot(self):
"""Exports the graph to a dot format (requires pydot library)."""
return nx_pydot.to_pydot(self).to_string()
def is_directed_acyclic(self):
"""Returns if this graph is a DAG or not."""
return nx.is_directed_acyclic_graph(self)
def no_successors_iter(self):
"""Returns an iterator for all nodes with no successors."""
for n in self.nodes:
if not len(list(self.successors(n))):
yield n
def no_predecessors_iter(self):
"""Returns an iterator for all nodes with no predecessors."""
for n in self.nodes:
if not len(list(self.predecessors(n))):
yield n
def bfs_predecessors_iter(self, n):
"""Iterates breadth first over *all* predecessors of a given node.
        This will go through the node's predecessors, then the predecessor nodes'
predecessors and so on until no more predecessors are found.
NOTE(harlowja): predecessor cycles (if they exist) will not be iterated
over more than once (this prevents infinite iteration).
"""
visited = set([n])
queue = collections.deque(self.predecessors(n))
while queue:
pred = queue.popleft()
if pred not in visited:
yield pred
visited.add(pred)
for pred_pred in self.predecessors(pred):
if pred_pred not in visited:
queue.append(pred_pred)
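    # Illustrative sketch (added, not in the original): for edges a->c, b->c and
    # c->d, bfs_predecessors_iter('d') yields c first and then a and b (in some
    # order), visiting each predecessor at most once even when cycles are present.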
def add_edge(self, u, v, attr_dict=None, **attr):
"""Add an edge between u and v."""
if attr_dict is not None:
return super(DiGraph, self).add_edge(u, v, **attr_dict)
return super(DiGraph, self).add_edge(u, v, **attr)
def add_node(self, n, attr_dict=None, **attr):
"""Add a single node n and update node attributes."""
if attr_dict is not None:
return super(DiGraph, self).add_node(n, **attr_dict)
return super(DiGraph, self).add_node(n, **attr)
def fresh_copy(self):
"""Return a fresh copy graph with the same data structure.
A fresh copy has no nodes, edges or graph attributes. It is
the same data structure as the current graph. This method is
typically used to create an empty version of the graph.
"""
return DiGraph()
class OrderedDiGraph(DiGraph):
"""A directed graph subclass with useful utility functions.
This derivative retains node, edge, insertion and iteration
ordering (so that the iteration order matches the insertion
order).
"""
node_dict_factory = collections.OrderedDict
adjlist_outer_dict_factory = collections.OrderedDict
adjlist_inner_dict_factory = collections.OrderedDict
edge_attr_dict_factory = collections.OrderedDict
def fresh_copy(self):
"""Return a fresh copy graph with the same data structure.
A fresh copy has no nodes, edges or graph attributes. It is
the same data structure as the current graph. This method is
typically used to create an empty version of the graph.
"""
return OrderedDiGraph()
class OrderedGraph(Graph):
"""A graph subclass with useful utility functions.
This derivative retains node, edge, insertion and iteration
ordering (so that the iteration order matches the insertion
order).
"""
node_dict_factory = collections.OrderedDict
adjlist_outer_dict_factory = collections.OrderedDict
adjlist_inner_dict_factory = collections.OrderedDict
edge_attr_dict_factory = collections.OrderedDict
def fresh_copy(self):
"""Return a fresh copy graph with the same data structure.
A fresh copy has no nodes, edges or graph attributes. It is
the same data structure as the current graph. This method is
typically used to create an empty version of the graph.
"""
return OrderedGraph()
def merge_graphs(graph, *graphs, **kwargs):
"""Merges a bunch of graphs into a new graph.
If no additional graphs are provided the first graph is
returned unmodified otherwise the merged graph is returned.
"""
tmp_graph = graph
allow_overlaps = kwargs.get('allow_overlaps', False)
overlap_detector = kwargs.get('overlap_detector')
if overlap_detector is not None and not six.callable(overlap_detector):
raise ValueError("Overlap detection callback expected to be callable")
elif overlap_detector is None:
overlap_detector = (lambda to_graph, from_graph:
len(to_graph.subgraph(from_graph.nodes)))
for g in graphs:
# This should ensure that the nodes to be merged do not already exist
# in the graph that is to be merged into. This could be problematic if
# there are duplicates.
if not allow_overlaps:
# Attempt to induce a subgraph using the to be merged graphs nodes
# and see if any graph results.
overlaps = overlap_detector(graph, g)
if overlaps:
raise ValueError("Can not merge graph %s into %s since there "
"are %s overlapping nodes (and we do not "
"support merging nodes)" % (g, graph,
overlaps))
graph = nx.algorithms.compose(graph, g)
# Keep the first graphs name.
if graphs:
graph.name = tmp_graph.name
return graph
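# Illustrative sketch (added, not in the original) of merging two disjoint graphs:
#
#   g1 = DiGraph(name='g1'); g1.add_edge('a', 'b')
#   g2 = DiGraph(name='g2'); g2.add_edge('c', 'd')
#   merged = merge_graphs(g1, g2)   # contains all four nodes, keeps the name 'g1'
#
# Merging graphs whose node sets overlap raises ValueError unless
# allow_overlaps=True is passed.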
```
#### File: taskflow/types/tree.py
```python
import collections
import itertools
import os
import six
from taskflow.types import graph
from taskflow.utils import iter_utils
from taskflow.utils import misc
class FrozenNode(Exception):
"""Exception raised when a frozen node is modified."""
def __init__(self):
super(FrozenNode, self).__init__("Frozen node(s) can't be modified")
class _DFSIter(object):
"""Depth first iterator (non-recursive) over the child nodes."""
def __init__(self, root, include_self=False, right_to_left=True):
self.root = root
self.right_to_left = bool(right_to_left)
self.include_self = bool(include_self)
def __iter__(self):
stack = []
if self.include_self:
stack.append(self.root)
else:
if self.right_to_left:
stack.extend(self.root.reverse_iter())
else:
# Traverse the left nodes first to the right nodes.
stack.extend(iter(self.root))
while stack:
# Visit the node.
node = stack.pop()
yield node
if self.right_to_left:
stack.extend(node.reverse_iter())
else:
# Traverse the left nodes first to the right nodes.
stack.extend(iter(node))
class _BFSIter(object):
"""Breadth first iterator (non-recursive) over the child nodes."""
def __init__(self, root, include_self=False, right_to_left=False):
self.root = root
self.right_to_left = bool(right_to_left)
self.include_self = bool(include_self)
def __iter__(self):
q = collections.deque()
if self.include_self:
q.append(self.root)
else:
if self.right_to_left:
q.extend(iter(self.root))
else:
# Traverse the left nodes first to the right nodes.
q.extend(self.root.reverse_iter())
while q:
# Visit the node.
node = q.popleft()
yield node
if self.right_to_left:
q.extend(iter(node))
else:
# Traverse the left nodes first to the right nodes.
q.extend(node.reverse_iter())
class Node(object):
"""A n-ary node class that can be used to create tree structures."""
#: Default string prefix used in :py:meth:`.pformat`.
STARTING_PREFIX = ""
#: Default string used to create empty space used in :py:meth:`.pformat`.
EMPTY_SPACE_SEP = " "
HORIZONTAL_CONN = "__"
"""
Default string used to horizontally connect a node to its
parent (used in :py:meth:`.pformat`.).
"""
VERTICAL_CONN = "|"
"""
Default string used to vertically connect a node to its
parent (used in :py:meth:`.pformat`).
"""
#: Default line separator used in :py:meth:`.pformat`.
LINE_SEP = os.linesep
def __init__(self, item, **kwargs):
self.item = item
self.parent = None
self.metadata = dict(kwargs)
self.frozen = False
self._children = []
def freeze(self):
if not self.frozen:
# This will DFS until all children are frozen as well, only
            # after that succeeds do we freeze ourselves (this makes it so
# that we don't become frozen if a child node fails to perform
# the freeze operation).
for n in self:
n.freeze()
self.frozen = True
@misc.disallow_when_frozen(FrozenNode)
def add(self, child):
"""Adds a child to this node (appends to left of existing children).
        NOTE(harlowja): this will also set the child's parent to be this node.
"""
child.parent = self
self._children.append(child)
def empty(self):
"""Returns if the node is a leaf node."""
return self.child_count() == 0
def path_iter(self, include_self=True):
"""Yields back the path from this node to the root node."""
if include_self:
node = self
else:
node = self.parent
while node is not None:
yield node
node = node.parent
def find_first_match(self, matcher, only_direct=False, include_self=True):
"""Finds the *first* node that matching callback returns true.
This will search not only this node but also any children nodes (in
depth first order, from right to left) and finally if nothing is
matched then ``None`` is returned instead of a node object.
:param matcher: callback that takes one positional argument (a node)
and returns true if it matches desired node or false
if not.
:param only_direct: only look at current node and its
direct children (implies that this does not
search using depth first).
:param include_self: include the current node during searching.
:returns: the node that matched (or ``None``)
"""
if only_direct:
if include_self:
it = itertools.chain([self], self.reverse_iter())
else:
it = self.reverse_iter()
else:
it = self.dfs_iter(include_self=include_self)
return iter_utils.find_first_match(it, matcher)
def find(self, item, only_direct=False, include_self=True):
"""Returns the *first* node for an item if it exists in this node.
This will search not only this node but also any children nodes (in
depth first order, from right to left) and finally if nothing is
matched then ``None`` is returned instead of a node object.
:param item: item to look for.
:param only_direct: only look at current node and its
direct children (implies that this does not
search using depth first).
:param include_self: include the current node during searching.
:returns: the node that matched provided item (or ``None``)
"""
return self.find_first_match(lambda n: n.item == item,
only_direct=only_direct,
include_self=include_self)
@misc.disallow_when_frozen(FrozenNode)
def disassociate(self):
"""Removes this node from its parent (if any).
:returns: occurrences of this node that were removed from its parent.
"""
occurrences = 0
if self.parent is not None:
p = self.parent
self.parent = None
# Remove all instances of this node from its parent.
while True:
try:
p._children.remove(self)
except ValueError:
break
else:
occurrences += 1
return occurrences
@misc.disallow_when_frozen(FrozenNode)
def remove(self, item, only_direct=False, include_self=True):
"""Removes a item from this nodes children.
This will search not only this node but also any children nodes and
finally if nothing is found then a value error is raised instead of
the normally returned *removed* node object.
:param item: item to lookup.
:param only_direct: only look at current node and its
direct children (implies that this does not
search using depth first).
:param include_self: include the current node during searching.
"""
node = self.find(item, only_direct=only_direct,
include_self=include_self)
if node is None:
raise ValueError("Item '%s' not found to remove" % item)
else:
node.disassociate()
return node
def __contains__(self, item):
"""Returns whether item exists in this node or this nodes children.
:returns: if the item exists in this node or nodes children,
true if the item exists, false otherwise
:rtype: boolean
"""
return self.find(item) is not None
def __getitem__(self, index):
# NOTE(harlowja): 0 is the right most index, len - 1 is the left most
return self._children[index]
def pformat(self, stringify_node=None,
linesep=LINE_SEP, vertical_conn=VERTICAL_CONN,
horizontal_conn=HORIZONTAL_CONN, empty_space=EMPTY_SPACE_SEP,
starting_prefix=STARTING_PREFIX):
"""Formats this node + children into a nice string representation.
**Example**::
>>> from taskflow.types import tree
>>> yahoo = tree.Node("CEO")
>>> yahoo.add(tree.Node("Infra"))
>>> yahoo[0].add(tree.Node("Boss"))
>>> yahoo[0][0].add(tree.Node("Me"))
>>> yahoo.add(tree.Node("Mobile"))
>>> yahoo.add(tree.Node("Mail"))
>>> print(yahoo.pformat())
CEO
|__Infra
| |__Boss
| |__Me
|__Mobile
|__Mail
"""
if stringify_node is None:
# Default to making a unicode string out of the nodes item...
stringify_node = lambda node: six.text_type(node.item)
expected_lines = self.child_count(only_direct=False) + 1
buff = six.StringIO()
conn = vertical_conn + horizontal_conn
stop_at_parent = self
for i, node in enumerate(self.dfs_iter(include_self=True), 1):
prefix = []
connected_to_parent = False
last_node = node
            # Walk through *most* of this node's parents, and form the expected
# prefix that each parent should require, repeat this until we
# hit the root node (self) and use that as our nodes prefix
# string...
parent_node_it = iter_utils.while_is_not(
node.path_iter(include_self=True), stop_at_parent)
for j, parent_node in enumerate(parent_node_it):
if parent_node is stop_at_parent:
if j > 0:
if not connected_to_parent:
prefix.append(conn)
connected_to_parent = True
else:
# If the node was connected already then it must
# have had more than one parent, so we want to put
# the right final starting prefix on (which may be
# a empty space or another vertical connector)...
last_node = self._children[-1]
m = last_node.find_first_match(lambda n: n is node,
include_self=False,
only_direct=False)
if m is not None:
prefix.append(empty_space)
else:
prefix.append(vertical_conn)
elif parent_node is node:
# Skip ourself... (we only include ourself so that
# we can use the 'j' variable to determine if the only
# node requested is ourself in the first place); used
# in the first conditional here...
pass
else:
if not connected_to_parent:
prefix.append(conn)
spaces = len(horizontal_conn)
connected_to_parent = True
else:
# If we have already been connected to our parent
# then determine if this current node is the last
# node of its parent (and in that case just put
# on more spaces), otherwise put a vertical connector
# on and less spaces...
if parent_node[-1] is not last_node:
prefix.append(vertical_conn)
spaces = len(horizontal_conn)
else:
spaces = len(conn)
prefix.append(empty_space * spaces)
last_node = parent_node
prefix.append(starting_prefix)
for prefix_piece in reversed(prefix):
buff.write(prefix_piece)
buff.write(stringify_node(node))
if i != expected_lines:
buff.write(linesep)
return buff.getvalue()
def child_count(self, only_direct=True):
"""Returns how many children this node has.
This can be either only the direct children of this node or inclusive
of all children nodes of this node (children of children and so-on).
NOTE(harlowja): it does not account for the current node in this count.
"""
if not only_direct:
return iter_utils.count(self.dfs_iter())
return len(self._children)
def __iter__(self):
"""Iterates over the direct children of this node (right->left)."""
for c in self._children:
yield c
def reverse_iter(self):
"""Iterates over the direct children of this node (left->right)."""
for c in reversed(self._children):
yield c
def index(self, item):
"""Finds the child index of a given item, searches in added order."""
index_at = None
for (i, child) in enumerate(self._children):
if child.item == item:
index_at = i
break
if index_at is None:
raise ValueError("%s is not contained in any child" % (item))
return index_at
def dfs_iter(self, include_self=False, right_to_left=True):
"""Depth first iteration (non-recursive) over the child nodes."""
return _DFSIter(self,
include_self=include_self,
right_to_left=right_to_left)
def bfs_iter(self, include_self=False, right_to_left=False):
"""Breadth first iteration (non-recursive) over the child nodes."""
return _BFSIter(self,
include_self=include_self,
right_to_left=right_to_left)
def to_digraph(self):
"""Converts this node + its children into a ordered directed graph.
        The graph returned will have the same structure as
        this node and its children (and tree node metadata will be translated
into graph node metadata).
:returns: a directed graph
:rtype: :py:class:`taskflow.types.graph.OrderedDiGraph`
"""
g = graph.OrderedDiGraph()
for node in self.bfs_iter(include_self=True, right_to_left=True):
g.add_node(node.item, **node.metadata)
if node is not self:
g.add_edge(node.parent.item, node.item)
return g
```
#### File: taskflow/tools/schema_generator.py
```python
import contextlib
import re
import six
import tabulate
from taskflow.persistence.backends import impl_sqlalchemy
NAME_MAPPING = {
'flowdetails': 'Flow details',
'atomdetails': 'Atom details',
'logbooks': 'Logbooks',
}
CONN_CONF = {
# This uses an in-memory database (aka nothing is written)
"connection": "sqlite://",
}
TABLE_QUERY = "SELECT name, sql FROM sqlite_master WHERE type='table'"
SCHEMA_QUERY = "pragma table_info(%s)"
def to_bool_string(val):
if isinstance(val, (int, bool)):
return six.text_type(bool(val))
if not isinstance(val, six.string_types):
val = six.text_type(val)
if val.lower() in ('0', 'false'):
return 'False'
if val.lower() in ('1', 'true'):
return 'True'
raise ValueError("Unknown boolean input '%s'" % (val))
def main():
backend = impl_sqlalchemy.SQLAlchemyBackend(CONN_CONF)
with contextlib.closing(backend) as backend:
# Make the schema exist...
with contextlib.closing(backend.get_connection()) as conn:
conn.upgrade()
# Now make a prettier version of that schema...
tables = backend.engine.execute(TABLE_QUERY)
table_names = [r[0] for r in tables]
for i, table_name in enumerate(table_names):
pretty_name = NAME_MAPPING.get(table_name, table_name)
print("*" + pretty_name + "*")
# http://www.sqlite.org/faq.html#q24
table_name = table_name.replace("\"", "\"\"")
rows = []
for r in backend.engine.execute(SCHEMA_QUERY % table_name):
# Cut out the numbers from things like VARCHAR(12) since
# this is not very useful to show users who just want to
# see the basic schema...
row_type = re.sub(r"\(.*?\)", "", r['type']).strip()
if not row_type:
raise ValueError("Row %s of table '%s' was empty after"
" cleaning" % (r['cid'], table_name))
rows.append([r['name'], row_type, to_bool_string(r['pk'])])
contents = tabulate.tabulate(
rows, headers=['Name', 'Type', 'Primary Key'],
tablefmt="rst")
print("\n%s" % contents.strip())
if i + 1 != len(table_names):
print("")
if __name__ == '__main__':
main()
``` |
{
"source": "jonas-mika/ml-project",
"score": 2
} |
#### File: src/evaluate/assert_nn2.py
```python
import os
import sys
sys.path.insert(0, os.path.abspath(''))
# external libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris, make_moons, make_circles
from sklearn.preprocessing import StandardScaler
# custom imports
from scripts.models import NeuralNetworkClassifier
from scripts.models.neural_net import DenseLayer
from scripts.metrics import accuracy_score, confusion_matrix
from scripts.plotting import plot_2d_decision_regions
# global configs
np.random.seed(1)
SAVE = True
SHOW = True
SAVEPATH = './data/figures'
def main():
# ------ loading and preprocessing data ------
iris_X, iris_y = load_iris(return_X_y=True)
iris_X = iris_X[:, :2]
#moons_X, moons_y = make_moons(random_state=1)
circles_X, circles_y = make_circles(random_state=1)
scaler = StandardScaler()
iris_X = scaler.fit_transform(iris_X)
#moons_X = scaler.fit_transform(moons_X)
circles_X = scaler.fit_transform(circles_X)
data = {'iris': [iris_X, iris_y],
#'moons': [moons_X, moons_y],
'circles': [circles_X, circles_y]}
# ----- plotting --------
all_epochs = [[1, 5, 50], [50, 100, 200]]
print('starting training')
fig, axes = plt.subplots(nrows = len(data), ncols = len(all_epochs[0]), figsize = (5*len(all_epochs[0]), 5*len(data)))
for i, info in enumerate(zip(data.keys(), [3, 2], all_epochs)):
dataset, k, epochs = info
X, y = data[dataset]
for j in range(len(epochs)):
clf = NeuralNetworkClassifier(
layers = [DenseLayer(n_in=2, n_out=20, activation='relu', name='fc1'),
DenseLayer(n_in=20, n_out=k, activation='softmax', name='output')],
loss='cross_entropy',
name=f'Simple NN'
)
clf.fit(X, y, epochs=epochs[j], lr=0.01, num_batches=10, verbose=1)
plot_2d_decision_regions(X, y, clf, ax=axes[i][j], title=f'NN (Epochs: {epochs[j]})')
if j == 0:
axes[i][j].set_ylabel(f'{dataset.title()}')
fig.tight_layout()
if SHOW:
plt.show()
if input('SAVE? (y/n)') == 'y':
fig.savefig(f'{SAVEPATH}/assert_nn_toydata.pdf')
print(f'Saved PDF to {SAVEPATH}/assert_nn_toydata.pdf')
if __name__ == '__main__':
main()
```
#### File: src/models/custom_dt.py
```python
import os
import sys
sys.path.insert(0, os.path.abspath('.')) # resetting python path to access scripts module
# external libraries
import numpy as np
from matplotlib import pyplot as plt
import seaborn as sns
from sklearn.metrics import classification_report
from sklearn.decomposition import PCA
# custom imports
from scripts.models import DecisionTreeClassifier
from scripts.metrics import accuracy_score, confusion_matrix
from scripts.plotting import plot_2d_decision_regions
from scripts.utils import get_data, generate_summary
# global configs
np.random.seed(1)
DO_PCA = False
SHOW = True
def run_custom_dt():
# ------ loading and preprocessing ------
X_train, X_test, y_train, y_test = get_data(raw=True, scaled=True)
if DO_PCA:
pca = PCA()
X_train = pca.fit_transform(X_train)
# ------ fitting model ------
# initialise and train model
clf = DecisionTreeClassifier(criterion='entropy', max_depth=5, max_features=8) # most generalising; can achieve 1.0 accuracy for depth >= 8
clf.fit(X_train, y_train)
# ------ evaluate model ------
train_preds = clf.predict(X_train)
test_preds = clf.predict(X_test)
train_acc = accuracy_score(y_train, train_preds)
test_acc = accuracy_score(y_test, test_preds)
conf_matrix = confusion_matrix(y_test, test_preds, as_frame=True, normalised=False)
report = classification_report(y_test, test_preds)
# ------ show and save results ------
if SHOW:
print(f'Training Accuracy: {train_acc}')
print(f'Test Accuracy: {test_acc}')
print(conf_matrix)
print(report)
if input('SAVE? (y/n)' ) == 'y':
generate_summary(filepath = './data/results', name='custom_dt',
training_accuracy = train_acc,
test_accuracy = test_acc,
confusion_matrix = conf_matrix,
classification_report = report)
if __name__ == '__main__':
run_custom_dt()
```
#### File: src/models/sklearn_dt.py
```python
import os
import sys
sys.path.insert(0, os.path.abspath('.'))
# external libraries
import numpy as np
import pandas as pd
import graphviz
import pydotplus
from matplotlib import pyplot as plt
from sklearn.tree import DecisionTreeClassifier
from sklearn.preprocessing import StandardScaler, PolynomialFeatures
from sklearn.decomposition import PCA
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.tree import plot_tree, export_graphviz
from sklearn.metrics import classification_report
# custom imports
from scripts.plotting import plot_2d_decision_regions
from scripts.metrics import accuracy_score, confusion_matrix
from scripts.utils import get_data, generate_summary
# global configs
np.random.seed(1)
DO_PCA = False
POLYNOMIAL_FEATURES = False
SHOW = True
def run_sklearn_dt():
# ------ loading and preprocessing ------
# load and split data
X_train, X_test, y_train, y_test = get_data(raw=True, scaled=False)
if POLYNOMIAL_FEATURES:
engineer = PolynomialFeatures(degree = 3)
X_train = engineer.fit_transform(X_train)
X_test = engineer.fit_transform(X_test)
scaler = StandardScaler()
scaler.fit(X_train)
if DO_PCA:
pca = PCA()
pca.fit(X_train)
# ------ constructing model ------
if DO_PCA:
pipe = Pipeline(steps=[('scaler', scaler),
('pca', pca),
('decision_tree', DecisionTreeClassifier(random_state=1))])
# define hyper parameters to grid search
params = {
'pca__n_components': list(range(1, X_train.shape[1]+1)),
'decision_tree__criterion': ['gini', 'entropy'],
'decision_tree__max_depth': list(range(1, 10)),
'decision_tree__splitter': ['best', 'random']
}
else:
pipe = Pipeline(steps=[('scaler', scaler),
('decision_tree', DecisionTreeClassifier(random_state=1))])
# define hyper parameters to grid search
params = {
'decision_tree__criterion': ['gini', 'entropy'],
'decision_tree__max_depth': list(range(1, 10)),
'decision_tree__max_features': list(range(1, 10)),
'decision_tree__splitter': ['best', 'random']
}
# define and train on grid
grid = GridSearchCV(pipe, params, n_jobs=-1, verbose=1)
grid.fit(X_train, y_train)
# ------ report best hyper parameters ------
# report back best combination of hyperparameters
print('-'*5 + ' Best Hyperparameters ' + '-'*5)
best_criterion = grid.best_estimator_.get_params()['decision_tree__criterion']
best_max_depth = grid.best_estimator_.get_params()['decision_tree__max_depth']
best_splitter = grid.best_estimator_.get_params()['decision_tree__splitter']
if DO_PCA:
best_n_components = grid.best_estimator_.get_params()['pca__n_components']
else:
best_max_features = grid.best_estimator_.get_params()['decision_tree__max_features']
if SHOW:
print('Best Criterion: ', best_criterion)
print('Best Max Depth: ', best_max_depth)
print('Best Splitter: ', best_splitter)
if DO_PCA:
print('Best PCA Components:', best_n_components)
else:
print('Best Max Features:', best_max_features)
# final model
clf = grid.best_estimator_
# ------ evaluate performance ------
train_preds = clf.predict(X_train)
test_preds = clf.predict(X_test)
train_acc = round(accuracy_score(y_train, train_preds), 2)*100
val_acc = round(grid.best_score_, 2)*100
test_acc = round(accuracy_score(y_test, test_preds), 2)*100
conf_matrix = confusion_matrix(y_test, test_preds, as_frame=True, normalised=False)
report = classification_report(y_test, test_preds)
# ------ show and save results ------
if SHOW:
print('-'*5 + ' Evaluation of Performance ' + '-'*5)
print(f'Training Accuracy: {train_acc}%')
print(f'Validation Accuracy (during 5-fold CV): {val_acc}%')
print(f'Test Accuracy: {test_acc}%'); print();
print(conf_matrix)
print(report)
if input('SAVE? (y/n)' ) == 'y':
generate_summary(filepath = './data/results', name='sklearn_dt',
best_criterion = best_criterion,
best_max_depth = best_max_depth,
best_splitter = best_splitter,
best_max_features = best_max_features,
training_accuracy = train_acc,
validation_accuracy = val_acc,
test_accuracy = test_acc,
confusion_matrix = conf_matrix,
classification_report = report)
# ------ show and save decision tree visualisation ------
if input('Plot DT? (y/n)') == 'y':
if DO_PCA:
FEATURE_NAMES = [f'PC {i}' for i in range(grid.best_estimator_.get_params()['pca__n_components'])]
else:
FEATURE_NAMES = ['RI', 'Na', 'Mg', 'Al', 'Si', 'K', 'Ca', 'Ba', 'Fe']
CLASS_NAMES = ['Window from Building (float-processed)',
'Window from Building (non-float processed)',
'Window from Vehicle',
'Container',
'Tableware',
'Headlamp']
dot_data = export_graphviz(clf['decision_tree'], out_file=None,
feature_names=FEATURE_NAMES,
class_names=CLASS_NAMES,
filled=True,
rounded=True)
graph = graphviz.Source(dot_data, format="png")
#pydot_graph = pydotplus.graph_from_dot_data(dot_data)
#pydot_graph.write_png('original_tree.png')
#pydot_graph.set_size('"1000,500!"')
# graph.set_size('"10,5!"')
if SHOW:
plt.show()
if input('SAVE? (y/n)' ) == 'y':
SAVEPATH = './data/figures'
FILENAME = 'graphviz_sklearn_dt'
if DO_PCA:
FILENAME += '_pca'
#pydot_graph.write_png(f'{SAVEPATH}/{FILENAME}.png')
graph.render(f'{SAVEPATH}/{FILENAME}')
print('saved')
if __name__ == '__main__':
run_sklearn_dt()
```
#### File: src/preprocess/_preprocess.py
```python
import os
import sys
sys.path.insert(0, '.') # make runable from src/
# external libraries
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
# For path referencing
from config.definitions import ROOT_DIR
# Python's built in libs
from collections import Counter
# Global constants
features_m = {
'RI': 'refractive_index',
'Na': 'sodium',
'Mg': 'magnesium',
'Al': 'aluminium',
'Si': 'silicone',
'K': 'potassium',
'Ca': 'calcium',
'Ba': 'barium',
'Fe': 'iron'
}
features_names = ['refractive_index', 'sodium', 'magnesium', 'aluminium', 'silicone', 'potassium', 'calcium', 'barium', 'iron']
classes_m = {
1: 'window_from_building_(float_processed)',
2: 'window_from_building_(non_float_processed)',
3: 'window_from_vehicle',
5: 'container',
6: 'tableware',
7: 'headlamp'
}
def run_preprocessing():
# Save the info about the process into a specified file
old = sys.stdout
out_path = os.path.join(ROOT_DIR, 'data', 'metadata', 'inspect_clean_transform_info.txt')
sys.stdout = open(out_path, 'w')
# Load data
train = pd.read_csv(os.path.join(ROOT_DIR, 'data', 'raw', 'df_train.csv'), delimiter=',', header=0)
test = pd.read_csv(os.path.join(ROOT_DIR, 'data', 'raw', 'df_test.csv'), delimiter=',', header=0)
# Initial inspection
print('-- Initial inspection ', end='-'*50 + '\n')
print('Training data')
print(train.head())
print(end='\n\n')
print('Test data')
print(test.head())
print(end='\n\n')
print(f'Training data shape: {train.shape} | Test data shape: {test.shape}')
    print(f'There are in total {len(np.unique(train["type"]))} classes labeled as: {np.unique(train["type"])}')
print(end='\n\n')
# Split the data
x_train, x_val, y_train, y_val = train_test_split(train.iloc[:, :-1],
train.iloc[:, -1],
test_size=0.33,
random_state=42)
x_test, y_test = test.iloc[:, :-1], test.iloc[:, -1]
# Define transformations method
scaler = StandardScaler(with_mean=True, with_std=True) # Mean zero, unit variance
pca = PCA(random_state=42)
# Transform
print('-- Label distribution ', end='-'*50 + '\n')
print('\nMap from key to actual name:')
print('-'*40)
for k, v in classes_m.items():
print(f'{k} --> {v}')
print('-'*40)
data = [[x_train, y_train, 'train'], [x_val, y_val, 'val'], [x_test, y_test, 'test']]
expl_var = dict()
for t in data:
# Load and transform
X, y, path = t
X_scaled = scaler.fit_transform(X)
X_pca = pca.fit_transform(X_scaled)
expl_var[path] = pca.explained_variance_ratio_
# Save
X.to_csv(os.path.join(ROOT_DIR, 'data', 'transformed', path, 'X_org.csv'), index=False)
pd.DataFrame(X_scaled).to_csv(os.path.join(ROOT_DIR, 'data', 'transformed', path, 'X_scaled.csv'), index=False, header=False)
pd.DataFrame(X_pca).to_csv(os.path.join(ROOT_DIR, 'data', 'transformed', path, 'X_pca.csv'), index=False, header=False)
y.to_csv(os.path.join(ROOT_DIR, 'data', 'transformed', path, 'y.csv'), index=False, header=False)
# Show info about the process
        print('\n' + f"{path.title()} dataset label distribution, sorted by percentage\n" + '-'*40)
c = Counter(y)
k_cn = [(k, cn,) for k, cn in c.items()]
k_cn_sort = sorted(k_cn, key=lambda x: x[1], reverse=True)
for t2 in k_cn_sort:
k, cn = t2
print(f'Key: {k} | Percent: {round(cn/y.shape[0]*100, 2)} %')
print('-'*40 + '\n')
# Info about transformation
print('-- Transformation info ', end='-'*50 + '\n')
    print('Finished data transformation successfully using standard scaling and PCA.')
print(f"Percentage of explained variance by first two components: {round(expl_var['train'][:2].sum()*100, 2)} %")
print(end='\n\n')
# End tracking the process
sys.stdout.close()
sys.stdout = old
```
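A brief sketch of how the saved splits could be read back by downstream scripts; the paths mirror the `to_csv` calls above, reading with `header=None` matches the headerless files they produce, and the split name `'train'` is only an example.
```python
import os
import pandas as pd
from config.definitions import ROOT_DIR

# X_scaled / X_pca / y were written without header rows, X_org keeps its header
X_pca = pd.read_csv(os.path.join(ROOT_DIR, 'data', 'transformed', 'train', 'X_pca.csv'), header=None)
y = pd.read_csv(os.path.join(ROOT_DIR, 'data', 'transformed', 'train', 'y.csv'), header=None)
print(X_pca.shape, y.shape)
```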
#### File: scripts/base/_base_classifier.py
```python
from ._base import BaseModel
from ..utils import ModelNotFittedError
from ..metrics import accuracy_score
class BaseClassifier(BaseModel):
"""Implement methods and attributes common for all classifiers.
"""
def __init__(self):
super().__init__()
self.k = None
self.label = {}
self.intcode = {}
def classes(self):
"""Return unique classes from the given training dataset.
Returns
-------
Iterable
            Python iterable with all the unique classes.
Raises
------
:class:`ModelNotFittedError`
            If the model has not been fitted yet.
"""
if not self.is_fitted():
raise ModelNotFittedError(f'{self._model_name} is not fitted yet.')
return self.label.values()
def number_of_classes(self):
"""
Return number of unique classes based on the provided training dataset.
Returns
-------
int
            Number of unique classes.
Raises
------
:class:`ModelNotFittedError`
            If the model has not been fitted yet.
"""
if not self.is_fitted():
raise ModelNotFittedError(f'{self._model_name} is not fitted yet.')
return self.k
def score(self):
"""Return training accuracy score.
Returns
-------
float
Training accuracy score.
Raises
------
:class:`ModelNotFittedError`
            If the model has not been fitted yet.
"""
if not self.is_fitted():
raise ModelNotFittedError(f'{self._model_name} is not fitted yet.')
training_preds = self.predict(self.X)
return accuracy_score(self.y, training_preds)
```
#### File: scripts/metrics/_loss.py
```python
import numpy as np
def se(y, p):
"""Squared error.
    Squared error can be defined as follows:
.. math::
\sum_i^n (y_i - p_i)^2
where :math:`n` is the number of provided records.
Parameters
----------
y : :class:`ndarray`
One dimensional array with ground truth values.
p : :class:`ndarray`
One dimensional array with predicted values.
Returns
-------
float
        Squared error as described above.
Notes
-----
Usually used for regression problems.
"""
return np.sum((y - p)**2)
def ae(y, p):
"""Absolute error.
Absolute error can be defined as follows:
.. math::
\sum_i^n abs(y_i - p_i)
where :math:`n` is the number of provided records.
Parameters
----------
y : :class:`ndarray`
One dimensional array with ground truth values.
p : :class:`ndarray`
One dimensional array with predicted values.
Returns
-------
float
        Absolute error as described above.
"""
return np.abs(y-p).sum()
def zero_one_loss(y, p):
"""Number of incorrectly classified records.
Parameters
----------
y : :class:`ndarray`
One dimensional array with ground truth values.
p : :class:`ndarray`
One dimensional array with predicted values.
Returns
-------
int
Number of misclassified records.
"""
return np.sum(y != p)
def cross_entropy(y, p):
"""
Cross-entropy is a measure of the difference between two probability distributions for a given random variable or set of events.
(`source <https://machinelearningmastery.com/cross-entropy-for-machine-learning/>`_)
Parameters
----------
y : :class:`ndarray`
One dimensional array with ground truth values.
p : :class:`ndarray`
One dimensional array with predicted values.
Returns
-------
float
Cross entropy score.
"""
return -np.sum(np.log(p) * y)
```
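A minimal usage sketch of the four loss helpers; the import path is an assumption based on the repository layout shown here (`scripts/metrics/_loss.py`), and the numbers are made up for illustration.
```python
import numpy as np
from scripts.metrics._loss import se, ae, zero_one_loss, cross_entropy

y = np.array([1.0, 2.0, 3.0])
p = np.array([1.5, 2.0, 2.0])
print(se(y, p))   # (0.5)**2 + 0 + 1**2 = 1.25
print(ae(y, p))   # 0.5 + 0 + 1.0 = 1.5

print(zero_one_loss(np.array([0, 1, 1]), np.array([0, 0, 1])))  # 1 misclassified record

# cross_entropy expects one-hot ground truth and predicted class probabilities
y_onehot = np.array([0.0, 1.0, 0.0])
probs = np.array([0.1, 0.8, 0.1])
print(cross_entropy(y_onehot, probs))  # -log(0.8), roughly 0.223
```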
#### File: models/neural_net/_dense_layer.py
```python
import numpy as np
from ._autograd import Var
from ._helper import convert_to_var, softmax
class DenseLayer:
"""DenseLayer data structure that works as a building block for the Neural Network class.
Its main functionality is to take an input X and produce an output Y as a linear combination of the weights and the input (passed through a non linear activation function)
Parameters
----------
n_in : int
The number of inputs that the layer expects (e.g. number of
features for the first layer)
n_out : int
The number of output neurons
activation : str
The non-linear activation function that is used when forwarding
an input through the layer.
Possible values are 'relu', 'tanh' or 'softmax'
name : str, optional
Name of the layer
"""
def __init__(self, n_in, n_out, activation, name='DenseLayer'):
# name of dense layer
self.name = name
# randomly initialise weight matrix for dense layer
self.weights = np.random.rand(n_in, n_out)
self.weights = convert_to_var(self.weights)
self.bias = np.random.rand(n_out)
self.bias = convert_to_var(self.bias)
# vectorised activation functions
if activation == 'relu':
self.activation = np.vectorize(lambda x: x.relu())
elif activation == 'tanh':
self.activation = np.vectorize(lambda x: x.tanh())
elif activation == 'softmax':
self.activation = softmax # not working yet
else:
raise NotImplementedError("Cannot find Activation Function. Choose from ['relu', 'tanh']")
def neurons(self):
"""Return the number of neurons
Returns
-------
int
number of neurons
"""
return len(self.bias)
def dim(self):
"""Returns the dimensions of the weights matrix and the bias vector
Returns
-------
weights : tuple
weights dimensions
biases : tuple
bias dimensions
"""
return self.weights.shape, self.bias.shape
def parameters(self):
"""Returns all the vars of the layer (weights + biases) as a
single flat list
Returns
-------
1d array
n x 1 where n is a sum of the number of weights and the number of
biases
"""
return np.hstack((self.weights.flatten(), self.bias))
def num_params(self):
"""Returns the number of parameters
Returns
-------
int
number of parameters in the layer
"""
return len(self.parameters())
def forward(self, X):
"""Computes a forward pass through the layer
Parameters
----------
X : 2d array
n x n_in where n is the number of provided samples
Returns
-------
2d array
n x n_out array of Vars where n is the number of provided samples and n_out is the number of neurons in the layer.
"""
assert X.shape[1] == self.weights.shape[0], f'Mismatch in second X dimension;'\
f'tried {X.shape}x{self.weights.shape}'
#print(f'Passing through {self.name}: {X}x{self.weights} + {self.bias}')
return self.activation(X @ self.weights + self.bias)
def __repr__(self):
return f'{self.name}\t\t{self.weights.shape}\t\t{self.bias.shape}\t\t{self.num_params()}'
``` |
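A hedged usage sketch of `DenseLayer`; it assumes `convert_to_var` (imported at the top of this module) wraps a plain ndarray into `Var` objects the same way it does for the weights, and the shapes are illustrative only.
```python
import numpy as np

layer = DenseLayer(n_in=3, n_out=2, activation='relu', name='hidden_1')
X = convert_to_var(np.random.rand(5, 3))   # 5 samples, 3 features, wrapped as Var objects
Y = layer.forward(X)                       # (5, 2) array of Var objects
print(Y.shape)
print(layer.dim())         # ((3, 2), (2,))
print(layer.num_params())  # 3*2 weights + 2 biases = 8
```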
{
"source": "JonasMok/bats_wind_farms",
"score": 3
} |
#### File: JonasMok/bats_wind_farms/model_v17_merge_graph_v2.py
```python
import pandas as pd
import numpy as np
import os
from collections.abc import Iterable  # Iterable lives in collections.abc on newer Python versions
import time
start = time.time()
def flatten(lis):
'''convert nested list into one dimensional list'''
for item in lis:
if isinstance(item, Iterable) and not isinstance(item, str):
for x in flatten(item):
yield x
else:
yield item
def spectral_centroid(x, samplerate):
''' source: https://stackoverflow.com/questions/24354279/python-spectral-centroid-for-a-wav-file'''
magnitudes = np.abs(np.fft.rfft(x)) # magnitudes of positive frequencies
length = len(x)
freqs = np.abs(np.fft.fftfreq(length, 1.0/samplerate)[:length//2+1]) # positive frequencies
return np.sum(magnitudes*freqs) / np.sum(magnitudes) # return weighted mean
dt = pd.read_csv('PIPI_control.csv')
dt2 = pd.read_csv('PIPI_ground_1.csv')
dt3 = pd.read_csv('PIPI_ground_4.csv')
dt4 = pd.read_csv('PIPI_ground_5.csv')
dt5 = pd.read_csv('PIPI_ground_6.csv')
dt6 = pd.read_csv('PIPI_ground_7.csv')
dt7 = pd.read_csv('PIPI_nacelle_1.csv')
dt_v1 = pd.concat([dt, dt2, dt3, dt4, dt5, dt6, dt7], ignore_index=True)
files = dt_v1['filename'].values.tolist()
#replace the end of file name from 'wav' to ''
files_2=[]
for i in files:
new = i.replace('.wav','')
files_2.append(new)
dt_v1['filename_2'] = files_2
dt_v1 = dt_v1.set_index('filename_2')
#cleaning the columns to prepare to merge
dt_v1 = dt_v1.drop(dt_v1.columns[[0,1,2]], axis=1)
#print(dt_v1.head())
#print(dt_v1.tail())
#print(dt_v1.shape)
#print(len(files_2))
#import file
pipi = pd.read_csv('PIPI_PIPY_folder.csv')
#list of name files
pipi = pipi.drop(pipi.columns[[0]], axis=1)
pipi = pipi.rename({'OUT FILE': 'filename_2'}, axis=1) #rename the name of the column
pipi = pipi.set_index('filename_2')
#print(pipi.head())
pipi_2 = pd.merge(dt_v1, pipi, left_index =True, right_index=True)
#print(pipi_2.head())
#print(pipi_2)
#print(pipi_2.shape)
dt4 = pipi_2[['spec_centroid','PULSES','PIPI','PIPY','ratio']]
from sklearn.cluster import KMeans
# Create a KMeans instance with 3 clusters: model
model = KMeans(n_clusters=3, random_state=0)
# Fit model to points
model.fit(dt4)
# Determine the cluster labels of new_points: labels
labels = model.predict(dt4)
print(type(labels))
folder = list(flatten(pipi_2[['FOLDER']].to_numpy()))
df_test = pd.DataFrame({'labels': labels, 'turbine': folder})
ct = pd.crosstab(df_test['labels'], df_test['turbine'])
print(ct)
#ct.to_csv('PIPI_total_table_v2.csv')
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
# Assign the columns of new_points: xs and ys
y = dt4['ratio']
x = dt4['PULSES']
z = dt4['spec_centroid']
# Creating figure
fig = plt.figure(figsize = (16, 9))
ax = plt.axes(projection ="3d")
# Add x, y gridlines
ax.grid(b = True, color ='grey',
linestyle ='-.', linewidth = 0.3,
alpha = 0.2)
# Creating color map
#my_cmap = plt.get_cmap('hsv')
# Creating plot
sctt = ax.scatter3D(x, y, z, alpha = 0.8, c = labels, marker ='^')
plt.title("K-means scatter plot - 3 Clusters by ratio, matching and Spec. centroid")
ax.set_xlabel('X-axis - Pulses', fontweight ='bold')
ax.set_ylabel('Y-axis - Ratio (matching / Pulses)', fontweight ='bold')
ax.set_zlabel('Z-axis - Spectral Centroid', fontweight ='bold')
#fig.colorbar(sctt, ax = ax, shrink = 0.5, aspect = 5)
ax.legend(*sctt.legend_elements(),loc="lower right", title="Clusters")
# show plot
plt.show()
print("it took", time.time() - start, "seconds.")
``` |
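A quick sanity check of the `spectral_centroid` helper defined above (it is not actually called in this script): for a pure tone the weighted mean frequency should land near the tone's frequency. The 1 kHz / 44.1 kHz values are arbitrary.
```python
import numpy as np

samplerate = 44100
t = np.arange(0, 1.0, 1.0 / samplerate)
tone = np.sin(2 * np.pi * 1000 * t)          # 1 kHz sine wave
print(spectral_centroid(tone, samplerate))   # roughly 1000 Hz
```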
{
"source": "jonasmotiejauskas/NLP-task-effort",
"score": 2
} |
#### File: jonasmotiejauskas/NLP-task-effort/bert.py
```python
import tensorflow as tf
import torch
device = torch.device("cuda")
sentences = data.summary.values
labels = data.hrsactual.values
from transformers import BertTokenizer
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True)
max_len = 0
for sent in sentences:
input_ids = tokenizer.encode(sent, add_special_tokens=True)
max_len = max(max_len, len(input_ids))
input_ids = []
attention_masks = []
for sent in sentences:
encoded_dict = tokenizer.encode_plus(
sent,
add_special_tokens = True,
max_length = 64,
pad_to_max_length = True,
return_attention_mask = True,
return_tensors = 'pt',
)
input_ids.append(encoded_dict['input_ids'])
attention_masks.append(encoded_dict['attention_mask'])
input_ids = torch.cat(input_ids, dim=0)
attention_masks = torch.cat(attention_masks, dim=0)
labels = torch.tensor(labels)
from torch.utils.data import TensorDataset, random_split
dataset = TensorDataset(input_ids, attention_masks, labels)
train_size = int(0.9 * len(dataset))
val_size = len(dataset) - train_size
train_dataset, val_dataset = random_split(dataset, [train_size, val_size])
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler
batch_size = 32
train_dataloader = DataLoader(
train_dataset,
sampler = RandomSampler(train_dataset),
batch_size = batch_size
)
validation_dataloader = DataLoader(
val_dataset,
sampler = SequentialSampler(val_dataset),
batch_size = batch_size
)
from transformers import BertForSequenceClassification, AdamW, BertConfig
model = BertForSequenceClassification.from_pretrained(
"bert-base-uncased",
    num_labels = 3, # in some cases 4
output_attentions = False,
output_hidden_states = False,
)
model.cuda()
params = list(model.named_parameters())
for p in params[0:5]:
print("{:<55} {:>12}".format(p[0], str(tuple(p[1].size()))))
for p in params[5:21]:
print("{:<55} {:>12}".format(p[0], str(tuple(p[1].size()))))
for p in params[-4:]:
print("{:<55} {:>12}".format(p[0], str(tuple(p[1].size()))))
optimizer = AdamW(model.parameters(),
lr = 2e-5,
eps = 1e-8
)
from transformers import get_linear_schedule_with_warmup
epochs = 4
total_steps = len(train_dataloader) * epochs
scheduler = get_linear_schedule_with_warmup(optimizer,
num_warmup_steps = 0,
num_training_steps = total_steps)
import numpy as np
def flat_accuracy(preds, labels):
pred_flat = np.argmax(preds, axis=1).flatten()
labels_flat = labels.flatten()
return np.sum(pred_flat == labels_flat) / len(labels_flat)
import time
import datetime
def format_time(elapsed):
elapsed_rounded = int(round((elapsed)))
return str(datetime.timedelta(seconds=elapsed_rounded))
import random
import numpy as np
seed_val = 42
random.seed(seed_val)
np.random.seed(seed_val)
torch.manual_seed(seed_val)
torch.cuda.manual_seed_all(seed_val)
training_stats = []
total_t0 = time.time()
for epoch_i in range(0, epochs):
t0 = time.time()
total_train_loss = 0
model.train()
for step, batch in enumerate(train_dataloader):
if step % 40 == 0 and not step == 0:
elapsed = format_time(time.time() - t0)
b_input_ids = batch[0].to(device)
b_input_mask = batch[1].to(device)
b_labels = batch[2].to(device)
model.zero_grad()
loss, logits = model(b_input_ids,
token_type_ids=None,
attention_mask=b_input_mask,
labels=b_labels)
total_train_loss += loss.item()
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
optimizer.step()
scheduler.step()
avg_train_loss = total_train_loss / len(train_dataloader)
training_time = format_time(time.time() - t0)
print("")
print(" Average training loss: {0:.2f}".format(avg_train_loss))
    print(" Training epoch took: {:}".format(training_time))
t0 = time.time()
model.eval()
total_eval_accuracy = 0
total_eval_loss = 0
nb_eval_steps = 0
for batch in validation_dataloader:
b_input_ids = batch[0].to(device)
b_input_mask = batch[1].to(device)
b_labels = batch[2].to(device)
with torch.no_grad():
(loss, logits) = model(b_input_ids,
token_type_ids=None,
attention_mask=b_input_mask,
labels=b_labels)
total_eval_loss += loss.item()
logits = logits.detach().cpu().numpy()
label_ids = b_labels.to('cpu').numpy()
total_eval_accuracy += flat_accuracy(logits, label_ids)
avg_val_accuracy = total_eval_accuracy / len(validation_dataloader)
print(" Accuracy: {0:.2f}".format(avg_val_accuracy))
avg_val_loss = total_eval_loss / len(validation_dataloader)
validation_time = format_time(time.time() - t0)
print(" Validation Loss: {0:.2f}".format(avg_val_loss))
print(" Validation took: {:}".format(validation_time))
training_stats.append(
{
'epoch': epoch_i + 1,
'Training Loss': avg_train_loss,
'Valid. Loss': avg_val_loss,
'Valid. Accur.': avg_val_accuracy,
'Training Time': training_time,
'Validation Time': validation_time
}
)
```
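A tiny check of the `flat_accuracy` helper defined above; the logits and labels are made up for illustration.
```python
import numpy as np

logits = np.array([[2.0, 0.1, 0.3],
                   [0.2, 1.5, 0.1]])   # argmax per row -> [0, 1]
labels = np.array([0, 2])
print(flat_accuracy(logits, labels))   # 1 of 2 correct -> 0.5
```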
#### File: jonasmotiejauskas/NLP-task-effort/xlnet.py
```python
# !pip install transformers  (notebook shell command; install the package separately when running this as a plain script)
import tensorflow as tf
import torch
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
from keras.preprocessing.sequence import pad_sequences
from sklearn.model_selection import train_test_split
from transformers import XLNetModel, XLNetTokenizer, XLNetForSequenceClassification
from transformers import AdamW
from tqdm import tqdm, trange
import pandas as pd
import io
import numpy as np
import matplotlib.pyplot as plt
device = torch.device("cuda")
tokenizer = XLNetTokenizer.from_pretrained('xlnet-base-cased', do_lower_case=True)
tokenized_texts = [tokenizer.tokenize(sent) for sent in sentences]
attention_masks = []
for seq in input_ids:
seq_mask = [float(i>0) for i in seq]
attention_masks.append(seq_mask)
train_inputs, validation_inputs, train_labels, validation_labels = train_test_split(input_ids, labels, random_state=2018, test_size=0.1)
train_masks, validation_masks, _, _ = train_test_split(attention_masks, input_ids, random_state=2018, test_size=0.1)
train_inputs = torch.tensor(train_inputs)
validation_inputs = torch.tensor(validation_inputs)
train_labels = torch.tensor(train_labels)
validation_labels = torch.tensor(validation_labels)
train_masks = torch.tensor(train_masks)
validation_masks = torch.tensor(validation_masks)
batch_size = 32
train_data = TensorDataset(train_inputs, train_masks, train_labels)
train_sampler = RandomSampler(train_data)
train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=batch_size)
validation_data = TensorDataset(validation_inputs, validation_masks, validation_labels)
validation_sampler = SequentialSampler(validation_data)
validation_dataloader = DataLoader(validation_data, sampler=validation_sampler, batch_size=batch_size)
model = XLNetForSequenceClassification.from_pretrained("xlnet-base-cased", num_labels=3) # can also be 4 classes
model.cuda()
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'gamma', 'beta']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
'weight_decay_rate': 0.01},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
'weight_decay_rate': 0.0}
]
optimizer = AdamW(optimizer_grouped_parameters, lr=2e-5)
def flat_accuracy(preds, labels):
pred_flat = np.argmax(preds, axis=1).flatten()
labels_flat = labels.flatten()
return np.sum(pred_flat == labels_flat) / len(labels_flat)
train_loss_set = []
epochs = 4
for _ in trange(epochs, desc="Epoch"):
model.train()
tr_loss = 0
nb_tr_examples, nb_tr_steps = 0, 0
for step, batch in enumerate(train_dataloader):
batch = tuple(t.to(device) for t in batch)
b_input_ids, b_input_mask, b_labels = batch
optimizer.zero_grad()
outputs = model(b_input_ids, token_type_ids=None, attention_mask=b_input_mask, labels=b_labels)
loss = outputs[0]
logits = outputs[1]
train_loss_set.append(loss.item())
loss.backward()
optimizer.step()
tr_loss += loss.item()
nb_tr_examples += b_input_ids.size(0)
nb_tr_steps += 1
print("Train loss: {}".format(tr_loss/nb_tr_steps))
model.eval()
eval_loss, eval_accuracy = 0, 0
nb_eval_steps, nb_eval_examples = 0, 0
for batch in validation_dataloader:
batch = tuple(t.to(device) for t in batch)
b_input_ids, b_input_mask, b_labels = batch
with torch.no_grad():
output = model(b_input_ids, token_type_ids=None, attention_mask=b_input_mask)
logits = output[0]
logits = logits.detach().cpu().numpy()
label_ids = b_labels.to('cpu').numpy()
tmp_eval_accuracy = flat_accuracy(logits, label_ids)
eval_accuracy += tmp_eval_accuracy
nb_eval_steps += 1
print("Validation Accuracy: {}".format(eval_accuracy/nb_eval_steps))
``` |
{
"source": "Jonasmpi/py-solc",
"score": 2
} |
#### File: core/compilation/test_compile_from_source_code.py
```python
import pytest
from solc import compile_source
pytestmark = pytest.mark.usefixtures('supported_solc_version')
def test_source_code_compilation(FOO_SOURCE, is_new_key_format):
output = compile_source(FOO_SOURCE, optimize=True)
assert output
if is_new_key_format:
        contract_key = '<stdin>:Foo'
    else:
        contract_key = 'Foo'
    assert contract_key in output
    foo_contract_data = output[contract_key]
assert 'bin' in foo_contract_data
assert 'bin-runtime' in foo_contract_data
```
#### File: core/utility/test_is_executable_available.py
```python
from solc.utils.filesystem import (
is_executable_available,
)
def test_ls_is_available():
assert is_executable_available('ls') is True
def test_for_unavailable_executable():
assert is_executable_available('there_should_not_be_an_executable_by_this_name') is False
```
#### File: core/utility/test_solc_version.py
```python
from solc import get_solc_version
import semantic_version
def test_get_solc_version():
version = get_solc_version()
assert isinstance(version, semantic_version.Version)
``` |
{
"source": "jonasmr/voltron",
"score": 2
} |
#### File: voltron/examples/command.py
```python
import blessed
import voltron
from voltron.plugin import CommandPlugin
from voltron.command import VoltronCommand
class ExampleCommand(VoltronCommand):
def invoke(self, *args):
regs = voltron.debugger.registers()
reg_list = ['rax','rbx','rcx','rdx','rbp','rsp','rdi','rsi','rip',
'r8','r9','r10','r11','r12','r13','r14','r15']
for name in reg_list:
print("{t.bold}{:3} {t.normal}{:0=16X}".format(name, regs[name], t=blessed.Terminal()))
class ExampleCommandPlugin(CommandPlugin):
name = 'example'
command_class = ExampleCommand
```
#### File: voltron/tests/api_message_tests.py
```python
import time
from nose.tools import *
import voltron
from voltron.core import *
from voltron.api import *
from voltron.plugin import *
log = logging.getLogger('tests')
class APITestRequest(APIRequest):
_fields = {'target_id': False, 'address': False, 'count': True}
_types = {'target_id': int, 'address': int, 'count': int}
target_id = 0
address = None
count = None
class APITestResponse(APISuccessResponse):
_fields = {'disassembly': True}
class APITestPlugin(APIPlugin):
request = 'test'
request_class = APITestRequest
response_class = APITestResponse
def setup():
voltron.setup_env()
def teardown():
time.sleep(2)
def test_parent_message_validation_fail():
msg = APIMessage()
exception = False
try:
msg.validate()
except MissingFieldError:
exception = True
assert exception
def test_parent_request_validation_fail():
msg = APIRequest()
exception = False
try:
msg.validate()
except MissingFieldError:
exception = True
assert exception
def test_parent_request_type():
msg = APIRequest()
assert msg.type == 'request'
def test_parent_request_request():
msg = APIRequest()
assert msg.request is None
def test_parent_response_validation_fail():
msg = APIResponse()
exception = False
try:
msg.validate()
except MissingFieldError:
exception = True
assert exception
def test_parent_response_type():
msg = APIResponse()
assert msg.type == 'response'
def test_parent_response_status():
msg = APIResponse()
assert msg.status is None
def test_success_response_validation_succeed():
msg = APISuccessResponse()
exception = False
try:
msg.validate()
except MissingFieldError:
exception = True
assert not exception
def test_success_response_type():
msg = APISuccessResponse()
assert msg.type == 'response'
def test_success_response_status():
msg = APISuccessResponse()
assert msg.status == 'success'
def test_error_response_validation_fail():
msg = APIErrorResponse()
exception = False
try:
msg.validate()
except MissingFieldError:
exception = True
assert exception
def test_error_response_type():
msg = APIErrorResponse()
assert msg.type == 'response'
def test_error_response_status():
msg = APIErrorResponse()
assert msg.status == 'error'
def test_invalid_request_error_response_validation_succeed():
msg = APIInvalidRequestErrorResponse()
exception = False
try:
msg.validate()
except MissingFieldError:
exception = True
assert not exception
def test_invalid_request_error_response_type():
msg = APIInvalidRequestErrorResponse()
assert msg.type == 'response'
def test_invalid_request_error_response_status():
msg = APIInvalidRequestErrorResponse()
assert msg.status == 'error'
def test_test_request_validation_fail():
msg = APITestRequest()
exception = False
try:
msg.validate()
except MissingFieldError:
exception = True
assert exception
def test_test_request_validation_fail_with_param():
msg = APITestRequest(target_id=0)
exception = False
try:
msg.validate()
except MissingFieldError:
exception = True
assert exception
def test_test_request_validation_succeed_with_param():
msg = api_request('test', count=16)
exception = False
try:
msg.validate()
except MissingFieldError:
exception = True
assert not exception
assert msg.count == 16
def test_test_request_validation_succeed_with_data():
msg = APITestRequest('{"data":{"count":16}}')
exception = False
try:
msg.validate()
except MissingFieldError as e:
exception = True
assert not exception
assert msg.count == 16
def test_test_request_validation_succeed_by_assign():
msg = APITestRequest()
msg.count = 16
exception = False
try:
msg.validate()
except MissingFieldError as e:
exception = True
assert not exception
assert msg.count == 16
def test_test_request_string():
msg = APITestRequest(count=16)
assert json.loads(str(msg)) == {"request": "test", "type": "request", "block": False, "timeout": 10,
"data": {"count": 16, "target_id": 0, "address": None}}
def test_test_response_validation_fail():
msg = APITestResponse()
exception = False
try:
msg.validate()
except MissingFieldError as e:
exception = True
assert exception
def test_test_response_validation_fail_with_param():
msg = APITestResponse(thing=1)
exception = False
try:
msg.validate()
except MissingFieldError as e:
exception = True
assert exception
def test_test_response_validation_succeed_with_param():
msg = APITestResponse(disassembly="xxx")
exception = False
try:
msg.validate()
except MissingFieldError as e:
exception = True
assert not exception
def test_test_response_validation_succeed_with_data():
msg = APITestResponse('{"data":{"disassembly":"xxx"}}')
exception = False
try:
msg.validate()
except MissingFieldError as e:
exception = True
assert not exception
def test_test_response_validation_succeed_by_assign():
msg = APITestResponse()
msg.disassembly = "xxx"
exception = False
try:
msg.validate()
except MissingFieldError as e:
print(str(e))
exception = True
assert not exception
def test_test_response_string():
msg = APITestResponse(disassembly='xxx')
assert json.loads(str(msg)) == {"status": "success", "type": "response", "data": {"disassembly": "xxx"}}
class APIEncodeMsg(APIMessage):
_fields = {'enc': False}
_encode_fields = ['enc']
def test_encode_fields():
msg = APIEncodeMsg()
msg.enc = six.b('').join([six.int2byte(x) for x in range(0x0, 0xff)])
assert msg.to_dict()['data']['enc'] == six.text_type('<KEY>+')
assert msg.to_json() == '{"data": {"enc": "<KEY>"}, "type": null}'
msg2 = APIEncodeMsg(data=msg.to_json())
assert msg.to_dict() == msg2.to_dict()
assert msg.to_json() == msg2.to_json()
assert msg2.enc == msg.enc
msg3 = APIEncodeMsg()
msg3.enc = six.u('xxxx')
assert msg3.to_dict() == {'data': {'enc': 'eHh4eA=='}, 'type': None}
msg3.enc = six.b('xxxx')
assert msg3.to_dict() == {'data': {'enc': 'eHh4eA=='}, 'type': None}
msg4 = APIEncodeMsg()
msg4.from_dict(msg.to_dict())
assert msg4.to_dict() == msg.to_dict()
```
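For reference, the `'eHh4eA=='` literal asserted in `test_encode_fields` above is simply the base64 encoding of `b'xxxx'`, which is the transformation the `_encode_fields` tests exercise:
```python
import base64
print(base64.b64encode(b'xxxx').decode())  # eHh4eA==
```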
#### File: voltron/tests/frontend_tests.py
```python
import tempfile
import sys
import json
import time
import logging
import subprocess
import time
from mock import Mock
from nose.tools import *
import voltron
from voltron.core import *
from voltron.api import *
from voltron.plugin import *
import platform
if platform.system() == 'Darwin':
sys.path.append("/Applications/Xcode.app/Contents/SharedFrameworks/LLDB.framework/Resources/Python")
try:
import lldb
from .common import *
log = logging.getLogger("tests")
def setup():
global server, client, target, pm, adaptor, methods
log.info("setting up API tests")
# set up voltron
voltron.setup_env()
pm = PluginManager()
plugin = pm.debugger_plugin_for_host('lldb')
adaptor = plugin.adaptor_class()
voltron.debugger = adaptor
# start up a voltron server
server = Server()
# import pdb;pdb.set_trace()
server.start()
time.sleep(0.1)
# set up client
client = Client()
# compile and load the test inferior
subprocess.call("cc -o tests/inferior tests/inferior.c", shell=True)
target = adaptor.host.CreateTargetWithFileAndArch("tests/inferior", lldb.LLDB_ARCH_DEFAULT)
main_bp = target.BreakpointCreateByName("main", target.GetExecutable().GetFilename())
def teardown():
server.stop()
time.sleep(5)
def test_state_no_target():
req = api_request('state')
res = client.send_request(req)
assert res.is_error
assert res.code == 4101
def test_state_stopped():
process = target.LaunchSimple(None, None, os.getcwd())
req = api_request('state')
res = client.send_request(req)
assert res.status == 'success'
assert res.state == "stopped"
target.process.Destroy()
def test_targets():
req = api_request('targets')
res = client.send_request(req)
assert res.status == 'success'
t = res.targets[0]
assert t["id"] == 0
assert t["arch"] == "x86_64"
assert t["file"].endswith("inferior")
def test_registers():
process = target.LaunchSimple(None, None, os.getcwd())
req = api_request('registers')
res = client.send_request(req)
assert res.status == 'success'
assert len(res.registers) > 0
assert res.registers['rip'] != 0
target.process.Destroy()
def test_memory():
process = target.LaunchSimple(None, None, os.getcwd())
res = client.perform_request('registers')
rsp = res.registers['rsp']
res = client.perform_request('memory', address=rsp, length=0x40)
assert res.status == 'success'
assert len(res.memory) > 0
res = client.perform_request('memory', address=rsp, length=0x40, deref=True)
assert res.status == 'success'
assert len(res.deref) > 0
target.process.Destroy()
def test_stack():
process = target.LaunchSimple(None, None, os.getcwd())
req = api_request('stack', length=0x40)
res = client.send_request(req)
assert res.status == 'success'
assert len(res.memory) > 0
target.process.Destroy()
def test_command():
process = target.LaunchSimple(None, None, os.getcwd())
req = api_request('command', command="reg read")
res = client.send_request(req)
assert res.status == 'success'
assert len(res.output) > 0
assert 'rax' in res.output
target.process.Destroy()
def test_disassemble():
process = target.LaunchSimple(None, None, os.getcwd())
req = api_request('disassemble', count=16)
res = client.send_request(req)
assert res.status == 'success'
assert len(res.disassembly) > 0
assert 'push' in res.disassembly
req = api_request('disassemble', count=16, use_capstone=True)
res = client.send_request(req)
assert res.status == 'success'
assert len(res.disassembly) > 0
assert 'push' in res.disassembly
target.process.Destroy()
def test_dereference():
process = target.LaunchSimple(None, None, os.getcwd())
res = client.perform_request('registers')
res = client.perform_request('dereference', pointer=res.registers['rsp'])
assert res.status == 'success'
assert res.output[0][0] == 'pointer'
assert res.output[-1][1] == 'start + 0x1'
target.process.Destroy()
def test_breakpoints():
process = target.LaunchSimple(None, None, os.getcwd())
res = client.perform_request('breakpoints')
assert res.status == 'success'
assert len(res.breakpoints) == 1
assert res.breakpoints[0]['one_shot'] == False
assert res.breakpoints[0]['enabled']
assert res.breakpoints[0]['id'] == 1
assert res.breakpoints[0]['hit_count'] > 0
assert res.breakpoints[0]['locations'][0]['name'] == "inferior`main"
target.process.Destroy()
def test_multi_request():
process = target.LaunchSimple(None, None, os.getcwd())
reg_res, dis_res = client.send_requests(api_request('registers'),
api_request('disassemble', count=16))
assert reg_res.status == 'success'
assert len(reg_res.registers) > 0
assert reg_res.registers['rip'] != 0
assert dis_res.status == 'success'
assert len(dis_res.disassembly) > 0
assert 'push' in dis_res.disassembly
target.process.Destroy()
except:
print("No LLDB")
```
#### File: voltron/tests/gdb_cli_tests.py
```python
from __future__ import print_function
import tempfile
import sys
import json
import time
import logging
import pexpect
import os
import six
from mock import Mock
from nose.tools import *
import voltron
from voltron.core import *
from voltron.api import *
from voltron.plugin import PluginManager, DebuggerAdaptorPlugin
from .common import *
log = logging.getLogger('tests')
p = None
client = None
def setup():
global p, client, pm
log.info("setting up GDB CLI tests")
voltron.setup_env()
# compile test inferior
pexpect.run("cc -o tests/inferior tests/inferior.c")
# start debugger
start_debugger()
def teardown():
read_data()
p.terminate(True)
def start_debugger(do_break=True):
global p, client
p = pexpect.spawn('gdb')
p.sendline("python import sys;sys.path.append('/home/travis/virtualenv/python3.5.0/lib/python3.5/site-packages')")
p.sendline("python import sys;sys.path.append('/home/travis/virtualenv/python3.4.3/lib/python3.4/site-packages')")
p.sendline("python import sys;sys.path.append('/home/travis/virtualenv/python3.3.6/lib/python3.3/site-packages')")
p.sendline("python import sys;sys.path.append('/home/travis/virtualenv/python2.7.10/lib/python2.7/site-packages')")
p.sendline("source voltron/entry.py")
p.sendline("file tests/inferior")
p.sendline("set disassembly-flavor intel")
p.sendline("voltron init")
if do_break:
p.sendline("b main")
p.sendline("run loop")
read_data()
time.sleep(5)
client = Client()
def stop_debugger():
# p.sendline("kill")
read_data()
p.terminate(True)
def read_data():
try:
while True:
data = p.read_nonblocking(size=64, timeout=1)
print(data.decode('UTF-8'), end='')
except:
pass
def restart_debugger(do_break=True):
stop_debugger()
start_debugger(do_break)
def test_bad_request():
req = client.create_request('version')
req.request = 'xxx'
res = client.send_request(req)
assert res.is_error
assert res.code == 0x1002
def test_version():
req = client.create_request('version')
res = client.send_request(req)
assert res.api_version == 1.1
assert 'gdb' in res.host_version
def test_registers():
global registers
read_data()
res = client.perform_request('registers')
registers = res.registers
assert res.status == 'success'
assert len(registers) > 0
assert registers['rip'] != 0
def test_memory():
res = client.perform_request('memory', address=registers['rip'], length=0x40)
assert res.status == 'success'
assert len(res.memory) > 0
def test_state_stopped():
res = client.perform_request('state')
assert res.is_success
assert res.state == "stopped"
def test_targets():
res = client.perform_request('targets')
assert res.is_success
assert res.targets[0]['state'] == "stopped"
assert res.targets[0]['arch'] == "x86_64"
assert res.targets[0]['id'] == 0
assert res.targets[0]['file'].endswith('tests/inferior')
def test_stack():
res = client.perform_request('stack', length=0x40)
assert res.status == 'success'
assert len(res.memory) > 0
def test_command():
res = client.perform_request('command', command="info reg")
assert res.status == 'success'
assert len(res.output) > 0
assert 'rax' in res.output
def test_disassemble():
res = client.perform_request('disassemble', count=0x20)
assert res.status == 'success'
assert len(res.disassembly) > 0
assert 'DWORD' in res.disassembly
def test_backtrace():
res = client.perform_request('backtrace')
print(res)
assert res.is_success
assert res.frames[0]['name'] == "main"
assert res.frames[0]['index'] == 0
# def test_write_memory():
# value = six.b("AAAAAAAA")
# res = client.perform_request('write_memory', address=registers['rsp'], value=value)
# assert res.is_success
# res = client.perform_request('memory', address=registers['rsp'], length=len(value))
# assert res.memory == value
```
#### File: plugins/api/plugins.py
```python
import voltron
import voltron.api
from voltron.api import *
from scruffy.plugin import Plugin
class APIPluginsRequest(APIRequest):
"""
API plugins request.
{
"type": "request",
"request": "plugins"
}
"""
@server_side
def dispatch(self):
res = APIPluginsResponse()
return res
class APIPluginsResponse(APISuccessResponse):
"""
API plugins response.
{
"type": "response",
"status": "success",
"data": {
"plugins": {
"api": {
"version": ["api_version", "host_version", "capabilities"]
...
},
"debugger": {
...
},
...
}
}
}
"""
_fields = {
'plugins': True
}
def __init__(self, *args, **kwargs):
super(APIPluginsResponse, self).__init__(*args, **kwargs)
self.plugins = {
'api': {n: {'request': p.request_class._fields, 'response': p.response_class._fields}
                    for (n, p) in voltron.plugin.pm.api_plugins.items()},
'debugger': [n for n in voltron.plugin.pm.debugger_plugins],
'view': [n for n in voltron.plugin.pm.view_plugins],
'command': [n for n in voltron.plugin.pm.command_plugins],
'web': [n for n in voltron.plugin.pm.web_plugins],
}
class APIPluginsPlugin(APIPlugin):
request = 'plugins'
request_class = APIPluginsRequest
response_class = APIPluginsResponse
```
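A hedged client-side sketch of issuing this request, mirroring the `Client().perform_request(...)` pattern used in the test modules above; it assumes a voltron server is already running in a debugger session, attribute access on the response follows the pattern used elsewhere in this document, and the printed plugin names are only examples.
```python
from voltron.core import Client

client = Client()
res = client.perform_request('plugins')
if res.is_success:
    print(res.plugins['debugger'])   # e.g. ['lldb', 'gdb', 'windbg', ...]
    print(sorted(res.plugins['api']))
```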
#### File: plugins/api/write_memory.py
```python
import voltron
import logging
import six
import struct
from voltron.api import *
log = logging.getLogger('api')
class APIWriteMemoryRequest(APIRequest):
"""
API write memory request.
{
"type": "request",
"request": "write_memory",
"data": {
"target_id":0,
"address": 0x12341234,
"data": "\xcc"
}
}
`target_id` is optional. If not present, the currently selected target
will be used.
`address` is the address at which to start writing.
`data` is the data to write.
"""
_fields = {
'target_id': False,
'address': True,
'value': True
}
_encode_fields = ['value']
target_id = 0
@server_side
def dispatch(self):
try:
target = voltron.debugger.target(self.target_id)
voltron.debugger.write_memory(address=int(self.address), data=self.value, target_id=int(self.target_id))
res = APISuccessResponse()
except TargetBusyException:
res = APITargetBusyErrorResponse()
except NoSuchTargetException:
res = APINoSuchTargetErrorResponse()
except Exception as e:
msg = "Exception writing memory in debugger: {}".format(repr(e))
log.exception(msg)
res = APIGenericErrorResponse(msg)
return res
class APIWriteMemoryPlugin(APIPlugin):
request = 'write_memory'
request_class = APIWriteMemoryRequest
response_class = APISuccessResponse
```
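A hedged usage sketch based on the commented-out round-trip test in `gdb_cli_tests.py` above: write a marker to the stack, then read it back through the `memory` request. It assumes a connected `Client` and a `registers` dict obtained as in those tests.
```python
import six

value = six.b("AAAAAAAA")
res = client.perform_request('write_memory', address=registers['rsp'], value=value)
assert res.is_success
res = client.perform_request('memory', address=registers['rsp'], length=len(value))
assert res.memory == value
```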
#### File: plugins/debugger/dbg_windbg.py
```python
from __future__ import print_function
import logging
import threading
import array
from voltron.api import *
from voltron.plugin import *
from voltron.dbg import *
try:
in_windbg = False
import pykd
try:
import vtrace
except:
in_windbg = True
except ImportError:
pass
log = logging.getLogger('debugger')
if in_windbg:
class WinDbgAdaptor(DebuggerAdaptor):
sizes = {
'x86': 4,
'x86_64': 8,
}
max_deref = 24
max_string = 128
def __init__(self, *args, **kwargs):
self.listeners = []
self.host_lock = threading.RLock()
self.host = pykd
def version(self):
"""
Get the debugger's version.
Returns a string containing the debugger's version
(e.g. 'Microsoft (R) Windows Debugger Version whatever, pykd 0.3.0.38')
"""
try:
[windbg] = [line for line in pykd.dbgCommand('version').split('\n') if 'Microsoft (R) Windows Debugger Version' in line]
except:
windbg = 'WinDbg <unknown>'
return '{}, {}'.format(windbg, 'pykd {}'.format(pykd.version))
def _target(self, target_id=0):
"""
Return information about the specified target.
Returns data in the following structure:
{
"id": 0, # ID that can be used in other funcs
"file": "/bin/ls", # target's binary file
"arch": "x86_64", # target's architecture
"state: "stopped" # state
}
"""
# get target properties
d = {}
d["id"] = pykd.getCurrentProcessId()
d["num"] = d['id']
# get target state
d["state"] = self._state()
d["file"] = pykd.getProcessExeName()
# get arch
d["arch"] = self.get_arch()
d['byte_order'] = self.get_byte_order()
d['addr_size'] = self.get_addr_size()
return d
@lock_host
def target(self, target_id=0):
"""
Return information about the current inferior.
We only support querying the current inferior with WinDbg.
`target_id` is ignored.
"""
return self._target()
@lock_host
def targets(self, target_ids=None):
"""
Return information about the debugger's current targets.
`target_ids` is ignored. Only the current target is returned. This
method is only implemented to maintain API compatibility with the
LLDBAdaptor.
"""
return [self._target()]
@validate_target
@lock_host
def state(self, target_id=0):
"""
Get the state of a given target.
"""
return self._state()
@validate_busy
@validate_target
@lock_host
def registers(self, target_id=0, thread_id=None, registers=[]):
"""
Get the register values for a given target/thread.
"""
arch = self.get_arch()
# if we got 'sp' or 'pc' in registers, change it to whatever the right name is for the current arch
if arch in self.reg_names:
if 'pc' in registers:
registers.remove('pc')
registers.append(self.reg_names[arch]['pc'])
if 'sp' in registers:
registers.remove('sp')
registers.append(self.reg_names[arch]['sp'])
else:
                raise Exception("Unsupported architecture: {}".format(arch))
# get registers
if registers != []:
vals = {}
for reg in registers:
vals[reg] = pykd.reg(reg)
else:
log.debug('Getting registers for arch {}'.format(arch))
if arch == "x86_64":
reg_names = ['rax', 'rbx', 'rcx', 'rdx', 'rbp', 'rsp', 'rdi', 'rsi', 'rip', 'r8', 'r9', 'r10',
'r11', 'r12', 'r13', 'r14', 'r15', 'cs', 'ds', 'es', 'fs', 'gs', 'ss']
elif arch == "x86":
reg_names = ['eax', 'ebx', 'ecx', 'edx', 'ebp', 'esp', 'edi', 'esi', 'eip', 'cs', 'ds', 'es',
'fs', 'gs', 'ss']
else:
raise UnknownArchitectureException()
vals = {}
for reg in reg_names:
try:
vals[reg] = pykd.reg(reg)
except:
log.debug('Failed getting reg: ' + reg)
vals[reg] = 'N/A'
# Get flags
try:
                    vals['rflags'] = pykd.reg('efl')  # 'efl' is the WinDbg flags pseudo-register
except:
log.debug('Failed getting reg: eflags')
vals['rflags'] = 'N/A'
# Get SSE registers
try:
vals.update(self.get_registers_sse(16))
except:
log.exception("Failed to get SSE registers")
# Get FPU registers
try:
vals.update(self.get_registers_fpu())
except:
log.exception("Failed to get FPU registers")
return vals
@validate_busy
@validate_target
@lock_host
def stack_pointer(self, target_id=0, thread_id=None):
"""
Get the value of the stack pointer register.
"""
arch = self.get_arch()
if arch in self.reg_names:
sp_name = self.reg_names[arch]['sp']
sp = pykd.reg(sp_name)
else:
raise UnknownArchitectureException()
return sp_name, sp
@validate_busy
@validate_target
@lock_host
def program_counter(self, target_id=0, thread_id=None):
"""
Get the value of the program counter register.
"""
arch = self.get_arch()
if arch in self.reg_names:
pc_name = self.reg_names[arch]['pc']
pc = pykd.reg(pc_name)
else:
raise UnknownArchitectureException()
return pc_name, pc
@validate_busy
@validate_target
@lock_host
def memory(self, address, length, target_id=0):
"""
Get the register values for .
`address` is the address at which to start reading
`length` is the number of bytes to read
"""
# read memory
log.debug('Reading 0x{:x} bytes of memory at 0x{:x}'.format(length, address))
memory = array.array('B', pykd.loadBytes(address, length)).tostring()
return memory
@validate_busy
@validate_target
@lock_host
def stack(self, length, target_id=0, thread_id=None):
"""
Get the register values for .
`length` is the number of bytes to read
`target_id` is a target ID (or None for the first target)
`thread_id` is a thread ID (or None for the selected thread)
"""
# get the stack pointer
sp_name, sp = self.stack_pointer(target_id=target_id, thread_id=thread_id)
# read memory
memory = self.memory(sp, length, target_id=target_id)
return memory
@validate_busy
@validate_target
@lock_host
def disassemble(self, target_id=0, address=None, count=16):
"""
Get a disassembly of the instructions at the given address.
`address` is the address at which to disassemble. If None, the
current program counter is used.
`count` is the number of instructions to disassemble.
"""
# make sure we have an address
if address is None:
pc_name, address = self.program_counter(target_id=target_id)
# disassemble
output = pykd.dbgCommand('u 0x{:x} l{}'.format(address, count))
return output
@validate_busy
@validate_target
@lock_host
def dereference(self, pointer, target_id=0):
"""
Recursively dereference a pointer for display
"""
fmt = ('<' if self.get_byte_order() == 'little' else '>') + {2: 'H', 4: 'L', 8: 'Q'}[self.get_addr_size()]
addr = pointer
chain = []
# recursively dereference
for i in range(0, self.max_deref):
try:
[ptr] = pykd.loadPtrs(addr, 1)
if ptr in chain:
break
chain.append(('pointer', addr))
addr = ptr
except:
log.exception("Dereferencing pointer 0x{:X}".format(addr))
break
# get some info for the last pointer
# first try to resolve a symbol context for the address
if len(chain):
p, addr = chain[-1]
output = pykd.findSymbol(addr)
sym = True
try:
# if there's no symbol found, pykd returns a hex string of the address
if int(output, 16) == addr:
sym = False
log.debug("no symbol context")
except:
pass
if sym:
chain.append(('symbol', output.strip()))
else:
log.debug("no symbol context")
mem = pykd.loadBytes(addr, 2)
if mem[0] < 127:
if mem[1] == 0:
a = []
for i in range(0, self.max_string, 2):
mem = pykd.loadBytes(addr + i, 2)
if mem == [0, 0]:
break
a.extend(mem)
output = array.array('B', a).tostring().decode('UTF-16').encode('latin1')
chain.append(('unicode', output))
else:
output = pykd.loadCStr(addr)
chain.append(('string', output))
log.debug("chain: {}".format(chain))
return chain
@lock_host
def command(self, command=None):
"""
Execute a command in the debugger.
`command` is the command string to execute.
"""
if command:
res = pykd.dbgCommand(command)
else:
raise Exception("No command specified")
return res
@lock_host
def disassembly_flavor(self):
"""
Return the disassembly flavor setting for the debugger.
Returns 'intel' or 'att'
"""
return 'intel'
@lock_host
def breakpoints(self, target_id=0):
"""
Return a list of breakpoints.
Returns data in the following structure:
[
{
"id": 1,
"enabled": True,
"one_shot": False,
"hit_count": 5,
"locations": [
{
"address": 0x100000cf0,
"name": 'main'
}
]
}
]
"""
breakpoints = []
for i in range(0, pykd.getNumberBreakpoints()):
b = pykd.getBp(i)
addr = b.getOffset()
name = hex(addr)
try:
name = pykd.findSymbol(addr)
except:
log.exception("No symbol found for address {}".format(addr))
pass
breakpoints.append({
'id': i,
'enabled': True,
'one_shot': False,
'hit_count': '-',
'locations': [{
"address": addr,
"name": name
}]
})
return breakpoints
def capabilities(self):
"""
Return a list of the debugger's capabilities.
Thus far only the 'async' capability is supported. This indicates
that the debugger host can be queried from a background thread,
and that views can use non-blocking API requests without queueing
requests to be dispatched next time the debugger stops.
"""
return ['async']
#
# Private functions
#
def _state(self):
"""
Get the state of a given target. Internal use.
"""
s = pykd.getExecutionStatus()
if s == pykd.executionStatus.Break:
state = 'stopped'
elif s == pykd.executionStatus.Go:
state = 'running'
else:
state = 'invalid'
return state
def get_registers_sse(self, num=8):
regs = {}
            for i in range(0, num):
try:
reg = 'xmm{}'.format(i)
regs[reg] = pykd.reg(reg)
except:
break
return regs
def get_registers_fpu(self):
regs = {}
for i in range(0, 8):
try:
reg = 'st{}'.format(i)
regs[reg] = pykd.reg(reg)
except:
break
return regs
def get_next_instruction(self):
return str(pykd.disasm())
def get_arch(self):
t = pykd.getCPUType()
if t == pykd.CPUType.I386:
return 'x86'
else:
return 'x86_64'
def get_addr_size(self):
arch = self.get_arch()
return self.sizes[arch]
def get_byte_order(self):
return 'little'
class EventHandler(pykd.eventHandler):
"""
Event handler for WinDbg/PyKD events.
"""
def __init__(self, adaptor, *args, **kwargs):
super(EventHandler, self).__init__(*args, **kwargs)
self.adaptor = adaptor
def onExecutionStatusChange(self, status):
if status == pykd.executionStatus.Break:
self.adaptor.update_state()
voltron.server.dispatch_queue()
class WinDbgCommand(DebuggerCommand):
"""
Debugger command class for WinDbg
"""
def __init__(self):
super(WinDbgCommand, self).__init__()
self.register_hooks()
def invoke(self, debugger, command, result, dict):
self.handle_command(command)
def register_hooks(self):
self.handler = EventHandler(self.adaptor)
def unregister_hooks(self):
del self.handler
self.handler = None
class WinDbgAdaptorPlugin(DebuggerAdaptorPlugin):
host = 'windbg'
adaptor_class = WinDbgAdaptor
command_class = WinDbgCommand
```
#### File: voltron/voltron/rdb.py
```python
import pdb
import socket
import sys
# Trying to debug a quirk in some code that gets called async by {ll,g}db?
#
# from .rdb import Rdb
# Rdb().set_trace()
#
# Then: telnet localhost 4444
socks = {}
# Only bind the socket once
def _sock(port):
if port in socks:
return socks[port]
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(("127.0.0.1", port))
socks[port] = s
return s
class Rdb(pdb.Pdb):
def __init__(self, port=4444):
self.old_stdout = sys.stdout
self.old_stdin = sys.stdin
self.skt = _sock(port)
self.skt.listen(1)
(clientsocket, address) = self.skt.accept()
handle = clientsocket.makefile('rw')
pdb.Pdb.__init__(self, completekey='tab', stdin=handle, stdout=handle)
sys.stdout = sys.stdin = handle
``` |
{
"source": "jonasmue/adventofcode20",
"score": 3
} |
#### File: adventofcode20/day01/common.py
```python
TARGET = 2020
def get_input():
with open("input.txt") as f:
return [int(line.strip()) for line in f.readlines()]
```
#### File: adventofcode20/day01/part2.py
```python
from common import *
def find_result(numbers):
# 3-sum in O(n^2) time and O(1) space
numbers = sorted(numbers)
for i, first in enumerate(numbers):
l = i + 1
r = len(numbers) - 1
while l < r:
left = numbers[l]
right = numbers[r]
sum = first + left + right
if sum == TARGET:
return first * left * right
elif sum < TARGET:
l += 1
else:
r -= 1
if __name__ == "__main__":
numbers = get_input()
result = find_result(numbers)
print(result)
```
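A worked example reusing `find_result` from above with the sample numbers from the puzzle statement; 979 + 366 + 675 == 2020, so the expected product is 241861950.
```python
sample = [1721, 979, 366, 299, 675, 1456]
print(find_result(sample))   # 979 * 366 * 675 = 241861950
```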
#### File: adventofcode20/day04/part2.py
```python
import re
from common import count_valid
def match_num(num_str, min, max):
try:
num = int(num_str)
        return min <= num <= max
except ValueError:
return False
def match_byr(byr):
return match_num(byr, 1920, 2002)
def match_iyr(iyr):
return match_num(iyr, 2010, 2020)
def match_eyr(eyr):
return match_num(eyr, 2020, 2030)
def match_hgt(hgt):
unit = hgt[-2:]
value = hgt[:-2]
if unit == "in":
return match_num(value, 59, 76)
elif unit == "cm":
return match_num(value, 150, 193)
else:
return False
def match_hcl(hcl):
return len(hcl) == 7 and bool(re.match("\#([0-9a-f]){6}", hcl))
def match_ecl(ecl):
return ecl in {"amb", "blu", "brn", "gry", "grn", "hzl", "oth"}
def match_pid(pid):
return len(pid) == 9 and bool(re.match("\d{9}", pid))
REQUIRED_FIELDS = {
"byr": match_byr,
"iyr": match_iyr,
"eyr": match_eyr,
"hgt": match_hgt,
"hcl": match_hcl,
"ecl": match_ecl,
"pid": match_pid
}
if __name__ == "__main__":
print(count_valid(REQUIRED_FIELDS))
```
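Spot checks for the individual validators, using the valid/invalid field examples given in the puzzle statement:
```python
print(match_byr("2002"), match_byr("2003"))                                          # True False
print(match_hgt("60in"), match_hgt("190cm"), match_hgt("190in"), match_hgt("190"))   # True True False False
print(match_hcl("#123abc"), match_hcl("#123abz"), match_hcl("123abc"))               # True False False
print(match_ecl("brn"), match_ecl("wat"))                                            # True False
print(match_pid("000000001"), match_pid("0123456789"))                               # True False
```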
#### File: adventofcode20/day05/part2.py
```python
from common import all_ids, lowest_id, highest_id
def missing_id():
# O(n) time and space
# Length of seat codes is constant → conversion code to id is constant
low = lowest_id()
high = highest_id()
expected_values = set([i for i in range(low, high + 1)])
for id in all_ids():
expected_values.remove(id)
return expected_values.pop()
if __name__ == "__main__":
print(missing_id())
```
#### File: adventofcode20/day06/part2.py
```python
from common import get_input
def intersection_count():
# O(n*a) time and space
# with n: number of passengers, a: longest answer set
return sum([len(set.intersection(*group)) for group in get_input()])
if __name__ == "__main__":
print(intersection_count())
```
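`intersection_count()` reads `input.txt`, so this check mirrors its core expression on the example groups from the puzzle statement (per-group intersections of 3, 0, 1, 1 and 1 answers sum to 6):
```python
groups = [
    [set("abc")],
    [set("a"), set("b"), set("c")],
    [set("ab"), set("ac")],
    [set("a"), set("a"), set("a"), set("a")],
    [set("b")],
]
print(sum(len(set.intersection(*group)) for group in groups))   # 6
```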
#### File: adventofcode20/day07/part1.py
```python
from common import get_input
def potential_containers():
luggage_rules = get_input()
def traverse(key="shiny gold", visited=None):
# O(n) time and space
# with n: number of rules (bag types) → we visit each rule at most once
if visited is None: visited = set()
total = 0
for container in luggage_rules.keys():
if container in visited: continue
if key in luggage_rules[container]:
visited.add(container)
total += 1
total += traverse(container, visited)
return total
return traverse()
if __name__ == "__main__":
print(potential_containers())
```
#### File: adventofcode20/day08/common.py
```python
class Computer():
def __init__(self, instructions):
self.instructions = instructions
self.pointer = 0
self.accumulator = 0
self.history = set()
self.finished = False
def exec_instruction(self):
op, arg = self.instructions[self.pointer]
if op == "nop":
self.pointer += 1
elif op == "acc":
self.pointer += 1
self.accumulator += int(arg)
elif op == "jmp":
self.pointer += int(arg)
def run(self):
while self.pointer not in self.history:
if self.pointer >= len(self.instructions):
self.finished = True
return
self.history.add(self.pointer)
self.exec_instruction()
def get_input():
with open("input.txt") as f:
instruction_lines = f.read().splitlines()
instructions = [l.split() for l in instruction_lines]
return instructions
```
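A mini run of the `Computer` class on the sample program from the puzzle statement; execution stops just before the first repeated instruction, with the accumulator at 5.
```python
program = [
    ["nop", "+0"], ["acc", "+1"], ["jmp", "+4"], ["acc", "+3"], ["jmp", "-3"],
    ["acc", "-99"], ["acc", "+1"], ["jmp", "-4"], ["acc", "+6"],
]
c = Computer(program)
c.run()
print(c.accumulator)   # 5
```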
#### File: adventofcode20/day09/part2.py
```python
from common import first_invalid, get_input
def encryption_weakness():
invalid = first_invalid()
# Complexity if we don't consider part 1 (previous line):
# O(n+c) time and O(1) space
# with n: length of input, c: length of contiguous sequence
numbers = get_input()
left, right = contiguous_sequence_pointers(numbers, invalid)
max_item = numbers[left]
min_item = numbers[left]
for i in range(left, right + 1):
max_item = max(max_item, numbers[i])
min_item = min(min_item, numbers[i])
return min_item + max_item
def contiguous_sequence_pointers(numbers, invalid):
left = 0
right = 1
current_sum = numbers[left] + numbers[right]
while True:
if current_sum == invalid:
return left, right
elif current_sum < invalid or left == right - 1:
right += 1
current_sum += numbers[right]
else:
current_sum -= numbers[left]
left += 1
if __name__ == "__main__":
print(encryption_weakness())
```
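A worked example of `contiguous_sequence_pointers` on the sample stream from the puzzle statement: the run summing to the invalid number 127 is 15 + 25 + 47 + 40, so the weakness is 15 + 47 = 62.
```python
sample = [35, 20, 15, 25, 47, 40, 62, 55, 65, 95,
          102, 117, 150, 182, 127, 219, 299, 277, 309, 576]
left, right = contiguous_sequence_pointers(sample, 127)
window = sample[left:right + 1]
print(min(window) + max(window))   # 62
```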
#### File: adventofcode20/day10/part2.py
```python
from common import get_sorted_adapters
from collections import deque
def count_ways():
# Sorting (next line) takes O(nlogn) time and O(n) space
adapters = get_sorted_adapters()
# Rest of the algorithm:
# O(n) time and O(1) space
dp = deque([1, 1, 1], maxlen=3)
for i in range(len(adapters)):
current_adapter = adapters[i]
current_ways = dp[-1]
if i >= 2 and adapters[i - 2] == current_adapter - 2:
current_ways += dp[1]
if i >= 3 and adapters[i - 3] == current_adapter - 3:
current_ways += dp[0]
dp.append(current_ways)
return dp[-1]
if __name__ == "__main__":
print(count_ways())
```
#### File: adventofcode20/day12/common.py
```python
HEADINGS = ["N", "E", "S", "W"]
def get_input():
with open("input.txt") as f:
return [(line[0], int(line[1:])) for line in f.read().splitlines()]
def move(point, direction, steps):
if direction == "N":
point[1] += steps
elif direction == "E":
point[0] += steps
elif direction == "S":
point[1] -= steps
elif direction == "W":
point[0] -= steps
def manhattan_distance(vector):
return sum([abs(item) for item in vector])
```
#### File: adventofcode20/day12/part1.py
```python
from common import *
def run_instructions(instructions):
# O(n) time and O(1) space
current_position = [0, 0]
current_heading = "E"
for item in instructions:
operation = item[0]
arg = item[1]
if operation in HEADINGS:
move(current_position, operation, arg)
elif operation == "F":
move(current_position, current_heading, arg)
elif operation == "L":
next_heading_idx = HEADINGS.index(current_heading) - (arg // 90)
current_heading = HEADINGS[next_heading_idx]
elif operation == "R":
next_heading_idx = HEADINGS.index(current_heading) + (arg // 90)
current_heading = HEADINGS[next_heading_idx % len(HEADINGS)]
return current_position
if __name__ == "__main__":
print(manhattan_distance(run_instructions(get_input())))
```
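A worked example with the instruction list from the puzzle statement (F10, N3, F7, R90, F11): the ship ends 17 east and 8 south, so the Manhattan distance is 25.
```python
sample = [("F", 10), ("N", 3), ("F", 7), ("R", 90), ("F", 11)]
print(manhattan_distance(run_instructions(sample)))   # 25
```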
#### File: adventofcode20/day13/part1.py
```python
from common import get_input
def get_result():
# O(n) time and space
timestamp, buses = get_input()
buses = [int(b) for b in buses if b != "x"]
best_departure = best_bus = None
for bus in buses:
rest = timestamp % bus
prev_departure = timestamp - rest
next_departure = prev_departure + bus
if best_departure is None or next_departure < best_departure:
best_departure = next_departure
best_bus = bus
return (best_departure - timestamp) * best_bus
if __name__ == "__main__":
print(get_result())
```
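`get_result()` reads `input.txt` directly, so this worked example inlines equivalent arithmetic for the sample schedule from the puzzle statement (timestamp 939, earliest departure is bus 59 after a 5 minute wait, giving 295):
```python
timestamp, buses = 939, [7, 13, 59, 31, 19]
wait, bus = min(((b - timestamp % b) % b, b) for b in buses)
print(wait * bus)   # 5 * 59 = 295
```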
#### File: adventofcode20/day14/part2.py
```python
from common import *
def process(computer):
for address in get_addresses(computer.current_mask, computer.current_operation):
computer.memory[address] = computer.current_argument
def get_addresses(mask, address):
result = [address]
for bit_index in range(len(mask)):
offset = len(mask) - bit_index - 1
current_mask_token = mask[bit_index]
if current_mask_token == "X":
result += result
for i in range(len(result)//2):
result[i] = unset_nth_bit(result[i], offset)
for i in range(len(result)//2, len(result)):
result[i] = set_nth_bit(result[i], offset)
elif current_mask_token == "1":
for i in range(len(result)):
result[i] = set_nth_bit(result[i], offset)
return result
if __name__ == "__main__":
computer = Computer()
computer.run_program(process)
print(sum(computer.memory.values()))
```
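A worked example of `get_addresses` with the mask/address pair from the puzzle statement; it relies on `set_nth_bit` / `unset_nth_bit` from `common.py` (not shown here), which are assumed to set and clear the bit at the given offset.
```python
mask = "000000000000000000000000000000X1001X"
print(sorted(get_addresses(mask, 42)))   # [26, 27, 58, 59]
```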
#### File: adventofcode20/day20/part2.py
```python
from common import *
def align_image_tiles(tiles, top_left_corner, border_counter):
def wanted_tile(current_tile, border):
for i, tile in enumerate(tiles):
if border in tile.get_possible_borders():
if current_tile.id == tile.id: continue
return tiles.pop(i)
def find_top_left(current_tile):
top_border = current_tile.bottom()
tile = wanted_tile(current_tile, top_border)
border_idx = tile.get_possible_borders().index(top_border)
if border_idx == 0:
# Top :)
pass
elif border_idx == 1:
# Bottom
tile.flip_vertical()
elif border_idx == 2:
# Left
tile.rotate_left(3)
tile.flip_horizontal()
elif border_idx == 3:
# Right
tile.rotate_left(1)
elif border_idx == 4:
# Top Flipped
tile.flip_horizontal()
elif border_idx == 5:
# Bottom Flipped
tile.rotate_left(2)
elif border_idx == 6:
# Left Flipped
tile.rotate_left(3)
        elif border_idx == 7:
# Right Flipped
tile.rotate_left(1)
tile.flip_horizontal()
assert(top_border) == tile.top()
return tile
def find_next_col(current_tile):
left_border = current_tile.right()
tile = wanted_tile(current_tile, left_border)
border_idx = tile.get_possible_borders().index(left_border)
if border_idx == 0:
# Top
tile.rotate_left(1)
tile.flip_vertical()
elif border_idx == 1:
# Bottom
tile.rotate_left(3)
elif border_idx == 2:
# Left :)
pass
elif border_idx == 3:
# Right
tile.flip_horizontal()
elif border_idx == 4:
# Top Flipped
tile.rotate_left(1)
elif border_idx == 5:
# Bottom Flipped
tile.rotate_left(3)
tile.flip_vertical()
elif border_idx == 6:
# Left Flipped
tile.flip_vertical()
elif border_idx == 7:
# Right Flipped
tile.rotate_left(2)
assert(left_border == tile.left())
return tile
rows = cols = int(sqrt(len(tiles)))
image = [[None] * cols] * rows
while border_counter[top_left_corner.top()] > 1 or border_counter[top_left_corner.left()] > 1:
top_left_corner.rotate_left(1)
image = [[top_left_corner]]
for row in range(rows):
for col in range(cols):
if row == 0 and col == 0:
continue
elif col == 0:
image.append(list())
tile = find_top_left(image[row - 1][col])
image[row].append(tile)
else:
tile = find_next_col(image[row][col - 1])
image[row].append(tile)
if row > 0:
top_tile = image[row - 1][col]
assert(tile.top() == top_tile.bottom())
assert (len(tiles) == 1 and tiles.pop().id == image[0][0].id)
return image
def image_tiles_to_image(image_tiles):
image = []
for tile_row_idx in range(len(image_tiles)):
for tile_col_idx in range(len(image_tiles[tile_row_idx])):
tile = image_tiles[tile_row_idx][tile_col_idx]
tile_content = tile.cropped_content()
for content_row_idx, row_content in enumerate(tile_content):
image_row_idx = tile_row_idx * len(tile_content) + content_row_idx
if image_row_idx >= len(image): image.append(row_content)
else: image[image_row_idx] += row_content
return image
def row_matches_monster(string, offset, indices):
for idx in indices:
if string[offset + idx] != "#": return False
return True
def find_sea_monster(image, sea_monster):
sea_monster_length = max([max(item) for item in sea_monster]) + 1
sea_monsters = 0
for row in range(len(image) + 1 - (len(sea_monster))):
for offset in range(len(image[row]) + 1 - sea_monster_length):
matches_monster = True
for i in range(3):
matches_monster &= row_matches_monster(image[row + i], offset, sea_monster[i])
if matches_monster: sea_monsters += 1
return sea_monsters
def find_all_sea_monsters(image, sea_monster):
for _ in range(4):
found_monsters = find_sea_monster(image, sea_monster)
if found_monsters: return found_monsters
image = flip_array_horizontal(image)
found_monsters = find_sea_monster(image, sea_monster)
if found_monsters: return found_monsters
image = rotate_array_left(image)
return 0
def number_of_hashtags(image):
return sum([row.count("#") for row in image])
def sea_monster_hashtags(sea_monster, n=1):
return n * sum([len(row) for row in sea_monster])
if __name__ == "__main__":
tiles = get_input()
corners, border_counter = corner_tiles(tiles)
top_left_tile = [t for t in tiles if t.id == corners[0]].pop()
image_tiles = align_image_tiles(tiles, top_left_tile, border_counter)
image = image_tiles_to_image(image_tiles)
sea_monster = [[18], [0, 5, 6, 11, 12, 17, 18, 19], [1, 4, 7, 10, 13, 16]]
n_sea_monsters = find_all_sea_monsters(image, sea_monster)
print(number_of_hashtags(image) - sea_monster_hashtags(sea_monster, n_sea_monsters))
``` |
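The grid helpers rotate_array_left and flip_array_horizontal also live in common.py and are not shown; the sketches below are assumptions about their behavior on a list of equal-length strings, with tiny checks.

```python
# Assumed behavior of the common.py grid helpers (sketches, not the repository's code)
def rotate_array_left(grid):
    # 90-degree counter-clockwise rotation: result row i is the original column
    # read top-to-bottom, starting from the rightmost column.
    return ["".join(row[c] for row in grid) for c in range(len(grid[0]) - 1, -1, -1)]

def flip_array_horizontal(grid):
    # Mirror each row left <-> right
    return [row[::-1] for row in grid]

assert rotate_array_left(["ab", "cd"]) == ["bd", "ac"]
assert flip_array_horizontal(["ab", "cd"]) == ["ba", "dc"]
```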
{
"source": "jonasmue/satisfaction.observer",
"score": 3
} |
#### File: satisfaction.observer/analysis/example_extractor.py
```python
import json
class ExampleExtractor:
def __init__(self, source_file, classifications_file, target_file, extract_num=1):
self.source_file = source_file
self.classifications_file = classifications_file
self.target_file = target_file
self.extract_num = extract_num
def run(self):
with open(self.source_file, "r") as input_file:
tweets = json.load(input_file)
with open(self.classifications_file, "r") as input_file:
classifications = json.load(input_file)
result = {}
for category_item, classification_list in classifications.items():
result[category_item] = {}
five_star_classifications = {i: c[1] for i, c in enumerate(classification_list) if c[0] == 5}
one_star_classifications = {i: c[1] for i, c in enumerate(classification_list) if c[0] == 1}
five_star_classifications = {k: v for k, v in
sorted(five_star_classifications.items(), key=lambda item: item[1])}
one_star_classifications = {k: v for k, v in
sorted(one_star_classifications.items(), key=lambda item: item[1])}
max_five_star = list(five_star_classifications.keys())[-self.extract_num:]
max_one_star = list(one_star_classifications.keys())[-self.extract_num:]
result[category_item]["pos"] = [tweets[category_item][t] for t in max_five_star]
result[category_item]["neg"] = [tweets[category_item][t] for t in max_one_star]
with open(self.target_file, "w") as output_file:
json.dump(result, output_file)
``` |
{
"source": "jonasnic/codingame",
"score": 3
} |
#### File: puzzles/python3/bender3.py
```python
import math
import statistics
from typing import Callable, Dict, List, Tuple
def read_input() -> List[Tuple[int, ...]]:
points: List[Tuple[int, ...]] = []
nb_points = int(input())
for _ in range(nb_points):
point: Tuple[int, ...] = tuple(map(int, input().split())) # nb_items, time
points.append(point)
return points
def compute_time_complexity(points: List[Tuple[int, ...]]) -> str:
mapping: Dict[str, Callable] = {
"1": lambda n: 1,
"log n": lambda n: math.log(n, 2),
"n": lambda n: n,
"n log n": lambda n: n * math.log(n, 2),
"n^2": lambda n: n ** 2,
"n^2 log n": lambda n: n ** 2 * math.log(n, 2),
"n^3": lambda n: n ** 2.2, # for validation test
"2^n": lambda n: 2 ** n,
}
best_fit = ""
min_normalized_variance = float("inf")
for name, function in mapping.items():
ratios: List[float] = [time / function(nb_items) for nb_items, time in points]
mean = statistics.mean(ratios)
variance = statistics.variance(ratios, mean)
normalized_variance = variance / mean ** 2
if normalized_variance < min_normalized_variance:
min_normalized_variance = normalized_variance
best_fit = name
return best_fit
if __name__ == "__main__":
points = read_input()
best_fit = compute_time_complexity(points)
print(f"O({best_fit})")
```
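The fit criterion above prefers the candidate whose time / f(n) ratios have the smallest variance relative to their mean. A self-contained sanity check (synthetic data, not puzzle input) showing that exact n log n measurements select the n log n model:

```python
import math, statistics

# Points generated exactly from t = n*log2(n) give a constant ratio for the
# "n log n" candidate, hence zero normalized variance, so it must win.
points = [(n, n * math.log(n, 2)) for n in (64, 128, 256, 512, 1024)]
candidates = {
    "n": lambda n: n,
    "n log n": lambda n: n * math.log(n, 2),
    "n^2": lambda n: n ** 2,
}

def score(f):
    ratios = [t / f(n) for n, t in points]
    mean = statistics.mean(ratios)
    return statistics.variance(ratios, mean) / mean ** 2

assert min(candidates, key=lambda name: score(candidates[name])) == "n log n"
```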
#### File: python3/the-labyrinth/the_labyrinth.py
```python
from collections import deque
from typing import Deque, Dict, List, Optional, Set, Tuple
Position = Tuple[int, int]
# Kirk's Possible Moves
UP = "UP"
DOWN = "DOWN"
LEFT = "LEFT"
RIGHT = "RIGHT"
# Possible ASCII Maze Characters
WALL = '#'
EMPTY = '.'
START = 'T'
CONTROL_ROOM = 'C'
UNKNOWN = '?'
class Game:
def __init__(self, height: int, width: int):
self.maze_explored = False
self.control_room_reached = False
self.height = height
self.width = width
self.maze: List[str] = []
self.kirk_position: Position = (0, 0) # (x, y)
def loop(self):
while True:
kirk_y, kirk_x = map(int, input().split())
self.kirk_position = (kirk_x, kirk_y)
self.maze = []
for y in range(height):
row = input()
self.maze.append(row)
for x, c in enumerate(row):
if c == CONTROL_ROOM and (x, y) == self.kirk_position:
self.control_room_reached = True
self.play()
def play(self):
came_from, neighbor = None, None
if not self.maze_explored:
to_avoid: Tuple[str] = (WALL, CONTROL_ROOM)
came_from, neighbor = self.bfs(UNKNOWN, to_avoid)
if not came_from:
self.maze_explored = True
if self.maze_explored:
            to_avoid: Tuple[str] = (WALL,)
if not self.control_room_reached:
came_from, neighbor = self.bfs(CONTROL_ROOM, to_avoid)
else:
came_from, neighbor = self.bfs(START, to_avoid)
path: List[Position] = self.reconstruct_path(came_from, neighbor)
next_position = path[-2]
self.print_next_move(next_position)
def bfs(self, goal: str, to_avoid: Tuple[str]) -> Tuple[Optional[Dict[Position, Position]], Optional[Position]]:
"""Compute the shortest path between Kirk and the goal with BFS."""
visited: Set[Position] = set()
queue: Deque[Position] = deque()
came_from: Dict[Position, Position] = {} # position => parent position on the shortest path
queue.append(self.kirk_position)
visited.add(self.kirk_position)
while len(queue) != 0:
position: Position = queue.popleft()
for neighbor in self.neighbors(position, to_avoid):
if neighbor not in visited:
visited.add(neighbor)
queue.append(neighbor)
came_from[neighbor] = position
neighbor_x, neighbor_y = neighbor
if self.maze[neighbor_y][neighbor_x] == goal:
return (came_from, neighbor)
return (None, None)
def neighbors(self, position: Position, to_avoid: Tuple[str]) -> List[Position]:
neighbors: List[Position] = []
x, y = position
if x > 0:
self.add_neighbor(to_avoid, neighbors, x - 1, y)
if x < self.width - 1:
self.add_neighbor(to_avoid, neighbors, x + 1, y)
if y > 0:
self.add_neighbor(to_avoid, neighbors, x, y - 1)
if y < self.height - 1:
self.add_neighbor(to_avoid, neighbors, x, y + 1)
return neighbors
def add_neighbor(self, to_avoid: Tuple[str], neighbors: List[Position], x, y):
if self.maze[y][x] not in to_avoid:
neighbors.append((x, y))
def reconstruct_path(self, came_from: Dict[Position, Position], neighbor: Position) -> List[Position]:
current_position: Position = neighbor
stack: List[Position] = []
while current_position in came_from:
stack.append(current_position)
current_position = came_from[current_position]
stack.append(current_position)
return stack
def print_next_move(self, next_position: Position):
kirk_x, kirk_y = self.kirk_position
next_x, next_y = next_position
if kirk_x < next_x:
print(RIGHT)
elif kirk_x > next_x:
print(LEFT)
elif kirk_y < next_y:
print(DOWN)
elif kirk_y > next_y:
print(UP)
if __name__ == "__main__":
height, width, _ = map(int, input().split())
game = Game(height, width)
game.loop()
``` |
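The BFS above records each cell's parent in came_from and rebuilds the path by walking parents back from the goal; Kirk then steps to path[-2], the first move away from his current cell. A minimal sketch of that reconstruction on hypothetical data:

```python
# Hypothetical came_from map produced by a BFS from (0, 0) to the goal (2, 1)
came_from = {(1, 0): (0, 0), (1, 1): (1, 0), (2, 1): (1, 1)}
goal = (2, 1)
path, cur = [], goal
while cur in came_from:
    path.append(cur)
    cur = came_from[cur]
path.append(cur)      # path == [(2, 1), (1, 1), (1, 0), (0, 0)], Kirk at (0, 0)
print(path[-2])       # (1, 0) -> the cell Kirk should move toward next
```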
{
"source": "Jonas-Nicodemus/PINNs-based-MPC",
"score": 2
} |
#### File: src/utils/system.py
```python
import tensorflow as tf
from scipy.constants import g
@tf.function
def f(t, x, u):
M_tf = M(x[1])
k_tf = k(x[2], x[1], x[3])
q_tf = q(x[0], x[2], x[1], x[3])
B_tf = B()
dx12dt = x[2:]
dx34dt = tf.linalg.solve(M_tf, tf.expand_dims(-k_tf + q_tf + tf.linalg.matvec(B_tf, u), 1))[:, 0]
dxdt = tf.concat((dx12dt, dx34dt), 0)
return dxdt
def M(beta, i_PR90=161.):
"""
Returns mass matrix of the robot for beta.
:param tf.Tensor beta: tensor from beta value
:param float i_PR90: motor constant
:return: tf.Tensor M_tf: mass matrx of the robot
"""
M_1 = tf.stack([0.00005267 * i_PR90 ** 2 + 0.6215099724 * tf.cos(beta) + 0.9560375168565, 0.00005267 * i_PR90 +
0.3107549862 * tf.cos(beta) + 0.6608899068565], axis=0)
M_2 = tf.stack([0.00005267 * i_PR90 + 0.3107549862 * tf.cos(beta) + 0.6608899068565,
0.00005267 * i_PR90 ** 2 + 0.6608899068565], axis=0)
M_tf = tf.stack([M_1, M_2], axis=1)
return M_tf
def k(dalpha_dt, beta, dbeta_dt):
"""
Returns stiffness vector of the robot for a set of generalized coordinates.
:param tf.Tensor dalpha_dt: tensor from values of the first derivation of alpha
:param tf.Tensor beta: tensor from beta values
:param tf.Tensor dbeta_dt: tensor from values of the first derivation of beta
:return: tf.Tensor: stiffness vector of the robot
"""
return tf.stack([0.040968 * dalpha_dt ** 2 * tf.sin(beta) * (0.18 * tf.cos(beta) + 0.5586) - 0.18 * tf.sin(beta) *
(1.714 * (0.07205 * dalpha_dt + 0.07205 * dbeta_dt) * (dalpha_dt + dbeta_dt) + 1.714 *
(0.08262 * dalpha_dt + 0.08262 * dbeta_dt) * (dalpha_dt + dbeta_dt) + 1.714 *
(0.04321 * dalpha_dt + 0.04321 * dbeta_dt) * (dalpha_dt + dbeta_dt) + 1.714 *
(0.09238 * dalpha_dt + 0.09238 * dbeta_dt) * (dalpha_dt + dbeta_dt) + 0.30852 *
dalpha_dt ** 2 * tf.cos(beta) + 1.714 * (0.0574 * dalpha_dt + 0.0574 * dbeta_dt) *
(dalpha_dt + dbeta_dt) + 1.714 * (0.11679 * dalpha_dt + 0.11679 * dbeta_dt) *
(dalpha_dt + dbeta_dt)) -
0.36 * tf.sin(beta) *
(0.1138 * (0.06415 * dalpha_dt + 0.06415 * dbeta_dt) * (dalpha_dt + dbeta_dt) +
0.1138 * (0.07205 * dalpha_dt + 0.07205 * dbeta_dt) * (dalpha_dt + dbeta_dt) +
0.1138 * (0.08262 * dalpha_dt + 0.08262 * dbeta_dt) * (dalpha_dt + dbeta_dt) +
0.1138 * (0.04321 * dalpha_dt + 0.04321 * dbeta_dt) * (dalpha_dt + dbeta_dt) +
0.1138 * (0.09238 * dalpha_dt + 0.09238 * dbeta_dt) * (dalpha_dt + dbeta_dt) +
0.020484 * dalpha_dt ** 2 * tf.cos(beta) + 0.1138 * (0.0574 * dalpha_dt + 0.0574 * dbeta_dt) *
(dalpha_dt + dbeta_dt) + 0.1138 * (0.11679 * dalpha_dt + 0.11679 * dbeta_dt) *
(dalpha_dt + dbeta_dt) + 0.1138 * (0.03 * dalpha_dt + 0.03 * dbeta_dt) * (dalpha_dt + dbeta_dt)) -
0.18 * tf.sin(beta) *
(2.751 * (0.04321 * dalpha_dt + 0.04321 * dbeta_dt) * (dalpha_dt + dbeta_dt) + 0.49518 *
dalpha_dt ** 2 * tf.cos(beta)) - 0.18 * tf.sin(beta) *
(1.531 * (0.08262 * dalpha_dt + 0.08262 * dbeta_dt) * (dalpha_dt + dbeta_dt) + 1.531 *
(0.04321 * dalpha_dt + 0.04321 * dbeta_dt) * (dalpha_dt + dbeta_dt) + 0.27558 * dalpha_dt ** 2 *
tf.cos(beta) + 1.531 * (0.11679 * dalpha_dt + 0.11679 * dbeta_dt) * (dalpha_dt + dbeta_dt)) -
0.18 * tf.sin(beta) *
(0.934 * (0.08262 * dalpha_dt + 0.08262 * dbeta_dt) * (dalpha_dt + dbeta_dt) +
0.934 * (0.04321 * dalpha_dt + 0.04321 * dbeta_dt) * (dalpha_dt + dbeta_dt) +
0.934 * (0.09238 * dalpha_dt + 0.09238 * dbeta_dt) * (dalpha_dt + dbeta_dt) +
0.16812 * dalpha_dt ** 2 * tf.cos(beta) + 0.934 * (0.11679 * dalpha_dt + 0.11679 * dbeta_dt) *
(dalpha_dt + dbeta_dt)) +
0.16812 * dalpha_dt ** 2 * tf.sin(beta) * (
0.18 * tf.cos(beta) + 0.335) + 0.49518 * dalpha_dt ** 2 *
tf.sin(beta) * (0.18 * tf.cos(beta) + 0.04321) + 0.30852 * dalpha_dt ** 2 * tf.sin(beta) *
(0.18 * tf.cos(beta) + 0.46445) + 0.27558 * dalpha_dt ** 2 * tf.sin(beta) * (0.18 * tf.cos(beta)
+ 0.24262),
0.3107549862 * dalpha_dt ** 2 * tf.sin(beta)], axis=0)
def q(alpha, dalpha_dt, beta, dbeta_dt):
"""
Returns reaction forces vector of the robot for a set of generalized coordinates.
:param tf.Tensor alpha: tensor from alpha values
:param tf.Tensor dalpha_dt: tensor from values of the first derivation of alpha
:param tf.Tensor beta: tensor from beta values
:param tf.Tensor dbeta_dt: tensor from values of the first derivation of beta
:return: tf.Tensor: reaction forces vectors of the robot
"""
return tf.stack(
[0.33777 * g * tf.sin(alpha) - 3.924 * tf.tanh(5 * dalpha_dt) - 10.838 * tf.tanh(10 * dalpha_dt) -
2.236 * tf.tanh(20 * dalpha_dt) - 76.556 * dalpha_dt - 1.288368 * g * tf.cos(alpha + beta) *
tf.sin(beta) + 0.2276 * g * tf.sin(alpha + beta) * (0.18 * tf.cos(beta) + 0.5586) +
0.934 * g * tf.sin(alpha + beta) * (0.18 * tf.cos(beta) + 0.335) + 2.751 * g *
tf.sin(alpha + beta) * (0.18 * tf.cos(beta) + 0.04321) + 1.714 * g * tf.sin(alpha + beta) *
(0.18 * tf.cos(beta) + 0.46445) + 1.531 * g * tf.sin(alpha + beta) * (0.18 * tf.cos(beta) +
0.24262),
1.72641659 * g * tf.sin(alpha + beta) - 0.368 * tf.tanh(5 * dbeta_dt) -
0.368 * tf.tanh(10 * dbeta_dt) - 8.342 * tf.tanh(100 * dbeta_dt) -
0.492 * tf.sign(dbeta_dt) - 56.231 * dbeta_dt], axis=0)
def B(i_PR90=161.):
"""
Returns input matrix of the robot.
:param float i_PR90: constant
:return: tf.Tensor: input matrix of the robot
"""
i_PR90 = tf.convert_to_tensor(i_PR90, dtype=tf.float64)
B_1 = tf.stack([i_PR90, 0.0], axis=0)
B_2 = tf.stack([0.0, i_PR90], axis=0)
B_tf = tf.stack([B_1, B_2], axis=1)
return B_tf
def M_tensor(beta, i_PR90):
"""
Returns mass matrices of the robot for multiple values for beta.
:param tf.Tensor beta: tensor from beta values
:param float i_PR90: constant
:return: tf.Tensor M_tf: mass matrices of the robot
"""
M_1 = tf.stack([0.00005267 * i_PR90 ** 2 + 0.6215099724 * tf.cos(beta) + 0.9560375168565, 0.00005267 * i_PR90 +
0.3107549862 * tf.cos(beta) + 0.6608899068565], axis=1)
M_2 = tf.stack([0.00005267 * i_PR90 + 0.3107549862 * tf.cos(beta) + 0.6608899068565,
0.00005267 * i_PR90 ** 2 + 0.6608899068565], axis=1)
M_tf = tf.stack([M_1, M_2], axis=2)
return M_tf
def k_tensor(dalpha_dt, beta, dbeta_dt):
"""
Returns stiffness vectors of the robot for multiple values of generalized coordinates.
:param tf.Tensor dalpha_dt: tensor from values of the first derivation of alpha
:param tf.Tensor beta: tensor from beta values
:param tf.Tensor dbeta_dt: tensor from values of the first derivation of beta
:return: tf.Tensor: stiffness vectors of the robot
"""
return tf.stack([0.040968 * dalpha_dt ** 2 * tf.sin(beta) * (0.18 * tf.cos(beta) + 0.5586) - 0.18 * tf.sin(beta) *
(1.714 * (0.07205 * dalpha_dt + 0.07205 * dbeta_dt) * (dalpha_dt + dbeta_dt) + 1.714 *
(0.08262 * dalpha_dt + 0.08262 * dbeta_dt) * (dalpha_dt + dbeta_dt) + 1.714 *
(0.04321 * dalpha_dt + 0.04321 * dbeta_dt) * (dalpha_dt + dbeta_dt) + 1.714 *
(0.09238 * dalpha_dt + 0.09238 * dbeta_dt) * (dalpha_dt + dbeta_dt) + 0.30852 *
dalpha_dt ** 2 * tf.cos(beta) + 1.714 * (0.0574 * dalpha_dt + 0.0574 * dbeta_dt) *
(dalpha_dt + dbeta_dt) + 1.714 * (0.11679 * dalpha_dt + 0.11679 * dbeta_dt) *
(dalpha_dt + dbeta_dt)) -
0.36 * tf.sin(beta) *
(0.1138 * (0.06415 * dalpha_dt + 0.06415 * dbeta_dt) * (dalpha_dt + dbeta_dt) +
0.1138 * (0.07205 * dalpha_dt + 0.07205 * dbeta_dt) * (dalpha_dt + dbeta_dt) +
0.1138 * (0.08262 * dalpha_dt + 0.08262 * dbeta_dt) * (dalpha_dt + dbeta_dt) +
0.1138 * (0.04321 * dalpha_dt + 0.04321 * dbeta_dt) * (dalpha_dt + dbeta_dt) +
0.1138 * (0.09238 * dalpha_dt + 0.09238 * dbeta_dt) * (dalpha_dt + dbeta_dt) +
0.020484 * dalpha_dt ** 2 * tf.cos(beta) + 0.1138 * (0.0574 * dalpha_dt + 0.0574 * dbeta_dt) *
(dalpha_dt + dbeta_dt) + 0.1138 * (0.11679 * dalpha_dt + 0.11679 * dbeta_dt) *
(dalpha_dt + dbeta_dt) + 0.1138 * (0.03 * dalpha_dt + 0.03 * dbeta_dt) * (dalpha_dt + dbeta_dt)) -
0.18 * tf.sin(beta) *
(2.751 * (0.04321 * dalpha_dt + 0.04321 * dbeta_dt) * (dalpha_dt + dbeta_dt) + 0.49518 *
dalpha_dt ** 2 * tf.cos(beta)) - 0.18 * tf.sin(beta) *
(1.531 * (0.08262 * dalpha_dt + 0.08262 * dbeta_dt) * (dalpha_dt + dbeta_dt) + 1.531 *
(0.04321 * dalpha_dt + 0.04321 * dbeta_dt) * (dalpha_dt + dbeta_dt) + 0.27558 * dalpha_dt ** 2 *
tf.cos(beta) + 1.531 * (0.11679 * dalpha_dt + 0.11679 * dbeta_dt) * (dalpha_dt + dbeta_dt)) -
0.18 * tf.sin(beta) *
(0.934 * (0.08262 * dalpha_dt + 0.08262 * dbeta_dt) * (dalpha_dt + dbeta_dt) +
0.934 * (0.04321 * dalpha_dt + 0.04321 * dbeta_dt) * (dalpha_dt + dbeta_dt) +
0.934 * (0.09238 * dalpha_dt + 0.09238 * dbeta_dt) * (dalpha_dt + dbeta_dt) +
0.16812 * dalpha_dt ** 2 * tf.cos(beta) + 0.934 * (0.11679 * dalpha_dt + 0.11679 * dbeta_dt) *
(dalpha_dt + dbeta_dt)) +
0.16812 * dalpha_dt ** 2 * tf.sin(beta) * (
0.18 * tf.cos(beta) + 0.335) + 0.49518 * dalpha_dt ** 2 *
tf.sin(beta) * (0.18 * tf.cos(beta) + 0.04321) + 0.30852 * dalpha_dt ** 2 * tf.sin(beta) *
(0.18 * tf.cos(beta) + 0.46445) + 0.27558 * dalpha_dt ** 2 * tf.sin(beta) * (0.18 * tf.cos(beta)
+ 0.24262),
0.3107549862 * dalpha_dt ** 2 * tf.sin(beta)], axis=1)
def q_tensor(alpha, dalpha_dt, beta, dbeta_dt):
"""
Returns reaction forces vectors of the robot for multiple values of generalized coordinates.
:param tf.Tensor alpha: tensor from alpha values
:param tf.Tensor dalpha_dt: tensor from values of the first derivation of alpha
:param tf.Tensor beta: tensor from beta values
:param tf.Tensor dbeta_dt: tensor from values of the first derivation of beta
:return: tf.Tensor: reaction forces vectors of the robot
"""
return tf.stack(
[0.33777 * g * tf.sin(alpha) - 3.924 * tf.tanh(5 * dalpha_dt) - 10.838 * tf.tanh(10 * dalpha_dt) -
2.236 * tf.tanh(20 * dalpha_dt) - 76.556 * dalpha_dt - 1.288368 * g * tf.cos(alpha + beta) *
tf.sin(beta) + 0.2276 * g * tf.sin(alpha + beta) * (0.18 * tf.cos(beta) + 0.5586) +
0.934 * g * tf.sin(alpha + beta) * (0.18 * tf.cos(beta) + 0.335) + 2.751 * g *
tf.sin(alpha + beta) * (0.18 * tf.cos(beta) + 0.04321) + 1.714 * g * tf.sin(alpha + beta) *
(0.18 * tf.cos(beta) + 0.46445) + 1.531 * g * tf.sin(alpha + beta) * (0.18 * tf.cos(beta) +
0.24262),
1.72641659 * g * tf.sin(alpha + beta) - 0.368 * tf.tanh(5 * dbeta_dt) -
0.368 * tf.tanh(10 * dbeta_dt) - 8.342 * tf.tanh(100 * dbeta_dt) -
0.492 * tf.sign(dbeta_dt) - 56.231 * dbeta_dt], axis=1)
def B_tensor(i_PR90):
"""
Returns input matrices of the robot.
:param float i_PR90: constant
:return: tf.Tensor: input matrices of the robot
"""
B_1 = tf.stack([i_PR90, tf.zeros(i_PR90.shape, dtype=tf.float64)], axis=1)
B_2 = tf.stack([tf.zeros(i_PR90.shape, dtype=tf.float64), i_PR90], axis=1)
B_tf = tf.stack([B_1, B_2], axis=2)
return B_tf
``` |
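f(t, x, u) above returns the state derivative for x = [alpha, beta, dalpha_dt, dbeta_dt]: the accelerations come from solving M(beta) * ddq = -k + q + B u. A hypothetical usage sketch (the import path is an assumption; float64 tensors are required by the matrix helpers):

```python
import tensorflow as tf
# from utils.system import f   # assumed import path for this module

x0 = tf.constant([0.1, -0.2, 0.0, 0.0], dtype=tf.float64)  # [alpha, beta, dalpha, dbeta]
u0 = tf.constant([0.0, 0.0], dtype=tf.float64)              # motor inputs
# dxdt = f(tf.constant(0.0, tf.float64), x0, u0)            # -> [dalpha, dbeta, ddalpha, ddbeta]
```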
{
"source": "jonasnm/geometric-certificates",
"score": 2
} |
#### File: jonasnm/geometric-certificates/geocert.py
```python
import sys
import itertools
import mister_ed.adversarial_perturbations as ap
# These mister_ed modules are used by the PGD upper-bound attack below
import mister_ed.prebuilt_loss_functions as plf
import mister_ed.adversarial_attacks as aa
import mister_ed.utils.pytorch_utils as me_utils
from _polytope_ import Polytope, Face
import utilities as utils
from .domains import Domain
from .plnn import PLNN
import inspect
print("PLNN", inspect.getfile(PLNN))
import torch
import numpy as np
import heapq
import time
import matplotlib.pyplot as plt
import torch.nn.functional as F
import joblib
import multiprocessing as mp
from dataclasses import dataclass, field
from typing import Any
from multiprocessing.managers import SyncManager
from threading import Thread
from queue import PriorityQueue, Empty
""" Different from the standard Geocert in that we use multiprocessing
Multiprocessing flow works like this:
- First compute the domain and upper bounds and all that nonsense
- Next handle the first linear region locally to push some stuff onto the
pq
- Initialize a bunch of processes that have two phases:
PROCESS SETUP:
- load a copy of the net
- keep track of the most recent domain
- keep track of the true label
PROCESS LOOP:
- Reread and copy the domain onto memory
- Reread and copy the dead neurons onto memory
- Given an element off the queue (config + tight constraint),
list all the facets that would need to be added to the PQ
- quickly reject what we can (using domain knowledge)
- quickly reject what we can (using the shared seen-dict)
- compute feasible/domain bounds on everything else
- make the new feasible domains available to the main pq
TERMINATION:
        - if we pop an adversarial (decision) facet off the queue, its distance
          certifies the minimum and we can stop
SHARED MEMORY:
- domain
- seen_to_polytope_map
- dead_neurons
- valid domain
- priority queue
LOCAL PROCESS MEMORY :
- net
"""
############################################################################
# #
# HELPER CLASSES #
# #
############################################################################
def verbose_print(*args, verbose=True):
if verbose:
print(*args)
class PQElement:
priority: float # IS THE LP DIST OR 'POTENTIAL' VALUE
config: Any=field(compare=False) # Configs for neuron region
tight_constraint: Any=field(compare=False) # which constraint is tight
facet_type: Any=field(compare=False) # is decision or nah?
projection: Any=field(compare=False)
def __lt__(self, other):
return self.priority < other.priority
class GeoCertReturn:
""" Object that encapsulates the output from GeoCert """
def __init__(self, original=None, original_shape=None,
best_dist=None, best_ex=None, adv_bound=None,
adv_ex=None, seen_polytopes=None, missed_polytopes=None,
polytope_graph=None, lower_bound_times=None,
upper_bound_times=None, status=None, problem_type=None,
radius=None, num_regions=None):
self.original = original
self.original_shape = original_shape
# If computed the minimal distance adversarial example...
self.best_dist = best_dist # this is the distance
self.best_ex = best_ex # and this is the example itself
# If Upper bound Adv.Attack was performed...
self.adv_bound = adv_bound # this is the adv.ex distance
self.adv_ex = adv_ex # this is the adversarial example itself
# dict of binary strings corresponding to feasible polytopes seen by geocert
self.seen_polytopes = seen_polytopes
# dict of binary strings corresponding to infeasible polytopes checked
self.missed_polytopes = missed_polytopes
# dict of pairs of binary strings representing the edges of the graph
self.polytope_graph = polytope_graph
# list of pairs of (time, lower/upper_bound)
self.lower_bound_times = lower_bound_times
self.upper_bound_times = upper_bound_times
self.status = status # return status ['TIMEOUT', 'FAILURE', 'SUCCESS']
self.problem_type = problem_type # in ['min_dist', 'decision_problem', 'count_regions']
self.radius = radius
self.num_regions = num_regions
def __repr__(self):
""" Method to print out results"""
output_str = 'GeoCert Return Object\n'
output_str += '\tProblem Type: ' + self.problem_type + '\n'
output_str += '\tStatus: %s\n' % self.status
if self.status == 'TIMEOUT':
return output_str
if self.problem_type == 'min_dist':
output_str += '\tRobustness: %.04f' % self.best_dist
elif self.problem_type in ['decision_problem', 'count_regions']:
output_str += '\tRadius %.02f\n' % self.radius
if self.problem_type == 'count_regions':
output_str += '\tNum Linear Regions: %s' % self.num_regions
return output_str
def display_images(self, include_diffs=True, include_pgd=False,
figsize=(12, 12)):
""" Shorthand method to display images found by GeoCert.
Useful when doing things with GeoCert in jupyter notebooks
ARGS:
include_diffs : boolean - if True, we'll display the differences
between the original and GeoCert image
(diffs scaled up by 5x!)
include_pgd : boolean - if True, we'll also display the image
found by PGD (useful upper bound)
RETURNS:
None, but inline displays the images in the order
[original | diff | geoCert | PGD]
"""
if self.best_ex is None:
# No Geocert image => do nothing
return
# Build the display row of numpy elements
original_np = utils.as_numpy(self.original.reshape(self.original_shape))
best_ex_np = utils.as_numpy(self.best_ex.reshape(self.original_shape))
display_row = [original_np, best_ex_np]
label_row = ['original', 'geoCert']
if include_diffs:
diff_np = np.clip(0.5 + (best_ex_np - original_np) * 5, 0.0, 1.0)
display_row.insert(1, diff_np)
label_row.insert(1, 'difference x5 (+0.5)')
if include_pgd and self.adv_ex is not None:
adv_ex_np = utils.as_numpy(self.adv_ex.reshape(self.original_shape))
display_row.append(adv_ex_np)
label_row.append('PGD')
# Make sure everything has three dimensions (CxHxW)
# --- determine if grayscale or not
grayscale = (original_np.squeeze().ndim == 2)
if grayscale:
num_channels = 1
imshow_kwargs = {'cmap': 'gray'}
else:
num_channels = 3
imshow_kwargs = {}
# --- determine height/width
h, w = original_np.squeeze().shape[-2:]
for i in range(len(display_row)):
display_row[i] = display_row[i].reshape((num_channels, h, w))
# Concatenate everything into a single row, and display
# --- concatenate row together
cat_row = np.concatenate(display_row, -1)
if grayscale:
cat_row = cat_row.squeeze()
plt.figure(figsize=figsize, dpi=80, facecolor='w', edgecolor='k')
plt.axis('off')
plt.imshow(cat_row, **imshow_kwargs)
# -- add labels underneath the images
for label_idx, label in enumerate(label_row):
x_offset = (0.33 + label_idx) * w
plt.text(x_offset, h + 1, label)
plt.show()
##############################################################################
# #
# MAIN GEOCERT CLASS #
# #
##############################################################################
class GeoCert(object):
bound_fxn_selector = {'ia': PLNN.compute_interval_bounds,
'dual_lp': PLNN.compute_dual_lp_bounds,
'full_lp': PLNN.compute_full_lp_bounds}
def __init__(self, net, hyperbox_bounds=None,
verbose=True, neuron_bounds='ia',
# And for 2d inputs, some kwargs for displaying things
display=False, save_dir=None, ax=None):
""" To set up a geocert instance we need to know:
ARGS:
net : PLNN instance - the network we're verifying
hyperbox_bounds: if not None, is a tuple of pair of numbers
(lo, hi) that define a valid hyperbox domain
neuron_bounds: string - which technique we use to compute
preactivation bounds. ia is interval
analysis, full_lp is the full linear
program, and dual_lp is the Kolter-Wong
dual approach
verbose: bool - if True, we print things
THE REST ARE FOR DISPLAYING IN 2D CASES
"""
##############################################################
# First save the kwargs #
##############################################################
self.net = net
self.hyperbox_bounds = hyperbox_bounds
self.verbose = verbose
assert neuron_bounds in ['ia', 'dual_lp', 'full_lp']
self.neuron_bounds = neuron_bounds
self.bound_fxn = self.bound_fxn_selector[neuron_bounds]
# DISPLAY PARAMETERS
self.display = display
self.save_dir = save_dir
self.ax = ax
        # And initialize the per-run state
self._reset_state()
def _reset_state(self):
""" Clears out the state of things that get set in a min_dist run """
# Things that are saved as instances for a run
self.lp_norm = None # filled in later
self.true_label = None # filled in later
self.lp_dist = None # filled in later
self.seen_to_polytope_map = {} # binary config str -> Polytope object
self.pq = [] # Priority queue that contains HeapElements
self.dead_constraints = None
self.on_off_neurons = None
self.domain = None # keeps track of domain and upper bounds
self.config_history = None # keeps track of all seen polytope configs
self.x = None
self.x_np = None
def _setup_state(self, x, lp_norm, potential):
""" Sets up the state to be used on a per-run basis
Shared between min_dist_multiproc and decision_problem_multiproc
Sets instance variables and does asserts
"""
assert lp_norm in ['l_2', 'l_inf']
self.lp_norm = lp_norm
self.x = x
self.x_np = utils.as_numpy(x)
self.true_label = int(self.net(x).max(1)[1].item())
dist_selector = {'l_2' : Face.l2_dist_gurobi,
'l_inf': Face.linf_dist_gurobi}
self.lp_dist = dist_selector[self.lp_norm]
self.domain = Domain(x.numel(), x)
if self.hyperbox_bounds is not None:
self.domain.set_original_hyperbox_bound(*self.hyperbox_bounds)
self._update_dead_constraints()
assert potential in ['lp', 'lipschitz']
if self.net.layer_sizes[-1] > 2 and potential == 'lipschitz':
raise NotImplementedError("Lipschitz potential buggy w/ >2 classes!")
def _verbose_print(self, *args):
""" Print method that leverages self.verbose -- makes code cleaner """
if self.verbose:
print(*args)
def _compute_upper_bounds(self, x, true_label,
extra_attack_kwargs=None):
""" Runs an adversarial attack to compute an upper bound on the
distance to the decision boundary.
In the l_inf case, we compute the constraints that are always
on or off in the specified upper bound
"""
self._verbose_print("Starting upper bound computation")
start = time.time()
upper_bound, adv_ex = self._pgd_upper_bound(x, true_label, self.lp_norm,
extra_kwargs=extra_attack_kwargs)
ub_time = time.time() - start
if upper_bound is None:
self._verbose_print("Upper bound failed in %.02f seconds" % ub_time)
else:
self._verbose_print("Upper bound of %s in %.02f seconds" %
(upper_bound, ub_time))
self._update_dead_constraints()
return upper_bound, adv_ex, ub_time
def _pgd_upper_bound(self, x, true_label, lp_norm, num_repeats=64,
extra_kwargs=None):
""" Runs PGD attack off of many random initializations to help generate
an upper bound.
Sets self.upper_bound as the lp distance to the best (of the ones we
found) adversarial example
Also returns both the upper bound and the supplied adversarial
example
"""
######################################################################
# Setup attack object #
######################################################################
norm = {'l_inf': 'inf', 'l_2': 2}[lp_norm]
linf_threat = ap.ThreatModel(ap.DeltaAddition, {'lp_style': 'inf',
'lp_bound': 1.0})
normalizer = me_utils.IdentityNormalize()
loss_fxn = plf.VanillaXentropy(self.net, normalizer)
pgd_attack = aa.PGD(self.net, normalizer, linf_threat, loss_fxn,
manual_gpu=False)
attack_kwargs = {'num_iterations': 1000,
'random_init': 0.4,
'signed': False,
'verbose': False}
if isinstance(extra_kwargs, dict):
attack_kwargs.update(extra_kwargs)
######################################################################
# Setup 'minibatch' of randomly perturbed examples to try #
######################################################################
new_x = x.view(1, -1).repeat(num_repeats, 1)
labels = [true_label for _ in range(num_repeats)]
labels = torch.Tensor(labels).long()
# Use the GPU to build adversarial attacks if we can
USE_GPU = torch.cuda.is_available()
if USE_GPU:
new_x = new_x.cuda()
labels = labels.cuda()
self.net.cuda()
######################################################################
# Run the attack and collect the best (if any) successful example #
######################################################################
pert_out = pgd_attack.attack(new_x, labels, **attack_kwargs)
pert_out = pert_out.binsearch_closer(self.net, normalizer, labels)
success_out = pert_out.collect_successful(self.net, normalizer,
success_def='alter_top_logit')
success_idxs = success_out['success_idxs']
        if USE_GPU:
            labels = labels.cpu()
            self.net.cpu()
        if success_idxs.numel() == 0:
            return None, None
        diffs = pert_out.delta.data.index_select(0, success_idxs)
        max_idx = me_utils.batchwise_norm(diffs, norm, dim=0).min(0)[1].item()
        best_adv = success_out['adversarials'][max_idx].squeeze()
        if USE_GPU:
            # Only move best_adv back to the CPU once it actually exists
            best_adv = best_adv.cpu()
# Set both l_inf and l_2 upper bounds
l_inf_upper_bound = (best_adv - x.view(-1)).abs().max().item()
self.domain.set_l_inf_upper_bound(l_inf_upper_bound)
l_2_upper_bound = torch.norm(best_adv - x.view(-1), p=2).item()
self.domain.set_l_2_upper_bound(l_2_upper_bound)
upper_bound = {'l_inf': l_inf_upper_bound,
'l_2': l_2_upper_bound}[self.lp_norm]
return upper_bound, best_adv
def _update_dead_constraints(self):
# Compute new bounds
new_bounds = self.bound_fxn(self.net, self.domain)
# Change to dead constraint form
self.dead_constraints = utils.ranges_to_dead_neurons(new_bounds)
self.on_off_neurons = utils.ranges_to_on_off_neurons(new_bounds)
def run(self, x, lp_norm='l_2', compute_upper_bound=False,
potential='lp', problem_type='min_dist', decision_radius=None,
collect_graph=False, max_runtime=None):
"""
Main method for running GeoCert. This method handles each of the three
problem types, as specified by the problem_type argument:
- min_dist : computes the minimum distance point (under the specified
lp_norm), x', for which net(x) != net(x')
- decision_problem : answers yes/no whether or not an adversarial
example exists within a radius of decision_radius
from the specified point x. Will return early if
finds an adversarial example within the radius
(which may not be the one with minimal distance!)
- count_regions : like decision_problem, explores the region specified
by decision_radius, but will not stop early and instead
explore the entire region
ARGS:
x : numpy array or tensor - vector that we wish to certify
robustness for
lp_norm: string - needs to be 'l_2' or 'l_inf'
compute_upper_bound : None, True, or dict - if None, no upper bound
to pointwise robustness is computed. If not
None, should be either True (to use default
attack params) or a dict specifying extra
kwargs to use in the PGD attack (see examples)
potential : string - needs to be 'lp' or 'lipschitz', affects which
potential function to be used in ordering facets
problem_type : string - must be in ['min_dist', 'decision_problem',
'count_regions']
collect_graph: bool - if True, we collect the graph of linear regions
and return it
max_runtime : None or numeric - if not None, is a limit on the runtime
RETURNS:
GeoCertReturn object which has attributes regarding the output data
"""
######################################################################
# Step 0: Clear and setup state #
######################################################################
# 0.A) Establish clean state for a new run
original_shape = x.shape
x = x.view(-1)
self._reset_state() # clear out the state first
self._setup_state(x, lp_norm, potential)
start_time = time.time()
# 0.B) Setup objects to gather bound updates with timing info
# Upper bound times that the domain queue updater knows about
upper_bound_times = [] # (time, bound)
# Lower bound times that the workers know about
lower_bound_times = [] # (time, bound)
# 0.C) Compute upper bounds to further restrict search
adv_bound, adv_ex, ub_time = None, None, None
if problem_type == 'min_dist':
# If finding min dist adv.ex, possibly run a PGD attack first
if compute_upper_bound is not False:
ub_out = self._compute_upper_bounds(x, self.true_label,
extra_attack_kwargs=compute_upper_bound)
adv_bound, adv_ex, ub_time = ub_out
if adv_bound is not None:
upper_bound_times.append((time.time() - start_time, adv_bound))
self.domain.set_upper_bound(adv_bound, lp_norm)
if problem_type in ['decision_problem', 'count_regions']:
# If searching the entire provided domain, set up asymmetric domain
assert decision_radius is not None
self.domain.set_upper_bound(decision_radius, lp_norm)
# 0.D) Set up priority queues
sync_pq = []
pq_decision_bounds = []
# 0.E) Set up the objects to collect seen/missed polytopes and connections
seen_polytopes = {}
missed_polytopes = {}
if collect_graph:
polytope_graph = {}
else:
polytope_graph = None
# 0.F) Set up heuristic dicts to hold info on domain, fixed neurons,
# and lipschitz constant
heuristic_dict = {}
heuristic_dict['domain'] = self.domain
heuristic_dict['dead_constraints'] = self.dead_constraints
if potential == 'lipschitz':
# Just assume binary classifiers for now
# on_off_neurons = self.net.compute_interval_bounds(self.domain, True)
dual_lp = utils.dual_norm(lp_norm)
c_vector, lip_value = self.net.fast_lip_all_vals(x, dual_lp,
self.on_off_neurons)
self._verbose_print("LIPSCHITZ CONSTANTS", lip_value)
self._verbose_print(c_vector[0].dot(self.net(x).squeeze()) / lip_value[0])
else:
lip_value = None
c_vector = None
heuristic_dict['fast_lip'] = lip_value
heuristic_dict['c_vector'] = c_vector
# 0.G) Set up return object to be further populated later
# (mutable objects for all dynamic kwargs to GeoCertReturn make this ok)
return_obj = GeoCertReturn(original=x,
original_shape=original_shape,
best_dist=None,
best_ex=None,
adv_bound=adv_bound,
adv_ex=adv_ex,
seen_polytopes=seen_polytopes,
missed_polytopes=missed_polytopes,
polytope_graph=polytope_graph,
lower_bound_times=lower_bound_times,
upper_bound_times=upper_bound_times,
status=None,
problem_type=problem_type,
radius=decision_radius)
######################################################################
# Step 1: handle the initial polytope #
######################################################################
# NOTE: The loop doesn't quite work here, so have to do the first part
# (aka emulate update_step_build_poly) manually.
# 1) Build the original polytope
# 2) Add polytope to seen polytopes
#
self._verbose_print('---Initial Polytope---')
p_0_dict = self.net.compute_polytope(self.x)
p_0 = Polytope.from_polytope_dict(p_0_dict, self.x_np,
domain=self.domain,
dead_constraints=self.dead_constraints,
gurobi=True,
lipschitz_ub=lip_value,
c_vector=c_vector)
seen_polytopes[utils.flatten_config(p_0.config)] = True
update_step_handle_polytope(self.net, self.x_np, self.true_label,
sync_pq, seen_polytopes, self.domain,
self.dead_constraints, p_0, self.lp_norm,
pq_decision_bounds, potential, missed_polytopes,
problem_type, polytope_graph,
heuristic_dict, upper_bound_times,
start_time, max_runtime,
verbose=self.verbose)
if problem_type == 'decision_problem':
# If a decision problem and found a decision bound in the first polytope
# (which must also be in the 'restricted domain'), then we can return
try:
best_decision_bound = heapq.heappop(pq_decision_bounds)
# Will error here^ unless found a decision bound
return_obj.status = 'SUCCESS'
return return_obj # note, not guaranteed to be optimal!
except IndexError:
pass
######################################################################
# Step 2: Loop until termination #
######################################################################
proc_args = (self.net, self.x_np, self.true_label, sync_pq,
seen_polytopes, heuristic_dict, self.lp_norm,
pq_decision_bounds,
potential, missed_polytopes, problem_type,
polytope_graph, lower_bound_times, start_time,
upper_bound_times, max_runtime)
update_step_worker(*proc_args, **{'proc_id': 0,
'verbose': self.verbose})
######################################################################
# Step 3: Collect the best thing in the decision queue and return #
######################################################################
overran_time = ((max_runtime is not None) and\
(time.time() - start_time > max_runtime))
if overran_time:
return_obj.status = 'TIMEOUT'
return return_obj
if problem_type == 'min_dist':
best_decision_bound = heapq.heappop(pq_decision_bounds)
elif problem_type in ['decision_problem', 'count_regions']:
try:
best_decision_bound = heapq.heappop(pq_decision_bounds)
except IndexError:
if problem_type == 'decision_problem':
self._verbose_print("DECISION PROBLEM FAILED")
return_obj.status = 'FAILURE'
else:
self._verbose_print("COUNTED %s LINEAR REGIONS" % len(seen_polytopes))
return_obj.status = 'SUCCESS'
return_obj.num_regions = len(seen_polytopes)
return return_obj
return_obj.best_dist = best_decision_bound.priority
return_obj.best_ex = best_decision_bound.projection
return_obj.status = 'SUCCESS'
return return_obj
##############################################################################
# #
# FUNCTIONAL VERSION OF UPDATES #
# (useful for multiprocessing) #
##############################################################################
def update_step_worker(piecewise_net, x, true_label, pqueue, seen_polytopes,
heuristic_dict, lp_norm,
pq_decision_bounds, potential,
missed_polytopes, problem_type,
polytope_graph, lower_bound_times, start_time,
upper_bound_times, max_runtime,
proc_id=None, verbose=True):
""" Setup for the worker objects
ARGS:
network - actual network object to be copied over into memory
everything else is a manager
"""
assert problem_type in ['min_dist', 'decision_problem', 'count_regions']
# with everything set up, LFGD
while True:
output = update_step_loop(piecewise_net, x, true_label, pqueue,
seen_polytopes, heuristic_dict,
lp_norm,
pq_decision_bounds,
potential, missed_polytopes,
problem_type, polytope_graph,
lower_bound_times, start_time, proc_id,
upper_bound_times, max_runtime,
verbose=verbose)
if output is not True: # Termination condition
return output
if (max_runtime is not None) and (time.time() - start_time) > max_runtime:
return output
def update_step_loop(piecewise_net, x, true_label, pqueue, seen_polytopes,
heuristic_dict, lp_norm,
pq_decision_bounds, potential, missed_polytopes,
problem_type, polytope_graph,
lower_bound_times, start_time, proc_id, upper_bound_times,
max_runtime, verbose=True):
""" Inner loop for how to update the priority queue. This handles one
particular thing being popped off the PQ
"""
# Build the polytope to pop from the queue
poly_out = update_step_build_poly(piecewise_net, x, pqueue, seen_polytopes,
heuristic_dict, lp_norm,
pq_decision_bounds, potential,
problem_type,
lower_bound_times, start_time, proc_id,
verbose=verbose)
if isinstance(poly_out, bool): # bubble up booleans
return poly_out
new_poly, domain, dead_constraints = poly_out
# Build facets, reject what we can, and do optimization on the rest
return update_step_handle_polytope(piecewise_net, x, true_label, pqueue,
seen_polytopes, domain, dead_constraints,
new_poly, lp_norm,
pq_decision_bounds, potential,
missed_polytopes,
problem_type, polytope_graph,
heuristic_dict, upper_bound_times,
start_time, max_runtime,
verbose=verbose)
def update_step_build_poly(piecewise_net, x, pqueue, seen_polytopes,
heuristic_dict, lp_norm,
pq_decision_bounds, potential,
problem_type, lower_bound_times, start_time,
proc_id, verbose=True):
""" Component method of the loop.
1) Pops the top PQ element off and rejects it as seen before if so
2) Collect the domain/heuristics
3) builds the new polytope and returns the polytope
"""
##########################################################################
# Step 1: pop something off the queue #
##########################################################################
try:
item = heapq.heappop(pqueue)
except IndexError:
return False
#priority, config, tight_constraint, proj, facet_type = item
    if item.priority < 0:  # Termination condition -- bubble up the termination
return False
if item.facet_type == 'decision': # Termination condition -- bubble up
heapq.heappush(pq_decision_bounds, item)
#pq_decision_bounds.put(item)
return False
# Update the lower bound queue
lower_bound_times.append(((time.time() - start_time), item.priority))
new_configs = utils.get_new_configs(item.config, item.tight_constraint)
if utils.flatten_config(new_configs) in seen_polytopes:
return True # No need to go further, but don't terminate!
else:
seen_polytopes[utils.flatten_config(new_configs)] = True
##########################################################################
# Step 2: Gather the domain and dead neurons #
##########################################################################
domain = heuristic_dict['domain']
current_upper_bound = domain.current_upper_bound(lp_norm) or 1e10
verbose_print("(p%s) Popped: %.06f | %.06f" %
(proc_id, item.priority, current_upper_bound),
verbose=verbose)
assert isinstance(domain, Domain)
dead_constraints = heuristic_dict['dead_constraints']
lipschitz_ub = heuristic_dict['fast_lip']
c_vector = heuristic_dict['c_vector']
##########################################################################
# Step 3: Build polytope and return #
##########################################################################
new_poly_dict = piecewise_net.compute_polytope_config(new_configs, False)
new_poly = Polytope.from_polytope_dict(new_poly_dict, x,
domain=domain,
dead_constraints=dead_constraints,
lipschitz_ub=lipschitz_ub,
c_vector=c_vector)
return new_poly, domain, dead_constraints
def update_step_handle_polytope(piecewise_net, x, true_label, pqueue,
seen_polytopes, domain, dead_constraints,
new_poly, lp_norm,
pq_decision_bounds, potential,
missed_polytopes,
problem_type, polytope_graph, heuristic_dict,
upper_bound_times, start_time, max_runtime,
verbose=True):
""" Component method of the loop
1) Makes facets, rejecting quickly where we can
2) Run convex optimization on everything we can't reject
3) Push the updates to the process-safe objects
"""
##########################################################################
# Step 1: Make new facets while doing fast rejects #
##########################################################################
new_facets, rejects = new_poly.generate_facets_configs(seen_polytopes,
missed_polytopes)
if problem_type != 'count_regions':
adv_constraints = piecewise_net.make_adversarial_constraints(new_poly,
true_label, domain)
else:
adv_constraints = []
##########################################################################
# Step 2: Compute the min-dists/feasibility checks using LP/QP #
##########################################################################
# -- compute the distances
chained_facets = itertools.chain(new_facets, adv_constraints)
parallel_args = [(_, x) for _ in chained_facets]
dist_selector = {'l_2': Face.l2_dist_gurobi,
'l_inf': Face.linf_dist_gurobi}
lp_dist = dist_selector[lp_norm]
dist_fxn = lambda el: (el[0], lp_dist(*el))
outputs = [dist_fxn(_) for _ in parallel_args]
updated_domain = False
# -- collect the necessary facets to add to the queue
current_upper_bound = domain.current_upper_bound(lp_norm)
pq_elements_to_push = []
fail_count = 0
for facet, (dist, proj) in outputs:
try:
new_facet_conf = utils.flatten_config(facet.get_new_configs())
except:
new_facet_conf = None
if dist is None:
rejects['optimization infeasible'] += 1
if facet.facet_type == 'decision':
continue
# Handle infeasible case
missed_polytopes[new_facet_conf] = True
fail_count += 1
continue
if polytope_graph is not None:
edge = (utils.flatten_config(new_poly.config),
new_facet_conf)
polytope_graph[edge] = dist
if current_upper_bound is not None and dist > current_upper_bound:
#Handle the too-far-away facets
continue
rejects['optimization successful'] += 1
new_pq_element = PQElement()
for k, v in {'priority': dist,
'config': new_poly.config,
'tight_constraint': facet.tight_list[0],
'projection': proj,
'facet_type': facet.facet_type}.items():
setattr(new_pq_element, k, v)
pq_elements_to_push.append(new_pq_element)
if facet.facet_type == 'decision':
if problem_type == 'decision_problem':
# If in decision_problem style, just return
heapq.heappush(pq_decision_bounds, new_pq_element)
return True
updated_domain = True
# If also a decision bound, update the upper_bound
domain.set_upper_bound(dist, lp_norm)
# update l_inf bound in l_2 case as well
if lp_norm == 'l_2':
new_linf = abs(proj - x).max()
domain.set_upper_bound(new_linf, 'l_inf')
current_upper_bound = domain.current_upper_bound(lp_norm)
##########################################################################
# Step 3: Process all the updates and return #
##########################################################################
# -- push objects to priority queue
for pq_element in pq_elements_to_push:
heapq.heappush(pqueue, pq_element)
# -- call the update domain to try and compute tighter stable neurons
if updated_domain:
update_domain(domain, piecewise_net, x, heuristic_dict, potential,
lp_norm, None, upper_bound_times, start_time,
max_runtime, verbose=verbose)
return True
def update_domain(new_domain, piecewise_net, x, heuristic_dict, potential,
lp_norm, bound_fxn, upper_bound_times, start_time,
max_runtime, verbose=True):
linf_radius = new_domain.linf_radius or 1e10
l2_radius = new_domain.l2_radius or 1e10
verbose_print('-' * 20, "DOMAIN UPDATE | L_inf %.06f | L_2 %.06f" %
(linf_radius, l2_radius), verbose=verbose)
# Record the update in the upper_bound_times log
attr = {'l_inf': 'linf_radius',
'l_2': 'l2_radius'}[lp_norm]
upper_bound_times.append((time.time() - start_time,
getattr(new_domain, attr)))
# Update the current domain and change the heuristic dict
heuristic_dict['domain'] = new_domain
# And use the domain to compute the new dead constraints
new_bounds = piecewise_net.compute_interval_bounds(new_domain)
# new_bounds = bound_fxn(piecewise_net, domain)
dead_constraints = utils.ranges_to_dead_neurons(new_bounds)
on_off_neurons = utils.ranges_to_on_off_neurons(new_bounds)
heuristic_dict['dead_constraints'] = dead_constraints
# Use the domain to update the lipschitz bound on everything
# (this can only shrink as we shrink the domain)
if potential == 'lipschitz':
# Just assume binary classifiers for now
dual_lp = utils.dual_norm(lp_norm)
c_vector, lip_value = piecewise_net.fast_lip_all_vals(x, dual_lp,
on_off_neurons)
heuristic_dict['fast_lip'] = lip_value
```
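GeoCert's main loop pops facets from a heap that is ordered only by PQElement.__lt__ on .priority, so lower-bound distances come off in increasing order and the first decision facet popped certifies the minimum distance. A stand-in sketch of that ordering (FakeFacet is illustrative, not the repository's class):

```python
import heapq

class FakeFacet:
    # heapq only needs __lt__, exactly like PQElement in geocert.py
    def __init__(self, priority, facet_type):
        self.priority, self.facet_type = priority, facet_type
    def __lt__(self, other):
        return self.priority < other.priority

pq = []
for dist, kind in [(0.7, "facet"), (0.3, "facet"), (0.5, "decision")]:
    heapq.heappush(pq, FakeFacet(dist, kind))
order = [(f.priority, f.facet_type) for f in (heapq.heappop(pq) for _ in range(3))]
assert order == [(0.3, "facet"), (0.5, "decision"), (0.7, "facet")]
```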
#### File: jonasnm/geometric-certificates/mip_verify.py
```python
import numpy as np
import gurobipy as gb
import utilities as utils
import full_lp
from domains import Domain
import time
def looper(start, max_val):
current = start
while current <= max_val:
yield current
current *= 2
##############################################################################
# #
# MAIN SOLVER METHOD #
# #
##############################################################################
def mip_min_dist(network, x, lp_norm='l_inf', box_bounds=None, radius_list=None,
timeout=None):
if radius_list is None:
# Do binary-increasing sequence of radii if none specified
radius_list = looper(*{'l_inf': (0.05, 1.0),
'l_2': (0.5, 10.0)}[lp_norm])
start_time = time.time()
# First compute the pre-relus to be reused throughout:
# Then loop through all the radii until we either solve or timeout
for radius in radius_list:
print('-' * 20, 'STARTING RADIUS ', radius, '-' * 20)
mip_out = mip_solve(network, x, radius=radius, problem_type='min_dist',
lp_norm=lp_norm, box_bounds=box_bounds,
force_radius=True, timeout=timeout)
if mip_out.Status == 3:
print("-" * 20, "Infeasible on radius: ", radius, "-" * 20)
print('\n' * 3)
if mip_out.Status == 9: #timeout code
# If TimeLimit reached here then we probably can't do any better
return mip_out
if mip_out.Status == 2:
return mip_out
def mip_decision_problem(network, x, radius, lp_norm='l_inf', box_bounds=None):
pass
def retrieve_adv_from_mip(model):
return np.array([_.X for _ in model.getVars()
if _.VarName.startswith('x[')])
def mip_solve(network, x, radius=None, problem_type='min_dist',
lp_norm='l_inf', box_bounds=None, force_radius=False,
bound_fxn='full_lp', timeout=None):
""" Computes the decision problem for MIP :
- first computes the LP for each neuron to get pre-relu actviations
- then loops through all logits to compute decisions
"""
dom = Domain(x.numel(), x)
if box_bounds is not None:
dom.set_original_hyperbox_bound(*box_bounds)
else:
dom.set_original_hyperbox_bound(0.0, 1.0)
assert problem_type in ['decision_problem', 'min_dist']
if (problem_type == 'decision_problem') or (force_radius is True):
assert radius is not None
dom.set_upper_bound(radius, lp_norm)
# Build domain and shrink if only doing a decision problem
start = time.time()
if bound_fxn == 'full_lp':
pre_relu_bounds = full_lp.compute_full_lp_bounds(network, dom,
compute_logit_bounds=True)
print("COMPUTED FULL-LP BOUNDS IN %.03f seconds" % (time.time() - start))
if bound_fxn == 'ia':
# assert bound_fxn == 'ia'
pre_relu_bounds = network.compute_interval_bounds(dom,
compute_logit_bounds=True)
print("COMPUTED IA BOUNDS IN %.03f seconds" % (time.time() - start))
true_label = network(x).max(1)[1].item()
num_logits = network(x).numel()
solved_models = []
model = build_mip_model(network, x, dom, pre_relu_bounds,
true_label, problem_type, radius, lp_norm,
timeout=timeout)
model.optimize()
if model.Status == 3:
print("INFEASIBLE!")
return model
def build_mip_model(network, x, domain, pre_relu_bounds, true_label,
problem_type, radius, lp_norm, timeout=None):
"""
ARGS:
network : plnn.PLNN - network we wish to compute bounds on
x : Tensor or numpy of the point we want to verify
domain : domain.Domain - domain restricting the input domain
pre_relu_bounds : list of np arrays of shape [#relu x 2] -
holds the upper/lower bounds for each pre_relu
(and the logits)
true_label : int - what the model predicts for x
problem_type: 'min_dist' or 'decision_problem'
radius: float - l_inf ball that we are 'deciding' on for
'decision_problem' variant
"""
##########################################################################
# Step 1: setup things we'll need throughout #
##########################################################################
num_pre_relu_layers = len(network.fcs) - 1
# - build model, add variables and box constraints
model = gb.Model()
# model.setParam('OutputFlag', False) # -- uncomment to suppress gurobi logs
if timeout is not None:
model.setParam('TimeLimit', timeout)
model.setParam('Threads', 1) # Fair comparisions -- we only use 1 thread
x_np = utils.as_numpy(x).reshape(-1)
assert domain.box_low is not None
assert domain.box_high is not None
    box_bounds = list(zip(domain.box_low, domain.box_high))  # list: iterated twice below
x_namer = build_var_namer('x')
x_vars = [model.addVar(lb=low, ub=high, name= x_namer(i))
for i, (low, high) in enumerate(box_bounds)]
for (low, high), xvar in zip(box_bounds, x_vars):
model.addConstr(xvar >= low)
model.addConstr(xvar <= high)
var_dict = {'x': x_vars}
if lp_norm == 'l_2':
diff_namer = build_var_namer('diff')
diff_vars = []
for i in range(len(x_vars)):
diff_var = model.addVar(lb=-gb.GRB.INFINITY, ub=gb.GRB.INFINITY,
name=diff_namer(i))
diff_vars.append(diff_var)
model.addConstr(diff_var == x_vars[i] - x_np[i])
l2_norm = gb.quicksum(diff_vars[i] * diff_vars[i]
for i in range(len(diff_vars)))
model.addConstr(l2_norm <= radius ** 2)
# if l_2, and the radius is not None, add those constraints as well
model.update()
##########################################################################
# Step 2: Now add layers iteratively #
##########################################################################
# all layers except the last final layer
for i, fc_layer in enumerate(network.fcs[:-1]):
# add linear layer
if i == 0:
input_name = 'x'
else:
input_name = 'fc_%s_post' % i
pre_relu_name = 'fc_%s_pre' % (i + 1)
post_relu_name = 'fc_%s_post' % (i + 1)
relu_name = 'relu_%s' % (i + 1)
add_linear_layer_mip(network, i, model, var_dict, input_name,
pre_relu_name)
add_relu_layer_mip(network, i, model, var_dict, pre_relu_name,
pre_relu_bounds[i], post_relu_name, relu_name)
# add the final fully connected layer
output_var_name = 'logits'
add_linear_layer_mip(network, len(network.fcs) - 1, model, var_dict,
post_relu_name, output_var_name)
##########################################################################
# Step 3: Add the 'adversarial' constraint and objective #
##########################################################################
add_adversarial_constraint(model, var_dict[output_var_name], true_label,
pre_relu_bounds[-1])
if lp_norm == 'l_inf':
add_l_inf_obj(model, x_np, var_dict['x'], problem_type)
else:
add_l_2_obj(model, x_np, var_dict['x'], problem_type)
model.update()
return model
######################################################################
# #
# HELPER FUNCTIONS #
# (builds layers, objective, adversarial constraint) #
######################################################################
def add_linear_layer_mip(network, layer_no, model, var_dict, var_input_key,
var_output_key):
""" Method to add the variables and constraints to handle a linear layer
"""
fc_layer = network.fcs[layer_no]
fc_weight = utils.as_numpy(fc_layer.weight)
if fc_layer.bias is not None:
fc_bias = utils.as_numpy(fc_layer.bias)
else:
fc_bias = np.zeros(fc_layer.out_features)
input_vars = var_dict[var_input_key]
relu = lambda el: max([el, 0.0])
# add the variables and constraints for the pre-relu layer
var_namer = build_var_namer(var_output_key)
pre_relu_vars = [model.addVar(lb=-gb.GRB.INFINITY, ub=gb.GRB.INFINITY,
name=var_namer(i))
for i in range(fc_layer.out_features)]
var_dict[var_output_key] = pre_relu_vars
model.addConstrs((pre_relu_vars[i] ==\
gb.LinExpr(fc_weight[i], input_vars) + fc_bias[i])
for i in range(fc_layer.out_features))
model.update()
return
def add_relu_layer_mip(network, layer_no, model, var_dict, var_input_key,
input_bounds, post_relu_var_names,
relu_config_var_names):
""" Method to add the variables and constraints to handle a ReLU layer
"""
post_relu_vars = []
relu_vars = []
post_relu_namer = build_var_namer(post_relu_var_names)
relu_namer = build_var_namer(relu_config_var_names)
    # input bounds are the pre-relu bounds
for i, (low, high) in enumerate(input_bounds):
post_relu_name = post_relu_namer(i)
relu_name = relu_namer(i)
if high <= 0:
# If always off, don't add an integral constraint
post_relu_vars.append(model.addVar(lb=0.0, ub=0.0,
name=post_relu_name))
else:
pre_relu = var_dict[var_input_key][i]
post_relu_vars.append(model.addVar(lb=low, ub=high,
name=post_relu_name))
post_relu = post_relu_vars[-1]
if low >= 0:
# If always on, enforce equality
model.addConstr(post_relu == pre_relu)
else:
# If unstable, add tightest possible relu constraints
relu_var = model.addVar(lb=0.0, ub=1.0, vtype=gb.GRB.BINARY,
name=relu_name)
relu_vars.append(relu_var)
# y <= x - l(1 - a)
model.addConstr(post_relu <= pre_relu - low * (1 - relu_var))
# y >= x
model.addConstr(post_relu >= pre_relu)
# y <= u * a
model.addConstr(post_relu <= high * relu_var)
# y >= 0
model.addConstr(post_relu >= 0)
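                # Together with integrality of relu_var, the four constraints
                # above force post_relu == max(pre_relu, 0) for any pre_relu in
                # [low, high] (the standard big-M encoding of an unstable ReLU)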
model.update()
var_dict[post_relu_var_names] = post_relu_vars
var_dict[relu_config_var_names] = relu_vars
return
def add_adversarial_constraint(model, logit_vars, true_label, logit_bounds):
""" Adds the adversarial constraint to the model
    Three cases here:
      1) binary classification: only the other logit can be maximal, so a
         single pairwise constraint (true_logit <= other_logit) suffices
      2) after pruning with the logit bounds only one target label remains:
         again a single pairwise constraint
      3) several target labels remain: introduce a max variable over them
         (with binary indicators) and constrain the true logit against it
"""
if len(logit_vars) == 2:
model.addConstr(logit_vars[true_label] <= logit_vars[1 - true_label])
# First collect all potential max labels that aren't the true label
highest_low = max(logit_bounds[:, 0])
target_labels = []
for i in range(len(logit_vars)):
this_high = logit_bounds[i][1]
if (i == true_label) or (this_high <= highest_low):
continue
target_labels.append(i)
print("ADVERSARIAL CONSTRAINTS ADDED ", len(target_labels))
if len(target_labels) == 1:
# Trivial case
model.addConstr(logit_vars[true_label] <= logit_vars[target_labels[0]])
return
##########################################################################
# If multiple target labels, we have to add a max layer #
##########################################################################
# Generate a max logit variable (which is greater than all target logits)
max_logit_var = model.addVar(lb=-gb.GRB.INFINITY, ub=gb.GRB.INFINITY,
name='max_logit')
for i in target_labels:
model.addConstr(max_logit_var >= logit_vars[i])
    # And max logit integer variables (where only 1 can be on at a time)
max_logit_ints = {i: model.addVar(lb=0.0, ub=1.0, vtype=gb.GRB.BINARY,
name='is_max_logit[%s]' % i)
for i in target_labels}
model.addConstr(gb.quicksum(list(max_logit_ints.values())) == 1)
# Add upper bound constraints on max's
for i in target_labels:
high_max_not_i = max(_[1] for j, _ in enumerate(logit_bounds)
if (j != i) and (j in target_labels))
rhs = (1 - max_logit_ints[i]) * (high_max_not_i - logit_bounds[i][0])
        model.addConstr(max_logit_var <= logit_vars[i] + rhs)
# Finally add in the adversarial constraint
model.addConstr(logit_vars[true_label] <= max_logit_var)
model.update()
def add_l_inf_obj(model, x_np, x_vars, problem_type):
""" Adds objective to minimize the l_inf distance from the original input x
ARGS:
x_np: numpy vector for the original fixed point we compute robustness for
x_vars : list of variables representing input to the MIP
"""
if problem_type == 'decision_problem':
model.setObjective(0, gb.GRB.MINIMIZE)
elif problem_type == 'min_dist':
# min t such that |x_var-x_np|_i <= t
t_var = model.addVar(lb=0, ub=gb.GRB.INFINITY, name='t')
for coord, val in enumerate(x_np):
model.addConstr(t_var >= x_vars[coord] - val)
model.addConstr(t_var >= val - x_vars[coord])
model.setObjective(t_var, gb.GRB.MINIMIZE)
model.update()
def add_l_2_obj(model, x_np, x_vars, problem_type):
""" Adds the constraint for the l2 norm case """
if problem_type == 'decision_problem':
model.setObjective(0, gb.GRB.MINIMIZE)
elif problem_type == 'min_dist':
t_var = model.addVar(lb=0, ub=gb.GRB.INFINITY, name='t')
l2_norm = gb.quicksum((x_vars[i] - x_np[i]) * (x_vars[i] - x_np[i])
for i in range(len(x_vars)))
model.addConstr(l2_norm <= t_var)
model.setObjective(t_var, gb.GRB.MINIMIZE)
model.update()
###############################################################################
# #
# SILLY UTILITIES #
# #
###############################################################################
def build_var_namer(pfx):
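    # e.g. build_var_namer('x')(3) -> 'x[3]'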
return lambda i: '%s[%s]' % (pfx, i)
```
#### File: geometric-certificates/mister_ed/adversarial_evaluation.py
```python
from __future__ import print_function
import torch
from torch.autograd import Variable
import torch.nn as nn
import utils.pytorch_utils as utils
import utils.image_utils as img_utils
import custom_lpips.custom_dist_model as dm
import os
import config
import glob
import numpy as np
from skimage.measure import compare_ssim as ssim
import adversarial_attacks as aa
import math
import functools
import bundled_attacks as ba
###########################################################################
# #
# EVALUATION RESULT OBJECT #
# #
###########################################################################
class EvaluationResult(object):
""" Stores results of adversarial evaluations, will be used in the
output of AdversarialEvaluation
"""
def __init__(self, attack_params, to_eval=None,
manual_gpu=None):
""" to_eval is a dict of {str : toEval methods}.
"""
self.attack_params = attack_params
self.normalizer = attack_params.adv_attack_obj.normalizer
if manual_gpu is not None:
self.use_gpu = manual_gpu
else:
self.use_gpu = utils.use_gpu()
# First map shorthand strings to methods
shorthand_evals = {'top1': self.top1_accuracy,
'avg_successful_lpips': self.avg_successful_lpips,
'avg_successful_ssim': self.avg_successful_ssim,
'stash_perturbations': self.stash_perturbations,
'avg_loss_value': self.avg_loss_value}
if to_eval is None:
to_eval = {'top1': 'top1'}
to_eval = dict(to_eval.items())
for key, val in list(to_eval.items()):
if val in shorthand_evals:
to_eval[key] = shorthand_evals[val]
else:
assert callable(val)
to_eval[key] = functools.partial(val, self)
self.to_eval = to_eval
self.results = {k: None for k in self.to_eval}
self.params = {k: None for k in self.to_eval}
def set_gpu(self, use_gpu):
self.attack_params.set_gpu(use_gpu)
def eval(self, examples, labels):
attack_out = self.attack_params.attack(examples, labels)
for k, v in self.to_eval.items():
v(k, attack_out)
def _get_successful_attacks(self, attack_out):
''' Gets the (successful, corresponding-original) attacks '''
perturbation = attack_out[4]
pre_adv_labels = attack_out[1]
classifier_net = self.attack_params.adv_attack_obj.classifier_net
normalizer = self.attack_params.adv_attack_obj.normalizer
successful = perturbation.collect_successful(classifier_net, normalizer,
success_def='misclassify',
labels=pre_adv_labels)
return successful['adversarials'], successful['originals']
def top1_accuracy(self, eval_label, attack_out):
######################################################################
# First set up evaluation result if doesn't exist: #
######################################################################
if self.results[eval_label] is None:
self.results[eval_label] = utils.AverageMeter()
result = self.results[eval_label]
######################################################################
# Computes the top 1 accuracy and updates the averageMeter #
######################################################################
attack_examples = attack_out[0]
pre_adv_labels = attack_out[1]
num_examples = float(attack_examples.shape[0])
attack_accuracy_int = self.attack_params.eval_attack_only(
attack_examples,
pre_adv_labels, topk=1)
result.update(attack_accuracy_int / num_examples, n=int(num_examples))
self.results[eval_label] = result
def avg_successful_lpips(self, eval_label, attack_out):
######################################################################
# First set up evaluation result if doesn't exist: #
######################################################################
if self.results[eval_label] is None:
self.results[eval_label] = utils.AverageMeter()
self.dist_model = dm.DistModel(net='alex', manual_gpu=self.use_gpu)
result = self.results[eval_label]
if self.params[eval_label] is None:
dist_model = dm.DistModel(net='alex', manual_gpu=self.use_gpu)
self.params[eval_label] = {'dist_model': dist_model}
dist_model = self.params[eval_label]['dist_model']
######################################################################
# Compute which attacks were successful #
######################################################################
successful_pert, successful_orig = self._get_successful_attacks(
attack_out)
if successful_pert is None or successful_pert.numel() == 0:
return
successful_pert = Variable(successful_pert)
successful_orig = Variable(successful_orig)
num_successful = successful_pert.shape[0]
xform = lambda im: im * 2.0 - 1.0
lpips_dist = self.dist_model.forward_var(xform(successful_pert),
xform(successful_orig))
avg_lpips_dist = float(torch.mean(lpips_dist))
result.update(avg_lpips_dist, n=num_successful)
def avg_successful_ssim(self, eval_label, attack_out):
# We actually compute (1-ssim) to match better with notion of a 'metric'
######################################################################
# First set up evaluation result if doesn't exist: #
######################################################################
if self.results[eval_label] is None:
self.results[eval_label] = utils.AverageMeter()
result = self.results[eval_label]
######################################################################
# Compute which attacks were successful #
######################################################################
successful_pert, successful_orig = self._get_successful_attacks(
attack_out)
if successful_pert is None or successful_pert.numel() == 0:
return
successful_pert = Variable(successful_pert)
successful_orig = Variable(successful_orig)
count = 0
runsum = 0
for og, adv in zip(successful_orig, successful_pert):
count += 1
runsum += ssim(og.transpose(0, 2).cpu().numpy(),
adv.transpose(0, 2).cpu().numpy(), multichannel=True)
avg_minus_ssim = 1 - (runsum / float(count))
result.update(avg_minus_ssim, n=count)
def avg_loss_value(self, eval_label, attack_out):
""" Computes and keeps track of the average attack loss
"""
######################################################################
# First set up evaluation result if it doesn't exist #
######################################################################
if self.results[eval_label] is None:
self.results[eval_label] = utils.AverageMeter()
result = self.results[eval_label]
######################################################################
# Next collect the loss function and compute loss #
######################################################################
attack_obj = self.attack_params.adv_attack_obj
# Structure of loss objects varies based on which attack class used
if isinstance(attack_obj, (aa.FGSM, aa.PGD)):
attack_loss = attack_obj.loss_fxn
elif isinstance(attack_obj, aa.CarliniWagner):
attack_loss = attack_obj._construct_loss_fxn(1.0, 0.0)
elif isinstance(attack_obj, ba.AttackBundle):
if attack_obj.goal == 'max_loss':
attack_loss = attack_obj.goal_params
else:
single_param = next(iter(attack_obj.bundled_attacks.values()))
attack_loss = single_param.adv_attack_obj.loss_fxn
attack_loss.setup_attack_batch(attack_out[0])
loss_val = attack_loss.forward(attack_out[0], attack_out[1],
perturbation=attack_out[4])
loss_val_sum = float(torch.sum(loss_val))
count = attack_out[0].shape[0]
result.update(loss_val_sum, n=count)
def stash_perturbations(self, eval_label, attack_out):
""" This will store the perturbations.
(TODO: make these tensors and store on CPU)
"""
######################################################################
# First set up evaluation result if it doesn't exist #
######################################################################
if self.results[eval_label] is None:
self.results[eval_label] = []
result = self.results[eval_label]
perturbation_obj = attack_out[4]
result.append(perturbation_obj)
def switch_model(self, new_classifier, new_normalizer=None):
new_params = self.attack_params(new_classifier,
new_normalizer=new_normalizer)
return self.__class__(new_params, to_eval=self.to_eval,
manual_gpu=self.use_gpu)
class IdentityEvaluation(EvaluationResult):
""" Subclass of evaluation result that just computes top1 accuracy for the
ground truths (attack perturbation is the identity)
Constructor) ARGS:
classifier_net : nn.module - standard argument, is just the nn to eval
normalizer : DifferentiableNormalize - standard normalizing argument
manual_gpu : boolean - if not None, is the boolean we specify to use
the gpu or not
loss_fxn : None or nn.module - if not None is a loss function that takes
in arguments of the shape (NxC), (N), where the second arg has
integer values in 0 <= val <= C - 1. See nn.CrossEntropyLoss()
as an example signature
"""
def __init__(self, classifier_net, normalizer, manual_gpu=None,
loss_fxn=None):
self.classifier_net = classifier_net
self.normalizer = normalizer
if manual_gpu is not None:
self.use_gpu = manual_gpu
else:
self.use_gpu = utils.use_gpu()
self.loss_fxn = loss_fxn or nn.CrossEntropyLoss()
self.results = {'top1': utils.AverageMeter(),
'avg_loss_value': utils.AverageMeter()}
def set_gpu(self, use_gpu):
pass
def eval(self, examples, labels):
assert sorted(list(self.results.keys())) == ['avg_loss_value', 'top1']
ground_output = self.classifier_net(self.normalizer(Variable(examples)))
minibatch = float(examples.shape[0])
# Compute accuracy
ground_avg = self.results['top1']
minibatch_accuracy_int = utils.accuracy_int(ground_output,
Variable(labels), topk=1)
ground_avg.update(minibatch_accuracy_int / minibatch,
n=int(minibatch))
# Compute loss
ground_avg_loss = self.results['avg_loss_value']
minibatch_loss = float(self.loss_fxn(ground_output, labels))
ground_avg_loss.update(minibatch_loss, n=(int(minibatch)))
def switch_model(self, new_classifier, new_normalizer=None):
if new_normalizer is None:
new_normalizer = self.normalizer
return self.__class__(new_classifier, new_normalizer,
manual_gpu=self.use_gpu,
loss_fxn=self.loss_fxn)
############################################################################
# #
# EVALUATION OBJECT #
# #
############################################################################
class AdversarialEvaluation(object):
""" Wrapper for evaluation of NN's against adversarial examples
"""
def __init__(self, classifier_net, normalizer, manual_gpu=None):
self.classifier_net = classifier_net
self.normalizer = normalizer
if manual_gpu is not None:
self.use_gpu = manual_gpu
else:
self.use_gpu = utils.use_gpu()
def switch_model(self, new_classifier, new_normalizer=None):
if new_normalizer is None:
new_normalizer = self.normalizer
return self.__class__(new_classifier, new_normalizer,
manual_gpu=self.use_gpu)
def evaluate_ensemble(self, data_loader, attack_ensemble,
skip_ground=False, verbose=True,
num_minibatches=None, filter_successful=False,
callback=None):
""" Runs evaluation against attacks generated by attack ensemble over
the entire training set
ARGS:
data_loader : torch.utils.data.DataLoader - object that loads the
evaluation data
attack_ensemble : dict {string -> EvaluationResult}
is a dict of attacks that we want to make.
None of the strings can be 'ground'
skip_ground : bool - if True we don't evaluate the no-attack case
verbose : bool - if True, we print things
num_minibatches: int - if not None, we only validate on a fixed
number of minibatches
filter_successful: boolean - if True we only evaluate on examples
that the classifier gets correct to start with
callback : if not None is a function that takes the attack_ensemble
and minibatch number as an argument and runs after each
minibatch.
RETURNS:
a dict same keys as attack_ensemble, as well as the key 'ground'.
The values are utils.AverageMeter objects
"""
######################################################################
# Setup input validations #
######################################################################
self.classifier_net.eval()
assert isinstance(data_loader, torch.utils.data.DataLoader)
if attack_ensemble is None:
attack_ensemble = {}
if not skip_ground:
assert 'ground' not in attack_ensemble
# Build ground result
ground_result = IdentityEvaluation(self.classifier_net,
self.normalizer,
manual_gpu=self.use_gpu)
attack_ensemble['ground'] = ground_result
# Do GPU checks
utils.cuda_assert(self.use_gpu)
if self.use_gpu:
self.classifier_net.cuda()
for eval_result in attack_ensemble.values():
eval_result.set_gpu(self.use_gpu)
######################################################################
# Loop through validation set and attack efficacy #
######################################################################
for i, data in enumerate(data_loader, 0):
if num_minibatches is not None and i >= num_minibatches:
break
if verbose:
print("Starting minibatch %s..." % i)
inputs, labels = utils.cudafy(self.use_gpu, data)
if filter_successful:
inputs, labels = utils.filter_examples(self.classifier_net,
inputs, labels,
self.normalizer)
for k, result in attack_ensemble.items():
if verbose:
print("\t (mb: %s) evaluating %s..." % (i, k))
result.eval(inputs, labels)
if callback is not None:
callback(attack_ensemble, i)
return attack_ensemble
def full_attack(self, data_loader, attack_parameters,
output_filename, num_minibatches=None,
continue_attack=True, checkpoint_minibatch=10,
verbose=True, save_xform=img_utils.nhwc255_xform):
""" Builds an attack on the data and outputs the resulting attacked
images into a .numpy file
ARGS:
data_loader : torch.utils.data.DataLoader - object that loads the
evaluation data.
NOTE: for Madry challenge this shouldn't be shuffled
attack_parameters : AdversarialAttackParameters object - wrapper to
contain the attack
output_filename : string - name of the file we want to output.
should just be the base name (extension is .npy)
num_minibatches : int - if not None, we only build attacks for this
many minibatches of data
continue_attack : bool - if True, we do the following :
1) check if output_filename exists. If it doesn't
exist, proceed to make full attack as usual.
2) if output_filename exists, figure out how many
minibatches it went through and skip to the
next minibatch in the data loader
This is kinda like a checkpointing system for attacks
checkpoint_minibatch: int - how many minibatches until we checkpoint
verbose: bool - if True, we print out which minibatch we're in out
of total number of minibatches
save_xform: fxn, np.ndarray -> np.ndarray - function that
transforms our adv_example.data.numpy() to the form that
                    we want to store it in the .npy output file
RETURNS:
numpy array of attacked examples
"""
raise NotImplementedError("BROKEN!!!")
######################################################################
# Setup and assert things #
######################################################################
self.classifier_net.eval()
# Check if loader is shuffled. print warning if random
assert isinstance(data_loader, torch.utils.data.DataLoader)
if isinstance(data_loader.batch_sampler.sampler,
torch.utils.data.sampler.RandomSampler):
print("WARNING: data loader is shuffled!")
total_num_minibatches = int(math.ceil(len(data_loader.dataset) /
data_loader.batch_size))
minibatch_digits = len(str(total_num_minibatches))
# Do cuda stuff
utils.cuda_assert(self.use_gpu)
attack_parameters.set_gpu(self.use_gpu)
if self.use_gpu:
self.classifier_net.cuda()
# Check attack is attacking everything
assert attack_parameters.proportion_attacked == 1.0
# handle output_file + continue_attack stuff
assert os.path.basename(output_filename) == output_filename, \
"Provided output_filename was %s, should have been %s" % \
(output_filename, os.path.basename(output_filename))
output_file = os.path.join(config.OUTPUT_IMAGE_PATH,
output_filename + '.npy')
minibatch_attacks = [] # list of 4d numpy arrays
num_prev_minibatches = 0
if continue_attack and len(glob.glob(output_file)) != 0:
# load file and see how many minibatches we went through
saved_data = np.load(output_file)
saved_num_examples = saved_data.shape[0]
loader_batch_size = data_loader.batch_size
if saved_num_examples % loader_batch_size != 0:
print("WARNING: incomplete minibatch in previously saved attack")
minibatch_attacks.append(saved_data)
num_prev_minibatches = saved_num_examples / loader_batch_size
if verbose:
def printer(num):
print("Minibatch %%0%dd/%s" % (minibatch_digits,
total_num_minibatches) % num)
else:
printer = lambda num: None
######################################################################
# Start attacking and saving #
######################################################################
for minibatch_num, data in enumerate(data_loader):
# Handle skippy cases
if minibatch_num < num_prev_minibatches: # CAREFUL ABOUT OBOEs HERE
continue
if num_minibatches is not None and minibatch_num >= num_minibatches:
break
printer(minibatch_num)
# Load data and build minibatch of attacked images
inputs, labels = utils.cudafy(self.use_gpu, data)
adv_examples = attack_parameters.attack(inputs, labels)[0]
# Convert to numpy and append to our save buffer
adv_data = adv_examples.cpu().numpy()
minibatch_attacks.append(save_xform(adv_data))
# Perform checkpoint if necessary
if minibatch_num > 0 and minibatch_num % checkpoint_minibatch == 0:
minibatch_attacks = utils.checkpoint_incremental_array(
output_file, minibatch_attacks,
return_concat=True)
return utils.checkpoint_incremental_array(output_file,
minibatch_attacks,
return_concat=True)[0]
```
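A minimal usage sketch (not part of the original file): it assumes a trained `classifier_net`, a `DifferentiableNormalize` instance `normalizer`, a test `DataLoader` `test_loader`, and an `AdversarialAttackParameters` object `attack_params` wrapping some attack from `adversarial_attacks`; all of these names are placeholders.
```python
# Hypothetical setup -- classifier_net, normalizer, test_loader and
# attack_params are assumed to exist and are not defined here.
from adversarial_evaluation import AdversarialEvaluation, EvaluationResult

evaluator = AdversarialEvaluation(classifier_net, normalizer)
pgd_result = EvaluationResult(attack_params,
                              to_eval={'top1': 'top1',
                                       'avg_successful_ssim': 'avg_successful_ssim'})
ensemble = evaluator.evaluate_ensemble(test_loader, {'pgd': pgd_result},
                                       num_minibatches=10, verbose=True)
top1 = ensemble['pgd'].results['top1']          # a utils.AverageMeter
print('robust top-1 (running avg):', top1.avg)  # assumes the usual AverageMeter interface
```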
#### File: mister_ed/custom_lpips/custom_dist_model.py
```python
import torch
import torch.nn as nn
import torch.nn.init as init
import utils.pytorch_utils as utils
from collections import namedtuple
from torchvision import models
from custom_lpips.base_model import BaseModel
import os
###############################################################################
# #
# NN Architecture #
# #
###############################################################################
class alexnet(torch.nn.Module):
def __init__(self, requires_grad=False, pretrained=True):
super(alexnet, self).__init__()
alexnet_pretrained_features = models.alexnet(pretrained=pretrained).features
self.slice1 = torch.nn.Sequential()
self.slice2 = torch.nn.Sequential()
self.slice3 = torch.nn.Sequential()
self.slice4 = torch.nn.Sequential()
self.slice5 = torch.nn.Sequential()
self.N_slices = 5
for x in range(2):
self.slice1.add_module(str(x), alexnet_pretrained_features[x])
for x in range(2, 5):
self.slice2.add_module(str(x), alexnet_pretrained_features[x])
for x in range(5, 8):
self.slice3.add_module(str(x), alexnet_pretrained_features[x])
for x in range(8, 10):
self.slice4.add_module(str(x), alexnet_pretrained_features[x])
for x in range(10, 12):
self.slice5.add_module(str(x), alexnet_pretrained_features[x])
if not requires_grad:
for param in self.parameters():
param.requires_grad = False
def forward(self, X):
h = self.slice1(X)
h_relu1 = h
h = self.slice2(h)
h_relu2 = h
h = self.slice3(h)
h_relu3 = h
h = self.slice4(h)
h_relu4 = h
h = self.slice5(h)
h_relu5 = h
alexnet_outputs = namedtuple("AlexnetOutputs",
['relu1', 'relu2', 'relu3', 'relu4', 'relu5'])
out = alexnet_outputs(h_relu1, h_relu2, h_relu3, h_relu4, h_relu5)
return out
##############################################################################
# #
# NN Functional Code #
# #
##############################################################################
# Learned perceptual metric
class PNetLin(nn.Module):
def __init__(self, pnet_tune=False, use_dropout=False,
manual_gpu=None):
# HACKETY HACK -- MJ modified this file
super(PNetLin, self).__init__()
net_type = alexnet # ADD FREEDOM HERE LATER
self.pnet_tune = pnet_tune
self.chns = [64,192,384,256,256]
if self.pnet_tune:
self.net = net_type(requires_grad=self.pnet_tune)
else:
self.net = [net_type(requires_grad=self.pnet_tune),]
# define the layers
self.lin0 = NetLinLayer(self.chns[0],use_dropout=use_dropout)
self.lin1 = NetLinLayer(self.chns[1],use_dropout=use_dropout)
self.lin2 = NetLinLayer(self.chns[2],use_dropout=use_dropout)
self.lin3 = NetLinLayer(self.chns[3],use_dropout=use_dropout)
self.lin4 = NetLinLayer(self.chns[4],use_dropout=use_dropout)
self.lins = [self.lin0,self.lin1,self.lin2,self.lin3,self.lin4]
        # define transform to make mean 0, unit var
self.shift = torch.autograd.Variable(torch.Tensor([-.030, -.088, -.188]).view(1,3,1,1))
self.scale = torch.autograd.Variable(torch.Tensor([.458, .448, .450]).view(1,3,1,1))
# cuda all the things
if manual_gpu is not None:
self.use_gpu = manual_gpu
else:
self.use_gpu = utils.use_gpu()
if self.use_gpu:
if self.pnet_tune:
self.net.cuda()
else:
self.net[0].cuda()
self.shift = self.shift.cuda()
self.scale = self.scale.cuda()
self.lin0.cuda()
self.lin1.cuda()
self.lin2.cuda()
self.lin3.cuda()
self.lin4.cuda()
def forward(self, in0, in1):
# normalize
in0_sc = (in0 - self.shift.expand_as(in0))/self.scale.expand_as(in0)
in1_sc = (in1 - self.shift.expand_as(in0))/self.scale.expand_as(in0)
if self.pnet_tune:
outs0 = self.net.forward(in0_sc)
outs1 = self.net.forward(in1_sc)
else:
outs0 = self.net[0].forward(in0_sc)
outs1 = self.net[0].forward(in1_sc)
diffs = []
for kk in range(len(outs0)):
normed_0 = normalize_tensor(outs0[kk])
normed_1 = normalize_tensor(outs1[kk])
diffs.append((normed_0 - normed_1) ** 2)
val = 0
for i in range(len(self.lins)):
val = val + torch.mean(
torch.mean(self.lins[i].model(diffs[i]), dim=3),
dim=2)
return val.view(val.size()[0],val.size()[1],1,1)
class NetLinLayer(nn.Module):
''' A single linear layer which does a 1x1 conv '''
def __init__(self, chn_in, chn_out=1, use_dropout=False):
super(NetLinLayer, self).__init__()
layers = [nn.Dropout(),] if(use_dropout) else []
layers += [nn.Conv2d(chn_in, chn_out, 1, stride=1, padding=0, bias=False),]
self.model = nn.Sequential(*layers)
def normalize_tensor(in_feat,eps=1e-10):
root_sum_square = torch.sqrt(torch.sum(in_feat ** 2, dim=1))
og_size = in_feat.size()
norm_factor = root_sum_square.view(og_size[0], 1,
og_size[2], og_size[3]) + eps
return in_feat / norm_factor
###############################################################################
# #
# Distance model #
# #
###############################################################################
class DistModel(BaseModel):
def __init__(self, net='squeeze', manual_gpu=None):
super(DistModel, self).__init__(manual_gpu=manual_gpu)
if self.use_gpu:
self.map_location = None
else:
self.map_location = lambda storage, loc: storage
self.net = PNetLin(manual_gpu=manual_gpu, pnet_tune=False,
use_dropout=True)
weight_path = os.path.join(os.path.dirname(__file__), 'weights',
'%s.pth' % net)
self.net.load_state_dict(torch.load(weight_path,
map_location=self.map_location))
self.parameters = list(self.net.parameters())
self.net.eval()
def forward_var(self, input_0, input_1):
# input_0 and input_1 are both NxCxHxW VARIABLES!
return self.net.forward(input_0, input_1)
def zero_grad(self):
self.net.zero_grad()
```
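A small sketch of how `DistModel` is used elsewhere in this corpus (see `avg_successful_lpips` in `adversarial_evaluation.py` above): inputs in [0, 1] are rescaled to [-1, 1] before calling `forward_var`. The random tensors are placeholders and the `weights/alex.pth` file is assumed to be present.
```python
import torch
from custom_lpips.custom_dist_model import DistModel

dist_model = DistModel(net='alex', manual_gpu=False)  # loads weights/alex.pth
imgs_a = torch.rand(4, 3, 32, 32)                     # placeholder image batches in [0, 1]
imgs_b = torch.rand(4, 3, 32, 32)
xform = lambda im: im * 2.0 - 1.0                     # [0, 1] -> [-1, 1]
lpips = dist_model.forward_var(xform(imgs_a), xform(imgs_b))
print(lpips.view(-1))                                 # one perceptual distance per image pair
```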
#### File: mister_ed/utils/checkpoints.py
```python
import torch
import math
import os
import re
import glob
import config
import numpy as np
import utils.pytorch_utils as utils
import random
CHECKPOINT_DIR = config.MODEL_PATH
OUTPUT_IMAGE_DIR = config.OUTPUT_IMAGE_PATH
##############################################################################
# #
# CHECKPOINTING MODELS #
# #
##############################################################################
def clear_experiment(experiment_name, architecture):
""" Deletes all saved state dicts for an experiment/architecture pair """
for filename in params_to_filename(experiment_name, architecture):
full_path = os.path.join(*[CHECKPOINT_DIR, filename])
os.remove(full_path) if os.path.exists(full_path) else None
def list_saved_epochs(experiment_name, architecture):
""" Returns a list of int epochs we've checkpointed for this
experiment name and architecture
"""
safe_int_cast = lambda s: int(s) if s.isdigit() else s
extract_epoch = lambda f: safe_int_cast(f.split('.')[-2])
filename_list = params_to_filename(experiment_name, architecture)
return [extract_epoch(f) for f in filename_list]
def params_to_filename(experiment_name, architecture, epoch_val=None):
""" Outputs string name of file.
ARGS:
experiment_name : string - name of experiment we're saving
architecture : string - abbreviation for model architecture
epoch_val : int/(intLo, intHi)/None -
- if int we return this int exactly
- if (intLo, intHi) we return all existing filenames with
highest epoch in range (intLo, intHi), in sorted order
- if None, we return all existing filenames with params
in ascending epoch-sorted order
RETURNS:
filenames: string or (possibly empty) string[] of just the base name
of saved models
"""
if isinstance(epoch_val, int):
return '.'.join([experiment_name, architecture, '%06d' % epoch_val,
'path'])
elif epoch_val == 'best':
return '.'.join([experiment_name, architecture, epoch_val,
'path'])
glob_prefix = os.path.join(*[CHECKPOINT_DIR,
'%s.%s.*' % (experiment_name, architecture)])
    re_prefix = r'%s\.%s\.' % (experiment_name, architecture)
re_suffix = r'\.path'
valid_name = lambda f: bool(re.match(re_prefix + r'(\d{6}|best)' +
re_suffix, f))
safe_int_cast = lambda s: int(s) if s.isdigit() else s
select_epoch = lambda f: safe_int_cast(re.sub(re_prefix, '',
re.sub(re_suffix, '', f)))
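    # valid_epoch: 'best' always matches; numeric epochs must fall inside the
    # requested (intLo, intHi) range, and epoch_val=None means "match everything"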
valid_epoch = lambda e: ((e == 'best') or
(e >= (epoch_val or (0, 0))[0] and
e <= (epoch_val or (0, float('inf')))[1]))
filename_epoch_pairs = []
best_filename = []
for full_path in glob.glob(glob_prefix):
filename = os.path.basename(full_path)
if not valid_name(filename):
continue
epoch = select_epoch(filename)
if valid_epoch(epoch):
if epoch != 'best':
filename_epoch_pairs.append((filename, epoch))
else:
best_filename.append(filename)
return best_filename +\
[_[0] for _ in sorted(filename_epoch_pairs, key=lambda el: el[1])]
def save_state_dict(experiment_name, architecture, epoch_val, model,
k_highest=10):
""" Saves the state dict of a model with the given parameters.
ARGS:
experiment_name : string - name of experiment we're saving
architecture : string - abbreviation for model architecture
epoch_val : int - which epoch we're saving
model : model - object we're saving the state dict of
        k_highest : int - if not None, we make sure to not include more than
k state_dicts for (experiment_name, architecture) pair,
keeping the k-most recent if we overflow
RETURNS:
The model we saved
"""
# First resolve THIS filename
this_filename = params_to_filename(experiment_name, architecture, epoch_val)
# Next clear up memory if too many state dicts
current_filenames = [_ for _ in
params_to_filename(experiment_name, architecture)
if not _.endswith('.best.path')]
delete_els = []
if k_highest is not None:
num_to_delete = len(current_filenames) - k_highest + 1
if num_to_delete > 0:
delete_els = sorted(current_filenames)[:num_to_delete]
for delete_el in delete_els:
full_path = os.path.join(*[CHECKPOINT_DIR, delete_el])
os.remove(full_path) if os.path.exists(full_path) else None
# Finally save the state dict
torch.save(model.state_dict(), os.path.join(*[CHECKPOINT_DIR,
this_filename]))
return model
def load_state_dict_from_filename(filename, model):
""" Skips the whole parameter argument thing and just loads the whole
state dict from a filename.
ARGS:
filename : string - filename without directories
model : nn.Module - has 'load_state_dict' method
RETURNS:
the model loaded with the weights contained in the file
"""
assert len(glob.glob(os.path.join(*[CHECKPOINT_DIR, filename]))) == 1
# LOAD FILENAME
# If state_dict in keys, use that as the loader
right_dict = lambda d: d.get('state_dict', d)
model.load_state_dict(right_dict(torch.load(
os.path.join(*[CHECKPOINT_DIR, filename]))))
return model
def load_state_dict(experiment_name, architecture, epoch, model):
""" Loads a checkpoint that was previously saved
experiment_name : string - name of experiment we're saving
architecture : string - abbreviation for model architecture
epoch_val : int - which epoch we're loading
"""
filename = params_to_filename(experiment_name, architecture, epoch)
return load_state_dict_from_filename(filename, model)
###############################################################################
# #
# CHECKPOINTING DATA #
# #
###############################################################################
"""
This is a hacky fix to save batches of adversarial images along with their
labels.
"""
class CustomDataSaver(object):
# TODO: make this more pytorch compliant
def __init__(self, image_subdirectory):
self.image_subdirectory = image_subdirectory
# make this folder if it doesn't exist yet
def save_minibatch(self, examples, labels):
""" Assigns a random name to this minibatch and saves the examples and
labels in two separate files:
<random_name>.examples.npy and <random_name>.labels.npy
ARGS:
examples: Variable or Tensor (NxCxHxW) - examples to be saved
labels : Variable or Tensor (N) - labels matching the examples
"""
# First make both examples and labels into numpy arrays
examples = examples.cpu().numpy()
labels = labels.cpu().numpy()
# Make a name for the files
random_string = str(random.random())[2:] # DO THIS BETTER WHEN I HAVE INTERNET
# Save both files
example_file = '%s.examples.npy' % random_string
example_path = os.path.join(OUTPUT_IMAGE_DIR, self.image_subdirectory,
example_file)
np.save(example_path, examples)
label_file = '%s.labels.npy' % random_string
label_path = os.path.join(OUTPUT_IMAGE_DIR, self.image_subdirectory,
label_file)
np.save(label_path, labels)
class CustomDataLoader(object):
# TODO: make this more pytorch compliant
def __init__(self, image_subdirectory, batch_size=128, to_tensor=True,
use_gpu=False):
super(CustomDataLoader, self).__init__()
self.image_subdirectory = image_subdirectory
self.batch_size = batch_size
assert to_tensor >= use_gpu
self.to_tensor = to_tensor
self.use_gpu = use_gpu
def _prepare_data(self, examples, labels):
""" Takes in numpy examples and labels and tensor-ifies and cuda's them
if necessary
"""
if self.to_tensor:
examples = torch.Tensor(examples)
labels = torch.Tensor(labels)
return utils.cudafy(self.use_gpu, (examples, labels))
def _base_loader(self, prefix, which):
assert which in ['examples', 'labels']
filename = '%s.%s.npy' % (prefix, which)
full_path = os.path.join(OUTPUT_IMAGE_DIR, self.image_subdirectory,
filename)
return np.load(full_path)
def _example_loader(self, prefix):
""" Loads the numpy array of examples given the random 'prefix' """
return self._base_loader(prefix, 'examples')
def _label_loader(self, prefix):
""" Loads the numpy array of labels given the random 'prefix' """
return self._base_loader(prefix, 'labels')
def __iter__(self):
# First collect all the filenames:
glob_prefix = os.path.join(OUTPUT_IMAGE_DIR, self.image_subdirectory,
'*')
files = glob.glob(glob_prefix)
valid_random_names = set(os.path.basename(_).split('.')[0]
for _ in files)
# Now loop through filenames and yield out minibatches of correct size
running_examples, running_labels = [], []
running_size = 0
for random_name in valid_random_names:
# Load data from files and append to 'running' lists
loaded_examples = self._example_loader(random_name)
loaded_labels = self._label_loader(random_name)
running_examples.append(loaded_examples)
running_labels.append(loaded_labels)
running_size += loaded_examples.shape[0]
if running_size < self.batch_size:
# Load enough data to populate one minibatch, which might
# take multiple files
continue
# Concatenate all images together
merged_examples = np.concatenate(running_examples, axis=0)
merged_labels = np.concatenate(running_labels, axis=0)
# Make minibatches out of concatenated things,
for batch_no in range(running_size // self.batch_size):
index_lo = batch_no * self.batch_size
index_hi = index_lo + self.batch_size
example_batch = merged_examples[index_lo:index_hi]
label_batch = merged_labels[index_lo:index_hi]
yield self._prepare_data(example_batch, label_batch)
# Handle any remainder for remaining files
remainder_idx = (running_size // self.batch_size) * self.batch_size
running_examples = [merged_examples[remainder_idx:]]
running_labels = [merged_labels[remainder_idx:]]
running_size = running_size - remainder_idx
# If we're out of files, yield this last sub-minibatch of data
if running_size > 0:
merged_examples = np.concatenate(running_examples, axis=0)
merged_labels = np.concatenate(running_labels, axis=0)
yield self._prepare_data(merged_examples, merged_labels)
``` |
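An illustrative sketch of the checkpoint filename convention and the save/load round trip; `my_model` stands in for any `nn.Module` and the experiment/architecture strings are arbitrary.
```python
import utils.checkpoints as checkpoints

# Filenames follow <experiment>.<architecture>.<6-digit epoch>.path
print(checkpoints.params_to_filename('cifar10_adv', 'resnet32', 7))
# -> 'cifar10_adv.resnet32.000007.path'

# my_model is a placeholder nn.Module
checkpoints.save_state_dict('cifar10_adv', 'resnet32', 7, my_model, k_highest=10)
restored = checkpoints.load_state_dict('cifar10_adv', 'resnet32', 7, my_model)
```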
{
"source": "jonasnoll/s2t-sim",
"score": 2
} |
#### File: s2t-sim/modeling/autoencoder.py
```python
from modeling.models import BaseAutoencoder
from utils.log import Log
from utils.utils import create_directory, timer
import torch
import torch.nn as nn
from torchvision.utils import save_image
import numpy as np
import os
import time
class Autoencoder():
def __init__(self, visible_gpus, log_id, img_channels=1):
##### Logging #####
self.log = Log(log_id).log if log_id else None
os.environ["CUDA_VISIBLE_DEVICES"] = visible_gpus # '1,2,3'
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
assert img_channels in [1, 3], "Caution: img_channels should be either int 1 or 3"
self.img_channels = img_channels
# Model
self.model = None
self.metrics = {}
self.metrics['train_loss'] = []
self.outputs = []
# Hyper Parameters
self.num_epochs = 8 if img_channels == 1 else 120 # Train longer for more complex DomainNet data
self.batch_size = 64
self.learning_rate = 1e-3
self.shuffle_train_loader = True
        self.dataloader_workers = 1
        if self.log:
            self.log.info(
                f"AE Hyperparameters: num_epochs={self.num_epochs}, batch_size={self.batch_size}, learning_rate={self.learning_rate}, shuffle_train_loader={self.shuffle_train_loader}, dataloader_workers={self.dataloader_workers}, (img_channels={self.img_channels})")
def train_autoencoder(self, dataset):
"""Run the autoencoder training"""
data_loader = torch.utils.data.DataLoader(dataset=dataset, batch_size=self.batch_size, shuffle=self.shuffle_train_loader)
# Init base autoencoder
self.model = BaseAutoencoder(input_channels=self.img_channels) # 3
self.model.to(self.device)
if self.log:
self.log.info("# Start AE Training #")
tr_start = time.time()
criterion = nn.MSELoss()
optimizer = torch.optim.Adam(self.model.parameters(), lr=self.learning_rate, weight_decay=1e-5)
# Training Loop
for epoch in range(self.num_epochs):
for i, (img, _) in enumerate(data_loader):
img = img.to(self.device)
# Forward pass
recon = self.model(img)
loss = criterion(recon, img)
# Backward pass and optimize
optimizer.zero_grad()
loss.backward()
optimizer.step()
if self.log:
self.log.info(f'Epoch: {epoch+1}/{self.num_epochs}, Loss:{loss.item():.4f}')
self.metrics['train_loss'].append(loss.item())
self.outputs.append((epoch, img.cpu(), recon.cpu()))
tr_end = time.time()
if self.log:
self.log.info(f"# AE Training Successful - took {timer(tr_start, tr_end)}#")
def get_recon_losses(self, dataset, save_imgs=True):
"""Run reconstruction on dataset (source) and return the loss scores for all samples"""
if self.log:
self.log.info(f"Predicting Recon Losses ({len(dataset)})...")
# Use batch size 1 so loss is calculated for individual images
data_loader = torch.utils.data.DataLoader(dataset=dataset, batch_size=1, shuffle=False)
self.model.eval()
criterion = nn.MSELoss()
losses = []
imgs = []
recons = []
with torch.set_grad_enabled(False):
for i, (img, _) in enumerate(data_loader):
img = img.to(self.device)
recon = self.model(img)
loss = criterion(recon, img)
losses.append(loss.item())
                # Save 32 images for analysis
if i < 32:
imgs.append(img)
recons.append(recon)
if save_imgs:
path = './results/img'
create_directory(path)
            save_image(torch.cat(imgs), f'{path}/32_autoencoder_original.png')
            save_image(torch.cat(recons), f'{path}/32_autoencoder_recon.png')
return np.array(losses)
```
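A minimal usage sketch, assuming `target_ds` and `source_ds` are image datasets of the kind defined in `utils/datasets.py`: the autoencoder is fit on one domain, and the per-sample reconstruction losses on the other domain can then serve as similarity scores.
```python
from modeling.autoencoder import Autoencoder

# target_ds and source_ds are placeholder datasets
ae = Autoencoder(visible_gpus='0', log_id=None, img_channels=3)
ae.train_autoencoder(target_ds)                           # fit on one domain
losses = ae.get_recon_losses(source_ds, save_imgs=False)  # one MSE loss per sample
# Lower loss -> the sample reconstructs well, i.e. looks more like the training domain
```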
#### File: s2t-sim/sampler/feature_sampler.py
```python
from utils.utils import change_ds_transform, get_label_dist, get_random_idx, get_unique_counts, timer
from sampler.ranking import get_items_idx_of_min_segment, get_norm_subset_idx
from utils.log import Log
import torch
from torch.utils.data import Dataset, Subset
from modeling.feature_extractor import FeatureExtractor
import math
import time
from scipy import spatial
import copy
import numpy as np
import torchvision.transforms as transforms
class FeatureDistSampler():
"""Feature Sampler class to steer the application of learned feature space distance sampling"""
def __init__(self, src_dataset, trgt_dataset, dist='cos', visible_gpus='0', log_id=None):
##### Logging #####
self.log = Log(log_id).log if log_id else None
if self.log:
self.log.info("Initializing Dist Sampler...")
self.log_id = log_id
self.visible_gpus = visible_gpus
assert isinstance(src_dataset, Dataset) and isinstance(trgt_dataset, Dataset), "Datasets are not type torch.utils.data.Dataset"
self.src_ds = src_dataset
self.trgt_ds = trgt_dataset
self.dist_measure = dist # from 'cos' or 'euclid'
self.src_imgs_dists = self.get_src_feat_dists(self.src_ds, self.trgt_ds) # List of Lists
self.src_imgs_dist_avgs = self.get_dist_avgs(self.src_imgs_dists)
self.src_imgs_dist_mins = self.get_dist_minima(self.src_imgs_dists)
self.src_imgs_dist_kmin_avgs = self.get_dist_kmin_avgs(self.src_imgs_dists)
if self.log:
self.log.info("Finished initiating Dist Sampler.")
def get_features(self, dataset):
"""Extract features from images with feature extractor."""
# Temporarly change Transformation
ds_copy = copy.deepcopy(dataset)
resnet_transform = transforms.Compose([
transforms.Resize([224, 224]),
transforms.ToTensor(),
transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
])
ds_copy = change_ds_transform(ds_copy, resnet_transform)
fe = FeatureExtractor(self.visible_gpus, self.log_id)
ftrs = fe.get_features(ds_copy)
return ftrs
def get_src_feat_dists(self, src_ds, trgt_ds):
"""Return disttances beween image features.
Returns two-dimensional array containing n-sized lists
of similarity measures for each source images - here
the distance between extracted features.
Args:
src_ds (torch.utils.data.Dataset): Source dataset
src_ds (torch.utils.data.Dataset): Target dataset
Returns:
numpy.ndarray: list of lists with similarity measures for each image
"""
# Take random n images from target for comparison
n = 500
r_idx = get_random_idx(self.trgt_ds, n) # Sample idx
trgt_compare_ds = Subset(trgt_ds, r_idx) # Create Subset Dataset
trgt_label_dist = get_label_dist(trgt_compare_ds) # Get Label Distribution
if self.log:
self.log.info(f"FTR: Comparing Source (={len(src_ds)}) to Target (={len(trgt_compare_ds)}): {trgt_label_dist}")
# Get Features
src_ftrs = self.get_features(src_ds)
trgt_ftrs = self.get_features(trgt_compare_ds) # len of n
src_img_dists = []
tstart = time.time()
# For each image, get list of ssim to all trgt_imgs
for i, s_ftr_vec in enumerate(src_ftrs):
curr_dists = []
for t_ftr_vec in trgt_ftrs:
if self.dist_measure == 'euclid':
# Eucledean Distance
dist = np.linalg.norm(s_ftr_vec - t_ftr_vec)
curr_dists.append(dist)
if self.dist_measure == 'cos':
# Cosine Distance
dist = spatial.distance.cosine(s_ftr_vec, t_ftr_vec)
curr_dists.append(dist)
src_img_dists.append(curr_dists)
if i == 0 or (i+1) % int(math.ceil(len(src_ftrs)/10)) == 0 or (i+1) % len(src_ftrs) == 0:
tend = time.time()
if self.log:
self.log.info(f"Computed Feature Dist for {i+1} / {len(src_ftrs)} - took {timer(tstart, tend)}")
return np.array(src_img_dists)
def get_dist_avgs(self, imgs_dists):
"""Take average dist for each image"""
dist_avg_list = np.array([sum(dists_list)/len(dists_list) for dists_list in imgs_dists])
return dist_avg_list
def get_dist_minima(self, imgs_dists):
"""Take minima dist for each image"""
dist_min_list = np.array([min(dists_list) for dists_list in imgs_dists])
return dist_min_list
def get_dist_kmin_avgs(self, imgs_dists, k=5):
"""Take avg min dist for 5 dists for each image"""
imgs_dists_sorted = [sorted(dists_list, reverse=False) for dists_list in imgs_dists]
dist_k_min_avgs = np.array([sum(dists_list[:k])/k for dists_list in imgs_dists_sorted])
return dist_k_min_avgs
def get_min_dist_subset(self, dataset, subset_size, mode_id=None, class_equal=False, ds_origins=None):
"""Get subset according to similarity - here distance measure feature distance and mode"""
assert mode_id in ['dist_avg', 'dist_min', 'dist_k_min'], "Invalid mode_id, options are 'dist_avg', 'dist_min', 'dist_k_min'"
if mode_id == 'dist_avg':
distance_measure_list = self.src_imgs_dist_avgs
elif mode_id == 'dist_min':
distance_measure_list = self.src_imgs_dist_mins
elif mode_id == 'dist_k_min':
distance_measure_list = self.src_imgs_dist_kmin_avgs
assert (len(dataset) == len(distance_measure_list)), "Dataset and Feature Distance Array don't have the same length."
if class_equal:
            # Get idx_min_segment sampling all classes as equally as possible: min distance means high similarity
idx_min_segment = get_norm_subset_idx(dataset, subset_size, distance_measure_list, segment='min')
else:
# Get idx_min_segment according to overall ranking
idx_min_segment = get_items_idx_of_min_segment(subset_size, distance_measure_list)
# Origin Dataset Distribution
origin_dist = get_unique_counts(ds_origins[idx_min_segment])
if self.log:
self.log.info(f"Orgin Dataset Distribution: {origin_dist}")
        # Create Subset
subset_ds = Subset(dataset, idx_min_segment)
return subset_ds
```
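A usage sketch under the same assumptions (two `torch.utils.data.Dataset` objects named `src_ds` and `trgt_ds`); `ds_origins` is only used to log the origin distribution, so a simple per-sample array of dataset names is enough here.
```python
import numpy as np
from sampler.feature_sampler import FeatureDistSampler

sampler = FeatureDistSampler(src_ds, trgt_ds, dist='cos', visible_gpus='0')
origins = np.array([src_ds.name] * len(src_ds))       # placeholder origin labels
subset = sampler.get_min_dist_subset(src_ds, subset_size=1000,
                                     mode_id='dist_k_min',
                                     class_equal=False,
                                     ds_origins=origins)
```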
#### File: s2t-sim/utils/datasets.py
```python
import os
import numpy as np
import scipy.io
import h5py
from PIL import Image
from PIL import ImageFile
import torch
import torchvision
import torchvision.transforms as transforms
from torch.utils.data import Dataset, Subset
# Adapted DomainNet: only classes with reasonable sizes (>= 200 samples) are kept, namely:
domain_net_targets = ['sea_turtle', 'swan', 'zebra', 'submarine', 'saxophone', 'bird',
'squirrel', 'teapot', 'tiger', 'flower', 'streetlight', 'whale', 'feather']
ImageFile.LOAD_TRUNCATED_IMAGES = True
# Digit 5
class MNISTDataset(Dataset):
def __init__(self, split, transform=None, path='./data/digit-5'):
# Load Data here
assert split == 'train' or split == 'test', "Split should be string 'train' or 'test'"
split = True if split == 'train' else False
self.dataset = torchvision.datasets.MNIST(root=path,
train=split,
transform=transforms.Grayscale(
num_output_channels=3),
download=True)
self.targets = self.dataset.targets
self.classes = self.dataset.classes
self.transform = transform
self.name = 'mnist'
def show_image(self, idx):
img, label = self.dataset[idx]
return img
def __getitem__(self, idx):
# dataset[0]
img, label = self.dataset[idx]
if self.transform:
img = self.transform(img)
return img, label
def __len__(self):
# len(dataset)
return len(self.targets)
class SVHNDataset(Dataset):
def __init__(self, split, transform=None, path='./data/digit-5'):
# Load Data here
assert split == 'train' or split == 'test', "Split should be string 'train' or 'test'"
self.dataset = torchvision.datasets.SVHN(root=path,
split=split,
transform=None,
download=True)
self.targets = self.dataset.labels
self.classes = np.unique(self.dataset.labels)
self.transform = transform
self.name = 'svhn'
def show_image(self, idx):
img, label = self.dataset[idx]
print(self.classes[int(label)])
return img
def __getitem__(self, idx):
# dataset[0]
img, label = self.dataset[idx]
if self.transform:
img = self.transform(img)
return img, label
def __len__(self):
# len(dataset)
return len(self.targets)
class USPSDataset(Dataset):
def __init__(self, split, transform=None, path='./data/digit-5/usps.h5'):
# Load Data
with h5py.File(path, 'r') as hf:
data = hf.get(split)
X = data.get('data')[:] * 255
y = data.get('target')[:]
X = np.reshape(X, (len(X), 16, 16))
X = np.array([np.stack((img.astype(np.uint8),)*3, axis=-1)
for img in X]) # Making it 3 channel
self.X = [Image.fromarray(img, mode="RGB") for img in X]
self.targets = np.array([int(yi) for yi in y])
self.classes = np.unique(self.targets)
self.transform = transform
self.name = 'usps'
def show_image(self, idx):
img, label = self.X[idx], self.targets[idx]
return img
def __getitem__(self, idx):
# dataset[0]
img, label = self.X[idx], self.targets[idx]
if self.transform:
img = self.transform(img)
return img, label
def __len__(self):
# len(dataset)
return len(self.targets)
class SYNDataset(Dataset):
def __init__(self, split, transform=None, path='./data/digit-5/syn_number.mat'):
# Load Data
data = scipy.io.loadmat(path)
X = data.get(f"{split}_data")
y = data.get(f"{split}_label")
self.X = [Image.fromarray(img, mode="RGB") for img in X]
self.targets = np.array([int(label[0]) for label in y])
self.classes = np.unique(self.targets)
self.transform = transform
self.name = 'syn'
def show_image(self, idx):
img, label = self.X[idx], self.targets[idx]
print(self.classes[int(label)])
return img
def __getitem__(self, idx):
# dataset[0]
img, label = self.X[idx], self.targets[idx]
if self.transform:
img = self.transform(img)
return img, label
def __len__(self):
# len(dataset)
return len(self.targets)
class MNISTMDataset(Dataset):
def __init__(self, split, transform=None, path='./data/digit-5/mnistm_with_label.mat'):
# Load Data
data = scipy.io.loadmat(path)
X = data.get(f"{split}")
y = data.get(f"label_{split}")
self.X = [Image.fromarray(img, mode="RGB") for img in X]
self.targets = np.array(
[int(np.where(labelmap == 1)[0][0]) for labelmap in y])
self.classes = np.unique(self.targets)
self.transform = transform
self.name = 'mnistm'
def show_image(self, idx):
img, label = self.X[idx], self.targets[idx]
print(self.classes[int(label)])
return img
def __getitem__(self, idx):
# dataset[0]
img, label = self.X[idx], self.targets[idx]
if self.transform:
img = self.transform(img)
return img, label
def __len__(self):
# len(dataset)
return len(self.targets)
class Digit5Subset(Dataset):
def __init__(self, dataset, indices):
self.dataset = dataset
self.name = dataset.name
self.indices = indices
self.targets = dataset.targets[indices]
def __getitem__(self, idx):
if isinstance(idx, list):
return self.dataset[[self.indices[i] for i in idx]]
return self.dataset[self.indices[idx]]
def __len__(self):
return len(self.indices)
# DomainNet
class ClipartDataset(Dataset):
def __init__(self, transform=None, root='data/domainnet/multi', selection_txt_path='data/domainnet/txt/clipart_imgs.txt'):
# Load Data
with open(selection_txt_path) as f:
lines = f.readlines()
self.img_paths = []
self.label_list = []
self.classes = []
self.label_map = {}
for x in lines:
path, label_idx = x.split(' ')
self.img_paths.append(path)
label_idx = label_idx.strip()
self.label_list.append(int(label_idx))
label_name = path.split('/')[1]
if label_name not in self.classes:
self.classes.append(label_name)
self.label_map[label_idx] = label_name
self.X = [Image.open(os.path.join(root, img_path))
for img_path in self.img_paths]
self.targets = np.array(self.label_list)
self.transform = transform
self.name = 'clipart'
def show_image(self, idx):
img, label = self.X[idx], self.targets[idx]
print(self.label_map.get(str(label)))
return img
def __getitem__(self, idx):
# dataset[0]
img, label = self.X[idx], self.targets[idx]
if self.transform:
img = self.transform(img)
return img, label
def __len__(self):
# len(dataset)
return len(self.targets)
class PaintingDataset(Dataset):
def __init__(self, transform=None, root='data/domainnet/multi', selection_txt_path='data/domainnet/txt/painting_imgs.txt'):
# Load Data
with open(selection_txt_path) as f:
lines = f.readlines()
self.img_paths = []
self.label_list = []
self.classes = []
self.label_map = {}
for x in lines:
path, label_idx = x.split(' ')
self.img_paths.append(path)
label_idx = label_idx.strip()
self.label_list.append(int(label_idx))
label_name = path.split('/')[1]
if label_name not in self.classes:
self.classes.append(label_name)
self.label_map[label_idx] = label_name
self.X = [Image.open(os.path.join(root, img_path))
for img_path in self.img_paths]
self.targets = np.array(self.label_list)
self.transform = transform
self.name = 'painting'
def show_image(self, idx):
img, label = self.X[idx], self.targets[idx]
print(self.label_map.get(str(label)))
return img
def __getitem__(self, idx):
# dataset[0]
img, label = self.X[idx], self.targets[idx]
if self.transform:
img = self.transform(img)
return img, label
def __len__(self):
# len(dataset)
return len(self.targets)
class RealDataset(Dataset):
def __init__(self, transform=None, root='data/domainnet/multi', selection_txt_path='data/domainnet/txt/real_imgs.txt'):
# Load Data
with open(selection_txt_path) as f:
lines = f.readlines()
self.img_paths = []
self.label_list = []
self.classes = []
self.label_map = {}
for x in lines:
path, label_idx = x.split(' ')
self.img_paths.append(path)
label_idx = label_idx.strip()
self.label_list.append(int(label_idx))
label_name = path.split('/')[1]
if label_name not in self.classes:
self.classes.append(label_name)
self.label_map[label_idx] = label_name
self.X = [Image.open(os.path.join(root, img_path))
for img_path in self.img_paths]
self.targets = np.array(self.label_list)
self.transform = transform
self.name = 'real'
def show_image(self, idx):
img, label = self.X[idx], self.targets[idx]
print(self.label_map.get(str(label)))
return img
def __getitem__(self, idx):
# dataset[0]
img, label = self.X[idx], self.targets[idx]
if self.transform:
img = self.transform(img)
return img, label
def __len__(self):
# len(dataset)
return len(self.targets)
class SketchDataset(Dataset):
def __init__(self, transform=None, root='data/domainnet/multi', selection_txt_path='data/domainnet/txt/sketch_imgs.txt'):
# Load Data
with open(selection_txt_path) as f:
lines = f.readlines()
self.img_paths = []
self.label_list = []
self.classes = []
self.label_map = {}
for x in lines:
path, label_idx = x.split(' ')
self.img_paths.append(path)
label_idx = label_idx.strip()
self.label_list.append(int(label_idx))
label_name = path.split('/')[1]
if label_name not in self.classes:
self.classes.append(label_name)
self.label_map[label_idx] = label_name
self.X = [Image.open(os.path.join(root, img_path))
for img_path in self.img_paths]
self.targets = np.array(self.label_list)
self.transform = transform
self.name = 'sketch'
def show_image(self, idx):
img, label = self.X[idx], self.targets[idx]
print(self.label_map.get(str(label)))
return img
def __getitem__(self, idx):
# dataset[0]
img, label = self.X[idx], self.targets[idx]
if self.transform:
img = self.transform(img)
return img, label
def __len__(self):
# len(dataset)
return len(self.targets)
``` |
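A short sketch of how these dataset classes compose, assuming the digit-5 files are available under `./data/digit-5`:
```python
import numpy as np
import torchvision.transforms as transforms
from utils.datasets import MNISTDataset, Digit5Subset

tf = transforms.Compose([transforms.Resize([32, 32]), transforms.ToTensor()])
mnist_train = MNISTDataset('train', transform=tf)

# Keep only the first 5000 samples while preserving .name and .targets
subset = Digit5Subset(mnist_train, np.arange(5000))
img, label = subset[0]
```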
{
"source": "JonasNorling/m4audio",
"score": 3
} |
#### File: src/sawsynth/make_wavetable.py
```python
import math
nyquist = 24000
def saw(fraction, harmonics):
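    # Band-limited sawtooth by additive synthesis:
    #   saw(x) ~ sum_{k=1..harmonics} (1/k) * sin(2*pi*k*x)
    # 'harmonics' is chosen by the caller so the highest partial stays below Nyquist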
sum = 0
for i in range(1, harmonics+1):
sum += (1/i) * math.sin(i * fraction * 2*math.pi)
return sum
def note2hz(note):
# Get frequency of MIDI note number (note 69 is middle A @ 440 Hz)
return 2**((note - 69) / 12) * 440
def make_tables(function, firstNote, notesPerTable, tableCount, tableLength):
output = "{\n"
for tableNum in range(tableCount):
highestFreq = note2hz(firstNote + (tableNum + 1) * notesPerTable)
harmonics = (int)(nyquist/highestFreq)
data = list(map(lambda s: function(s/tableLength, harmonics), range(tableLength)))
output += " { // notes %d - %d, up to %.2f Hz (%d harmonics)" % (firstNote + tableNum * notesPerTable,
firstNote + (tableNum+1) * notesPerTable - 1,
highestFreq, harmonics)
for i in range(len(data)):
if not i % 8:
output += "\n "
output += "%.06f, " % data[i]
output += "\n },\n"
output += "}"
return output
if __name__ == "__main__":
tableCount = 10
tableLength = 32
t = make_tables(saw, 8, 12, tableCount, tableLength)
print("""/*
* Saw wavetable generated by make_wavetable.py
*/
""")
print("#define WT_TABLES %d" % tableCount)
print("#define WT_LENGTH %d" % tableLength)
print("#define WT_FIRST_NOTE 8")
print("#define WT_NOTES_PER_TABLE 8")
print("static const float wtSaw[WT_TABLES][WT_LENGTH] = %s;\n" % t)
``` |
{
"source": "JonasNorling/rpi0_room_node",
"score": 2
} |
#### File: JonasNorling/rpi0_room_node/skidl_board.py
```python
from skidl import *
class RpiConnector(Part):
def __init__(self, ref=None, value=None,
footprint='Connector_PinHeader_2.54mm:PinHeader_2x20_P2.54mm_Vertical'):
Part.__init__(self, 'Connector_Generic', 'Conn_02x20_Odd_Even',
ref=ref, value=value, footprint=footprint)
self[1].name = '+3.3V'; self[1].drive = POWER
self[2].name = '+5V'; self[2].drive = POWER
self[4].name = '+5V'
self[6].name = 'GND'; self[6].drive = POWER
self[9].name = 'GND'
self[14].name = 'GND'
self[20].name = 'GND'
self[25].name = 'GND'
self[30].name = 'GND'
self[34].name = 'GND'
self[39].name = 'GND'
t_led = Part('Device', 'LED', footprint='LED_SMD:LED_0805_2012Metric_Castellated', dest=TEMPLATE)
t_r = Part('Device', 'R', value='390', footprint='Resistor_SMD:R_0805_2012Metric_Pad1.15x1.40mm_HandSolder', dest=TEMPLATE)
t_c = Part('Device', 'C', footprint='Capacitor_SMD:C_0805_2012Metric_Pad1.15x1.40mm_HandSolder', dest=TEMPLATE)
t_tp = Part('Connector', 'TestPoint', footprint='TestPoint:TestPoint_THTPad_1.0x1.0mm_Drill0.5mm', dest=TEMPLATE)
# Global nets
vcc_33 = Net('+3.3V')
vcc_5 = Net('+5V')
vcc_reg = Net('VCC_reg')
gnd = Net('GND')
# Raspberry Pi connector
rpi_con = RpiConnector(ref='J1', value='Raspberry Pi')
rpi_con['+3.3V'] += vcc_33
rpi_con['+5V'] += vcc_5
rpi_con['GND'] += gnd
rpi_con[17, 26, 27, 28, 33, 35, 36, 37, 38, 40] += NC
# Serial port header
uart_header = Part('Connector_Generic', 'Conn_01x03', ref='J3',
footprint='Connector_PinHeader_2.54mm:PinHeader_1x03_P2.54mm_Vertical')
uart_header[:] += gnd, rpi_con[10], rpi_con[8]
# LEDs
vcc_33 & t_led(ref='D3')['A', 'K'] & t_r(ref='R3') & rpi_con[3]
vcc_33 & t_led(ref='D4')['A', 'K'] & t_r(ref='R4') & rpi_con[5]
vcc_33 & t_led(ref='D5')['A', 'K'] & t_r(ref='R5') & rpi_con[13]
rgb_led = Part('LED', 'ASMB-MTB0-0A3A2', ref='D1', footprint='LED_SMD:LED_Avago_PLCC4_3.2x2.8mm_CW')
rgb_led[1] += vcc_33
rgb_led[2] & t_r(ref='R6', value='390') & rpi_con[32]
rgb_led[3] & t_r(ref='R7', value='390') & rpi_con[31]
rgb_led[4] & t_r(ref='R8', value='390') & rpi_con[29]
# One-wire headers
onewire = Net('onewire')
onewire += rpi_con[7]
vcc_33 & t_r(ref='R1', value='4k7') & onewire
j2 = Part('Connector_Generic', 'Conn_01x03', ref='J2',
footprint='Connector_PinHeader_2.54mm:PinHeader_1x03_P2.54mm_Vertical')
j4 = j2(ref='J4')
j2[:] += vcc_33, onewire, gnd
j4[:] += vcc_33, onewire, gnd
# IR receiver
ir = Part('Interface_Optical', 'TSOP382xx', ref='U2',
footprint='OptoDevice:Vishay_MINICAST-3Pin')
ir[:] += rpi_con[12], gnd, vcc_33
gnd & t_c(ref='C3', value='1u') & vcc_33
# IR transmitter
irled = Part('LED', 'CQY99', ref='D2', footprint='LED_THT:LED_D5.0mm_IRGrey')
# Note: I'm suspicious of this being the correct transistor
t = Part('Transistor_BJT', 'DTA123E', value='PDTC123Y', ref='Q1', footprint='Package_TO_SOT_SMD:SC-59_Handsoldering')
vcc_5 & irled['A', 'K'] & t_r(ref='R2', value=70) & t['C', 'E'] & gnd
t['B'] += rpi_con[11]
# LDO
ldo = Part('Regulator_Linear', 'AP2204K-3.3', ref='U1',
footprint='Package_TO_SOT_SMD:SOT-23-5')
ldo['VIN', 'EN'] += vcc_5
ldo['GND'] += gnd
ldo['VOUT'] += vcc_reg
gnd & t_c(ref='C1', value='1u') & vcc_5
gnd & t_c(ref='C2', value='10u') & vcc_reg
vcc_reg += t_tp(ref='TP7')[1]
# RF module
rfm = Part('RF_Module', 'RFM69HCW', ref='U3', footprint='RF_Module:HOPERF_RFM69HW')
ant1 = Part('Device', 'Antenna_Shield', ref='AE1', value='SMA', footprint='Connector_Coaxial:SMA_Amphenol_132289_EdgeMount')
ant2 = Part('Device', 'Antenna_Shield', ref='AE2', value='U.Fl', footprint='Connector_Coaxial:U.FL_Hirose_U.FL-R-SMT-1_Vertical')
rfm['GND'] += gnd
rfm['3.3V'] += vcc_reg
rfm['SCK'] += rpi_con[23]
rfm['MOSI'] += rpi_con[19]
rfm['MISO'] += rpi_con[21]
rfm['NSS'] += rpi_con[24]
rfm['RESET'] += rpi_con[15]
rfm['DIO0'] += rpi_con[22] | t_tp(ref='TP6')
rfm['DIO1'] += rpi_con[18] | t_tp(ref='TP5')
rfm['DIO2'] += rpi_con[16] | t_tp(ref='TP4')
rfm['DIO3'] += t_tp(ref='TP3')[1]
rfm['DIO4'] += t_tp(ref='TP2')[1]
rfm['DIO5'] += t_tp(ref='TP1')[1]
rfm['ANT'] += Net('ant') | ant1[1] | ant2[1]
gnd += ant1['Shield'] | ant2['Shield']
gnd & t_c(ref='C4', value='10u') & vcc_reg
# Sundry
Part('Mechanical', 'MountingHole', ref='H1', footprint='holes:hole_2.75')
Part('Mechanical', 'MountingHole', ref='H2', footprint='holes:hole_2.75')
Part('Mechanical', 'MountingHole', ref='H3', footprint='holes:hole_2.75')
Part('Mechanical', 'MountingHole', ref='H4', footprint='holes:hole_2.75')
# Finalize
ERC()
generate_netlist()
``` |
{
"source": "jonas-nothnagel/sdg_text_classification",
"score": 3
} |
#### File: sdg_text_classification/src/tools.py
```python
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns; sns.set()
from collections import Counter
from typing import Iterable, Tuple
'''features'''
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition import TruncatedSVD
from sklearn.preprocessing import label_binarize
'''Classifiers'''
from sklearn.dummy import DummyClassifier
from sklearn.linear_model import SGDClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.multiclass import OneVsRestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn import svm
from sklearn.naive_bayes import MultinomialNB
from sklearn.utils import class_weight
'''Metrics/Evaluation'''
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, roc_curve, auc, confusion_matrix
from scipy import interp
from itertools import cycle
from sklearn.model_selection import cross_val_score
from sklearn.metrics import classification_report
from sklearn.pipeline import make_pipeline
from sklearn.pipeline import Pipeline
import operator
import joblib
def show_labels(y_true:Iterable, y_hat:Iterable, title:str = 'Classifier', class_range:Tuple[int,int] = (1,16)):
"""
Plot heatmap of confusion matrix for SDGs.
Parameters
----------
y_true : array-like
The input array of true labels.
y_hat : array-like
The input array of predicted labels.
title : str, default 'Classifier'
A title of the plot to be displayed.
class_range : Tuple[int,int], default (1,16)
A tuple giving the SDG range. The default value assumes that SDGs 1 through 15 are used.
If some SDGs are missing, adjust class_range accordingly.
Returns
-------
Has not return statement. Prints a plot.
"""
assert len(y_true) == len(y_hat), "Arrays must be of the same length"
to_labels = list(range(class_range[0],class_range[1]))
to_accuracy = accuracy_score(y_true, y_hat)
df_lambda = pd.DataFrame(confusion_matrix(y_true, y_hat),
index = list(range(class_range[0], class_range[1])),
columns = list(range(class_range[0], class_range[1]))
)
f, ax = plt.subplots(figsize=(10, 8))
sns.heatmap(df_lambda, annot=True, fmt="d", linewidths=.5, ax=ax)
ax.set_ylim(ax.get_ylim()[0] + 0.5, ax.get_ylim()[1] - 0.5)
plt.title(title + f'\nAccuracy:{round(to_accuracy, 3)}')
plt.ylabel('True label')
plt.xlabel('Predicted label');
def get_topwords(logit_model, vectorizer, n_models:int = 15, n:int = 30, show_idxmax:bool = True) -> pd.DataFrame:
"""
Extract top n predictors with highest coefficients from a logistic regression model and vectorizer object.
Parameters
----------
logit_model : LogisticRegression estimator
A fitted LogisticRegression object from scikit-learn with coef_ attribute
vectorizer : CountVectorizer or TfidfVectorizer
A fitted CountVectorizer or TfidfVectorizer object from scikit-learn with a get_feature_names attribute.
n_models : int, default 15
Indicates the number of models fitted by logit_model, i.e. n_classes.
n : int, default 30
The number of top predictors for each model to be returned. If None, returns all predictors
show_idxmax : bool default True
Indicates whether to print the keyword/predictor for each class
Returns
-------
df_lambda : a pandas DataFrame object of shape (n_models, 1) with a column 'Keywords'. Each cell in the column is
a sorted list of tuples with the top n predictors, each of the form (keyword, coefficient).
"""
df_lambda = pd.DataFrame(logit_model.coef_,
columns = vectorizer.get_feature_names(),
index = [f'sdg_{x}' for x in range(1,n_models+1)]).round(3)
if show_idxmax:
from IPython.display import display  # needed when run outside a notebook
display(df_lambda.idxmax(axis = 1))
df_lambda = pd.DataFrame([df_lambda.to_dict(orient = 'index')])
df_lambda = df_lambda.T.rename({0:'Keywords'}, axis = 1)
if n is None:
return df_lambda
else:
falpha = lambda alpha: sorted(alpha.items(), key=lambda x:x[1], reverse=True)[:n]
df_lambda['Keywords'] = df_lambda['Keywords'].apply(falpha)
return df_lambda
'''for training and storing models and vectorizers'''
def model_score_df_all(model_dict, category, folder_label, X_train, X_test, y_train, y_test):
models, model_name, ac_score_list, p_score_list, r_score_list, f1_score_list = [], [], [], [], [], []
for k,v in model_dict.items():
v.fit(X_train, y_train)
model_name.append(k)
models.append(v)
y_pred = v.predict(X_test)
# ac_score_list.append(accuracy_score(y_test, y_pred))
# p_score_list.append(precision_score(y_test, y_pred, average='macro'))
# r_score_list.append(recall_score(y_test, y_pred, average='macro'))
f1_score_list.append(f1_score(y_test, y_pred, average='macro'))
# model_comparison_df = pd.DataFrame([model_name, ac_score_list, p_score_list, r_score_list, f1_score_list]).T
# model_comparison_df.columns = ['model_name', 'accuracy_score', 'precision_score', 'recall_score', 'f1_score']
# model_comparison_df = model_comparison_df.sort_values(by='f1_score', ascending=False)
results = dict(zip(models, f1_score_list))
name = dict(zip(model_name, f1_score_list))
#return best performing model according to f1_score
best_clf = max(results.items(), key=operator.itemgetter(1))[0]
best_f1 = max(results.items(), key=operator.itemgetter(1))[1]
best_name = max(name.items(), key=operator.itemgetter(1))[0]
print("best classifier model:", best_name)
print("f1_score:", best_f1)
#save best performing model
filename = '../models/tf_idf/'+folder_label+'/'+category+'_'+best_name+'_'+'model.sav'
joblib.dump(best_clf, filename)
#save best performing model without name appendix
#filename = '../models/tf_idf/'+folder_label+'/'+category+'_'+'model.sav'
joblib.dump(best_clf, filename)
return results, best_f1
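# --- Usage sketch (illustration only, not part of the original module) ---
# show_labels() only needs two label arrays, so it can be smoke-tested with
# synthetic data; the class range below is an assumption matching the default.
if __name__ == '__main__':
    import numpy as np
    rng = np.random.RandomState(0)
    y_true = np.repeat(np.arange(1, 16), 14)       # every SDG class present
    y_hat = rng.randint(1, 16, size=y_true.size)   # random "predictions"
    show_labels(y_true, y_hat, title='Dummy classifier', class_range=(1, 16))
    plt.show()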
``` |
{
"source": "jonas-nothnagel/simple_cluster_and_topic_modeling",
"score": 3
} |
#### File: simple_cluster_and_topic_modeling/src/visualise.py
```python
from wordcloud import WordCloud
def draw_cloud(dataframe, column):
# Join the different processed titles together.
long_string = ','.join(list(dataframe[column]))
# Create a WordCloud object
wordcloud = WordCloud(background_color="white", max_words=5000, contour_width=6, contour_color='steelblue')
# Generate a word cloud
wordcloud.generate(long_string)
# Visualize the word cloud
return wordcloud.to_image()
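# --- Usage sketch (illustration only): draw_cloud expects a DataFrame and the
# name of a text column; the tiny frame below is made up for demonstration.
if __name__ == '__main__':
    import pandas as pd
    df = pd.DataFrame({'clean_text': ['solar energy policy',
                                      'wind power for the grid',
                                      'climate adaptation finance']})
    draw_cloud(df, 'clean_text').save('wordcloud_example.png')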
``` |
{
"source": "jonas-nothnagel/Text-Classification-Feedback-Loop",
"score": 2
} |
#### File: Text-Classification-Feedback-Loop/streamlit/main.py
```python
import pandas as pd
import numpy as np
import joblib
import pickle5 as pickle
from PIL import Image
import time
import datetime
#firestore
from google.cloud import firestore
db = firestore.Client.from_service_account_json("firestore_key.json")
#eli5
from eli5 import show_prediction
#streamlit
import streamlit as st
st.set_page_config(layout="wide")
import SessionState
from st_aggrid import AgGrid
from st_aggrid import GridOptionsBuilder, AgGrid, GridUpdateMode, DataReturnMode, JsCode
from load_css import local_css
local_css("style.css")
DEFAULT = '< PICK A VALUE >'
def selectbox_with_default(text, values, default=DEFAULT, sidebar=False):
func = st.sidebar.selectbox if sidebar else st.selectbox
return func(text, np.insert(np.array(values, object), 0, default))
#helper functions
from inspect import getsourcefile
import os.path as path, sys
current_dir = path.dirname(path.abspath(getsourcefile(lambda:0)))
sys.path.insert(0, current_dir[:current_dir.rfind(path.sep)])
import src.clean_dataset as clean
sys.path.pop(0)
#%%
@st.cache(allow_output_mutation=True)
def load_data():
#1. load in complete transformed and processed dataset for pre-selection and exploration purpose
df_taxonomy = pd.read_csv('../data/taxonomy_final.csv')
df_columns = df_taxonomy.drop(columns=['PIMS_ID', 'all_text_clean', 'all_text_clean_spacy', 'hyperlink',
'title',
'leading_country',
'grant_amount',
'country_code',
'lon',
'lat'])
to_match = df_columns.columns.tolist()
#2. load parent dict
with open("../data/parent_dict.pkl", 'rb') as handle:
parents = pickle.load(handle)
#3. load sub category dict
with open("../data/category_dict.pkl", 'rb') as handle:
sub = pickle.load(handle)
#4. Load Training Scores:
with open("../data/tfidf_only_f1.pkl", 'rb') as handle:
scores_dict = pickle.load(handle)
#5. Load all categories as list:
with open("../data/all_categories_list.pkl", 'rb') as handle:
all_categories = pickle.load(handle)
# sort list
all_categories = sorted(all_categories)
return df_taxonomy, df_columns, to_match, parents, sub, scores_dict, all_categories
#%%
session = SessionState.get(run_id=0)
categories = ""
#%%
#title start page
st.title('Machine Learning for Nature Climate Energy Portfolio')
sdg = Image.open('../streamlit/logo.png')
st.sidebar.image(sdg, width=200)
st.sidebar.title('Navigation')
st.write('## Frontend Application that takes text as input and outputs classification decision.')
st.write("Map your project, document or text to NCE taxonomy and help improve the models by giving your feedback!")
df_taxonomy, df_columns, to_match, parents, sub, scores_dict, all_categories = load_data()
items = [k for k in parents.keys()]
items.insert(0,'')
# load data and choose category
with st.spinner('Choose categories to predict...'):
agree = st.sidebar.checkbox(label='Would you like to browse and choose the different classes?', value = False, key= "key0")
if agree:
option = st.sidebar.selectbox('Select a category:', items, format_func=lambda x: 'Select a category' if x == '' else x)
if option:
st.sidebar.write('You selected:', option)
st.sidebar.markdown("**Categories**")
for i in parents[option]:
st.sidebar.write(i)
st.sidebar.markdown("**Further Choices**")
agree = st.sidebar.checkbox(label='Would you like to display and predict sub-categories of your choice?', value = False)
if agree:
sub_option = st.sidebar.selectbox('Select a category:', parents[option], format_func=lambda x: 'Select a category' if x == '' else x)
if sub_option:
st.sidebar.markdown("**Sub Categories:**")
for i in sub[sub_option]:
st.sidebar.write(i)
categories = sub[sub_option]
else:
categories = parents[option]
#choose one category from all:
agree = st.sidebar.checkbox(label='Would you like to predict specific categories?', value = False, key = "key1")
if agree:
all_options = st.sidebar.multiselect('Select a category:', all_categories, format_func=lambda x: 'Select a category' if x == '' else x)
if all_options:
st.sidebar.markdown("**You've chosen:**")
for i in all_options:
st.sidebar.write(i)
categories = all_options
# predict each category:
agree = st.sidebar.checkbox(label='Would you like to predict the whole taxonomy?', value = False, key= "key2")
if agree:
categories = all_categories
@st.cache(allow_output_mutation=True, suppress_st_warning=True)
def predict(text_input):
#define lists
all_results_name = []
all_results_score = []
name = []
hat = []
number = []
top_5 = []
last_5 = []
top_5_10 = []
last_5_10 = []
if text_input != '':
#placeholder = st.empty()
#with placeholder.beta_container():
with st.spinner('Load Models and Predict...'):
if categories != "":
for category in categories:
# take only models with over 20 training datapoints
if df_taxonomy[category].sum(axis=0) > 20:
#prune the long names to ensure proper loading
if len(category) > 50:
#st.write("pruning the long name of category:", category)
category = category[0:20]
else:
pass
# Pre-process text:
input_list = [text_input]
input_df = pd.DataFrame(input_list, columns =['input_text'])
# clean text
input_df['input_text'] = input_df['input_text'].apply(clean.spacy_clean)
clean_df = pd.Series(input_df['input_text'])
tfidf_vectorizer = joblib.load('../models/tf_idf/tf_idf_only/'+category+'_'+'vectorizer.sav')
fnames = tfidf_vectorizer.get_feature_names()
vector_df = tfidf_vectorizer.transform(clean_df)
clf = joblib.load('../models/tf_idf/tf_idf_only/'+category+'_'+'model.sav')
y_hat = clf.predict(vector_df)
y_prob = clf.predict_proba(vector_df)
if y_hat == 1:
all_results_name.append(category)
all_results_score.append(y_prob[0][1].round(1)*100)
#element = st.write(category)
number.append(df_taxonomy[category].sum(axis=0))
name.append(category)
#element = st.write("Yes with Confidence:", y_prob[0][1].round(2)*100, "%")
hat.append(y_prob[0][1].round(2)*100)
results= dict(zip(name, hat))
#return top features:
w = show_prediction(clf, tfidf_vectorizer.transform(clean_df),
highlight_spaces = True,
top=5000,
feature_names=fnames,
show_feature_values = True)
result = pd.read_html(w.data)[0]
top_5_list = result.Feature.iloc[0:5].tolist()
top_5.append(top_5_list)
top_5_10_list = result.Feature.iloc[5:10].tolist()
top_5_10.append(top_5_10_list)
last_5_list = result.Feature.iloc[-5:].tolist()
last_5.append(last_5_list)
last_5_10_list = result.Feature.iloc[-10:].tolist()
last_5_10_list = list(set(last_5_10_list) - set(last_5_list))
last_5_10.append(last_5_10_list)
if y_hat == 0:
all_results_name.append(category)
all_results_score.append(y_prob[0][1].round(1)*100)
#element= st.write(category)
#element = st.write("No with Confidence:", y_prob[0][0].round(2)*100, "%")
# make dataframe from all prediction results
df = pd.DataFrame(
{'category': all_results_name,
'confidence_score': all_results_score
})
# add decision column:
df['prediction'] = np.where(df['confidence_score']>= 50, "True", "False")
df['confidence_score'] = df['confidence_score'].astype(str) + "%"
df = df[['category', 'prediction', 'confidence_score']]
else:
st.warning('No category is selected')
return all_results_name, all_results_score, name, hat, number, top_5, last_5, top_5_10, last_5_10, clean_df, results, df
text_input = st.text_area('Please Input your Text:')
if text_input != "":
all_results_name, all_results_score, name, hat, number, top_5, last_5, top_5_10, last_5_10, clean_df, results, df = predict(text_input)
#time.sleep(3)
#placeholder.empty()
if all_results_name != []:
# st.write("Prediction Results:")
# st.table(df)
#grid table
#Example controlers
st.sidebar.subheader("AgGrid layout options")
#sample_size = st.sidebar.number_input("rows", min_value=0, value=len(df))
grid_height = st.sidebar.slider("Grid height", min_value=100, max_value=1000, value=400)
return_mode_value = DataReturnMode.FILTERED
update_mode_value = GridUpdateMode.MODEL_CHANGED
selection_mode = 'multiple'
# use_checkbox = True
# if use_checkbox:
groupSelectsChildren = True
groupSelectsFiltered = True
# if selection_mode == 'multiple':
rowMultiSelectWithClick = False
if not rowMultiSelectWithClick:
suppressRowDeselection = False
#Infer basic colDefs from dataframe types
gb = GridOptionsBuilder.from_dataframe(df)
#row height
gb.configure_grid_options(rowHeight=45)
#customize gridOptions
gb.configure_default_column(groupable=True, value=True, enableRowGroup=True, aggFunc='sum', editable=True)
#configures last row to use custom styles based on cell's value, injecting JsCode on components front end
cellsytle_jscode = JsCode("""
function(params) {
if (params.value == 'True') {
return {
'color': 'white',
'backgroundColor': 'green'
}
}
else {
return {
'color': 'black',
'backgroundColor': 'white'
}
}
};
""")
gb.configure_column("prediction", cellStyle=cellsytle_jscode)
gb.configure_column("category", cellStyle={'color': 'blue'})
gb.configure_side_bar()
gb.configure_selection(selection_mode)
gb.configure_selection(selection_mode, use_checkbox=True, groupSelectsChildren=groupSelectsChildren, groupSelectsFiltered=groupSelectsFiltered)
gb.configure_selection(selection_mode, use_checkbox=False, rowMultiSelectWithClick=rowMultiSelectWithClick, suppressRowDeselection=suppressRowDeselection)
gb.configure_grid_options(domLayout='normal')
gridOptions = gb.build()
#Display the grid
st.write("## Prediction Result")
st.write("Please tick all categories you think are NOT correct and then submit your choices. You may take guidance from the model's confidence scores. Your feedback will be stored together with the predicted text and will help the models to make better decisions in the future.")
grid_response = AgGrid(
df,
gridOptions=gridOptions,
height=grid_height,
width='100%',
data_return_mode=return_mode_value,
update_mode=update_mode_value,
fit_columns_on_grid_load=True,
allow_unsafe_jscode=True, #Set it to True to allow jsfunction to be injected
)
if st.button('Submit Corrections.'):
df = grid_response['data']
selected = grid_response['selected_rows']
selected_df = pd.DataFrame(selected)
selected_df['corrected_prediction'] = np.where(selected_df['prediction']== "False", "True", "False")
selected_df = selected_df[['category', 'corrected_prediction']]
selected_df = selected_df.T
selected_df.columns = selected_df.iloc[0]
selected_df.drop(selected_df.index[0], inplace=True)
selected_df.reset_index(drop=True, inplace=True)
st.table(selected_df)
selected_df['text'] = text_input
#shift columns
cols = list(selected_df.columns)
cols = [cols[-1]] + cols[:-1]
selected_df = selected_df[cols]
#store in firestore:
selected_df = selected_df.astype(str)
selected_df.index = selected_df.index.map(str)
postdata = selected_df.to_dict()
date = str(datetime.datetime.now())
db.collection(u'feedback').document(date).set(postdata)
st.success("Feedback successfully stored in the cloud!")
else:
t = "<div> <span class='highlight red'>Not enough confidence in any category.</span></div>"
st.markdown(t, unsafe_allow_html=True)
# for AgGrid
# https://share.streamlit.io/pablocfonseca/streamlit-aggrid/main/examples/example.py
``` |
{
"source": "jonasoh/ambvis",
"score": 2
} |
#### File: ambvis/ambvis/ambvis.py
```python
import os
import sys
import textwrap
import argparse
from ambvis import webui
from ambvis.config import cfg
parser = argparse.ArgumentParser(
description=textwrap.dedent("""\
AMBvis control software.
Running this command without any flags starts the web interface.
Specifying flags will perform those actions, then exit."""))
parser.add_argument('--install-service', action="store_true", dest="install",
help="install systemd user service file")
parser.add_argument('--reset-config', action="store_true", dest="reset",
help="reset all configuration values to defaults")
parser.add_argument('--reset-password', action="store_true", dest="resetpw",
help="reset web UI password")
parser.add_argument('--toggle-debug', action="store_true", dest="toggle_debug",
help="toggles additional debug logging on or off")
options = parser.parse_args()
def install_service():
try:
os.makedirs(os.path.expanduser('~/.config/systemd/user'), exist_ok=True)
except OSError as e:
print("Could not make directory (~/.config/systemd/user):", e)
try:
with open(os.path.expanduser('~/.config/systemd/user/ambvis.service'), 'w') as f:
if (os.path.exists('/home/pi/.local/bin/ambvis')):
exe = '/home/pi/.local/bin/ambvis'
else:
exe = '/usr/local/bin/ambvis'
f.write(textwrap.dedent("""\
[Unit]
Description=AMBvis control software
[Service]
ExecStart={}
Restart=always
[Install]
WantedBy=default.target
""").format(exe))
except OSError as e:
print("Could not write file (~/.config/systemd/user/ambvis.service):", e)
print("Systemd service file installed.")
def main():
if options.reset:
print("Clearing all configuration values.")
try:
os.remove(os.path.expanduser('~/.config/ambvis/spiro.conf'))
except OSError as e:
print("Could not remove file ~/.config/ambvis/spiro.conf:", e.strerror)
raise
if options.install:
print("Installing systemd service file.")
install_service()
if options.resetpw:
print("Resetting web UI password.")
cfg.set('password', '')
if options.toggle_debug:
cfg.set('debug', not cfg.get('debug'))
if cfg.get('debug'):
print("Debug mode on.")
else:
print("Debug mode off")
if any([options.install, options.resetpw, options.toggle_debug]):
sys.exit()
webui.run()
```
#### File: ambvis/ambvis/filemanager.py
```python
import os
import shutil
import subprocess
from flask import Blueprint, Response, request, abort, redirect, url_for, flash, render_template
from ambvis.config import cfg
bp = Blueprint('filemanager', __name__, url_prefix='/files')
def stream_popen(p):
'''generator for sending STDOUT to a web client'''
data = p.stdout.read(128*1024)
while data:
yield data
data = p.stdout.read(128*1024)
def verify_dir(check_dir):
'''checks that the directory is
1. immediately contained within the appropriate parent dir
2. does not contain initial dots, and
3. is indeed a directory'''
check_dir = os.path.abspath(check_dir)
dir = os.path.join(os.path.expanduser('~'), 'ambvis')  # must match the data directory used by the views below
return os.path.dirname(check_dir) == dir and not os.path.basename(check_dir).startswith('.') and os.path.isdir(check_dir)
@bp.route('/')
def file_browser():
dirs = []
dir = os.path.join(os.path.expanduser('~'), 'ambvis')
df = shutil.disk_usage(dir)
diskspace = round(df.free / 1024 ** 3, 1)
for entry in os.scandir(dir):
if entry.is_dir() and os.path.dirname(entry.path) == dir and not entry.name.startswith('.'):
dirs.append(entry.name)
return render_template('filemanager.jinja', dirs=sorted(dirs), diskspace=diskspace, name=cfg.get('name'))
@bp.route('/get/<exp_dir>.zip')
def make_zipfile(exp_dir):
'creates a zipfile on the fly, and streams it to the client'
dir = os.path.join(os.path.expanduser('~'), 'ambvis')
zip_dir = os.path.abspath(os.path.join(dir, exp_dir))
if verify_dir(zip_dir):
p = subprocess.Popen(['/usr/bin/zip', '-r', '-0', '-',
os.path.basename(zip_dir)], stdout=subprocess.PIPE, cwd=dir)
return Response(stream_popen(p), mimetype='application/zip')
else:
abort(404)
@bp.route('/delete/<exp_dir>/', methods=['GET', 'POST'])
def delete_dir(exp_dir):
dir = os.path.join(os.path.expanduser('~'), 'ambvis')
del_dir = os.path.abspath(os.path.join(dir, exp_dir))
if request.method == 'GET':
return render_template('delete.html', dir=exp_dir)
else:
#if os.path.abspath(experimenter.dir) == del_dir and experimenter.running:
# flash(
# 'Cannot remove active experiment directory. Please stop experiment first.')
# return redirect(url_for('file_browser'))
if verify_dir(del_dir):
shutil.rmtree(del_dir)
flash(f'Directory {exp_dir} deleted.')
return redirect(url_for('file_browser'))
else:
flash(f'Unable to delete directory "{exp_dir}".')
return redirect(url_for('file_browser'))
```
#### File: ambvis/ambvis/system_settings.py
```python
from flask import Blueprint, Response, request, abort, session, redirect, url_for, flash, render_template
from ambvis import globals
from ambvis.hw import motor, MotorError
from ambvis.config import Config
from ambvis.logger import log, debug
from ambvis.decorators import public_route
cfg = Config()
bp = Blueprint('system_settings', __name__, url_prefix='/settings/system')
@bp.route('/')
def settings():
return render_template('system_settings.jinja')
```
#### File: ambvis/ambvis/webui.py
```python
import os
import time
import hashlib
import subprocess
from threading import Timer, Thread
import cherrypy
from cherrypy.process.plugins import Daemonizer
from werkzeug import datastructures
from ws4py.websocket import WebSocket
from wsgiref.simple_server import make_server
from ws4py.server.cherrypyserver import WebSocketPlugin, WebSocketTool
from flask import Flask, Response, request, abort, flash, redirect, url_for, render_template, session
from ambvis import auth
from ambvis import motor_control
from ambvis.hw import cam, motor, led, update_websocket
from ambvis.config import cfg
from ambvis import filemanager
from ambvis import system_settings
from ambvis import imaging_settings
from ambvis.logger import log, debug
from ambvis.video_stream import Broadcaster
from ambvis.experimenter import Experimenter
from ambvis.decorators import public_route, not_while_running
def create_app():
app = Flask(__name__)
if cfg.get('secret') == '':
secret = hashlib.sha1(os.urandom(16))
cfg.set('secret', secret.hexdigest())
app.config.update(
SECRET_KEY=cfg.get('secret')
)
app.register_blueprint(auth.bp)
app.register_blueprint(filemanager.bp)
app.register_blueprint(motor_control.bp)
app.register_blueprint(system_settings.bp)
app.register_blueprint(imaging_settings.bp)
return app
app = create_app()
experimenter = Experimenter()
experimenter.daemon = True
experimenter.start()
app.jinja_env.trim_blocks = True
app.jinja_env.lstrip_blocks = True
WebSocketPlugin(cherrypy.engine).subscribe()
cherrypy.tools.websocket = WebSocketTool()
def run():
'''start web ui and initialize hardware peripherals'''
log('Initializing hardware...')
# flash led to show it is working
led.on = True
time.sleep(1)
led.on = False
# home the motor
motor.find_home()
cam.streaming = True
ws_t = Thread(target=continuously_update_websocket, daemon=True)
ws_t.start()
broadcast_thread = Broadcaster(camera=cam)
try:
#app.run(host="0.0.0.0", port=8080)
broadcast_thread.daemon = True
broadcast_thread.start()
cherrypy.tree.graft(app, '/')
cherrypy.tree.mount(StreamingWebSocketRoot, '/video', config={'/frame': {'tools.websocket.on': True, 'tools.websocket.handler_cls': StreamingWebSocket}})
cherrypy.tree.mount(StatusWebSocketRoot, '/api', config={'/ws': {'tools.websocket.on': True, 'tools.websocket.handler_cls': StatusWebSocket}})
cherrypy.server.bind_addr = ('0.0.0.0', 8080)
cherrypy.engine.start()
cherrypy.engine.block()
finally:
experimenter.stop_experiment = experimenter.quit = True
cam.close()
motor.close()
led.on = False
@app.route('/index.html')
@app.route('/')
def index():
if experimenter.running:
return redirect(url_for('experiment'))
else:
return render_template('index.jinja')
@app.route('/still.png')
def get_image():
return Response(cam.image.read(), mimetype='image/png')
def run_shutdown():
subprocess.run(['sudo', 'shutdown', '-h', 'now'])
@not_while_running
@app.route('/shutdown')
def shutdown():
t = Timer(1, run_shutdown)
t.start()
return render_template('shutdown.jinja', message='Shutting down', refresh=0, longmessage="Allow the shutdown process to finish before turning the power off.")
def run_reboot():
subprocess.run(['sudo', 'shutdown', '-r', 'now'])
@not_while_running
@app.route('/reboot')
def reboot():
t = Timer(1, run_reboot)
t.start()
return render_template('shutdown.jinja', message='Rebooting', refresh=120, longmessage='Please allow up to two minutes for system to return to usable state.')
def run_restart():
cherrypy.engine.restart()
@not_while_running
@app.route('/restart')
def restart():
t = Timer(1, run_restart)
t.start()
return render_template('shutdown.jinja', message='Restarting web UI', refresh=15, longmessage='Please allow up to 15 seconds for the web UI to restart.')
@not_while_running
@app.route('/led/<val>')
def set_led(val):
if val in ['on', 'off']:
if val == 'on':
led.on = True
else:
led.on = False
return Response('OK', 200)
abort(404)
@app.route('/experiment', methods=['GET', 'POST'])
def experiment():
if request.method == 'GET':
return render_template('experiment.jinja', running=experimenter.running, starttime=experimenter.starttime, imgfreq=experimenter.imgfreq)
else:
if request.form['action'] == 'start':
if request.form.get('expname') is None:
expname = time.strftime("%Y-%m-%d Unnamed experiment", time.localtime())
else:
expname = time.strftime("%Y-%m-%d ", time.localtime()) + request.form.get('expname')
experimenter.dir = os.path.join(os.path.expanduser('~'), 'ambvis', expname)
if request.form.get('imgfreq') is None:
experimenter.imgfreq = 60
else:
experimenter.imgfreq = int(request.form.get('imgfreq'))
experimenter.stop_experiment = False
experimenter.status_change.set()
elif request.form['action'] == 'stop':
experimenter.stop_experiment = True
time.sleep(0.5)
return render_template('experiment.jinja', running=experimenter.running, starttime=experimenter.starttime, imgfreq=experimenter.imgfreq)
class StreamingWebSocket(WebSocket):
'''the video streaming websocket broadcasts only binary data'''
def opened(self):
print("New video client connected")
def send(self, payload, binary=False):
if binary == True:
super().send(payload, binary)
class StatusWebSocket(WebSocket):
'''the status websocket only broadcasts non-binary data'''
last_message = None
def opened(self):
print("New status client connected")
def send(self, payload, binary=False):
if binary == False:
if payload != self.last_message:
self.last_message = payload
super().send(payload, binary)
class StreamingWebSocketRoot:
@cherrypy.expose
def index():
pass
@cherrypy.expose
def frame():
pass
class StatusWebSocketRoot:
@cherrypy.expose
def index():
pass
@cherrypy.expose
def ws():
pass
def continuously_update_websocket():
'''send out a status message as json every second'''
while True:
update_websocket()
time.sleep(1)
``` |
{
"source": "jonasoh/etalumacontrol",
"score": 2
} |
#### File: etalumacontrol/CypressFX/__init__.py
```python
import usb.core
import usb.util
import intelhex
import pkg_resources
class FX2(object):
"""Supports firmware and EEPROM operations on Cypress FX2 devices"""
REQ_WRITE = (usb.core.util.ENDPOINT_OUT | usb.core.util.CTRL_TYPE_VENDOR |
usb.core.util.CTRL_RECIPIENT_DEVICE)
REQ_READ = (usb.core.util.ENDPOINT_IN | usb.core.util.CTRL_TYPE_VENDOR |
usb.core.util.CTRL_RECIPIENT_DEVICE)
CMD_RW_INTERNAL = 0xA0
CMD_RW_EEPROM = 0xA2
MAX_CTRL_BUFFER_LENGTH = 4096
def __init__(self, usbDev):
if not usbDev:
raise AttributeError("USB Device passed is not valid")
self.dev = usbDev
self.running_vend_ax_fw = False
@staticmethod
def with_vid_pid(vid, pid):
"""Opens a device with a given USB VendorID and ProductID"""
dev = usb.core.find(idVendor=vid, idProduct=pid)
if dev:
return FX2(dev)
return None
@staticmethod
def with_bus_address(bus, address):
"""Opens a device at a given USB Bus and Address"""
dev = usb.core.find(bus=bus, address=address)
if dev:
return FX2(dev)
return None
def reset(self, enable_cpu):
"""Resets a device and optionally enables the CPU core"""
cpu_address = 0xE600
data = bytearray(1)
if enable_cpu:
data[0] = 0
print("reset CPU")
else:
print("stop CPU")
data[0] = 1
wrote = self.__send_usbctrldata(cpu_address & 0xFFFF, bytes(data))
if not wrote > 0:
return False
return True
def __ensure_vend_ax_firmware(self):
"""Makes sure that we're running the default code"""
if not self.running_vend_ax_fw:
hexfile = pkg_resources.resource_filename('CypressFX',
'vend_ax.hex')
self.load_intelhex_firmware(hexfile)
self.running_vend_ax_fw = True
def read_eeprom(self, length=8):
"""Reads bytes from the device's EEPROM"""
self.__ensure_vend_ax_firmware()
data = self.dev.ctrl_transfer(self.REQ_READ, self.CMD_RW_EEPROM, 0x00,
0x00, length)
return data
def write_eeprom(self, data):
"""Writes data to the device's EEPROM"""
self.__ensure_vend_ax_firmware()
wrote = self.dev.ctrl_transfer(self.REQ_WRITE, self.CMD_RW_EEPROM,
0x00, 0x00, data)
return wrote
def __send_usbctrldata(self, addr, data):
wrote = self.dev.ctrl_transfer(self.REQ_WRITE,
self.CMD_RW_INTERNAL,
addr, 0x00, data)
if not wrote == len(data):
raise IOError("Failed to write %d bytes to %x" % (len(data),
addr))
return wrote
def load_intelhex_firmware(self, filename):
"""Loads firmware from an IntelHex formatted file"""
total = 0
fw_hex = intelhex.IntelHex(filename)
if not self.reset(enable_cpu=False):
raise IOError("Failed to halt CPU")
for seg_start, seg_end in fw_hex.segments():
data = fw_hex.tobinstr(start=seg_start, end=seg_end-1)
# libusb issue #110 https://github.com/libusb/libusb/issues/110
offset = 0
while len(data) > 0:
end = len(data)
if end > self.MAX_CTRL_BUFFER_LENGTH:
end = self.MAX_CTRL_BUFFER_LENGTH
print("0x{0:04x} loading {1:4d} bytes".format(
seg_start + offset, end))
wrote = self.dev.ctrl_transfer(self.REQ_WRITE,
self.CMD_RW_INTERNAL,
seg_start+offset, 0x00,
data[:end])
if not wrote == end:
raise IOError("Failed to write %d bytes to %x" % (end,
seg_start))
total += wrote
offset += wrote
data = data[end:]
if not self.reset(enable_cpu=True):
raise IOError("Failed to start CPU")
return total
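# --- Usage sketch (illustration only, not part of the original module) ---
# 0x04B4:0x8613 is the default VID:PID of an unconfigured Cypress FX2LP; the
# firmware path in the commented line is an assumption.
if __name__ == '__main__':
    fx2 = FX2.with_vid_pid(0x04B4, 0x8613)
    if fx2 is None:
        raise SystemExit('No FX2 device found')
    print('First EEPROM bytes:', bytes(fx2.read_eeprom(8)))
    # fx2.load_intelhex_firmware('firmware.hex')  # load custom firmware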
``` |
{
"source": "jonasoh/petripi",
"score": 2
} |
#### File: petripi/spiro/hostapd.py
```python
import re
import os
import uuid
import textwrap
import subprocess
from spiro.logger import log, debug
def init():
"""makes sure everything runs smoothly later on."""
p = subprocess.run(["sudo", "systemctl", "daemon-reload"], capture_output=False)
def install_reqs():
"""installs hostapd and dnsmasq. returns a tuple of (installation status, output of failed command)."""
reqs = ['dnsmasq', 'hostapd']
for r in reqs:
p = subprocess.run(['dpkg', '-l', r], capture_output=False)
if p.returncode != 0:
# install requirement
p = subprocess.run(["apt", "install", "-y", r], capture_output=False)
if p.returncode != 0:
# installation failed
return (False, p.stderr)
# stop the service as it is not properly configured yet
p = subprocess.run(["systemctl", "stop", r], capture_output=False)
return (True, None)
def config_hostapd():
"""sets up hostapd config, see also get_ssid()"""
# use last 6 digits of MAC address as unique id
u = str(uuid.uuid1())
id = "spiro-" + u[30:36]
pwd = u[0:8]
with open("/etc/hostapd/hostapd.conf", "w") as f:
f.write(textwrap.dedent("""\
# auto-generated by spiro software
interface=wlan0
driver=nl80211
ssid={0}
hw_mode=g
channel=7
wmm_enabled=0
macaddr_acl=0
auth_algs=1
ignore_broadcast_ssid=0
wpa=2
wpa_passphrase={1}
wpa_key_mgmt=WPA-PSK
wpa_pairwise=TKIP
rsn_pairwise=CCMP
""".format(id, pwd)))
def config_dnsmasq():
"""sets up dnsmasq config. all dns queries are forwarded to the local ip (192.168.138.1)"""
with open("/etc/dnsmasq.conf", "w") as f:
f.write(textwrap.dedent("""\
# auto-generated by spiro software
interface=wlan0
dhcp-range=192.168.138.10,192.168.138.254,12h
address=/#/192.168.138.1
no-resolv
no-poll
no-hosts
domain=spiro.local
resolv-file=/dev/null
"""))
with open("/etc/default/dnsmasq", "w") as f:
f.write(textwrap.dedent("""\
ENABLED=1
DNSMASQ_EXCEPT=lo
"""))
def config_dhcpcd(enable):
"""enables/disables static ip for the wlan0 interface. replaces the system dhcpcd.conf with our own."""
with open("/etc/dhcpcd.conf", "w") as f:
f.write(textwrap.dedent("""\
# auto-generated by spiro software
hostname
clientid
persistent
option rapid_commit
option domain_name_servers, domain_name, domain_search, host_name
option classless_static_routes
option interface_mtu
option ntp_servers
require dhcp_server_identifier
slaac private
"""))
if enable == True:
f.write(textwrap.dedent("""\
interface wlan0
static ip_address=192.168.138.1/24
nohook wpa_supplicant
"""))
def restart_services():
"""restarts the required services after config updates. returns False if any restart command returns
a non-zero exit code."""
services = ['dhcpcd', 'dnsmasq', 'hostapd']
codes = []
for s in services:
p = subprocess.run(["sudo", "systemctl", "restart", s], capture_output=False)
debug("Restarted service {0} with status code {1}.".format(s, p.returncode))
codes.append(p.returncode)
return(all(c == 0 for c in codes))
def enable_services():
# make sure wlan interface is not blocked
p = subprocess.run(['rfkill', 'unblock', 'wlan'], capture_output=False)
services = ['dnsmasq', 'hostapd']
for s in services:
p = subprocess.run(['sudo', 'systemctl', 'unmask', s], capture_output=False)
p = subprocess.run(['sudo', 'systemctl', 'enable', s], capture_output=False)
def disable_services():
services = ['dnsmasq', 'hostapd']
for s in services:
p = subprocess.run(['sudo', 'systemctl', 'stop', s], capture_output=False)
p = subprocess.run(['sudo', 'systemctl', 'disable', s], capture_output=False)
def start_ap():
if not is_ready():
# initial steps, run 'sudo spiro --enable-hotspot'
log("Setting up dependencies...")
init()
install_reqs()
log("Configuring system...")
config_dnsmasq()
config_hostapd()
config_dhcpcd(enable=True)
log("Starting services...")
enable_services()
r = restart_services()
if not r:
log("Failed to restart services.")
log("Setting up access point failed.")
return(1)
else:
ssid, pwd = get_ssid()
log("Access point configured and enabled. Below are the details for connecting to it:")
log("\nSSID: " + ssid)
log("Password: " + pwd)
log("\nConnect to the web interface using the address http://spiro.local")
return(0)
def stop_ap():
log("Disabling services...")
init()
config_dhcpcd(enable=False)
p = subprocess.run(['sudo', 'systemctl', 'restart', 'dhcpcd'], capture_output=False)
disable_services()
log("Access point disabled.")
def get_ssid():
"""reads ssid/password from hostapd.conf. doesn't handle comments."""
ssid = None
pwd = None  # default before the config file is parsed
params = {}
try:
with open("/etc/hostapd/hostapd.conf", "r") as f:
for l in f:
lgrp = re.match(r'\s*(\w+)\s*=\s*([\w-]+)\s*', l)
if lgrp:
params[lgrp.group(1)] = lgrp.group(2)
except OSError as e:
debug('get_ssid(): ' + str(e))
return None, None
return params.get('ssid'), params.get('wpa_passphrase')
def is_ready():
"""returns True if wifi hotspot is ready to be used (i.e., spiro --enable-hotspot was
executed at some point)"""
inst_reqs = ['dnsmasq', 'hostapd']
for r in inst_reqs:
p = subprocess.run(['dpkg', '-l', r], capture_output=True)
if p.returncode != 0:
debug("is_ready: dpkg failed for " + r)
return False
cfg_reqs = ["/etc/hostapd/hostapd.conf", "/etc/dnsmasq.conf", "/etc/dhcpcd.conf"]
for cfg_file in cfg_reqs:
with open(cfg_file, "r") as f:
if not re.search(r'# auto-generated by spiro software', f.read()):
debug("is_ready: no spiro signature in " + cfg_file)
return False
return True
def is_enabled():
"""checks if hostapd and dnsmasq services are running. possibly too minimal."""
services = ['hostapd', 'dnsmasq']
for svc in services:
p = subprocess.run(['systemctl', 'is-enabled', svc], capture_output=True)
if p.returncode != 0:
return False
return True
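# --- Usage sketch (illustration only): typical flow when toggling the hotspot
# from other spiro code; requires root privileges on the Raspberry Pi.
if __name__ == '__main__':
    if not is_enabled():
        start_ap()
    else:
        ssid, password = get_ssid()
        print('Hotspot already enabled. SSID: %s, password: %s' % (ssid, password))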
``` |
{
"source": "jonasoh/spiro",
"score": 2
} |
#### File: jonasoh/spiro/setup.py
```python
from setuptools import setup, find_packages
# from miniver
def get_version_and_cmdclass(package_path):
"""Load version.py module without importing the whole package.
Template code from miniver
"""
import os
from importlib.util import module_from_spec, spec_from_file_location
spec = spec_from_file_location("version", os.path.join(package_path, "_version.py"))
module = module_from_spec(spec)
spec.loader.exec_module(module)
return module.__version__, module.cmdclass
version, cmdclass = get_version_and_cmdclass("spiro")
setup(name = 'spiro',
version = version,
cmdclass = cmdclass,
packages = find_packages(),
scripts = ['bin/spiro'],
install_requires = ['picamera==1.13', 'RPi.GPIO==0.7.0', 'Flask==1.1.1', 'waitress==2.0.0', 'numpy==1.18.1'],
author = '<NAME>',
author_email = '<EMAIL>',
description = 'Control software for the SPIRO biological imaging system',
url = 'https://github.com/jonasoh/spiro',
include_package_data = True,
zip_safe = False,
)
``` |
{
"source": "jonasoh/thermosampler",
"score": 3
} |
#### File: jonasoh/thermosampler/stoich.py
```python
import sys, os
import pandas as pd
import collections
import itertools
import argparse
import re
# Import functions
from mdf import parse_equation, read_reactions
# Define functions
def sWrite(string):
sys.stdout.write(string)
sys.stdout.flush()
def sError(string):
sys.stderr.write(string)
sys.stderr.flush()
# Main code block
def main(reaction_file, outfile_name, proton_name='C00080'):
# Load stoichiometric matrix
S_pd = read_reactions(open(reaction_file, 'r').read(), proton_name)
# Write stoichiometric matrix to outfile
S_pd.to_csv(outfile_name, "\t")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
'reactions', type=str,
help='Load reactions.'
)
parser.add_argument(
'outfile', type=str,
help='Write stoichiometric matrix in tab-delimited format.'
)
parser.add_argument(
'-H', '--proton_name', default='C00080',
help='Name used to identify protons.'
)
args = parser.parse_args()
main(args.reactions, args.outfile, args.proton_name)
``` |
{
"source": "jo-nas/openhtf",
"score": 2
} |
#### File: openhtf/core/history.py
```python
import collections
import logging
import sys
import threading
from openhtf.util import conf
from openhtf.util import data
from openhtf.util import threads
_LOG = logging.getLogger(__name__)
conf.declare('max_history_size_mb', default_value=256)
class HistorySyncError(Exception):
"""Raised when some part of the history gets out of sync with the rest."""
HistoryEntry = collections.namedtuple('HistoryEntry', ['test_uid', 'record'])
class TestHistory(object):
"""This class encapsulates a history of TestRecords from completed tests.
This class provides basic deque functionality, with some additional
approximate size tracking.
"""
def __init__(self):
self.entries = collections.deque()
self.entry_bytes = 0
def __len__(self):
return len(self.entries)
def __iter__(self):
return iter(self.entries)
@property
def size_mb(self):
return (self.entry_bytes + sys.getsizeof(self.entries)) / 1024.0 / 1024.0
@property
def last_start_time(self):
"""start_time_millis of most recent record, or 0 if no entries."""
return self.entries[0].record.start_time_millis if self.entries else 0
def pop(self):
"""Pop the oldest record and return the test_uid it was associated with."""
popped_entry = self.entries.pop()
self.entry_bytes -= data.total_size(popped_entry)
return popped_entry.test_uid
def append(self, test_uid, record):
"""Append a new record associated with the given test uid."""
entry = HistoryEntry(test_uid, record)
self.entries.appendleft(entry)
self.entry_bytes += data.total_size(entry)
class History(object):
def __init__(self):
# Track history on a per-Test basis.
self.per_test_history = collections.defaultdict(TestHistory)
# Track a history of all records (completed tests).
self.all_tests_history = TestHistory()
self._lock = threading.Lock()
@property
def size_mb(self):
return ((
(self.all_tests_history.size_mb * 1024.0 * 1024.0) +
sys.getsizeof(self.per_test_history) +
sum(sys.getsizeof(test) for test in self.per_test_history)) /
(1024.0 * 1024.0))
def _maybe_evict(self):
size_mb = self.size_mb
if size_mb < conf.max_history_size_mb:
return
_LOG.debug('History (%.2f MB) over max size (%.2f MB), evicting...',
size_mb, conf.max_history_size_mb)
# We're over size, evict the oldest records, down to 80% capacity.
while self.all_tests_history and size_mb > conf.max_history_size_mb * .8:
test_uid = self.all_tests_history.pop()
if test_uid != self.per_test_history[test_uid].pop():
raise HistorySyncError('Per-test history had invalid Test uid')
# If we have no more history entries for this test_uid, delete the key
# from the per_test_history dictionary.
if not self.per_test_history[test_uid]:
del self.per_test_history[test_uid]
# Re-calculate our total size.
size_mb = self.size_mb
_LOG.debug('Done evicting, history now %.2f MB', size_mb)
@threads.synchronized
def append_record(self, test_uid, record):
"""Append the given record for the given test UID to the history.
Args:
test_uid: UID of test whose history to update, can be obtained from an
openhtf.Test instance via the 'UID' attribute.
record: The test_record.TestRecord instance to append.
"""
_LOG.debug('Appending record at start_time_millis %s for %s',
record.start_time_millis, test_uid)
# For now, check history size on every append. If this proves to have a
# performance impact, we can reduce the frequency of this check.
self._maybe_evict()
self.per_test_history[test_uid].append(test_uid, record)
self.all_tests_history.append(test_uid, record)
@threads.synchronized
def for_test_uid(self, test_uid, start_after_millis=0):
"""Copy history for the given test UID."""
return list(entry.record for entry in self.per_test_history[test_uid]
if entry.record.start_time_millis > start_after_millis)
@threads.synchronized
def last_start_time(self, test_uid):
"""Get the most recent start time for the given test UID.
This is used to identify how up-to-date the history is, we know that all
records in the history started before or at the return value of this
method, so we can limit RPC traffic based on this knowledge.
Defaults to returning 0 if there are no records for the given test_uid.
"""
return self.per_test_history[test_uid].last_start_time
# Create a singleton instance and bind module-level names to its methods. For
# OpenHTF itself, this singleton instance will be used. The frontend server
# will need to create multiple instances itself, however, since it tracks
# multiple stations at once.
HISTORY = History()
# pylint: disable=invalid-name
append_record = HISTORY.append_record
for_test_uid = HISTORY.for_test_uid
last_start_time = HISTORY.last_start_time
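# --- Usage sketch (illustration only, not part of the original module) ---
# In real use the record is an openhtf.core.test_record.TestRecord; a
# namedtuple with a start_time_millis field stands in for it here.
if __name__ == '__main__':
    FakeRecord = collections.namedtuple('FakeRecord', ['start_time_millis'])
    append_record('test-uid-1', FakeRecord(start_time_millis=1234))
    print(for_test_uid('test-uid-1'))
    print(last_start_time('test-uid-1'))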
``` |
{
"source": "jonasoreland/avanza",
"score": 3
} |
#### File: avanza/avanza/avanza_socket.py
```python
import asyncio
import logging
import json
import websockets
from typing import Any, Callable, Sequence
from .constants import (
ChannelType
)
WEBSOCKET_URL = 'wss://www.avanza.se/_push/cometd'
logger = logging.getLogger("avanza_socket")
class AvanzaSocket:
def __init__(self, push_subscription_id, cookies):
self._socket = None
self._client_id = None
self._message_count = 1
self._push_subscription_id = push_subscription_id
self._connected = False
self._subscriptions = {}
self._cookies = cookies
self._subscribe_event = None
async def init(self):
asyncio.ensure_future(self.__create_socket())
await self.__wait_for_websocket_to_be_connected()
async def __wait_for_websocket_to_be_connected(self):
timeout_count = 40
timeout_value = 0.250
# Waits for a maximum of 10 seconds for the connection to be complete
for _ in range(0, timeout_count):
if self._connected:
return
await asyncio.sleep(timeout_value)
raise TimeoutError('\
We weren\'t able to connect \
to the websocket within the expected timeframe \
')
async def __create_socket(self):
async with websockets.connect(
WEBSOCKET_URL,
extra_headers={'Cookie': self._cookies}
) as self._socket:
await self.__send_handshake_message()
await self.__socket_message_handler()
async def __send_handshake_message(self):
await self.__send({
'advice': {
'timeout': 60000,
'interval': 0
},
'channel': '/meta/handshake',
'ext': {'subscriptionId': self._push_subscription_id},
'minimumVersion': '1.0',
'supportedConnectionTypes': [
'websocket',
'long-polling',
'callback-polling'
],
'version': '1.0'
})
async def __send_connect_message(self):
await self.__send({
'channel': '/meta/connect',
'clientId': self._client_id,
'connectionType': 'websocket',
'id': self._message_count
})
async def __socket_subscribe(
self,
subscription_string,
callback: Callable[[str, dict], Any],
wait_for_reply_timeout_seconds
):
if self._subscribe_event is None:
self._subscribe_event = asyncio.Event()
self._subscriptions[subscription_string] = {
'callback': callback
}
self._subscribe_event.clear()
await self.__send({
'channel': '/meta/subscribe',
'clientId': self._client_id,
'subscription': subscription_string
})
# Wait for subscription ack message.
if wait_for_reply_timeout_seconds is not None:
try:
await asyncio.wait_for(self._subscribe_event.wait(), timeout=wait_for_reply_timeout_seconds)
except asyncio.TimeoutError:
logger.warning('timeout waiting for subscription reply!')
async def __send(self, message):
wrapped_message = [
{
**message,
'id': str(self._message_count)
}
]
logger.info(f'Outgoing message: {wrapped_message}')
await self._socket.send(json.dumps(wrapped_message))
self._message_count = self._message_count + 1
async def __handshake(self, message: dict):
if message.get('successful', False):
self._client_id = message.get('clientId')
await self.__send({
'advice': {'timeout': 0},
'channel': '/meta/connect',
'clientId': self._client_id,
'connectionType': 'websocket'
})
return
advice = message.get('advice')
if advice and advice.get('reconnect') == 'handshake':
await self.__send_handshake_message()
async def __connect(self, message: dict):
successful = message.get('successful', False)
advice = message.get('advice', {})
reconnect = advice.get('reconnect') == 'retry'
interval = advice.get('interval')
connect_successful = successful and (
not advice or (reconnect and interval >= 0)
)
if connect_successful:
await self.__send({
'channel': '/meta/connect',
'clientId': self._client_id,
'connectionType': 'websocket'
})
if not self._connected:
self._connected = True
await self.__resubscribe_existing_subscriptions()
elif self._client_id:
await self.__send_connect_message()
async def __resubscribe_existing_subscriptions(self):
for key, value in self._subscriptions.items():
if value.get('client_id') != self._client_id:
await self.__socket_subscribe(
key,
value['callback'], None  # resubscribe without waiting for the ack
)
async def __disconnect(self, message):
await self.__send_handshake_message()
async def __register_subscription(self, message):
subscription = message.get('subscription')
if subscription is None:
raise ValueError('No subscription channel found on subscription message')
self._subscriptions[subscription]['client_id'] = self._client_id
self._subscribe_event.set()
async def __socket_message_handler(self):
message_action = {
'/meta/disconnect': self.__disconnect,
'/meta/handshake': self.__handshake,
'/meta/connect': self.__connect,
'/meta/subscribe': self.__register_subscription
}
async for message in self._socket:
message = json.loads(message)[0]
message_channel = message.get('channel')
error = message.get('error')
logger.info(f'Incoming message: {message}')
if error:
logger.error(error)
action = message_action.get(message_channel)
# Use user subscribed action
if action is None:
callback = self._subscriptions[message_channel]['callback']
callback(message)
else:
await action(message)
async def subscribe_to_id(
self,
channel: ChannelType,
id: str,
callback: Callable[[str, dict], Any],
wait_for_reply_timeout_seconds,
):
return await self.subscribe_to_ids(channel, [id], callback, wait_for_reply_timeout_seconds)
async def subscribe_to_ids(
self,
channel: ChannelType,
ids: Sequence[str],
callback: Callable[[str, dict], Any],
wait_for_reply_timeout_seconds
):
valid_channels_for_multiple_ids = [
ChannelType.ORDERS,
ChannelType.DEALS,
ChannelType.POSITIONS
]
if (
len(ids) > 1 and
channel not in valid_channels_for_multiple_ids
):
raise ValueError(f'Multiple ids is not supported for channels other than {valid_channels_for_multiple_ids}')
subscription_string = f'/{channel.value}/{",".join(ids)}'
await self.__socket_subscribe(subscription_string, callback, wait_for_reply_timeout_seconds)
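# --- Usage sketch (illustration only, not part of the original module) ---
# AvanzaSocket is normally created by the Avanza client after authentication;
# the subscription id, cookie string and order book id below are placeholders,
# and ChannelType.QUOTES is assumed to exist in .constants.
if __name__ == '__main__':
    async def _demo():
        socket = AvanzaSocket('<push_subscription_id>', '<cookie header>')
        await socket.init()
        await socket.subscribe_to_id(
            ChannelType.QUOTES,
            '19002',  # placeholder order book id
            lambda message: print(message),
            wait_for_reply_timeout_seconds=5,
        )
        await asyncio.sleep(30)  # let some quote updates arrive

    asyncio.run(_demo())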
``` |
{
"source": "JonasPC/BayesFactorModel",
"score": 3
} |
#### File: BayesFactorModel/model/gibbssampler.py
```python
import numpy as np
import pandas as pd


class GibbsSampler(object):
    """
    Samples parameters of posterior given data and hyper parameters.
    ===
    - Parameters -
    data: Must be a df of dimension (N x p)
    number_of_factors: number of latent factors (int)
    """
    def __init__(self, data, number_of_factors=3):
        self.data = data
        self.n = len(data)  # num of observations
        self.k = len(self.data.columns)  # num of covariates
        self.p = number_of_factors  # num of factors
def cov_matrix(self, type='identity'):
"""
Sets the cov_matrix to a fixed identity matrix or allows it to
be simulated.
"""
if type == 'identity':
self.cov = np.identity(self.n)
elif type == 'simulate':
self.cov = pd.DataFrame(np.ones((self.n, self.n)))
else:
raise Exception("type must be 'identity' or 'simulate'")
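# --- Usage sketch (illustration only): construct the sampler from a small
# random DataFrame; the data and the number of factors are made up.
if __name__ == '__main__':
    df = pd.DataFrame(np.random.randn(50, 4), columns=['x1', 'x2', 'x3', 'x4'])
    gs = GibbsSampler(df, number_of_factors=2)
    gs.cov_matrix(type='identity')
    print(gs.n, gs.k, gs.p, gs.cov.shape)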
```
#### File: BayesFactorModel/model/simulatedata.py
```python
import numpy as np
import pandas as pd
class SimulateData(object):
"""Simulating data from some SCM
Parameters
==========
n_obs : (int)
number of observation
mean : (np.array)
array of mean for the variables
covariance : (np.matrix)
covariance matrix calculated with numpy
"""
    def __init__(self, mean, covariance):
        self.mu = mean
        self.sigma = covariance

    def cholesky_decomposition(self):
        # Lower-triangular Cholesky factor of the covariance matrix
        # (sigma = L @ L.T), the standard ingredient for simulating
        # correlated normal data.
        return np.linalg.cholesky(self.sigma)
```
#### File: model/tests/test_calculatemoments.py
```python
import pandas as pd
import numpy as np
import pytest
from model.calculatemoments import CalculateMoments
@pytest.fixture
def setup_mean_matrix1():
x1 = [1 for i in range(1000)]
x2 = [2 for i in range(1000)]
x3 = [3 for i in range(1000)]
return pd.DataFrame({'x1': x1, 'x2': x2, 'x3': x3})
@pytest.fixture
def setup_cm1(setup_mean_matrix1):
""" calulate moments matrix 1"""
return CalculateMoments(setup_mean_matrix1)
def test_calc_mean(setup_cm1):
m1 = setup_cm1.mu()
m2 = np.matrix([[1., 2., 3.]])
np.testing.assert_array_equal(m1, m2)
def test_cov_shape(setup_cm1):
assert setup_cm1.cov().shape == (3, 3)
```
#### File: BayesFactorModel/model/utils.py
```python
import pandas as pd
def read_clean_kv17(drop_party_key=False):
"""Reads the cleaned data from kv17
Parameters
----------
drop_party_key : (bool)
If true the partyKey column is dropped from the returned DataFrame
Returns
-------
type : (object) pd.DataFrame
DataFrame with questions answered by politicians in the municipal elections 2017
"""
df = pd.read_csv('data//clean_kv17.csv', index_col=0)
if drop_party_key is True:
df.drop('partyKey', axis=1, inplace=True)
return df
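
# Hedged usage sketch (added for illustration; assumes data//clean_kv17.csv exists):
if __name__ == '__main__':
    answers = read_clean_kv17(drop_party_key=True)
    print(answers.shape)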
``` |
{
"source": "JonasPed/lvmcache2mqtt",
"score": 2
} |
#### File: lvmcache2mqtt/lvmcache2mqtt/collect.py
```python
import json
import subprocess
class StatsCollector:
def collect(self, device):
result = subprocess.run(["lvs",
"--reportformat", "json",
"--noheadings",
"-o", "cache_read_hits,cache_read_misses,cache_write_hits,cache_write_misses,cache_dirty_blocks",
device],
capture_output=True,
check=True)
json_result = json.loads(result.stdout.decode('UTF-8'))
        # TODO Do some checking on the output.
        return self._zero_when_blank(json_result['report'][0]['lv'][0])

    def _zero_when_blank(self, stats):
        # Replace empty string values with "0" so every reported field is numeric
        for item in stats:
            if stats.get(item) == "":
                stats[item] = "0"
        return stats
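
# Hedged usage sketch (added for illustration; requires the 'lvs' binary and a cached LV;
# "vg0/cached_lv" is a placeholder device name):
if __name__ == '__main__':
    collector = StatsCollector()
    print(collector.collect("vg0/cached_lv"))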
``` |
{
"source": "JonasPf/SimpleMenus",
"score": 3
} |
#### File: simplemenus/test/test_main.py
```python
from __future__ import print_function
import unittest
import io
import datetime
import sys
import collections
import simplemenus.main
from simplemenus import *
def string_io_class():
if sys.version_info >= (3, 0):
return io.StringIO
else:
return io.BytesIO
class IOTestCase(unittest.TestCase):
def setUp(self):
sys.stdout = string_io_class()()
self.single_character_index = -1
def tearDown(self):
reset_config()
def mockInput(self, input):
sys.stdin = string_io_class()(input)
def mockSingleCharacterInput(self, input):
def mock_result():
self.single_character_index += 1
if len(input) > self.single_character_index:
return input[self.single_character_index]
else:
return input[-1:]
simplemenus.main._getch = mock_result
def assertOutput(self, expected):
self.assertEqual(expected, sys.stdout.getvalue())
class Test_get_string(IOTestCase):
def test_should_print_prompt(self):
self.mockInput('\n')
get_string('Enter text')
self.assertOutput("Enter text> ")
def test_should_return_input(self):
self.mockInput('123')
self.assertEqual("123", get_string())
def test_should_return_default_when_no_input_given(self):
self.mockInput('\n')
self.assertEqual("123", get_string(default='123'))
def test_should_return_empty_when_no_input_given_and_no_default_defined(self):
self.mockInput('\n')
self.assertIs('', get_string())
def test_wait_for_enter_should_print_prompt(self):
self.mockInput('\n')
wait_for_enter()
self.assertOutput("Press enter to continue> ")
class Test_get_character(IOTestCase):
def test_should_print_prompt(self):
self.mockSingleCharacterInput('x')
get_character('Enter character')
self.assertOutput("Enter character> x\n")
def test_should_print_result_and_newline(self):
self.mockSingleCharacterInput('x')
get_character('Enter character')
self.assertOutput("Enter character> x\n")
def test_should_return_input(self):
self.mockSingleCharacterInput('a')
self.assertEqual("a", get_character())
def test_should_return_default_when_return_given(self):
self.mockSingleCharacterInput('\r')
self.assertEqual("a", get_character(default='a'))
def test_should_return_return_when_no_default_configure(self):
self.mockSingleCharacterInput('\r')
self.assertEqual("\r", get_character())
class Test_get_boolean(IOTestCase):
def test_should_print_prompt(self):
self.mockSingleCharacterInput('y')
get_boolean('Answer yes or no')
self.assertOutput("Answer yes or no> y\n")
def test_should_return_true_when_yes(self):
self.mockSingleCharacterInput('y')
self.assertTrue(get_boolean())
def test_should_return_false_when_no(self):
self.mockSingleCharacterInput('n')
self.assertFalse(get_boolean())
def test_should_return_default_when_no_input_given(self):
self.mockSingleCharacterInput('\r')
self.assertTrue(get_boolean(default=True))
self.assertFalse(get_boolean(default=False))
def test_should_loop_until_valid_input(self):
# \r is invalid if no default defined
self.mockSingleCharacterInput("1\rn")
self.assertIs(False, get_boolean())
self.assertOutput("""> 1
Must be one of: ['y', 'n']
> \r
Must be one of: ['y', 'n']
> n
""")
class Test_get_integer(IOTestCase):
def test_should_print_prompt(self):
self.mockInput('123\n')
get_integer('Enter a number')
self.assertOutput("Enter a number> ")
def test_should_return_integer(self):
self.mockInput('123\n')
self.assertEqual(123, get_integer())
def test_should_return_default_when_no_input_given(self):
self.mockInput('\n')
self.assertEqual(123, get_integer(default=123))
def test_should_loop_until_valid_input(self):
self.mockInput('a\n\n123\n')
self.assertEqual(123, get_integer())
self.assertOutput("""> Not a number: a
> Not a number:
> """)
class Test_get_date(IOTestCase):
def test_should_print_prompt(self):
self.mockInput('15/12/2013\n')
get_date('Enter a date')
self.assertOutput("Enter a date> ")
def test_should_return_date(self):
self.mockInput('15/12/2013\n')
self.assertEqual(datetime.date(2013, 12, 15), get_date())
def test_should_return_default_when_no_input_given(self):
self.mockInput('\n')
self.assertEqual(datetime.date(2013, 12, 15), get_date(default=datetime.date(2013, 12, 15)))
def test_should_loop_until_valid_input(self):
self.mockInput('a\n33/12/2013\n\n15/12/2013\n')
self.assertEqual(datetime.date(2013, 12, 15), get_date())
self.assertOutput("""> Not a date: a
> Not a date: 33/12/2013
> Not a date:
> """)
def test_should_use_configured_date_format(self):
configure('date_format', '%d-%m-%y')
self.mockInput('15-12-13\n')
self.assertEqual(datetime.date(2013, 12, 15), get_date())
class Test_get_option(IOTestCase):
def setUp(self):
IOTestCase.setUp(self)
self.my_list = ["mouse", "elephant", "dog"]
self.my_single_list = ["m", "e", "d"]
def test_should_print_prompt(self):
self.mockInput('mouse\n')
get_option(self.my_list, "Choose an animal")
self.assertOutput("Choose an animal> ")
def test_should_return_selection_when_multiple_characters_are_entered(self):
self.mockInput('mouse\n')
self.assertEqual('mouse', get_option(self.my_list))
def test_should_return_selection_when_single_character_is_entered(self):
self.mockSingleCharacterInput('m')
self.assertEqual('m', get_option(self.my_single_list))
def test_should_force_return_when_configured(self):
configure('force_return', True)
self.mockInput('m\n')
self.assertEqual('m', get_option(self.my_single_list))
def test_should_return_default_when_no_input_given(self):
self.mockInput('\n')
self.assertEqual('mouse', get_option(self.my_list, default='mouse'))
def test_should_loop_until_valid_input(self):
self.mockInput('snake\n\nmouse\n')
self.assertEqual('mouse', get_option(self.my_list))
self.assertOutput("""> Must be one of: ['mouse', 'elephant', 'dog']
> Must be one of: ['mouse', 'elephant', 'dog']
> """)
class Test_get_from_list(IOTestCase):
def setUp(self):
IOTestCase.setUp(self)
self.my_list = ["mouse", "elephant", "dog"]
def test_should_print_list_and_prompt(self):
self.mockSingleCharacterInput('a')
get_from_list(self.my_list, "Choose an animal")
self.assertOutput("""a) mouse
b) elephant
c) dog
0) Cancel
Choose an animal> a
""")
def test_should_print_list_without_cancel_when_cancel_is_disabled(self):
self.mockSingleCharacterInput('a')
get_from_list(self.my_list, show_cancel=False)
self.assertOutput("""a) mouse
b) elephant
c) dog
> a
""")
def test_should_return_selection(self):
self.mockSingleCharacterInput('a')
self.assertEqual('mouse', get_from_list(self.my_list))
def test_should_return_none_when_canceled(self):
self.mockSingleCharacterInput('0')
self.assertIs(None, get_from_list(self.my_list))
def test_should_return_default_when_no_input_given(self):
self.mockSingleCharacterInput('\r')
self.assertEqual('mouse', get_from_list(self.my_list, default='mouse'))
def test_should_loop_until_valid_input(self):
self.mockSingleCharacterInput('d\rb')
self.assertEqual('elephant', get_from_list(self.my_list))
self.assertOutput("""a) mouse
b) elephant
c) dog
0) Cancel
> d
Must be one of: ['a', 'b', 'c', '0']
> \r
Must be one of: ['a', 'b', 'c', '0']
> b
""")
def test_should_not_accept_cancel_when_cancel_is_disabled(self):
self.mockSingleCharacterInput('0b')
self.assertEqual('elephant', get_from_list(self.my_list, show_cancel=False))
self.assertOutput("""a) mouse
b) elephant
c) dog
> 0
Must be one of: ['a', 'b', 'c']
> b
""")
def test_should_force_return_when_configured(self):
configure('force_return', True)
self.mockInput('a\n')
self.assertEqual('mouse', get_from_list(self.my_list))
def test_should_return_configure_empty_text(self):
configure('empty_text', 'Nothing')
self.mockSingleCharacterInput('0')
get_from_list([])
self.assertOutput("""Nothing
0) Cancel
> 0
""")
def test_should_show_configured_cancel(self):
configure('cancel_option', 'x')
configure('cancel_text', 'Exit')
self.mockSingleCharacterInput('x')
get_from_list(['a', 'b'])
self.assertOutput("""a) a
b) b
x) Exit
> x
""")
def test_should_show_configured_list_format(self):
configure('list_format', '{option} >>> {text}')
self.mockSingleCharacterInput('a')
get_from_list(['a', 'b'])
self.assertOutput("""a >>> a
b >>> b
0 >>> Cancel
> a
""")
class Test_get_from_dictionary(IOTestCase):
def setUp(self):
IOTestCase.setUp(self)
self.my_dict = collections.OrderedDict()
self.my_dict['Mickey Mouse'] = 'mouse'
self.my_dict['Dumbo'] = 'elephant'
self.my_dict['Lassie'] = 'dog'
def test_should_print_list_and_prompt(self):
self.mockSingleCharacterInput('a')
get_from_dictionary(self.my_dict)
self.assertOutput("""a) Mickey Mouse
b) Dumbo
c) Lassie
0) Cancel
> a
""")
def test_should_print_list_without_cancel_when_cancel_is_disabled(self):
self.mockSingleCharacterInput('a')
get_from_dictionary(self.my_dict, show_cancel=False)
self.assertOutput("""a) Mickey Mouse
b) Dumbo
c) Lassie
> a
""")
def test_should_return_selection(self):
self.mockSingleCharacterInput('a')
self.assertEqual('mouse', get_from_dictionary(self.my_dict))
def test_should_return_none_when_canceled(self):
self.mockSingleCharacterInput('0')
self.assertIs(None, get_from_dictionary(self.my_dict))
def test_should_loop_until_valid_input(self):
self.mockSingleCharacterInput('d\rb')
self.assertEqual('elephant', get_from_dictionary(self.my_dict))
self.assertOutput("""a) Mickey Mouse
b) Dumbo
c) Lassie
0) Cancel
> d
Must be one of: ['a', 'b', 'c', '0']
> \r
Must be one of: ['a', 'b', 'c', '0']
> b
""")
def test_should_not_accept_cancel_when_cancel_is_disabled(self):
self.mockSingleCharacterInput('0b')
self.assertEqual('elephant', get_from_dictionary(self.my_dict, show_cancel=False))
self.assertOutput("""a) Mickey Mouse
b) Dumbo
c) Lassie
> 0
Must be one of: ['a', 'b', 'c']
> b
""")
def test_should_force_return_when_configured(self):
configure('force_return', True)
self.mockInput('b\n')
self.assertEqual('elephant', get_from_dictionary(self.my_dict, show_cancel=False))
class Test_show_functions(IOTestCase):
def test_show_enumerated_list(self):
show_enumerated_list(['mouse', 'elephant', 'dog'])
self.assertOutput("""a) mouse
b) elephant
c) dog
""")
def test_show_enumerated_list_when_empty(self):
show_enumerated_list([])
self.assertOutput("No entries\n")
def test_show_headline(self):
show_headline("Hello World")
self.assertOutput("""
+-------------+
| Hello World |
+-------------+
""")
def test_show_small_headline(self):
show_small_headline("Hello World")
self.assertOutput("""+--- Hello World ---+
""")
class Test_start_menu(IOTestCase):
def test_start_menu(self):
def first():
print("first called")
def second():
print("second called")
self.menu = collections.OrderedDict()
self.menu['First Entry'] = first
self.menu['Second Entry'] = second
self.mockSingleCharacterInput('a0')
start_menu(self.menu, "Hello World")
self.assertOutput("""
+-------------+
| Hello World |
+-------------+
a) First Entry
b) Second Entry
0) Cancel
> a
first called
+-------------+
| Hello World |
+-------------+
a) First Entry
b) Second Entry
0) Cancel
> 0
""")
    def test_start_menu_two_selections(self):
def first():
print("first called")
def second():
print("second called")
self.menu = collections.OrderedDict()
self.menu['First Entry'] = first
self.menu['Second Entry'] = second
self.mockSingleCharacterInput('ab0')
start_menu(self.menu, "Hello World")
self.assertOutput("""
+-------------+
| Hello World |
+-------------+
a) First Entry
b) Second Entry
0) Cancel
> a
first called
+-------------+
| Hello World |
+-------------+
a) First Entry
b) Second Entry
0) Cancel
> b
second called
+-------------+
| Hello World |
+-------------+
a) First Entry
b) Second Entry
0) Cancel
> 0
""")
def test_start_menu_non_repeat(self):
def first():
print("first called")
def second():
print("second called")
self.menu = collections.OrderedDict()
self.menu['First Entry'] = first
self.menu['Second Entry'] = second
self.mockSingleCharacterInput('ab0')
start_menu(self.menu, "Hello World", repeat=False)
self.assertOutput("""
+-------------+
| Hello World |
+-------------+
a) First Entry
b) Second Entry
0) Cancel
> a
first called
""")
def test_start_menu_hide_cancel(self):
def first():
print("first called")
self.menu = collections.OrderedDict()
self.menu['First Entry'] = first
self.mockSingleCharacterInput('a')
start_menu(self.menu, "Hello World", repeat=False, show_cancel=False)
self.assertOutput("""
+-------------+
| Hello World |
+-------------+
a) First Entry
> a
first called
""")
def test_start_menu_with_arguments(self):
def first(firstname, surname):
print("first called with {} {}".format(firstname, surname))
self.menu = collections.OrderedDict()
self.menu['First Entry'] = first
self.mockSingleCharacterInput('a0')
start_menu(self.menu, "Hello World", args=('Mickey', 'Mouse'))
self.assertOutput("""
+-------------+
| Hello World |
+-------------+
a) First Entry
0) Cancel
> a
first called with Mickey Mouse
+-------------+
| Hello World |
+-------------+
a) First Entry
0) Cancel
> 0
""")
def test_start_menu_with_keyword_arguments(self):
def first(surname, firstname): # deliberately in the wrong order to test keyword args
print("first called with {} {}".format(firstname, surname))
self.menu = collections.OrderedDict()
self.menu['First Entry'] = first
self.mockSingleCharacterInput('a0')
start_menu(self.menu, "Hello World", kwargs={'firstname': 'Mickey', 'surname': 'Mouse'})
self.assertOutput("""
+-------------+
| Hello World |
+-------------+
a) First Entry
0) Cancel
> a
first called with Mickey Mouse
+-------------+
| Hello World |
+-------------+
a) First Entry
0) Cancel
> 0
""")
class Test_configure(IOTestCase):
"""
    These are just the general configuration tests. Everything specific to a particular function is in the related test class.
"""
def test_should_throw_exception_for_unknown_configuration(self):
self.assertRaises(Exception, configure, 'unknown', 'value')
def test_prompt(self):
configure('prompt', '-->')
self.mockInput('\n')
get_string()
self.assertOutput("-->")
``` |
{
"source": "JonasPf/testtools",
"score": 3
} |
#### File: JonasPf/testtools/capture.py
```python
import pythoncom, pyHook, sys, json, os, shutil
from common import *
import threading
import datetime
import docopt
OUTPUT_FILE = 'data.json'
BMP_DIRECTORY = 'expected'
last_event = None
directory = None
last_event = None
running = True
events = []
screenshot_count = 0
screenshot_directory = None
def calc_time_diff():
global last_event
new_event = datetime.datetime.now()
diff = new_event - last_event
last_event = new_event
return diff.total_seconds()
def get_xy_in_window(mx, my):
wx, wy, ww, wh = get_active_window_dimensions()
return (mx - wx, my - wy)
def mouse_event(event):
mx, my = event.Position
x, y = get_xy_in_window(mx, my)
append_wait_event()
if event.MessageName == 'mouse move':
events.append({'event' : 'mousemove', 'x' : x, 'y' : y})
elif event.MessageName == 'mouse left up':
events.append({'event' : 'mouserelease', 'button' : 'left', 'x' : x, 'y' : y})
elif event.MessageName == 'mouse left down':
events.append({'event' : 'mousepress', 'button' : 'left', 'x' : x, 'y' : y})
elif event.MessageName == 'mouse right up':
events.append({'event' : 'mouserelease', 'button' : 'right', 'x' : x, 'y' : y})
elif event.MessageName == 'mouse right down':
events.append({'event' : 'mousepress', 'button' : 'right', 'x' : x, 'y' : y})
else:
raise Exception('Unknown event: {}'.format(event.MessageName))
return True
def screenshot():
global screenshot_count, screenshot_directory
print 'Save screen capture'
screenshot_count += 1;
filename = '{}.bmp'.format(screenshot_count)
capture_screen(screenshot_directory, filename)
return filename
def append_wait_event():
diff = calc_time_diff()
events.append({'event' : 'wait', 'seconds' : diff})
def keyboard_event(event):
global running
print event.GetKey()
if event.MessageName in ('key down', 'key sys down'):
if event.Key == 'F11':
running = False
elif event.Key == 'F12':
filename = screenshot()
append_wait_event()
events.append({'event' : 'screencap', 'file' : filename})
else:
append_wait_event()
events.append({'event' : 'keypress', 'character' : event.Key})
elif event.MessageName in ('key up', 'key sys up'):
if event.Key == 'F12':
pass # ignore F12 key up events
else:
append_wait_event()
events.append({'event' : 'keyrelease', 'character' : event.Key})
else:
raise Exception('Unknown event: {}'.format(event.MessageName))
return True
def write_events(events, directory):
filename = os.path.join(directory, OUTPUT_FILE)
print 'Write to {}'.format(filename)
with open(filename, 'w') as f:
json.dump(events, f, indent=4)
sys.exit(0)
def create_directory(directory):
if not os.path.exists(directory):
os.makedirs(directory)
else:
print 'Error: {} already exists'.format(directory)
sys.exit(1)
if __name__ == "__main__":
arguments = docopt.docopt(__doc__)
create_directory(arguments['<directory>'])
screenshot_directory = os.path.join(arguments['<directory>'], BMP_DIRECTORY)
countdown()
last_event = datetime.datetime.now()
hm = pyHook.HookManager()
hm.MouseAll = mouse_event
hm.KeyAll = keyboard_event
hm.HookMouse()
hm.HookKeyboard()
print 'Start capturing (Exit with F11, Screenshot with F12)'
while running:
pythoncom.PumpWaitingMessages()
print 'End capturing'
write_events(events, arguments['<directory>'])
```
#### File: JonasPf/testtools/replay.py
```python
import sys, json, time, datetime
import docopt
from common import *
import pyHook
INPUT_FILE = 'data.json'
def _convert_to_keycode(key):
if key.lower() in '<KEY>':
return ord(key)
else:
return pyHook.HookConstants.VKeyToID('VK_' + key.upper())
def get_xy_in_screen(mx, my):
wx, wy, ww, wh = get_active_window_dimensions()
return (mx + wx, my + wy)
def process(events, bmp_directory):
for record in events:
event = record['event']
print event
if event == 'wait':
time.sleep(record['seconds'])
elif event == 'keypress':
win32api.keybd_event(_convert_to_keycode(record['character']), 0, 0, 0)
elif event == 'keyrelease':
win32api.keybd_event(_convert_to_keycode(record['character']), 0, win32con.KEYEVENTF_KEYUP, 0)
elif event == 'screencap':
capture_screen(bmp_directory, record['file'])
elif event == 'mousemove':
x, y = get_xy_in_screen(record['x'], record['y'])
win32api.SetCursorPos((x, y))
elif event == 'mousepress':
x, y = get_xy_in_screen(record['x'], record['y'])
if record['button'] == 'left':
win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN,x,y)
elif record['button'] == 'right':
win32api.mouse_event(win32con.MOUSEEVENTF_RIGHTDOWN,x,y)
else:
raise Exception('Unknown button: {}'.format(record['button']))
elif event == 'mouserelease':
x, y = get_xy_in_screen(record['x'], record['y'])
if record['button'] == 'left':
win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP,x,y)
elif record['button'] == 'right':
win32api.mouse_event(win32con.MOUSEEVENTF_RIGHTUP,x,y)
else:
raise Exception('Unknown button: {}'.format(record['button']))
if __name__ == "__main__":
arguments = docopt.docopt(__doc__)
directory = arguments['<directory>']
with open(os.path.join(directory, INPUT_FILE), 'r') as f:
events = json.load(f)
# allow some time to switch windows
countdown()
process(events, os.path.join(directory, datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')))
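    # Hedged usage sketch (illustration only; the exact docopt usage string is not shown
    # above, so the command lines below assume a single <directory> argument):
    #   python capture.py my_session     # record events and expected screenshots
    #   python replay.py my_session      # replay them and save new screenshots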
``` |
{
"source": "jonaspleyer/PhysiCell-SimRunner",
"score": 3
} |
#### File: jonaspleyer/PhysiCell-SimRunner/main.py
```python
from src.SimController import Controller
from src.SamplerMethods import MonteCarlo_normal, Linear
if __name__ == "__main__":
# Project_folder relative to main.py
project_folder = "PhysiCellProject"
# xml_file relative to project_folder
xml_file = "config/PhysiCell_settings.xml"
# binary_name should be in project_folder
binary_name = "secretion_project"
# Initialize the controller class
Cont = Controller(project_folder, binary_name, xml_file, parallel_sims=3)
# Define a method to sample parameters
Cont.add_sampler_method("MonteCarlo_normal", MonteCarlo_normal, init_info={"N_params":4})
    # This parameter will be changed for different simulations by the MonteCarlo_normal method
    Cont.add_variable_param(name="x_min", param_type=float, node_structure=["domain", "x_min"], info={"bound_low":-200.0, "bound_high":-20.0}, method_name="MonteCarlo_normal")
    # This parameter is just for information purposes and does not change
    Cont.add_static_param(name="x_max", param_type=float, node_structure=["domain", "x_max"])
# Now an example for correlated parameters
Cont.add_correlated_param("dx", int, ["domain", "dx"])
# Define the function by which the parameters should be correlated
# Note that the return statement is always a list
def calculate_dx(x_max, x_min):
N_voxels = 50
dx = (x_max-x_min)/N_voxels
return [int(round(dx,0))]
# Now correlate the parameters via the previously defined function
Cont.correlate_params(
params_static=["x_max"],
params_variable=["x_min"],
params_result=["dx"],
correlation_func=calculate_dx
)
# cont.generateOutputs()
Cont.run()
``` |
{
"source": "jonasprobst/hoergrete-rfid",
"score": 3
} |
#### File: jonasprobst/hoergrete-rfid/startup-sound.py
```python
from subprocess import call
from requests import get
from time import sleep
from timeit import default_timer as timer
start = timer()
def sleepOrExit(naptime=1):
lap = timer()
if lap - start >= 300:
        # time is up: play game over sound and abandon ship.
call(["aplay", "-N", "-f", "cd", "/home/pi/hoergrete-rfid/gameover.wav"])
exit()
else:
sleep(naptime)
return
try:
    # wait for the mopidy service to become active
    while True:
status = call(["systemctl", "is-active", "--quiet", "mopidy"])
if status == 0:
break
else:
sleepOrExit(1)
# Wait for iris webui
while True :
try:
resp = get("http://localhost:6680/iris")
if resp.status_code < 400:
call(["aplay", "-N", "-f", "cd", "/home/pi/hoergrete-rfid/fanfare.wav"])
break
else:
print("Error: iris is running but returned status code: ", resp.status_code)
exit()
except Exception as e:
# Iris isn't running yet, let's keep trying...
sleepOrExit(5)
except Exception as e:
print(e)
finally:
exit()
``` |
{
"source": "jonasprobst/wandering-pillar-cli",
"score": 3
} |
#### File: jonasprobst/wandering-pillar-cli/mode.py
```python
import argparse, json, requests, logging, sys, datetime
def setMode(url, mode):
try:
r = requests.put(url, json=mode)
except Exception as e:
logging.debug("POST-Error: {0}".format(e))
return False
return True
def main(argv):
    #wandering pillar protocol
CONST_MODE_MAN = 0
CONST_MODE_AUTO = 1
#setup logging
logging.basicConfig(filename="wapi.log", level=logging.DEBUG)
now = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
logging.info("\n\n>> MODE.PY - {0}".format(now))
    #hack to disable security warning. Check the following docs. I know it's not pretty...
#https://urllib3.readthedocs.org/en/latest/security.html#insecureplatformwarning
logging.captureWarnings(True)
parser = argparse.ArgumentParser(description="check the current mode (auto or manual) of a wandering pillar.", version="0.1")
parser.add_argument("id", type=int, help="device id")
parser.add_argument("-u", "--baseurl", default="https://wapi.firebaseio.com", dest="baseurl", help="firebase baseurl.")
parser.add_argument("-m", "--mode", dest="mode", choices=["auto", "manual"], help="set mode (post)" )
args = parser.parse_args()
url = args.baseurl + "/devices/{0}/mode.json".format(args.id)
if args.mode:
mode = str(args.mode)
setMode(url, mode)
else:
#send get requests
try:
r = requests.get(url)
if r.status_code == 200 and r.json() == "auto":
#set to auto mode
print CONST_MODE_AUTO
else:
#set to manual mode
print CONST_MODE_MAN
except Exception as e:
print CONST_MODE_MAN
logging.debug("GET-Error: {0}".format(e))
sys.exit(1)
if __name__ == "__main__":
main(sys.argv[1:])
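# Hedged usage sketch (illustration only):
#   python mode.py 42                  -> prints 1 for auto or 0 for manual
#   python mode.py 42 --mode manual    -> sets the device mode via an HTTP PUT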
```
#### File: jonasprobst/wandering-pillar-cli/step.py
```python
import argparse, json, requests, logging, sys, datetime
#wandering pillar protocol
CONST_CMD_STOP = 0
CONST_CMD_FORWARD = 1
CONST_CMD_BACKWARD = 2
CONST_CMD_RIGHT = 3
CONST_CMD_LEFT = 4
CONST_CMD_RESTART = 5
def getStep(url):
try:
r = requests.get(url)
print r.status_code
print r.text
if r.status_code == 200:
program = r.json()
cmd = ""
if program['command'] == "stop":
cmd = CONST_CMD_STOP
elif program['command'] == "forward":
cmd = CONST_CMD_FORWARD
elif program['command'] == "backward":
cmd = CONST_CMD_BACKWARD
elif program['command'] == "right":
cmd = CONST_CMD_RIGHT
elif program['command'] == "left":
cmd = CONST_CMD_LEFT
else:
cmd = CONST_CMD_STOP
print str(cmd) + "," + str(program['duration']) + "," + str(program['gear'])
else:
#set to manual mode
print CONST_CMD_STOP
except Exception as e:
print CONST_CMD_STOP
logging.debug("GET-Error: {0}".format(e))
sys.exit(1)
def main(argv):
#setup logging
logging.basicConfig(filename="wapi.log", level=logging.DEBUG)
now = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
logging.info("\n\n>> NEXTSTEP.PY - {0}".format(now))
    #hack to disable security warning. Check the following docs. I know it's not pretty...
#https://urllib3.readthedocs.org/en/latest/security.html#insecureplatformwarning
logging.captureWarnings(True)
    parser = argparse.ArgumentParser(description="get the next step or command for a wandering pillar (auto or manual mode).", version="0.1")
parser.add_argument("id", type=int, help="device id")
parser.add_argument("mode", choices=["a", "auto", "m", "manual"], help="current mode")
parser.add_argument("-u", "--baseurl", default="https://wapi.firebaseio.com", dest="baseurl", help="firebase baseurl.")
parser.add_argument("-s", "--step", type=int, help="step number")
args = parser.parse_args()
if args.mode == "auto" or args.mode == "a":
#get programm
url = args.baseurl + "/devices/{0}/program.json".format(args.id)
try:
r = requests.get(url)
        except Exception as e:
            #stop if anything is fishy and bail out, otherwise r would be undefined below
            print CONST_CMD_STOP
            logging.debug("GET-Error: {0}".format(e))
            sys.exit(1)
        program = r.json()
if program:
#what's the next step??
if args.step:
#resuming
if args.step >= len(program):
#we've reached the end of the program
#loop back
print CONST_CMD_RESTART
else:
#get
url = args.baseurl + "/devices/{0}/program/{1}.json".format(args.id, args.step)
getStep(url)
else:
#starting
#get step 0
url = args.baseurl + "/devices/{0}/program/{1}.json".format(args.id, 0)
getStep(url)
else:
#there is no program
print CONST_CMD_STOP
else:
#manual mode
url = args.baseurl + "/devices/{0}/cmd.json".format(args.id)
try:
r = requests.get(url)
if r.status_code == 200:
cmd = r.json()
if cmd == "stop":
print CONST_CMD_STOP
elif cmd == "forward":
print CONST_CMD_FORWARD
elif cmd == "backward":
print CONST_CMD_BACKWARD
elif cmd == "right":
print CONST_CMD_RIGHT
elif cmd == "left":
print CONST_CMD_LEFT
else:
#set to manual mode
print CONST_CMD_STOP
except Exception as e:
print CONST_CMD_STOP
logging.debug("GET-Error: {0}".format(e))
sys.exit(1)
if __name__ == "__main__":
main(sys.argv[1:])
``` |
{
"source": "JonasProg/animated-couscous",
"score": 3
} |
#### File: JonasProg/animated-couscous/VC_database.py
```python
def rewriter():
    # loop over modes
    for y in range(1,19):
        # loop over q
        for z in range(-6, 7):
            # loop over states
            for a in range(1, 5):
template= open('C:\\Users\\Jonas\\Desktop\\Ergebnisse\\Database\\pyr_s0_3_000.log')
zmatrix= open('C:\\Users\\Jonas\\Documents\\Orca_Computations\\Moden_inputs\\Mode'+str(y)+'_inputs\\'+str(z)+'\\_zmatrix.dat')
energie= open('C:\\Users\\Jonas\\Desktop\\Ergebnisse\\Mode'+str(y)+'_dat\\Mode'+str(y)+'_Energien.csv')
liste=template.readlines()
zmatrix_liste=zmatrix.readlines()[1:-1]
ausgabe_liste=[]
energie_liste=energie.readlines()
for x in range(0, 6):
ausgabe_liste.append(liste[x])
for x in range(0, 8):
zmatrix_zeile=zmatrix_liste[x].split()
zeile1= liste[x+6][0:34]+leerzeichen_macher(zmatrix_zeile[0])+leerzeichen_macher(zmatrix_zeile[1])+leerzeichen_macher(zmatrix_zeile[2])+'\n'
ausgabe_liste.append(zeile1)
ausgabe_liste.append(liste[14])
ausgabe_liste.append(liste[15])
energie_zeile= energie_liste[-z+6].split()
energie_zeile[a] = energie_zeile[a].replace(';', '').replace(',', '.')
energie_zeile[a]= str(float(energie_zeile[a])/27.2113961317875)
zeile= liste[16][0:22]+energie_zeile[a]+liste[16][36:len(liste[16])]
ausgabe_liste.append(zeile)
ausgabe_liste.append(liste[17])
ausgabe_liste.append(liste[18])
open('C:\\Users\\Jonas\\Desktop\\Ergebnisse\\Database\\propynamide_Mode'+str(y)+'_'+str(a)+'_'+str(z)+'.log', "w", newline="").writelines(ausgabe_liste)
def leerzeichen_macher(eingabestring):
while len(eingabestring) < 12:
eingabestring=' '+ eingabestring
return eingabestring
rewriter()
```
#### File: JonasProg/animated-couscous/zmatrix.py
```python
import os
import shutil
def _zmatrix_maker():
for n in range(6,24):
Mode = open(
'C:\\Users\\Jonas\\Documents\\Orca_Computations\\propynamid_traj_alleModen\\Mode'+str(n)+'\\propyn_normal_mode.m' + str(n) + '.xyz')
liste = Mode.readlines()
liste2 = []
liste3 = []
liste4 = []
liste5 = []
zmatrix = []
outputdirectory= ('C:\\Users\\Jonas\\Documents\\Orca_Computations\\propynamid_traj_alleModen\\Mode'+ str(n))
for x in range(0,len(liste)):
if "*" in liste[x]:
dir1 = 'C:\\Users\\Jonas\\Documents\\Orca_Computations\\Input_Files'
liste2 = [liste[x+1],liste[x+2], liste[x+3], liste[x+4], liste[x+5], liste[x+6], liste[x+7], liste[x+8]]
if os.path.exists(outputdirectory+"\\"+liste[x][36:-6].strip()) == False:
os.mkdir(outputdirectory+"\\"+liste[x][36:-6].strip())
dir1 = ('C:\\Users\\Jonas\\Documents\\Orca_Computations\\Input_Files\\')
dir2 = (outputdirectory + "\\" + liste[x][36:-6].strip() + "\\")
for files in os.listdir(dir1):
shutil.copy2(dir1 + files, dir2 + files)
file = open("{0}\\{1}\\_zmatrix.dat".format(outputdirectory, liste[x][36:-6].strip()), "w", newline="")
file.write("geometry angstroms\n")
for y in range(0, len(liste2)):
liste3 = liste2[y].split()
liste4.append(liste3)
for z in liste4:
liste5 = [z[1],z[2],z[3], ladungszuordnung(z[0]),z[0]]
file.write(" ".join(liste5)+"\n")
zmatrix.append(liste5)
liste4.clear()
file.write("end\n")
zmatrix.clear()
def ladungszuordnung(symbol):
if symbol == "O":
return "8.0"
if symbol == "N":
return "7.0"
if symbol == "C":
return "6.0"
if symbol == "H":
return "1.0"
print(_zmatrix_maker())
``` |
{
"source": "Jonasprogramer/poo-python3",
"score": 3
} |
#### File: Jonasprogramer/poo-python3/4 - property.py
```python
class Produto:
def __init__(self, nome, preco):
self.nome = nome
self.preco = preco #valor
def desconto(self, percentual):
self.preco = self.preco - (self.preco * (percentual / 100))
#getter
@property
def preco(self):
return self._preco
#setter
@preco.setter
def preco(self, valor):
if isinstance(valor, str):
valor = float(valor.replace('R$', ''))
self._preco = valor
p1 = Produto('Camisa', 'R$50')
p1.desconto(10)
print(p1.preco)
```
#### File: Jonasprogramer/poo-python3/private.py
```python
class livro:
def __init__(self):
self.__pages = None
def set_pages(self, qto):
self.__pages = qto
def get_pages(self):
return self.__pages
def __contar_pagina(self):
print('estou sendo ultilizdo')
#escopo global
obj_livro = livro()
'''
#visibilidade public - tenho acesso para modificar e exibir
obj_livro.pages = 120
print(obj_livro.pages)
'''
'''
atributos em python não ficam realmente privados como em outras liguagens de programação
__ - mesmo usando isso para proteger, os arquivos podemos alterá-los.
'''
#visibilidade privado - não terá acesso nem exibição ou modificação
obj_livro.__pages = 120
print(obj_livro.__pages)
obj_livro.set_pages(150)
print(obj_livro.get_pages())
#obj_livro.__contar_pagina()
``` |
{
"source": "Jonas-Quinn/deliverance",
"score": 2
} |
#### File: deliverance/bazaar/decorators.py
```python
from django.core.exceptions import PermissionDenied
from django.utils import timezone
from .models import Item
from datetime import datetime
def active_auction(function):
def wrap(request, *args, **kwargs):
item = Item.objects.get(slug=kwargs['slug'])
if item.end_of_auction > timezone.now():
return function(request, *args, **kwargs)
else:
raise PermissionDenied
wrap.__doc__ = function.__doc__
wrap.__name__ = function.__name__
return wrap
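# Hedged usage sketch (illustration only; the view name and URL kwargs are assumptions):
#
# @active_auction
# def place_bid(request, slug):
#     ...  # only reached while Item.end_of_auction is still in the future,
#          # otherwise the decorator raises PermissionDenied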
```
#### File: deliverance/bazaar/forms.py
```python
from django import forms
from .models import Item_Image, Item, Bid
from django.contrib.auth.models import User
from django.forms import DateTimeField
from django.contrib.admin import widgets
from django.core.validators import ValidationError, MinValueValidator
from django.utils.safestring import mark_safe
from crispy_forms.helper import FormHelper
import datetime
import decimal
import math
class ItemCreateForm(forms.ModelForm):
class Meta:
model = Item
fields = (
'title',
'description',
'price',
'end_of_auction',
'condition',
'main_image',
)
widgets = {
'title': forms.TextInput(
attrs={'style': 'width: 100%; background: rgba(235, 255, 87, 0.6);', } ),
'description': forms.Textarea(
attrs={'style': 'width: 100%; background: rgba(235, 255, 87, 0.6);', } ),
'price': forms.NumberInput(
attrs={'style': 'width: 200px; background: rgba(235, 255, 87, 0.6);', } ),
'end_of_auction': forms.DateTimeInput(
attrs={'class': 'cosmicbutton','style': 'background: rgba(235, 255, 87, 0.6);',} ),
}
def __init__(self, *args, **kwargs):
super(ItemCreateForm, self ).__init__( *args, **kwargs )
self.fields['end_of_auction'].label = ""
class ItemCreateDatetimeForm(forms.ModelForm):
class Meta:
model = Item
fields = (
'end_of_auction',
)
class ItemImageForm(forms.ModelForm):
model = Item_Image
def __init__(self, *args, **kwargs):
super(ItemImageForm, self).__init__(*args, **kwargs)
if 'instance' in kwargs:
item_image = kwargs['instance']
# to add an extra field, add something like this
self.fields['extra_field'] = forms.CharField(max_length=30)
class Meta:
fields = ('image',)
class ItemEditForm(forms.ModelForm):
class Meta:
model = Item
fields = (
'title',
'description',
'main_image',
)
class BiddingForm(forms.ModelForm):
class Meta:
model = Bid
fields = (
'bid',
)
# here we pass old_price variable outside the form
def __init__(self, *args, **kwargs):
self.old_price = kwargs.pop('old_price')
super(BiddingForm, self).__init__(*args, **kwargs)
def clean(self):
cd = self.cleaned_data
if not ('bid' in cd.keys()):
raise forms.ValidationError("Please fill out missing field.")
new_price = cd['bid']
limit = round(
max(decimal.Decimal(0.01), math.ceil(self.old_price*decimal.Decimal(105))/100),2
)
if math.ceil(new_price*decimal.Decimal(100))/100 < limit:
            raise ValidationError(mark_safe("The new price must be at least 5% higher than the previous one. <br />"
"The lowest possible bid: ${0}, your bid: ${1}. <br />"
" Draw conclusions.".format(limit, new_price)))
return cd
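# Hedged usage sketch (illustration only): BiddingForm.__init__ pops 'old_price',
# so a view has to construct the form with that keyword, roughly like this:
#
# form = BiddingForm(request.POST, old_price=item.price)
# if form.is_valid():
#     item.price = form.cleaned_data['bid']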
``` |
{
"source": "jonasrains/RaspPi-Projects",
"score": 3
} |
#### File: RaspPi-Projects/Sense HAT Projects/Sense HAT test.py
```python
import time
import math
from sense_emu import SenseHat
hat = SenseHat()
arrows = [False, False, False, False, False]
# colors
red = [255, 0, 0]
orange = [255, 127, 0]
yellow = [255, 255, 0]
lightGreen = [0, 255, 0]
green = [0, 150, 0]
blue = [0, 0, 255]
lightBlue = [0, 255, 255]
purple = [170, 0, 255]
pink = [255, 0, 255]
brown = [127, 64, 0]
white = [255, 255, 255]
black = [0, 0, 0]
# presets
def line(color, line, direction):
# draws a verticle or horizontal line
if direction == "horizontal":
for i in range(64):
if (line - 1) * 8 <= i < (line - 1) * 8 + 8:
pixels[i] = color
elif direction == "verticle":
for i in range(8):
for ii in range(1, 9):
if ii == line:
pixels[i * 8 + ii - 1] = color
def fill(color):
    # use as pixels = fill(color)
return([color for i in range(64)])
def pixel(color, x, y):
for i in range(1, 9):
for ii in range(1, 9):
if i == y and ii == x:
pixels[(i - 1) * 8 + ii-1] = color
def getPixel(x, y):
for i in range(1, 9):
for ii in range(1, 9):
if i == y and ii == x:
return(pixels[(i - 1) * 8 + ii-1])
def testPixel(x, y, color):
if(getPixel(x, y) == color):
return(True)
else:
return(False)
def mergeColor(color1, color2):
return([round((color1[0] + color2[0]) / 2), round((color1[1] + color2[1]) / 2),
round((color1[2] + color2[2]) / 2)])
def drawNumber(color, x, y, number):
if number == 0:
drawZero(color, x, y)
elif number == 1:
drawOne(color, x, y)
elif number == 2:
drawTwo(color, x, y)
elif number == 3:
drawThree(color, x, y)
elif number == 4:
drawFour(color, x, y)
elif number == 5:
drawFive(color, x, y)
elif number == 6:
drawSix(color, x, y)
elif number == 7:
drawSeven(color, x, y)
elif number == 8:
drawEight(color, x, y)
elif number == 9:
drawNine(color, x, y)
else:
drawNumber(color, x, y, int(str(number)[0]))
drawNumber(color, x + 5, y, int(str(number)[1]))
def drawZero(color, x, y):
pixel(color, x, y - 2)
pixel(color, x - 1, y - 2)
pixel(color, x - 1, y - 1)
pixel(color, x - 1, y)
pixel(color, x - 1, y + 1)
pixel(color, x - 1, y + 2)
pixel(color, x, y + 2)
pixel(color, x + 1, y - 2)
pixel(color, x + 1, y - 1)
pixel(color, x + 1, y)
pixel(color, x + 1, y + 1)
pixel(color, x + 1, y + 2)
def drawOne(color, x, y):
pixel(color, x - 1, y + 2)
pixel(color, x, y + 2)
pixel(color, x + 1, y + 2)
pixel(color, x, y + 1)
pixel(color, x, y)
pixel(color, x, y - 1)
pixel(color, x, y - 2)
pixel(color, x - 1, y - 2)
def drawTwo(color, x, y):
pixel(color, x - 1, y - 2)
pixel(color, x, y - 2)
pixel(color, x + 1, y - 1)
pixel(color, x + 1, y)
pixel(color, x, y)
pixel(color, x - 1, y + 1)
pixel(color, x - 1, y + 2)
pixel(color, x, y + 2)
pixel(color, x + 1, y + 2)
def drawThree(color, x, y):
pixel(color, x - 1, y - 2)
pixel(color, x, y - 2)
pixel(color, x + 1, y - 1)
pixel(color, x + 1, y)
pixel(color, x, y)
pixel(color, x + 1, y + 1)
pixel(color, x - 1, y + 2)
pixel(color, x, y + 2)
def drawFour(color, x, y):
pixel(color, x - 1, y - 2)
pixel(color, x + 1, y - 2)
pixel(color, x - 1, y - 1)
pixel(color, x + 1, y - 1)
pixel(color, x - 1, y)
pixel(color, x, y)
pixel(color, x + 1, y)
pixel(color, x + 1, y + 1)
pixel(color, x + 1, y + 2)
def drawFive(color, x, y):
pixel(color, x - 1, y + 2)
pixel(color, x, y + 2)
pixel(color, x + 1, y + 1)
pixel(color, x - 1, y)
pixel(color, x, y)
pixel(color, x - 1, y - 1)
pixel(color, x + 1, y - 2)
pixel(color, x, y - 2)
pixel(color, x - 1, y - 2)
def drawSix(color, x, y):
pixel(color, x - 1, y + 2)
pixel(color, x, y + 2)
pixel(color, x + 1, y + 1)
pixel(color, x - 1, y)
pixel(color, x, y)
pixel(color, x - 1, y - 1)
pixel(color, x + 1, y - 2)
pixel(color, x, y - 2)
pixel(color, x - 1, y + 1)
pixel(color, x + 1, y + 2)
pixel(color, x + 1, y)
def drawSeven(color, x, y):
pixel(color, x - 1, y - 2)
pixel(color, x, y - 2)
pixel(color, x + 1, y - 2)
pixel(color, x + 1, y - 1)
pixel(color, x, y)
pixel(color, x - 1, y + 1)
pixel(color, x - 1, y + 2)
def drawEight(color, x, y):
pixel(color, x, y - 2)
pixel(color, x - 1, y - 1)
pixel(color, x + 1, y - 1)
pixel(color, x, y)
pixel(color, x - 1, y + 1)
pixel(color, x, y + 2)
pixel(color, x + 1, y + 1)
def drawNine(color, x, y):
pixel(color, x + 1, y - 2)
pixel(color, x, y - 2)
pixel(color, x - 1, y - 1)
pixel(color, x + 1, y)
pixel(color, x, y)
pixel(color, x + 1, y + 1)
pixel(color, x - 1, y + 2)
pixel(color, x, y + 2)
pixel(color, x + 1, y - 1)
pixel(color, x - 1, y - 2)
pixel(color, x - 1, y)
def drawDigNumber(color, x, y, number):
if number == 0:
numList = [True, True, True, False, True, True, True]
elif number == 1:
numList = [False, False, False, False, False, True, True]
elif number == 2:
numList = [False, True, True, True, True, True, False]
elif number == 3:
numList = [False, False, True, True, True, True, True]
elif number == 4:
numList = [True, False, False, True, False, True, True]
elif number == 5:
numList = [True, False, True, True, True, False, True]
elif number == 6:
numList = [True, True, True, True, True, False, True]
elif number == 7:
numList = [False, False, True, False, False, True, True]
elif number == 8:
numList = [True for i in range(7)]
elif number == 9:
numList = [True, False, True, True, True, True, True]
else:
numList = [False for i in range(7)]
drawDigNumber(color, x, y, int(str(number)[0]))
drawDigNumber(color, x + 4, y, int(str(number)[1]))
if numList[0]:
pixel(color, x - 1, y - 2)
pixel(color, x - 1, y - 1)
if numList[1]:
pixel(color, x - 1, y + 1)
pixel(color, x - 1, y +2)
if numList[2]:
pixel(color, x, y - 3)
pixel(color, x + 1, y - 3)
if numList[3]:
pixel(color, x, y)
pixel(color, x + 1, y)
if numList[4]:
pixel(color, x, y + 3)
pixel(color, x + 1, y + 3)
if numList[5]:
pixel(color, x + 2, y - 2)
pixel(color, x + 2, y - 1)
if numList[6]:
pixel(color, x + 2, y + 1)
pixel(color, x + 2, y + 2)
# code
pixels = fill(black)
while True:
for event in hat.stick.get_events():
if event.direction == "up":
if event.action == "held" or event.action == "pressed":
arrows[0] = True
else:
arrows[0] = False
if event.direction == "left":
if event.action == "held" or event.action == "pressed":
arrows[1] = True
else:
arrows[1] = False
if event.direction == "right":
if event.action == "held" or event.action == "pressed":
arrows[2] = True
else:
arrows[2] = False
if event.direction == "down":
if event.action == "held" or event.action == "pressed":
arrows[3] = True
else:
arrows[3] = False
if event.direction == "middle":
if event.action == "held" or event.action == "pressed":
arrows[4] = True
else:
arrows[4] = False
pixels = fill(lightBlue)
hat.set_pixels(pixels)
``` |
{
"source": "jonasrauber/clipping-aware-rescaling",
"score": 3
} |
#### File: jonasrauber/clipping-aware-rescaling/clipping_aware_rescaling_minimal_numpy.py
```python
import numpy as np
def l2_clipping_aware_rescaling_minimal_numpy(x, delta, eps):
"""Calculates eta such that
norm(clip(x + eta * delta, 0, 1) - x) == eps.
Args:
x: A 1-dimensional NumPy array.
delta: A 1-dimensional NumPy array.
eps: A non-negative float.
Returns:
eta: A non-negative float.
"""
delta2 = np.square(delta)
space = np.where(delta >= 0, 1 - x, x)
f2 = np.square(space) / delta2
ks = np.argsort(f2)
f2_sorted = f2[ks]
m = np.cumsum(delta2[ks[::-1]])[::-1]
dx = np.ediff1d(f2_sorted, to_begin=f2_sorted[0])
dy = m * dx
y = np.cumsum(dy)
j = np.flatnonzero(y >= eps**2)[0]
eta2 = f2_sorted[j] - (y[j] - eps**2) / m[j]
eta = np.sqrt(eta2).item()
return eta
if __name__ == "__main__":
# This is a minimal, self-contained NumPy implementation.
# For the full, generic EagerPy-based implementation with support for
# batches as well as custom data domain bounds, please see
# clipping_aware_rescaling.py
np.random.seed(22)
x = np.random.rand(784)
delta = np.random.randn(784)
eps = 3.6
print(f"target norm: {eps}")
eta = l2_clipping_aware_rescaling_minimal_numpy(x, delta, eps)
print(f"eta: {eta:.3f}")
diff = np.clip(x + eta * delta, 0, 1) - x
norm = np.linalg.norm(diff, axis=-1)
print(f"output norm: {norm}")
```
#### File: jonasrauber/clipping-aware-rescaling/test_rescaling.py
```python
import pytest
import eagerpy as ep
import numpy as np
import torch
import jax
import tensorflow as tf
from clipping_aware_rescaling import l2_clipping_aware_rescaling
from naive_rescaling import l2_naive_rescaling
def get_effective_norms(x, eta, delta, a, b):
x, eta, delta = ep.astensors(x, eta, delta)
xp = ep.clip(x + eta * delta, a, b)
diff = xp - x
N = diff.shape[0]
norms = ep.norms.l2(diff.reshape((N, -1)), axis=-1)
norms = norms.numpy()
return norms
def assert_clipping_aware_correct(x, eta, delta, a, b, eps):
norms = get_effective_norms(x, eta, delta, a, b)
print(f"clipping-aware rescaling norms:\n{norms}")
assert np.allclose(norms, eps)
def assert_naive_too_small(x, eta, delta, a, b, eps):
norms = get_effective_norms(x, eta, delta, a, b)
print(f"naive rescaling norms:\n{norms}")
assert not np.allclose(norms, eps)
assert np.all(np.logical_or(np.isclose(norms, eps), norms < eps))
def generate_numpy_data(n, shape, a, b):
np.random.seed(22)
x = np.random.rand(n, *shape) * (b - a) + a
delta = np.random.randn(n, *shape)
return x, delta
def to_numpy(x):
return x
def to_pytorch(x):
return torch.from_numpy(x)
def to_tensorflow(x):
return tf.convert_to_tensor(x)
def to_jax(x):
return jax.device_put(x)
@pytest.mark.parametrize("n", [1, 10])
@pytest.mark.parametrize("shape", [(784,), (28, 28)])
@pytest.mark.parametrize("a,b,eps", [(0, 1, 3.6), (100, 120, 22.0), (-5, 5, 11.0)])
# @pytest.mark.parametrize("convert", [to_numpy, to_pytorch, to_tensorflow, to_jax])
@pytest.mark.parametrize("convert", [to_numpy, to_pytorch, to_jax])
@pytest.mark.parametrize("eagerpy", [False, True])
def test_rescaling(n, shape, a, b, eps, convert, eagerpy):
x, delta = generate_numpy_data(n, shape, a, b)
x = convert(x)
delta = convert(delta)
if eagerpy:
x = ep.astensor(x)
delta = ep.astensor(delta)
eta = l2_naive_rescaling(delta, eps)
assert_naive_too_small(x, eta, delta, a, b, eps)
eta = l2_clipping_aware_rescaling(x, delta, eps, a, b)
assert_clipping_aware_correct(x, eta, delta, a, b, eps)
``` |
{
"source": "jonasrauber/plotspikes.py",
"score": 4
} |
#### File: jonasrauber/plotspikes.py/ugly.py
```python
from matplotlib import pyplot as plt
import numpy as np
def main():
"""Example how to use plotspikes"""
# create a figure and plot some random signal
plt.figure(0, figsize=(15,3))
np.random.seed(22)
xsignal = 20 * np.arange(100)
ysignal = 6. + np.cumsum(np.random.randn(100))
plt.plot(xsignal, ysignal, 'm', hold=True)
    # create random spikes and plot them with a plain plt.plot call
spiketimes = 40 * np.arange(0, 50) + 40 * np.random.randn(50)
plt.plot(spiketimes, -2.5 * np.ones(len(spiketimes)), 'o')
# add labels and axis limits
plt.xlabel("time in ms")
# save the plot without axes
plt.axis('off')
plt.savefig("images/ugly.png", bbox_inches='tight')
plt.axis('on')
# show the complete plot
plt.show()
if __name__ == "__main__":
main()
``` |
{
"source": "jonasrdt/Wirtschaftsinformatik2",
"score": 3
} |
#### File: Code-Vorlesungen/VL-5/Einstieg-Funktion-3.py
```python
def trenner(anzahl_striche):
for i in range(anzahl_striche):
print("*", end="")
print()
def abschluss():
print("Ich bin fertig und das Programm wird beendet.")
def maximum_3_zahlen(zahl1, zahl2, zahl3):
if zahl1 >= zahl2: # Ist die erste Zahl, die wir als Parameter eingegeben haben, größer als die zweite Zahl?
maximum = zahl1 # Wenn das True ist, dann ist das maximum die erste Zahl
else:
maximum = zahl2 # Wenn die erste Zahl NICHT größer ist als die zweite, dann MUSS die zweite größer sein
if maximum < zahl3: # Wenn der Gewinner des ersten Vergleichs KLEINER ist als die dritte Zahl, MUSS die dritte Zahl das Maximum sein
maximum = zahl3
return maximum # return gibt nur den Wert über die Funktion zurück, der danach verarbeitet muss
trenner(75)
# Der Nutzer wird gebeten drei Zahlen einzugeben
erste_zahl = int(input("Bitte geben Sie die erste Zahl ein: "))
zweite_zahl = int(input("Bitte geben Sie die zweite Zahl ein: "))
dritte_zahl = int(input("Bitte geben Sie die dritte Zahl ein: "))
trenner(75)
# Aufrufen der Funktion zum Vergleichen der drei Nutzereingaben
print("Das ermittelte Maximum beträgt:", maximum_3_zahlen(erste_zahl, zweite_zahl, dritte_zahl))
```
#### File: Code-Vorlesungen/VL-5/Einstieg-Funktionen-2.py
```python
def trenner(anzahl_sterne):
for i in range(anzahl_sterne):
print("*", end="")
print()
# Diese Funktion berechnet die größte Zahl aus drei eingegebenen Zahlen
def berechnung_maximum(zahl1, zahl2, zahl3):
if zahl1 >= zahl2: # Ist die erste Zahl, die wir als Parameter eingegeben haben, größer als die zweite Zahl?
maximum = zahl1
else:
maximum = zahl2 # Wenn die erste Zahl NICHT größer ist als die zweite, dann MUSS die zweiter größer sein
if maximum < zahl3:
maximum = zahl3 # Wenn der Gewinner des ersten Vergleichs KLEINER ist als die dritte Zahl, muss die dritte Zahl das Maximum sein
return maximum # return gibt nur den Wert über die Funktion zurück, der dann noch verarbeitet werden muss
trenner(35)
print("Geben Sie drei Zahlen ein und es wird Ihnen die größte Zahl zurückgegeben.")
trenner(35)
erste_zahl = int(input("Bitte geben Sie die erste Zahl ein:"))
zweite_zahl = int(input("Bitte geben Sie die zweite Zahl ein:"))
dritte_zahl = int(input("Bitte geben Sie die dritte Zahl ein:"))
trenner(35)
# Aufrufen der Funktion
print("Die größte Zahl ist:", berechnung_maximum(erste_zahl, zweite_zahl, dritte_zahl))
```
#### File: Code-Vorlesungen/VL-5/Einstieg-Funktionen.py
```python
def trenner(anzahl_sterne):
for _ in range(anzahl_sterne):
print("*", end="")
print()
# Definieren (def) einer Funktion mit dem Namen "abschluss" die den Abschluss des Programms darstellt
def abschluss():
print("Ich bin fertig.")
# Definieren der Funktion
def maximum_3_zahlen(zahl1, zahl2, zahl3):
if zahl1 >= zahl2: # Ist die erste Zahl, die wir als Parameter eingeben haben, größer als die zweite Zahl?
maximum = zahl1 # Wenn das True ist, ist das maximum erstmal die erste Zahl
else:
maximum = zahl2 # Wenn die erste Zahl NICHT größer ist als die zweite, dann MUSS die zweite größer sein
if maximum < zahl3: # Wenn der Gewinner des ersten Vergleichs KLEINER ist als die dritte Zahl, MUSS die dritte Zahl das Maximum sein
maximum = zahl3
return maximum # return gibt nur den Wert über die Funktion zurück, der dann noch verarbeitet werden muss
trenner(10)
# Nutzer bitten drei Zahlen einzugeben
erste_zahl = int(input("Bitte geben Sie die erste Zahl ein:"))
zweite_zahl = int(input("Bitte geben Sie die zweite Zahl ein:"))
dritte_zahl = int(input("Bitte geben Sie die dritte Zahl ein:"))
trenner(10)
# Aufrufen der Funktion zum Vergleichen der drei Nutzereingaben
print("Das ermittelte Maximum beträgt:", maximum_3_zahlen(erste_zahl, zweite_zahl, dritte_zahl))
```
#### File: Code-Vorlesungen/VL-5/Mathematische-Brechnungen.py
```python
PI = 3.1415 # Globale Variable
def kreisumfang(radius):
kreisumfang = 2 * PI * radius
return kreisumfang
def zylinder(radius, hoehe):
return hoehe * kreisumfang(radius)
print(zylinder(50, 20))
```
#### File: Wiederholung/2/test.py
```python
def a():
count = 0
for z in (100,2,3):
count += 1
return count
print(a())
```
#### File: Code-Vorlesungen/Wiederholung/Wiederholung-Wortlaenge.py
```python
def anzahlwoerter(nutzereingabe):
nutzereingabe_splitted = nutzereingabe.split(" ")
return nutzereingabe_splitted
def wortlaenge(nutzereingabe, nutzereingabe_splitted):
durchschnittliche_wortlaenge = len(nutzereingabe) / len(nutzereingabe_splitted)
return durchschnittliche_wortlaenge
nutzereingabe = input("Bitte geben Sie einen Satz ein: ")
gesplittete_nutzereingabe = anzahlwoerter(nutzereingabe)
print("Die Anzahl der Wörter in Ihrem Satz ist:", len(gesplittete_nutzereingabe))
print("Die durchschnittliche Wortanzahl beträgt:", wortlaenge(nutzereingabe, gesplittete_nutzereingabe), "Zeichen.")
```
#### File: SoSe-21/Uebung-5/A3-Ausdauersport-Wiederholung.py
```python
def berecheneOptimalerPuls(alter):
optimaler_puls = int(165 - 0.75 * alter)
return optimaler_puls
# Grundsätzlich, gehen wir von einer falschen Eingabe aus
korrekte_eingabe = False
# Solange wie keine gültige Eingabe erfolgt ist, wird diese Schleife ausgeführt
while not korrekte_eingabe:
try:
alter_nutzer = int(input("Bitte geben Sie Ihr Alter ein: ")) # Versuchen eine Nutzereingabe zu erhalten
if (6 <= alter_nutzer <= 105): # Bedingung, dass das Alter größer sein muss als 6
korrekte_eingabe = True # Abbruchbedingung für die while-Schleife
else:
print("Bitte geben Sie ein Alter zwischen 6 und 105 Jahren ein.")
except:
print("Bitte geben Sie ein gültiges Alter in Form einer Zahl ein: ") # Zu neuer Eingabe auffordern, wenn Eingabe ungültig war
# Ausgabe der berechneten Werte
print("Bei einem Alter von", alter_nutzer, "beträgt der optimale Puls für Ausdauersportarten:", berecheneOptimalerPuls(alter_nutzer))
```
#### File: SoSe-21/Uebung-5/Zufallsgenerator-Spiel-mit-Funktionen.py
```python
import random
def berechnung(zahl1, zahl2, operator):
if operator == "+":
ergebnis = zahl1 + zahl2
elif operator == "-":
ergebnis = zahl1 - zahl2
elif operator == "*":
ergebnis = zahl1 * zahl2
elif operator == "/":
ergebnis = zahl1 / zahl2
else:
print("Der von Ihnen eingegebene Operator", operator, "ist nicht gültig.")
print(zahl1, operator, zahl2)
return ergebnis
weiterspielen = True
while weiterspielen:
# Belegen der Variablen mit randomisierten Zahlen
erste_zahl = random.randint(1,10)
zweite_zahl = random.randint(1,10)
wrong_operator = True
while wrong_operator:
operator = input("Was wollen Sie rechnen *, -, +, / : ")
if operator == "*" or operator == "-" or operator == "/" or operator == "+":
wrong_operator = False
else:
print("Das war leider kein gültiger Operator. Bitte geben Sie nur + - * / ein.")
ergebnis = berechnung(erste_zahl, zweite_zahl, operator)
for zaehler in range(3):
# Nutzer zur Eingabe einer Lösung auffordern
nutzer_loesung = int(input("Bitte geben Sie die Lösung ein: "))
if nutzer_loesung == ergebnis:
print("Glückwunsch", nutzer_loesung, "ist richtig.")
break
else:
print("Schade, das war leider nicht korrekt.")
if zaehler == 2:
print("Die richtige Lösung wäre gewesen:", ergebnis)
# Fragen, ob der Nutzer weiterspielen will
nutzer_nachfrage = input("Wollen Sie weiterspielen (ja/nein): ")
# Wenn nicht, dann wird "weiterspielen" auf False gesetzt
if nutzer_nachfrage.lower() == "nein":
weiterspielen = False
# Andernfalls, geht es weiter!
else:
print("Weiter geht die wilde Fahrt.")
```
#### File: SoSe-21/Uebung-7/A2-Produktionsliste.py
```python
stueckliste = []
def teilhinzufuegen(bestandteil):
stueckliste.append(bestandteil)
def trenner(anzahl):
for i in range(anzahl):
print("*", end="")
print()
trenner(125)
print("Programm zur Erfassung der Stückliste für das SpaceX Starship")
korrekte_eingabe = False
while not korrekte_eingabe:
try:
menge = int(input("Wie viele Teile wollen Sie der Stückliste hinzufügen? "))
if menge <= 0:
print("Bitte geben Sie eine ganze Zahl größer 0 ein.")
else:
korrekte_eingabe = True
except:
print("Bitte geben Sie eine ganze Zahl größer 0 ein.")
trenner(125)
for i in range (1, menge+1):
print("Sie wollen", menge, "Teile erfassen.")
print ("Bitte geben Sie das", i ,"Teil der Stückliste an:")
bestandteil = input()
teilhinzufuegen(bestandteil)
print("Hier sind die Teile Ihrer Stückliste:", stueckliste)
```
#### File: WiSe-2122/Sonderaufgaben/pizzaparty.py
```python
Pi = 3.14159 # Globale Variable Pi zur Berechnung des Umfangs und der Fläche
# Definition der Funktion zur Berechnung des Kreisumfangs in Zentimetern einer Pizza auf Basis des Radius
def pizzaumfang(Radius):
# Berechnung des Kreisumfangs und Runden auf 2 Nachkommastellen und Zuweisen des Ergebnisses zur lokalen Variable pizzaumfang
pizzaumfang = round(2 * Pi * Radius, 2)
# Verfügbarmachen des Wertes für den pizzaumfang aus dem lokalen Kontext in den globalen
return pizzaumfang
# Berechnung der Grundfläche einer Pizza in Quadratzentimetern, basierend auf dem Radius und der Anzahl der Pizzen
def pizzaflaeche(Radius, Anzahl):
# Berechnung der Grundfläche und Runden auf 2 Nachkommastellen und Zuweisen des Ergebnisses zur lokalen Variable pizzaflaeche
pizzaflaeche = round(Pi * (Radius * Radius) * Anzahl, 2)
# Verfügbarmachen des Wertes für die Grundfläche aus dem lokalen Kontext in den globalen
return pizzaflaeche
# Berechnung des Preises pro Quadratzentimeter basierend auf dem Preis und der Pizzafläche
def quadratzentimeter_berechnung(Preis, Pizzaflaeche):
# Zuweisen des Ergebnisses zur lokalen Variable preisproqz
preisproqz = round(Preis / (Pizzaflaeche), 5)
# Verfügbarmachen des Wertes für die Grundfläche aus dem lokalen Kontext in den globalen
return preisproqz
# Setzen einer Variable zum Umsetzen der Abbruchmethode der while-Schleife
ungueltige_eingabe = True
# Ausführen der while-Schleife für die Abfrage der Anzahl, des Durchmessers sowie Preises
while ungueltige_eingabe:
# Technische Prüfung, ob der User auch wirklich int/float eingegeben hat
try:
# Abfragen von Anzahl, Durchmesser, Preis und zuweisen zu gleichnamigen globalen Variablen
anzahl = int(input("Wie viele Pizzen sollen berechnet werden: "))
radius = float(input("Welchen Radius hat eine Pizza in cm: "))
preis = float(input(("Wie viel kostet das Angebot insgesamt: ")))
ungueltige_eingabe = False
# Catch the error and print an error message
except:
print("Bitte geben Sie für die Anzahl nur ganze Zahlen und für Durchmesser und Preis nur Kommazahlen ein.")
# Print the result of calling pizzaumfang() with the radius
print("Ihr Kreisumfang der Pizza beträgt:", pizzaumfang(radius), "Zentimeter.")
# Print the result of calling pizzaflaeche() with the radius and the number of pizzas
print("Die Fläche der Pizza beträgt", pizzaflaeche(radius, anzahl), "Quadratzentimeter.")
# Print the result of calling quadratzentimeter_berechnung() with the price
# and the result of pizzaflaeche() for the given size and number of pizzas
print("Ein Quadratzentimeter Pizza kostet", quadratzentimeter_berechnung(preis, pizzaflaeche(radius, anzahl)), "€.")
```
#### File: Uebung-11/Gruppe-B/U11-A2.py
```python
import random
### Function definitions
# This function prints a nice separator line
def trenner(anzahl):
for zaehler in range(anzahl):
print("-", end="")
print()
# This function checks whether the letter guessed by the user matches one of the letters in the word
def buchstaben_pruefung():
for char in range(len(ausgewaehltes_wort)):
if geratener_buchstabe.upper() == ausgewaehltes_wort[char].upper():
gesuchtes_wort.insert(char, geratener_buchstabe)
gesuchtes_wort.pop(char+1)
# This function checks whether there are still unfilled positions in the list for the word being guessed
def successor():
if "_" not in gesuchtes_wort:
return True
# Variable definitions
woerter = ["Tee", "Wasser", "Fachhochschule", "Betriebswirtschaftslehre", "Python", "Weihnachten", "Silvester"]
gesuchtes_wort = []
leben = 6
# Create a random number to pick a random word from the list woerter above
zufaellige_zahl = random.randint(0, (len(woerter)-1))
ausgewaehltes_wort = woerter[zufaellige_zahl]
# Main program
trenner(50)
print("Willkommen beim Hangman-Spiel - Dem Wortratespiel")
print("Ihnen wird jetzt zufällig ein Wort zugewiesen.")
print("Sie müssen durch die Eingabe einzelner Buchstaben das Wort Stück für Stück erraten.")
# Choose the difficulty level
falsche_eingabe = True
while falsche_eingabe:
# Technical input validation with try/except
try:
spielmodus = int(input("Welchen Spielmodus möchten Sie wählen: (1) schwierig mit nur 3 Leben (2) mittel mit 6 Leben und (3) für leicht mit 12 Leben: "))
if spielmodus == 1:
leben = 3
falsche_eingabe = False
elif spielmodus == 2:
leben = 6
falsche_eingabe = False
elif spielmodus == 3:
leben = 12
falsche_eingabe = False
else:
print("Deine Eingabe war leider ungültig, du startest standardmäßig mit: ", leben, "Leben.")
falsche_eingabe = False
except:
print("Bitte geben Sie nur ganze Zahlen zwischen 1 und 3 ein.")
trenner(50)
print("Zu Beginn hast du", leben, "Leben.")
trenner(50)
# Print the length of the word
print("Dein Wort hat", len(ausgewaehltes_wort),"Zeichen.")
# Initialize the list with the length of the word being guessed
for zaehler in range(len(ausgewaehltes_wort)):
gesuchtes_wort.append("_")
trenner(50)
# The following loop keeps running as long as the user still has lives left
# or until the word has been guessed completely.
while leben > 0:
geratener_buchstabe = input("Rate einen Buchstaben: ")
buchstaben_pruefung()
print(gesuchtes_wort)
# Check whether the guessed letter occurs in the word at all
if geratener_buchstabe.upper() in ausgewaehltes_wort.upper():
# Check whether there are still open positions or whether the word has now been fully guessed
if successor() == True:
print("Herzlichen Glückwünsch, du hast gewonnen!")
break
else:
leben -= 1
print("Schade, das war leider nicht richtig. Du hast noch", leben, "Leben.")
trenner(50)
```
#### File: Uebung-11/Gruppe-C/U11-A1.py
```python
def trenner(anzahl_striche):
for i in range(anzahl_striche):
print("-", end="")
print()
def fehler():
print("\nFehler: Bitte geben Sie nur Zahlen an, die zur Auswahl stehen.")
def formular():
global vorname, nachname, geburtsort
vorname = input("> Vorname: ")
nachname = input("> Nachname: ")
geburtsort = input("> Geburtsort: ")
def suche():
global index
suche = input("Suchbegriff (Nachname eingeben): ")
index = next((i for i, item in enumerate(ma_kartei) if item["Nachname"] == suche), None)
def eintrag_neu():
print("\nBitte fügen Sie einen neuen Eintrag zur Mitarbeiter-Kartei hinzu: ")
formular()
gueltige_eingabe = False
while not gueltige_eingabe:
try:
auswahl = int(input("\n(1) Speichern (2) Abbrechen\n"))
if auswahl == 1:
gueltige_eingabe = True
eintrag = {"Vorname": vorname,"Nachname": nachname,"Geburtsort": geburtsort}
ma_kartei.append(eintrag)
print("Ihr Eintrag wurde gespeichert und der Kartei hinzugefügt.")
trenner(80)
elif auswahl == 2:
gueltige_eingabe = True
except:
fehler()
def eintrag_bearbeiten():
print("Welchen Eintrag möchten Sie bearbeiten?")
suche()
print("\nBitte überschreiben Sie den alten Eintrag:")
formular()
ma_kartei[index] = {"Vorname": vorname,"Nachname": nachname,"Geburtsort": geburtsort}
print("Ihr Eintrag wurde gespeichert und der Kartei hinzugefügt.")
trenner(80)
def eintrag_loeschen():
print("Welchen Eintrag möchten Sie löschen?")
suche()
print("\nFolgender Eintrag wurde gelöscht:")
print(ma_kartei[index])
ma_kartei.pop(index)
# Program flow
print("\n")
trenner(120)
print("Mitarbeiter-Kartei")
trenner(120)
trenner(120)
ma_kartei = []
programm = True
while programm:
print("Was möchten Sie tun?")
gueltige_eingabe = False
while not gueltige_eingabe:
try:
auswahl = int(input("\n(1) Eintrag hinzufügen\n(2) Eintrag bearbeiten\n(3) Eintrag löschen\n(4) Kartei anzeigen\n"))
if auswahl == 1:
gueltige_eingabe = True
eintrag_neu()
elif auswahl == 2:
gueltige_eingabe = True
eintrag_bearbeiten()
elif auswahl == 3:
gueltige_eingabe = True
eintrag_loeschen()
elif auswahl == 4:
gueltige_eingabe = True
print(ma_kartei)
trenner(80)
except:
fehler()
```
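A minimal, self-contained sketch of the index lookup pattern used in `suche()` above; the sample records and the search term are made up for illustration.
```python
# Hypothetical illustration of looking up a record index by "Nachname"
kartei = [
    {"Vorname": "Anna", "Nachname": "Meier", "Geburtsort": "Kiel"},
    {"Vorname": "Ben", "Nachname": "Schulz", "Geburtsort": "Mainz"},
]
index = next((i for i, item in enumerate(kartei) if item["Nachname"] == "Schulz"), None)
print(index)  # -> 1; next() returns None if no record matches
```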
#### File: Uebung-11/Gruppe-C/U11-A2.py
```python
import random
def trenner(anzahl):
for i in range(anzahl):
print("-", end="")
print()
def successor():
if "_" in gesuchtes_wort:
return False
else:
return True
def spielmodus():
falsche_eingabe = True
while falsche_eingabe:
try:
modus = int(input("Wählen Sie eine Schwierigkeitsstufe: (1) Schwer - 3 Versuche (2) Mittel - 6 Versuchen (3) Einfach - 9 Versuchen: "))
if modus == 1:
falsche_eingabe = False
return 3
elif modus == 2:
falsche_eingabe = False
return 6
elif modus == 3:
falsche_eingabe = False
return 9
else:
print("Ihre Schwierigkeit wird standardmäßig auf Mittel gesetzt.")
falsche_eingabe = False
return 6
except:
print("Bitte geben Sie nur ganze Zahlen ein.")
# Variable definitions
words = ["Jessica", "Jonas", "Maike", "Milan", "Damla", "Merda", "Andre", "Sarangoo"]
# Alternative solution:
# print(random.choice(words))
zufaelliger_wert = random.randint(0,len(words)-1)
zufaelliges_wort = words[zufaelliger_wert]
gesuchtes_wort = []
versuche = 0
# Main program
trenner(50)
print("Willkommen bei Hangman - Dem Wortratespiel")
print("Im Folgenden müssen Sie ein Wort erraten.")
versuche = spielmodus()
print("Dafür haben Sie",versuche,"Versuche.")
trenner(50)
print("Das von Ihnen zu erratene Wort hat", len(zufaelliges_wort), "Zeichen.")
for element in range(len(zufaelliges_wort)):
gesuchtes_wort.append("_")
print(gesuchtes_wort)
while not successor() and versuche > 0:
buchstabe = input("Bitte raten Sie einen Buchstaben: ")
if buchstabe.upper() in zufaelliges_wort.upper():
for char in range(len(zufaelliges_wort)):
if buchstabe.upper() == zufaelliges_wort[char].upper():
# Insert the letter at the correct index
gesuchtes_wort.insert(char, buchstabe)
# Remove the placeholder that was pushed back by the insert
gesuchtes_wort.pop(char+1)
else:
versuche -= 1
print("Schade, das war nicht richtig. Du hast noch", versuche,"Versuche.")
print(gesuchtes_wort)
if successor():
print("Herzlichen Glückwunsch. Sie haben das Wort", zufaelliges_wort, "erraten.")
if versuche == 0:
print("DUUUU HAST VERLOREN!")
print("Das richtige Wort wäre gewesen:", zufaelliges_wort)
```
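A short sketch of the insert/pop placeholder replacement used in both Hangman variants above; the word length, index and letter are made up.
```python
# Hypothetical illustration: put the guessed letter at index 2 of the placeholder list
gesuchtes_wort = ["_", "_", "_", "_"]
buchstabe = "t"
char = 2
gesuchtes_wort.insert(char, buchstabe)  # list grows to ["_", "_", "t", "_", "_"]
gesuchtes_wort.pop(char + 1)            # drop the placeholder pushed back by the insert
print(gesuchtes_wort)                   # -> ["_", "_", "t", "_"]
```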
#### File: Wiederholung/Gruppe-A/funktion-zahleneingabe.py
```python
def zahleneingabe(text):
falsche_eingabe = True
while falsche_eingabe:
try:
zahl = int(input(text))
falsche_eingabe = False
except:
print("Bitte geben Sie nur ganze Zahlen ein.")
return zahl
alter = zahleneingabe("Bitte geben Sie Ihr Alter ein: ")
print("Alter ist:", alter)
gewicht = zahleneingabe("Bitte geben Sie Ihr Gewicht als Ganzzahl ein: ")
print("Gewicht ist:", gewicht)
menge = zahleneingabe("Bitte geben Sie die Menge ein: ")
print("Menge ist:", menge)
```
#### File: Wiederholung/Gruppe-C/funktionen-global-lokal.py
```python
def addition(a,b):
result = a+b
result_pot = result**2
return result, result_pot
# Function call
print(addition(3,4))
result = 530
``` |
{
"source": "JonasReichhardt/qCharta",
"score": 2
} |
#### File: qCharta/src/qCharta.py
```python
from qiskit.transpiler.basepasses import TransformationPass
from qiskit.transpiler import Layout
from qiskit import QuantumRegister
import random
from sabre import Sabre
from coupling import distance_dict
class qCharta(TransformationPass):
def __init__(self,
coupling_map, seed, layout_option = 'trivial'):
super().__init__()
self.coupling_map = coupling_map
self.seed = seed
self.initial_mapping: Layout
random.seed(seed)
self.layout_option = layout_option
def create_random_layout(self,dag):
nr_qbits = len(self.coupling_map.physical_qubits)
layout_arr = list(range(0,nr_qbits))
random.shuffle(layout_arr)
layout = Layout.from_intlist(layout_arr,*dag.qregs.values())
return layout
def create_heuristic_layout(self,dag):
analysis = self.gate_analysis(dag)
# place most used node in the center
layout_dict = dict.fromkeys(range(0,len(self.coupling_map.physical_qubits)))
layout_dict[31] = analysis[0][0]
distance = 1
position = 0
for qbit in dag.qubits:
candidates = distance_dict[distance]
if layout_dict[candidates[position]] is None:
layout_dict[candidates[position]] = qbit
position = position+1
if len(candidates) == position:
position = 0
distance = distance+1
if len(distance_dict.items()) == distance:
break
return Layout(layout_dict)
def gate_analysis(self, dag):
# analyse the circuit to identify the most used logical qubit
analysis = {}
for gate in dag.two_qubit_ops():
qbit1 = gate.qargs[0]
qbit2 = gate.qargs[1]
try:
analysis[qbit1] = analysis[qbit1]+1
except KeyError:
analysis[qbit1] = 1
try:
analysis[qbit2] = analysis[qbit2]+1
except KeyError:
analysis[qbit2] = 1
# sort qbits by usage in 2 qbit operations
sorted_logical_qbits = sorted(analysis.items(), key=lambda x: x[1],reverse=True)
return sorted_logical_qbits
# not sure if this function will be needed
def hotspot_anaysis(self, dag, analysis):
hot_qbit = max(analysis, key=analysis.get)
hot_gates = {}
for gate in dag.two_qubit_ops():
if(gate.qargs[0].index == hot_qbit):
try:
hot_gates[gate.qargs[1].index] = hot_gates[gate.qargs[1].index]+1
except KeyError:
hot_gates[gate.qargs[1].index] = 1
if(gate.qargs[1].index == hot_qbit):
try:
hot_gates[gate.qargs[0].index] = hot_gates[gate.qargs[0].index]+1
except KeyError:
hot_gates[gate.qargs[0].index] = 1
return hot_qbit, hot_gates
def run(self, dag):
# fill up a "reserve" register
reg = QuantumRegister(len(self.coupling_map.physical_qubits) - len(dag.qubits), 'r')
dag.add_qreg(reg)
if self.layout_option == 'trivial':
init_layout = Layout.generate_trivial_layout(*dag.qregs.values())
elif self.layout_option == 'random':
init_layout = self.create_random_layout(dag)
elif self.layout_option == 'heuristic':
init_layout = self.create_random_layout(dag)
init_layout = self.create_heuristic_layout(dag)
self.initial_layout = init_layout.copy()
sabre = Sabre(self.coupling_map)
return sabre.sabre_swap(dag.front_layer(), init_layout, dag, self.coupling_map)[0]
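# Hypothetical sketch (not part of the original pass): the counting done in
# gate_analysis() boils down to tallying how often each qubit appears in a
# two-qubit gate and sorting by that count, e.g. with plain Python:
#
#   from collections import Counter
#   pairs = [(0, 1), (0, 2), (1, 2), (0, 3)]            # made-up (control, target) pairs
#   usage = Counter(q for pair in pairs for q in pair)   # {0: 3, 1: 2, 2: 2, 3: 1}
#   sorted(usage.items(), key=lambda x: x[1], reverse=True)[0][0]  # -> qubit 0 goes to the centre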
``` |
{
"source": "JonasReubelt/PeeEmTee",
"score": 3
} |
#### File: PeeEmTee/peeemtee/tools.py
```python
import os
import numpy as np
from scipy.optimize import curve_fit
from numba import njit
import h5py
import codecs
import datetime
import pytz.reference
from sklearn.neighbors import KernelDensity
TIMEZONE = pytz.reference.LocalTimezone()
def gaussian(x, mean, sigma, A):
return (
A
/ np.sqrt(2 * np.pi)
/ sigma
* np.exp(-0.5 * (x - mean) ** 2 / sigma ** 2)
)
def gaussian_with_offset(x, mean, sigma, A, offset):
return (
A
/ np.sqrt(2 * np.pi)
/ sigma
* np.exp(-0.5 * (x - mean) ** 2 / sigma ** 2)
+ offset
)
def calculate_charges(
waveforms, ped_min, ped_max, sig_min, sig_max, method="sum"
):
"""
Calculates the charges of an array of waveforms
Parameters
----------
waveforms: np.array
2D numpy array with one waveform in each row
[[waveform1],
[waveform2],
...]
ped_min: int
minimum of window for pedestal integration
ped_max: int
maximum of window for pedestal integration
sig_min: int
minimum of window for signal integration
sig_max: int
maximum of window for signal integration
method: string
method used for "integration"
"sum" -> np.sum
"trapz" -> np.trapz
Returns
-------
charges: np.array
1D array with charges matching axis 0 of the waveforms array
"""
sig_ped_ratio = (sig_max - sig_min) / (ped_max - ped_min)
if method == "sum":
func = np.sum
elif method == "trapz":
func = np.trapz
else:
print("unknown method. try sum or trapz!")
return None
pedestals = func(waveforms[:, ped_min:ped_max], axis=1)
signals = func(waveforms[:, sig_min:sig_max], axis=1)
charges = -(signals - pedestals * sig_ped_ratio)
return charges
def calculate_transit_times(
signals, baseline_min, baseline_max, threshold, polarity="negative"
):
"""
Calculates transit times of signals
Parameters
----------
signals: np.array
2D numpy array with one signal waveform in each row
[[signal1],
[signal2],
...]
baseline_min: int
minimum of baseline calculation window
baseline_max: int
maximum of baseline calculation window
threshold: float
transit time is calculated when signal crosses threshold
polarity: str
'positive' if PMT signals have positive polarity,
'negative' if PMT signals have negative polarity
Returns
-------
charges: np.array
1D array with transit times matching axis 0 of the signals array
"""
zeroed_signals = (
signals.T - np.mean(signals[:, baseline_min:baseline_max], axis=1)
).T
if polarity == "negative":
transit_times = np.argmax(zeroed_signals < threshold, axis=1)
elif polarity == "positive":
transit_times = np.argmax(zeroed_signals > threshold, axis=1)
else:
print("polarity has to be 'positive' or 'negative'")
return None
return transit_times[transit_times != 0]
def bin_data(data, bins=10, range=None, density=False):
"""
Calculates values and bin centres of a histogram of a set of data
Parameters
----------
data: list or np.array
1D array of input data
bins: int
number of bins of the histogram
range: tuple(int)
lower and upper range of the bins
density: boolean
set to True to normalize the histogram data
Returns
-------
x: np.array
bin centres of the histogram
y: np.array
values of the histogram
"""
y, x = np.histogram(data, bins=bins, range=range, density=density)
x = x[:-1]
x = x + (x[1] - x[0]) / 2
return x, y
def calculate_persist_data(waveforms, bins=(10, 10), range=None):
"""
Calculates 2D histogram data like persistence mode on oscilloscope
Parameters
----------
waveforms: np.array
2D numpy array with one waveform in each row
[[waveform1],
[waveform2],
...]
bins: tuple(int)
number of bins in both directions
range: tuple(tuple(int))
lower and upper range of the x-bins and y-bins
Returns
-------
x: np.array
x-bin centres of the histogram
y: np.array
y-bin centres of the histogram
z: np.array
z values of the histogram
"""
times = np.tile(np.arange(waveforms.shape[1]), (waveforms.shape[0], 1))
z, xs, ys = np.histogram2d(
times.flatten(), waveforms.flatten(), bins=bins, range=range
)
xs = (xs + (xs[1] - xs[0]) / 2)[:-1]
ys = (ys + (ys[1] - ys[0]) / 2)[:-1]
x = np.array([[x] * bins[1] for x in xs])
y = np.array(list(ys) * bins[0])
return x.flatten(), y.flatten(), z.flatten()
def calculate_mean_signal(signals, shift_by="min"):
"""
Calculates mean signals from several PMT signals with shifting the signals
by their minimum or maximum to correct for time jitter
Parameters
----------
signals: np.array
2D numpy array with one signal (y-values) in each row
[[signal1],
[signal2],
...]
shift_by: str
shift by "min" or "max" of the signal to correct for time jitter
Returns
-------
mean_signal: np.array
y values of the mean signal
"""
rolled_signals = []
if shift_by == "min":
f = np.argmin
elif shift_by == "max":
f = np.argmax
else:
print("can only shift by 'min' or 'max'")
return None
nx = signals.shape[1]
xs = np.arange(nx)
for signal in signals:
shift = f(signal)
rolled_signals.append(np.roll(signal, -shift + int(nx / 2)))
mean_signal = np.mean(rolled_signals, axis=0)
return mean_signal
@njit
def peak_finder(waveforms, threshold): # pragma: no cover
"""
Finds peaks in waveforms
Parameters
----------
waveforms: np.array
2D numpy array with one waveform (y-values) in each row
[[waveform1],
[waveform2],
...]
threshold: float
voltage value the waveform has to cross in order to identify a peak
Returns
-------
peak_positions: list(list(floats))
peak positions (fractional sample indices) for each waveform that crosses the threshold
"""
peak_positions = []
I, J = waveforms.shape
for i in range(I):
peaks = []
X = 0
x = 0
for j in range(J):
if waveforms[i][j] <= threshold:
X += j
x += 1
if j + 1 >= J or waveforms[i][j + 1] > threshold:
peaks.append(X / x)
X = 0
x = 0
if len(peaks) > 0:
peak_positions.append(peaks)
return peak_positions
def find_nominal_hv(filename, nominal_gain):
"""
Finds nominal HV of a measured PMT dataset
Parameters
----------
filename: string
nominal gain: float
gain for which the nominal HV should be found
Returns
-------
nominal_hv: int
nominal HV
"""
f = h5py.File(filename, "r")
gains = []
hvs = []
keys = f.keys()
for key in keys:
gains.append(f[key]["fit_results"]["gain"][()])
hvs.append(int(key))
f.close()
gains = np.array(gains)
hvs = np.array(hvs)
diff = abs(np.array(gains) - nominal_gain)
nominal_hv = int(hvs[diff == np.min(diff)])
return nominal_hv
def calculate_rise_times(waveforms, relative_thresholds=(0.1, 0.9)):
"""
Calculates rise times of waveforms
Parameters
----------
waveforms: np.array
2D numpy array with one waveform (y-values) in each row
[[waveform1],
[waveform2],
...]
relative_thresholds: tuple(float)
relative lower and upper thresholds between which to calculate the rise time
Returns
-------
rise_times: np.array
rise times
"""
mins = np.min(waveforms, axis=1)
argmins = np.argmin(waveforms, axis=1)
rise_times = []
for min, argmin, waveform in zip(mins, argmins, waveforms):
below_first_thr = waveform > (min * relative_thresholds[0])
below_second_thr = waveform > (min * relative_thresholds[1])
try:
first_time = argmin - np.argmax(below_first_thr[:argmin][::-1])
second_time = argmin - np.argmax(below_second_thr[:argmin][::-1])
except ValueError:
first_time = 0
second_time = 0
rise_times.append(second_time - first_time)
return np.array(rise_times)
def read_spectral_scan(filename):
"""Reads wavelengths and currents from spectral PMT or PHD scan
Parameters
----------
filename: str
Returns
-------
(wavelengths, currents): (np.array(float), np.array(float))
"""
data = np.loadtxt(filename, unpack=True, encoding="latin1")
with codecs.open(filename, "r", encoding="utf-8", errors="ignore") as f:
dcs = f.read().split("\n")[-2].split("\t")
wavelengths = data[0]
currents = data[1]
dc = np.linspace(float(dcs[-2]), float(dcs[-1]), len(currents))
currents = currents - dc
return wavelengths, currents
def read_datetime(filename):
"""Reads time of a spectral PMT or PHD scan
Parameters
----------
filename: str
Returns
-------
time: str
"""
f = codecs.open(filename, "r", encoding="utf-8", errors="ignore")
datetime_string = f.read().split("\n")[2]
f.close()
return datetime_string.split(" ")[1] + ";" + datetime_string.split(" ")[2]
def convert_to_secs(date_time):
"""Converts time string to seconds
Parameters
----------
date_time: str
Returns
-------
unix time in seconds: int
"""
t = datetime.datetime.strptime(date_time, "%Y-%m-%d;%H:%M:%S")
return t.timestamp() + TIMEZONE.utcoffset(t).seconds
def choose_ref(phd_filenames, pmt_filename):
"""Chooses reference measurement closest (in time) to the actual measurement
Parameters
----------
phd_filenames: list(str)
pmt_filename: str
Returns
-------
phd_filename: str
"""
diffs = []
pmt_time = convert_to_secs(read_datetime(pmt_filename))
for filename in phd_filenames:
phd_time = convert_to_secs(read_datetime(filename))
diffs.append(abs(pmt_time - phd_time))
phd_filename = phd_filenames[np.argmin(diffs)]
return phd_filename
def remove_double_peaks(peaks, distance=20):
"""Removes secondary peaks with a distance <= distance from the primary
peak from 2D array of peaks
Parameters
----------
peaks: 2D array of peaks
distance: float
Returns
-------
new_peaks: 2D np.array
"""
new_peaks = []
for peak in peaks:
mp = -(distance + 1)
new_peak = []
for p in peak:
if np.fabs(mp - p) >= distance:
new_peak.append(p)
mp = p
new_peaks.append(new_peak)
return np.array(new_peaks)
def peaks_with_signal(peaks, signal_range):
"""Returns peaks with at least one peak in signal_range
Parameters
----------
peaks: 2D array of peaks
signal_range: tuple(float)
(min, max) of signal window
Returns
-------
peaks_with_signal: 2D np.array
"""
peaks_with_signal = []
for peak in peaks:
got_signal = False
for p in peak:
if p > signal_range[0] and p < signal_range[1]:
got_signal = True
if got_signal:
peaks_with_signal.append(peak)
return peaks_with_signal
def estimate_kernel_density(
data, kernel="tophat", bandwidth=0.02, n_sampling_points=200
):
"""Estimates kernel density of given data in order to avoid binning artifacts
Parameters
----------
data: list or np.array
1D array of input data
kernel: str
kernel to use for estimation ("tophat", "gaussian", etc.)
bandwidth: float
bandwidth of the kernel
n_sampling_points: int
number of sample points to return from distribution
Returns
-------
x: np.array(float)
x-values of samples of distribution
y: np.array(float)
y-values of samples of distribution
"""
kde = KernelDensity(bandwidth=bandwidth, kernel=kernel)
kde.fit(data[:, None])
x = np.linspace(np.min(data), np.max(data), n_sampling_points)
y = np.exp(kde.score_samples(x[:, None]))
return x, y
@njit
def align_waveforms(
waveforms, baseline_min=None, baseline_max=None, inplace=True
): # pragma: no cover
"""
Subtracts the mean of (a part of the) waveforms from waveforms (individually)
Parameters
----------
waveforms: np.array
2D numpy array with one waveform in each row
[[waveform1],
[waveform2],
...]
baseline_min: int
index of minimum of window for mean calculation (included)
baseline_max: int
index of maximum of window for mean calculation (excluded)
inplace: bool
perform calculation inplace or not
Returns
-------
waveforms: np.array
aligned waveform array
[[aligned waveform1],
[aligned waveform2],
...]
"""
if not inplace:
waveforms = np.copy(waveforms)
n, m = waveforms.shape
for i in range(n):
mean = np.mean(waveforms[i][baseline_min:baseline_max])
for j in range(m):
waveforms[i][j] -= mean
return waveforms
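if __name__ == "__main__":
    # Minimal usage sketch with synthetic data (not part of the original module):
    # build fake negative-polarity waveforms, integrate their charges and bin them.
    np.random.seed(42)
    waveforms = np.random.normal(0, 1, size=(1000, 100))
    waveforms[:, 40:60] -= 20  # inject a fake signal window
    charges = calculate_charges(waveforms, ped_min=0, ped_max=20, sig_min=40, sig_max=60)
    x, y = bin_data(charges, bins=50)
    print(x[:3], y[:3])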
``` |
{
"source": "jonasrk/cd_dot_cz_price_search",
"score": 3
} |
#### File: cd_dot_cz_price_search/tests/test_cd_dot_cz_price_search.py
```python
import unittest
# from cd_dot_cz_price_search import cd_dot_cz_price_search
class TestCd_dot_cz_price_search(unittest.TestCase):
"""Tests for `cd_dot_cz_price_search` package."""
def setUp(self):
"""Set up test fixtures, if any."""
def tearDown(self):
"""Tear down test fixtures, if any."""
def test_000_something(self):
"""Test something."""
``` |
{
"source": "jonasrk/minimal_telegram_bot",
"score": 3
} |
#### File: minimal_telegram_bot/minimal_telegram_bot/minimal_telegram_bot.py
```python
import ast
import os
from io import StringIO
import pandas as pd
import requests
import telegram
# import pickle
MAX_TELEGRAM_MESSAGE_LENGTH = 4096
BASE_REQUEST_URL = "https://pixometer.io/api/v1/"
FIRST_REQUEST_URL = f"{BASE_REQUEST_URL}access-token/"
SECOND_REQUEST_URL = f"{BASE_REQUEST_URL}readings/?format=csv&o="
FIRST_REQUEST_HEADERS = {"Content-Type": "application/json;charset=UTF-8"}
SECOND_REQUEST_HEADERS = {"Cookie": ("access_token=")}
def main(event=None, context=None): # pylint: disable=unused-argument
"""main docstring"""
pixometer_email = os.environ["PIXOMETER_EMAIL"]
pixometer_password = os.environ["PIXOMETER_PASSWORD"]  # env var name reconstructed; the original value was redacted
body = (
'{"email":"'
+ pixometer_email
+ '","password":"'
+ pixometer_password
+ '"}'
)
with requests.session() as session:
first_response = session.post(
FIRST_REQUEST_URL, data=body, headers=FIRST_REQUEST_HEADERS
)
# pickle.dump(first_response, open("first_response.pickle", "wb"))
# first_response = pickle.load(open("first_response.pickle", "rb"))
first_response_string = first_response.content.decode("UTF-8")
first_response_dict = ast.literal_eval(first_response_string)
access_token = first_response_dict["access_token"]
SECOND_REQUEST_HEADERS[
"Cookie"
] = f"{SECOND_REQUEST_HEADERS['Cookie']}{access_token}"
second_response = session.get(
SECOND_REQUEST_URL, headers=SECOND_REQUEST_HEADERS
)
# pickle.dump(second_response, open("second_response.pickle", "wb"))
# second_response = pickle.load(open("second_response.pickle", "rb"))
second_response_string = second_response.content.decode("UTF-8")
tg_msg = interpret_csv(second_response_string)
send_telegram_message(tg_msg)
return "OK"
def send_telegram_message(tg_msg):
"""Sending telegram message."""
telegram_access_token = os.environ["TELEGRAM_ACCESS_TOKEN"]
telegram_chat_id = os.environ["TELEGRAM_CHAT_ID"]
bot = telegram.Bot(token=telegram_access_token)
bot.send_message(
chat_id=telegram_chat_id, text=tg_msg[:MAX_TELEGRAM_MESSAGE_LENGTH],
)
def interpret_csv(input_csv):
"""Interpolating data and more"""
input_csv_f = StringIO(input_csv)
dataframe = pd.read_csv(input_csv_f)
dataframe["Reading date"] = pd.to_datetime(
dataframe["Reading date"], format="%d.%m.%Y %H:%M"
)
dataframe["Reading date"] = dataframe["Reading date"].dt.date
start_date = pd.to_datetime("05.03.2019", format="%d.%m.%Y").date()
mask = dataframe["Reading date"] > start_date
dataframe = dataframe.loc[mask]
output_str = ""
for meter in ["Power Meter", "Gas Meter"]:
df_meter = dataframe[dataframe["Location in building"] == meter]
df_meter = df_meter[["Reading date", "Value"]]
df_meter["Reading date"] = pd.to_datetime(df_meter["Reading date"])
df_meter = df_meter.set_index("Reading date")
df_meter = df_meter.resample("D").interpolate()
output_str += generate_insights(df_meter, meter)
return output_str
def generate_insights(dataframe, meter):
"""Making sense of the CSV data"""
output_str = ""
# Day
output_str += f"--- {meter} ---\n"
output_str += f"\nLast measured day: {str(dataframe.iloc[-1].name)[:10]}\n"
day_value = dataframe.iloc[-1]["Value"]
day_min1_value = dataframe.iloc[-2]["Value"]
day_diff = day_value - day_min1_value
output_str += (
f"\nIn the last day, you consumed:"
f" {kwh_to_euro_string(meter, day_diff)}"
)
day_min2_value = dataframe.iloc[-3]["Value"]
day_min1_diff = day_min1_value - day_min2_value
output_str += (
f"\nIn the previous day, you consumed"
f": {kwh_to_euro_string(meter, day_min1_diff)}"
)
day_rel_diff = day_diff / day_min1_diff
output_str += f"\nIncrease: {int(day_rel_diff * 100 - 100)}%"
# Week
day_value = dataframe.iloc[-1]["Value"]
day_min7_value = dataframe.iloc[-8]["Value"]
week_diff = day_value - day_min7_value
output_str += (
f"\n\nIn the last week, you consumed:"
f" {kwh_to_euro_string(meter, week_diff)}"
)
day_min14_value = dataframe.iloc[-15]["Value"]
week_min1_diff = day_min7_value - day_min14_value
output_str += (
f"\nIn the previous week, you consumed:"
f" {kwh_to_euro_string(meter, week_min1_diff)}"
)
week_rel_diff = week_diff / week_min1_diff
output_str += f"\nIncrease: {int(week_rel_diff * 100 - 100)}%\n\n"
return output_str
def kwh_to_euro_string(meter, val):
"""Convert readings into Euro"""
if meter == "Power Meter":
val = val * 0.277 # eprimo electricity price in €/kWh
elif meter == "Gas Meter":
val = val * 18.5 # convert m³ of gas to kWh
val = val * 0.0542 # eprimo gas price in €/kWh
return f"{round(val, 2)}€"
if __name__ == "__main__":
main()
``` |
{
"source": "jonasrla/desafio_youse",
"score": 2
} |
#### File: parte_2/Context/create_policy_context.py
```python
from .base_context import BaseContext
from pyspark.sql import functions as f
class CreatePolicyContext(BaseContext):
def __init__(self, file_path):
self.app_name = 'Process Create Policy'
super().__init__(file_path)
def process(self):
df_policy = self.transformation()
self.append_table(df_policy, 'policies')
def transformation(self):
df_result = self.input.selectExpr('payload.policy_number as id',
'payload.order_uuid as order_id',
'payload.insurance_type as insurance_type',
'raw_timestamp as created_at')
df_result = df_result.withColumn('status', f.lit('created'))
df_result = df_result.withColumn('created_at',
f.from_unixtime(df_result.created_at))
return df_result
```
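A standalone sketch (with made-up field values) of what the `transformation` above does to a single event; the payload schema is inferred from the `selectExpr` calls and is an assumption.
```python
from pyspark.sql import SparkSession, functions as f

spark = SparkSession.builder.master("local[1]").appName("sketch").getOrCreate()
df = spark.createDataFrame(
    [((1000075771401371, "4472ef6b", "life"), 1521210188)],
    "payload struct<policy_number:long, order_uuid:string, insurance_type:string>, raw_timestamp long",
)
result = (df.selectExpr("payload.policy_number as id",
                        "payload.order_uuid as order_id",
                        "payload.insurance_type as insurance_type",
                        "raw_timestamp as created_at")
            .withColumn("status", f.lit("created"))
            .withColumn("created_at", f.from_unixtime("created_at")))
result.show()  # one row with id, order_id, insurance_type, created_at, status
```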
#### File: parte_2/Context/quote_order_context.py
```python
from pyspark.sql.functions import from_unixtime
from .base_context import BaseContext
class QuoteOrderContext(BaseContext):
def __init__(self, file_path):
self.app_name = 'Process Quote Order'
super().__init__(file_path)
def process(self):
df_quote = self.transformation()
self.update_table(df_quote, 'orders')
def transformation(self):
df_quote = self.input.selectExpr('payload.order_uuid as id',
'payload.pricing.monthly_cost as pricing',
'raw_timestamp as updated_at')
df_quote = df_quote.withColumn('updated_at',
from_unixtime(df_quote.updated_at))
return df_quote
```
#### File: parte_2/Context/update_policy_context.py
```python
from pyspark.sql import functions as f
from .base_context import BaseContext
class UpdatePolicyContext(BaseContext):
def __init__(self, file_path, status):
self.status = status
self.app_name = 'Updates policy'
super().__init__(file_path)
def process(self):
data = self.transformation()
self.update_table(data, 'policies')
def transformation(self):
if 'reason' not in self.input.select('payload.*').columns:
df_result = self.input.selectExpr('payload.policy_number as id',
'raw_timestamp as updated_at')
else:
df_result = self.input.selectExpr('payload.policy_number as id',
'raw_timestamp as updated_at',
'payload.reason')
df_result = df_result.withColumn('status', f.lit(self.status))
df_result = df_result.withColumn('updated_at',
f.from_unixtime(df_result.updated_at))
return df_result
```
#### File: test/Context/test_create_policy_context.py
```python
import sqlite3
import pytest
from Context import CreatePolicyContext
@pytest.fixture
def context():
context = CreatePolicyContext('test/extra/data/create.policy.json')
return context
def test_policy(context):
df_policy = context.transformation()
list_data = df_policy.toPandas().to_dict('records')
assert 'id' in df_policy.columns
assert list_data[0]['id'] == 1000075771401371
assert list_data[1]['id'] == 1000041110339228
assert list_data[0]['order_id'] == '4472ef6b-df63-594f-a70a-9cca68dfda09'
assert list_data[1]['order_id'] == '3c8bebd4-9b36-569c-a9fa-0ab0e71bab9f'
assert list_data[0]['insurance_type'] == 'life'
assert list_data[1]['insurance_type'] == 'life'
assert list_data[0]['status'] == 'created'
assert list_data[1]['status'] == 'created'
assert list_data[0]['created_at'] == '2018-03-16 14:23:08'
assert list_data[1]['created_at'] == '2018-03-16 15:23:18'
``` |
{
"source": "jonasrothfuss/Conditional_Density_Estimation",
"score": 2
} |
#### File: Conditional_Density_Estimation/cde/BaseConditionalDensity.py
```python
from sklearn.base import BaseEstimator
from cde.utils.integration import mc_integration_student_t, numeric_integation
from cde.utils.center_point_select import *
import scipy.stats as stats
import matplotlib as mpl
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
import scipy
from cde.utils.optimizers import find_root_newton_method, find_root_by_bounding
""" Default Numerical Integration Standards"""
N_SAMPLES_INT = 10**5
N_SAMPLES_INT_TIGHT_BOUNDS = 10**4
LOWER_BOUND = - 10 ** 3
UPPER_BOUND = 10 ** 3
""" Default Monte-Carlo Integration Standards"""
DOF = 6
LOC_PROPOSAL = 0
SCALE_PROPOSAL = 2
class ConditionalDensity(BaseEstimator):
""" MEAN """
def _mean_mc(self, x_cond, n_samples=10 ** 6):
if hasattr(self, 'sample'):
sample = self.sample
elif hasattr(self, 'simulate_conditional'):
sample = self.simulate_conditional
else:
raise AssertionError("Requires sample or simulate_conditional method")
means = np.zeros((x_cond.shape[0], self.ndim_y))
for i in range(x_cond.shape[0]):
x = np.tile(x_cond[i].reshape((1, x_cond[i].shape[0])), (n_samples, 1))
_, samples = sample(x)
means[i, :] = np.mean(samples, axis=0)
return means
def _mean_pdf(self, x_cond, n_samples=10 ** 6):
means = np.zeros((x_cond.shape[0], self.ndim_y))
for i in range(x_cond.shape[0]):
mean_fun = lambda y: y
if self.ndim_y == 1:
n_samples_int, lower, upper = self._determine_integration_bounds()
func_to_integrate = lambda y: mean_fun(y) * np.squeeze(self._tiled_pdf(y, x_cond[i], n_samples_int))
integral = numeric_integation(func_to_integrate, n_samples_int, lower, upper)
else:
loc_proposal, scale_proposal = self._determine_mc_proposal_dist()
func_to_integrate = lambda y: mean_fun(y) * self._tiled_pdf(y, x_cond[i], n_samples)
integral = mc_integration_student_t(func_to_integrate, ndim=self.ndim_y, n_samples=n_samples,
loc_proposal=loc_proposal, scale_proposal=scale_proposal)
means[i] = integral
return means
""" STANDARD DEVIATION """
def _std_pdf(self, x_cond, n_samples=10**6, mean=None):
assert hasattr(self, "mean_")
assert hasattr(self, "pdf")
if mean is None:
mean = self.mean_(x_cond, n_samples=n_samples)
if self.ndim_y == 1: # compute with numerical integration
stds = np.zeros((x_cond.shape[0], self.ndim_y))
for i in range(x_cond.shape[0]):
mu = np.squeeze(mean[i])
n_samples_int, lower, upper = self._determine_integration_bounds()
func_to_integrate = lambda y: (y-mu)**2 * np.squeeze(self._tiled_pdf(y, x_cond[i], n_samples_int))
stds[i] = np.sqrt(numeric_integation(func_to_integrate, n_samples_int, lower, upper))
else: # call covariance and return sqrt of diagonal
covs = self.covariance(x_cond, n_samples=n_samples)
stds = np.sqrt(np.diagonal(covs, axis1=1, axis2=2))
return stds
def _std_mc(self, x_cond, n_samples=10**6):
if hasattr(self, 'sample'):
sample = self.sample
elif hasattr(self, 'simulate_conditional'):
sample = self.simulate_conditional
else:
raise AssertionError("Requires sample or simulate_conditional method")
stds = np.zeros((x_cond.shape[0], self.ndim_y))
for i in range(x_cond.shape[0]):
x = np.tile(x_cond[i].reshape((1, x_cond[i].shape[0])), (n_samples, 1))
_, samples = sample(x)
stds[i, :] = np.std(samples, axis=0)
return stds
""" COVARIANCE """
def _covariance_pdf(self, x_cond, n_samples=10 ** 6, mean=None):
assert hasattr(self, "mean_")
assert hasattr(self, "pdf")
assert mean is None or mean.shape == (x_cond.shape[0], self.ndim_y)
loc_proposal, scale_proposal = self._determine_mc_proposal_dist()
if mean is None:
mean = self.mean_(x_cond, n_samples=n_samples)
covs = np.zeros((x_cond.shape[0], self.ndim_y, self.ndim_y))
for i in range(x_cond.shape[0]):
x = np.tile(x_cond[i].reshape((1, x_cond[i].shape[0])), (n_samples, 1))
def cov(y):
a = (y - mean[i])
# compute cov matrices c for sampled instances and weight them with the probability p from the pdf
c = np.empty((a.shape[0], a.shape[1] ** 2))
for j in range(a.shape[0]):
c[j, :] = np.reshape(np.outer(a[j], a[j]), (a.shape[1] ** 2,))
p = np.tile(np.expand_dims(self.pdf(x, y), axis=1), (1, self.ndim_y ** 2))
res = c * p
return res
integral = mc_integration_student_t(cov, ndim=self.ndim_y, n_samples=n_samples,
loc_proposal=loc_proposal, scale_proposal=scale_proposal)
covs[i] = integral.reshape((self.ndim_y, self.ndim_y))
return covs
def _covariance_mc(self, x_cond, n_samples=10 ** 6):
if hasattr(self, 'sample'):
sample = self.sample
elif hasattr(self, 'simulate_conditional'):
sample = self.simulate_conditional
else:
raise AssertionError("Requires sample or simulate_conditional method")
covs = np.zeros((x_cond.shape[0], self.ndim_y, self.ndim_y))
for i in range(x_cond.shape[0]):
x = np.tile(x_cond[i].reshape((1, x_cond[i].shape[0])), (n_samples, 1))
_, y_sample = sample(x)
c = np.cov(y_sample, rowvar=False)
covs[i] = c
return covs
""" SKEWNESS """
def _skewness_pdf(self, x_cond, n_samples=10 ** 6, mean=None, std=None):
assert self.ndim_y == 1, "this function does not support co-skewness - target variable y must be one-dimensional"
assert hasattr(self, "mean_")
assert hasattr(self, "pdf")
assert hasattr(self, "covariance")
if mean is None:
mean = np.reshape(self.mean_(x_cond, n_samples), (x_cond.shape[0],))
if std is None:
std = np.reshape(np.sqrt(self.covariance(x_cond, n_samples=n_samples)), (x_cond.shape[0],))
skewness = np.empty(shape=(x_cond.shape[0],))
n_samples_int, lower, upper = self._determine_integration_bounds()
for i in range(x_cond.shape[0]):
mu = np.squeeze(mean[i])
sigm = np.squeeze(std[i])
func_skew = lambda y: ((y - mu) / sigm)**3 * np.squeeze(self._tiled_pdf(y, x_cond[i], n_samples_int))
skewness[i] = numeric_integation(func_skew, n_samples=n_samples_int)
return skewness
def _skewness_mc(self, x_cond, n_samples=10 ** 6):
if hasattr(self, 'sample'):
sample = self.sample
elif hasattr(self, 'simulate_conditional'):
sample = self.simulate_conditional
else:
raise AssertionError("Requires sample or simulate_conditional method")
skewness = np.empty(shape=(x_cond.shape[0],))
for i in range(x_cond.shape[0]):
x = np.tile(x_cond[i].reshape((1, x_cond[i].shape[0])), (n_samples, 1))
_, y_sample = sample(x)
skewness[i] = scipy.stats.skew(y_sample)
return skewness
""" KURTOSIS """
def _kurtosis_pdf(self, x_cond, n_samples=10 ** 6, mean=None, std=None):
assert self.ndim_y == 1, "this function does not support co-kurtosis - target variable y must be one-dimensional"
assert hasattr(self, "mean_")
assert hasattr(self, "pdf")
assert hasattr(self, "covariance")
if mean is None:
mean = np.reshape(self.mean_(x_cond, n_samples), (x_cond.shape[0],))
if std is None:
std = np.reshape(np.sqrt(self.covariance(x_cond, n_samples=n_samples)), (x_cond.shape[0],))
n_samples_int, lower, upper = self._determine_integration_bounds()
kurtosis = np.empty(shape=(x_cond.shape[0],))
for i in range(x_cond.shape[0]):
mu = np.squeeze(mean[i])
sigm = np.squeeze(std[i])
func_skew = lambda y: ((y - mu)**4 / sigm**4) * np.squeeze(self._tiled_pdf(y, x_cond[i], n_samples_int))
kurtosis[i] = numeric_integation(func_skew, n_samples=n_samples_int)
return kurtosis - 3 # excess kurtosis
def _kurtosis_mc(self, x_cond, n_samples=10 ** 6):
if hasattr(self, 'sample'):
sample = self.sample
elif hasattr(self, 'simulate_conditional'):
sample = self.simulate_conditional
else:
raise AssertionError("Requires sample or simulate_conditional method")
kurtosis = np.empty(shape=(x_cond.shape[0],))
for i in range(x_cond.shape[0]):
x = np.tile(x_cond[i].reshape((1, x_cond[i].shape[0])), (n_samples, 1))
_, y_sample = sample(x)
kurtosis[i] = scipy.stats.kurtosis(y_sample)
return kurtosis
""" QUANTILES / VALUE-AT-RISK """
def _quantile_mc(self, x_cond, alpha=0.01, n_samples=10 ** 6):
if hasattr(self, 'sample'):
sample = self.sample
elif hasattr(self, 'simulate_conditional'):
sample = self.simulate_conditional
else:
raise AssertionError("Requires sample or simulate_conditional method")
assert x_cond.ndim == 2
VaRs = np.zeros(x_cond.shape[0])
x_cond = np.tile(x_cond.reshape((1, x_cond.shape[0], x_cond.shape[1])), (n_samples,1, 1))
for i in range(x_cond.shape[1]):
_, samples = sample(x_cond[:, i,:])
VaRs[i] = np.percentile(samples, alpha * 100.0)
return VaRs
def _quantile_cdf(self, x_cond, alpha=0.01, eps=1e-8, init_bound=1e3):
# finds the alpha quantile of the distribution through root finding by bounding
cdf_fun = lambda y: self.cdf(x_cond, y) - alpha
init_bound = init_bound * np.ones(x_cond.shape[0])
return find_root_by_bounding(cdf_fun, left=-init_bound, right=init_bound, eps=eps)
""" CONDITONAL VALUE-AT-RISK """
def _conditional_value_at_risk_mc_pdf(self, VaRs, x_cond, alpha=0.01, n_samples=10 ** 6):
assert VaRs.shape[0] == x_cond.shape[0], "same number of x_cond must match the number of values_at_risk provided"
assert self.ndim_y == 1, 'this function only supports only ndim_y = 1'
assert x_cond.ndim == 2
n_samples_int, lower, _ = self._determine_integration_bounds()
CVaRs = np.zeros(x_cond.shape[0])
for i in range(x_cond.shape[0]):
upper = float(VaRs[i])
func_to_integrate = lambda y: y * np.squeeze(self._tiled_pdf(y, x_cond[i], n_samples_int))
integral = numeric_integation(func_to_integrate, n_samples_int, lower, upper)
CVaRs[i] = integral / alpha
return CVaRs
def _conditional_value_at_risk_sampling(self, VaRs, x_cond, n_samples=10 ** 6):
if hasattr(self, 'sample'):
sample = self.sample
elif hasattr(self, 'simulate_conditional'):
sample = self.simulate_conditional
else:
raise AssertionError("Requires sample or simulate_conditional method")
CVaRs = np.zeros(x_cond.shape[0])
x_cond = np.tile(x_cond.reshape((1, x_cond.shape[0], x_cond.shape[1])), (n_samples, 1, 1))
for i in range(x_cond.shape[1]):
_, samples = sample(x_cond[:, i, :])
shortfall_samples = np.ma.masked_where(VaRs[i] < samples, samples)
CVaRs[i] = np.mean(shortfall_samples)
return CVaRs
""" OTHER HELPERS """
def _handle_input_dimensionality(self, X, Y=None, fitting=False):
# assert that both X an Y are 2D arrays with shape (n_samples, n_dim)
if X.ndim == 1:
X = np.expand_dims(X, axis=1)
if Y is not None:
if Y.ndim == 1:
Y = np.expand_dims(Y, axis=1)
assert X.shape[0] == Y.shape[0], "X and Y must have the same length along axis 0"
assert X.ndim == Y.ndim == 2, "X and Y must be matrices"
if fitting: # store n_dim of training data
self.ndim_y, self.ndim_x = Y.shape[1], X.shape[1]
else:
assert X.shape[1] == self.ndim_x, "X must have shape (?, %i) but provided X has shape %s" % (self.ndim_x, X.shape)
if Y is not None:
assert Y.shape[1] == self.ndim_y, "Y must have shape (?, %i) but provided Y has shape %s" % (
self.ndim_y, Y.shape)
if Y is None:
return X
else:
return X, Y
def plot2d(self, x_cond=[0, 1, 2], ylim=(-8, 8), resolution=100, mode='pdf', show=True, prefix='', numpyfig=False):
""" Generates a 3d surface plot of the fitted conditional distribution if x and y are 1-dimensional each
Args:
xlim: 2-tuple specifying the x axis limits
ylim: 2-tuple specifying the y axis limits
resolution: integer specifying the resolution of plot
"""
assert self.ndim_y == 1, "Can only plot two dimensional distributions"
# prepare mesh
# turn off interactive mode is show is set to False
if show == False and mpl.is_interactive():
plt.ioff()
mpl.use('Agg')
fig = plt.figure(dpi=300)
labels = []
for i in range(len(x_cond)):
Y = np.linspace(ylim[0], ylim[1], num=resolution)
X = np.array([x_cond[i] for _ in range(resolution)])
# calculate values of distribution
if mode == "pdf":
Z = self.pdf(X, Y)
elif mode == "cdf":
Z = self.cdf(X, Y)
elif mode == "joint_pdf":
Z = self.joint_pdf(X, Y)
label = "x="+ str(x_cond[i]) if self.ndim_x > 1 else 'x=%.2f' % x_cond[i]
labels.append(label)
plt_out = plt.plot(Y, Z, label=label)
plt.legend([prefix + label for label in labels], loc='upper right')
plt.xlabel("x")
plt.ylabel("y")
if show:
plt.show()
if numpyfig:
fig.tight_layout(pad=0)
fig.canvas.draw()
numpy_img = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='')
numpy_img = numpy_img.reshape(fig.canvas.get_width_height()[::-1] + (3,))
return numpy_img
return fig
def plot3d(self, xlim=(-5, 5), ylim=(-8, 8), resolution=100, show=False, numpyfig=False):
""" Generates a 3d surface plot of the fitted conditional distribution if x and y are 1-dimensional each
Args:
xlim: 2-tuple specifying the x axis limits
ylim: 2-tuple specifying the y axis limits
resolution: integer specifying the resolution of plot
"""
assert self.ndim_x + self.ndim_y == 2, "Can only plot two dimensional distributions"
if show == False and mpl.is_interactive():
plt.ioff()
mpl.use('Agg')
# prepare mesh
linspace_x = np.linspace(xlim[0], xlim[1], num=resolution)
linspace_y = np.linspace(ylim[0], ylim[1], num=resolution)
X, Y = np.meshgrid(linspace_x, linspace_y)
X, Y = X.flatten(), Y.flatten()
# calculate values of distribution
Z = self.pdf(X, Y)
X, Y, Z = X.reshape([resolution, resolution]), Y.reshape([resolution, resolution]), Z.reshape(
[resolution, resolution])
fig = plt.figure(dpi=300)
ax = fig.gca(projection='3d')
surf = ax.plot_surface(X, Y, Z, cmap=cm.coolwarm, rcount=resolution, ccount=resolution,
linewidth=100, antialiased=True)
plt.xlabel("x")
plt.ylabel("y")
if show:
plt.show()
if numpyfig:
fig.tight_layout(pad=0)
fig.canvas.draw()
numpy_img = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='')
numpy_img = numpy_img.reshape(fig.canvas.get_width_height()[::-1] + (3,))
return numpy_img
return fig
def _determine_integration_bounds(self):
if hasattr(self, 'y_std') and hasattr(self, 'y_mean'):
lower = self.y_mean - 10 * self.y_std
upper = self.y_mean + 10 * self.y_std
return N_SAMPLES_INT_TIGHT_BOUNDS, lower, upper
else:
return N_SAMPLES_INT, LOWER_BOUND, UPPER_BOUND
def _determine_mc_proposal_dist(self):
if hasattr(self, 'y_std') and hasattr(self, 'y_mean'):
mu_proposal = self.y_mean
std_proposal = 1 * self.y_std
return mu_proposal, std_proposal
else:
return np.ones(self.ndim_y) * LOC_PROPOSAL, np.ones(self.ndim_y) * SCALE_PROPOSAL
def _tiled_pdf(self, Y, x_cond, n_samples):
x = np.tile(x_cond.reshape((1, x_cond.shape[0])), (n_samples, 1))
return np.tile(np.expand_dims(self.pdf(x, Y), axis=1), (1, self.ndim_y))
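if __name__ == "__main__":
    # Hypothetical sanity check (not part of the library): illustrate the numeric
    # integration scheme behind _mean_pdf, E[y|x] = integral of y * p(y|x) dy, using
    # a plain Gaussian from scipy.stats in place of a fitted conditional density.
    pdf = lambda y: stats.norm.pdf(y, loc=2.0, scale=1.0)
    mean_estimate = numeric_integation(lambda y: y * pdf(y), 10**4, -8.0, 12.0)
    print(mean_estimate)  # should be close to the true mean of 2.0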
``` |
{
"source": "jonasrothfuss/DeepEpisodicMemory",
"score": 2
} |
#### File: DeepEpisodicMemory/core/Memory.py
```python
import os, sklearn, sklearn.decomposition, collections
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.externals import joblib
from utils import io_handler
import pandas as pd
import numpy as np
from data_postp import similarity_computations
class Memory:
def __init__(self, memory_df, base_dir, label_col="category", video_path_col="video_file_path", inter_class_pca_path=None):
''' Initializes the Memory class
:param memory_df: pandas dataframe that contains the hidden_reps, labels and video_paths of the episodes
:param label_col: specifies the label column name within the df
:param video_path_col: specifies the video path column name within the df
:param inter_class_pca_path: specifies the path to the model object of a previously trained PCA
'''
assert label_col in memory_df.columns, str(label_col) + ' must be a dataframe category'
assert isinstance(memory_df, pd.DataFrame)
self.memory_df = memory_df
self.hidden_reps = np.stack([h.flatten() for h in memory_df['hidden_repr']])
self.labels = memory_df[["id", label_col]]
self.labels.set_index('id')
self.video_paths = memory_df[["id", video_path_col]]
self.video_paths.set_index('id')
self.base_dir = base_dir
#get fitted PCA object
if inter_class_pca_path:
assert os.path.isfile(inter_class_pca_path)
self.inter_class_pca = joblib.load(inter_class_pca_path)
else:
self.inter_class_pca = fit_inter_class_pca(self.hidden_reps, self.labels, n_components=50, verbose=False)
# PCA transform the hidden_reps
self.hidden_reps_transformed = self.inter_class_pca.transform(self.hidden_reps)
self.check_memory_sanity()
def check_memory_sanity(self):
num_vids_found = np.sum([os.path.isfile( os.path.join(self.base_dir, v_path) ) for v_path in self.video_paths["video_file_path"].values])
num_episodes = self.labels.shape[0]
print("Memory contains %i episodes. Video file exists for %i out of %i episodes" % (num_episodes, num_vids_found, num_episodes))
def store_episodes(self, ids, hidden_reps, metadata_dicts, video_file_paths):
'''
stores provided episodes in mongo database (memory)
:param ids:
:param hidden_reps:
:param metadata_dicts:
:param video_file_path:
:return mongodb_ids corresponding to the persisted documents
'''
assert len(ids) == len(hidden_reps) == len(metadata_dicts) == len(video_file_paths)
assert all([os.path.isfile(path) for path in video_file_paths])
#TODO: store as document in mongodb
#for i, id in enumerate(ids):
# if id not in self.memory_df:
def get_episode(self, id):
'''
queries a single episode by its id
:return: tuple of four objects (id, hidden_reps, metadata_dicts, video_episode)
'''
# TODO
#pass
def matching(self, query_hidden_repr, n_closest_matches = 5, use_transform=False):
'''
finds the closest vector matches (cos_similarity) for a given query vector
:param query_hidden_repr: the query vector
:param n_closest_matches: (optional) the number of closest matches returned, defaults to 5
:param use_transform: boolean that denotes whether the matching shall be performed on transformed hidden vectors
:return: four arrays, containing:
1. the n_closest_matches by id
2. the n_closest_matches by computed pairwise cos distance
3. the n_closest_matches hidden representations
4. the n_closest_matches absolute paths to the memory episodes in the base directory of the memory
'''
query_hidden_repr = np.expand_dims(query_hidden_repr, axis=0)
if use_transform:
memory_hidden_reps = self.hidden_reps_transformed
query_hidden_repr = self.inter_class_pca.transform(query_hidden_repr)
else:
memory_hidden_reps = self.hidden_reps
assert memory_hidden_reps.ndim == 2 #memory_hidden_reps must have shape (n_episodes, n_dim_repr)
assert query_hidden_repr.ndim == 2 #query_hidden_repr must have shape (1, n_dim_repr)
assert memory_hidden_reps.shape[1] == query_hidden_repr.shape[1]
cos_distances = pairwise_distances(memory_hidden_reps, query_hidden_repr, metric='cosine')[:,0] #shape(n_episodes, 1)
# get indices of n maximum values in ndarray, reverse the list (highest is leftmost)
indices_closest = cos_distances.argsort()[:-n_closest_matches:-1]
relative_paths = self.video_paths.iloc[indices_closest]["video_file_path"].values
absolute_paths = [os.path.join(self.base_dir, path) for i, path in enumerate(relative_paths)]
return indices_closest, cos_distances[indices_closest], memory_hidden_reps[indices_closest], absolute_paths
def mean_vectors_of_classes(hidden_reps, labels):
"""
Computes mean vector for each class in class_column
:param hidden_reps: list of hidden_vectors
:param labels: list of labels corresponding to the hidden_reps
:return: dataframe with labels as index and mean vectors for each class
"""
vector_dict = collections.defaultdict(list)
for label, vector in zip(labels, hidden_reps):
vector_dict[label].append(vector)
return pd.DataFrame.from_dict(dict([(label, np.mean(vectors, axis=0)) for label, vectors in vector_dict.items()]),
orient='index')
def fit_inter_class_pca(hidden_reps, labels, n_components=50, verbose=False, dump_path=None):
'''
Fits a PCA on mean vectors of classes denoted by self.labels
:param n_components: number of pca components
:param verbose: verbosity
:param dump_path: if provided, the pca object is dumped to the provided path
:return pca: fitted pca object
'''
mean_vectors = mean_vectors_of_classes(hidden_reps, labels)
pca = sklearn.decomposition.PCA(n_components).fit(mean_vectors)
if verbose:
print("PCA (n_components= %i: relative variance explained:" % n_components, np.sum(pca.explained_variance_ratio_))
if dump_path:
joblib.dump(pca, dump_path)
return pca
```
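A self-contained sketch of the cosine-distance matching performed by `Memory.matching`, using randomly generated vectors in place of stored episode representations; all data here is made up.
```python
import numpy as np
from sklearn.metrics.pairwise import pairwise_distances

rng = np.random.RandomState(0)
memory_hidden_reps = rng.normal(size=(100, 32))  # fake episode representations
query = rng.normal(size=(1, 32))                 # fake query representation

cos_distances = pairwise_distances(memory_hidden_reps, query, metric="cosine")[:, 0]
closest = cos_distances.argsort()[:5]            # indices of the 5 most similar episodes
print(closest, cos_distances[closest])
```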
#### File: DeepEpisodicMemory/core/Model.py
```python
import tensorflow as tf
import math
import numpy as np
from pprint import pprint
from settings import FLAGS, model
from models import loss_functions
import data_prep.model_input as input
class Model:
def __init__(self):
self.learning_rate = tf.placeholder_with_default(FLAGS.learning_rate, ())
self.iter_num = tf.placeholder_with_default(FLAGS.num_iterations, ())
self.summaries = []
self.noise_std = tf.placeholder_with_default(FLAGS.noise_std, ())
self.opt = tf.train.AdamOptimizer(self.learning_rate)
self.model_name = model.__file__
assert FLAGS.image_range_start + FLAGS.encoder_length + FLAGS.decoder_future_length <= FLAGS.overall_images_count and FLAGS.image_range_start >= 0, \
"settings for encoder/decoder lengths along with starting range exceed number of available images"
assert FLAGS.encoder_length >= FLAGS.decoder_reconst_length, "encoder must be at least as long as reconstructer"
def add_image_summary(self, summary_prefix, frames, encoder_length, decoder_future_length, decoder_reconst_length):
for i in range(decoder_future_length):
self.summaries.append(tf.summary.image(summary_prefix + '_future_gen_' + str(i + 1),
self.frames_pred[i], max_outputs=1))
self.summaries.append(tf.summary.image(summary_prefix + '_future_orig_' + str(i + 1),
frames[:, encoder_length + i, :, :, :], max_outputs=1))
for i in range(decoder_reconst_length):
self.summaries.append(tf.summary.image(summary_prefix + '_reconst_gen_' + str(i + 1),
self.frames_reconst[i], max_outputs=1))
self.summaries.append(tf.summary.image(summary_prefix + '_reconst_orig_' + str(i + 1),
frames[:, i, :, :, :], max_outputs=1))
class TrainModel(Model):
def __init__(self, summary_prefix, scope_name='train_model'):
print("Constructing TrainModel")
with tf.variable_scope(scope_name, reuse=None) as training_scope:
Model.__init__(self)
self.scope = training_scope
tower_grads = []
tower_losses = []
for i in range(FLAGS.num_gpus):
train_batch, _, _ = input.create_batch(FLAGS.tf_records_dir, 'train', FLAGS.batch_size,
int(math.ceil(
FLAGS.num_iterations / (FLAGS.batch_size * 20))),
False)
train_batch = tf.cast(train_batch, tf.float32)
with tf.device('/gpu:%d' % i):
with tf.name_scope('%s_%d' % ('tower', i)):
tower_loss, _, _, _ = tower_operations(train_batch[:,FLAGS.image_range_start:,:,:,:], train=True)
tower_losses.append(tower_loss)
# Reuse variables for the next tower.
tf.get_variable_scope().reuse_variables()
if FLAGS.fine_tuning_weights_list is not None:
train_vars = []
for scope_i in FLAGS.fine_tuning_weights_list:
train_vars += tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope_i)
pprint('Finetuning. Training only specified weights: %s' % (FLAGS.fine_tuning_weights_list))
grads = self.opt.compute_gradients(tower_loss, var_list=train_vars)
else:
grads = self.opt.compute_gradients(tower_loss)
tower_grads.append(grads)
with tf.device('/cpu:0'):
# compute average loss
self.loss = average_losses(tower_losses)
#compute average over gradients of all towers
grads = average_gradients(tower_grads)
# Apply the gradients to adjust the shared variables.
self.train_op= self.opt.apply_gradients(grads)
#measure batch time
self.elapsed_time = tf.placeholder(tf.float32, [])
self.summaries.append(tf.summary.scalar('batch_duration', self.elapsed_time))
self.summaries.append(tf.summary.scalar(summary_prefix + '_loss', self.loss))
self.sum_op = tf.summary.merge(self.summaries)
class ValidationModel(Model):
def __init__(self, summary_prefix, scope_name='valid_model', reuse_scope=None):
print("Constructing ValidationModel")
with tf.variable_scope(scope_name, reuse=None):
Model.__init__(self)
assert reuse_scope is not None
with tf.variable_scope(reuse_scope, reuse=True):
tower_losses, frames_pred_list, frames_reconst_list, hidden_repr_list, label_batch_list, metadata_batch_list, val_batch_list = [], [], [], [], [], [], []
for i in range(FLAGS.num_gpus):
val_batch, label_batch, metadata_batch = input.create_batch(FLAGS.tf_records_dir, 'valid', FLAGS.valid_batch_size, int(
math.ceil(FLAGS.num_iterations / (FLAGS.batch_size * 20))), False)
val_batch = tf.cast(val_batch, tf.float32)
self.val_batch = val_batch
with tf.device('/gpu:%d' % i):
with tf.name_scope('%s_%d' % ('tower', i)):
tower_loss, frames_pred, frames_reconst, hidden_repr = tower_operations(val_batch[:, FLAGS.image_range_start:, :, :, :], train=False)
tower_losses.append(tower_loss)
frames_pred_list.append(tf.pack(frames_pred))
frames_reconst_list.append(tf.pack(frames_reconst))
hidden_repr_list.append(hidden_repr)
val_batch_list.append(val_batch)
label_batch_list.append(label_batch)
metadata_batch_list.append(metadata_batch)
# Reuse variables for the next tower.
tf.get_variable_scope().reuse_variables()
with tf.device('/cpu:0'):
# compute average loss
self.loss = average_losses(tower_losses)
# concatenate outputs of towers to one large tensor each
self.frames_pred = tf.unstack(tf.concat(1, frames_pred_list))
self.frames_reconst = tf.unstack(tf.concat(1, frames_reconst_list))
self.hidden_repr = tf.concat(0, hidden_repr_list)
self.label = tf.concat(0, label_batch_list)
self.metadata = tf.concat(0, metadata_batch_list)
val_set = tf.concat(0, val_batch_list)
self.add_image_summary(summary_prefix, val_set, FLAGS.encoder_length, FLAGS.decoder_future_length,
FLAGS.decoder_reconst_length)
# evaluate frame predictions for storing on disk
self.output_frames = self.frames_reconst + self.frames_pred # join arrays of tensors
self.summaries.append(tf.summary.scalar(summary_prefix + '_loss', self.loss))
self.sum_op = tf.summary.merge(self.summaries)
class FeedingValidationModel(Model):
def __init__(self, scope_name='feeding_model', reuse_scope=None):
print("Constructing FeedingModel")
with tf.variable_scope(scope_name, reuse=None):
Model.__init__(self)
assert reuse_scope is not None
with tf.variable_scope(reuse_scope, reuse=True):
"5D array of batch with videos - shape(batch_size, num_frames, frame_width, frame_higth, num_channels)"
self.feed_batch = tf.placeholder(tf.float32, shape=(1, FLAGS.encoder_length, FLAGS.height, FLAGS.width, FLAGS.num_channels), name='feed_batch')
self.frames_pred, self.frames_reconst, self.hidden_repr = \
tower_operations(self.feed_batch[:, FLAGS.image_range_start:, :, :, :], train=False, compute_loss=False)
def tower_operations(video_batch, train=True, compute_loss=True, use_vae_mu=True):
"""
Build the computation graph from input frame sequences till loss of batch
:param video_batch: 5D tensor of frame sequences - shape (batch_size, num_frames, frame_width, frame_height, num_channels)
:param train: boolean that indicates whether train or validation mode
:param compute_loss: boolean that specifies whether loss should be computed (for feed mode / production compute_loss might be disabled)
:return: batch loss (scalar) and, depending on compute_loss, the predicted frames, reconstructed frames and hidden representation
"""
#only dropout in train mode
keep_prob_dropout = FLAGS.keep_prob_dopout if train else 1.0
mu, sigma = None, None
if FLAGS.loss_function == 'vae':
frames_pred, frames_reconst, hidden_repr, mu, sigma = model.composite_model(video_batch, FLAGS.encoder_length,
FLAGS.decoder_future_length,
FLAGS.decoder_reconst_length, keep_prob_dropout=keep_prob_dropout,
noise_std=FLAGS.noise_std, uniform_init=FLAGS.uniform_init,
num_channels=FLAGS.num_channels, fc_conv_layer=FLAGS.fc_layer)
if use_vae_mu:
hidden_repr = mu
else:
frames_pred, frames_reconst, hidden_repr = model.composite_model(video_batch, FLAGS.encoder_length,
FLAGS.decoder_future_length,
FLAGS.decoder_reconst_length,
keep_prob_dropout=keep_prob_dropout,
noise_std=FLAGS.noise_std,
uniform_init=FLAGS.uniform_init,
num_channels=FLAGS.num_channels,
fc_conv_layer=FLAGS.fc_layer)
if compute_loss:
tower_loss = loss_functions.composite_loss(video_batch, frames_pred, frames_reconst, loss_fun=FLAGS.loss_function,
encoder_length=FLAGS.encoder_length,
decoder_future_length=FLAGS.decoder_future_length,
decoder_reconst_length=FLAGS.decoder_reconst_length,
mu_latent=mu, sigm_latent=sigma)
return tower_loss, frames_pred, frames_reconst, hidden_repr
else:
return frames_pred, frames_reconst, hidden_repr
def average_gradients(tower_grads):
"""Calculate the average gradient for each shared variable across all towers.
Note that this function provides a synchronization point across all towers.
Args:
tower_grads: List of lists of (gradient, variable) tuples. The outer list
is over individual gradients. The inner list is over the gradient
calculation for each tower.
Returns:
List of pairs of (gradient, variable) where the gradient has been averaged
across all towers.
"""
average_grads = []
for grad_and_vars in zip(*tower_grads):
# Note that each grad_and_vars looks like the following:
# ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
grads = []
for g, _ in grad_and_vars:
# Add 0 dimension to the gradients to represent the tower.
expanded_g = tf.expand_dims(g, 0)
# Append on a 'tower' dimension which we will average over below.
grads.append(expanded_g)
# Average over the 'tower' dimension.
grad = tf.concat(0, grads)
grad = tf.reduce_mean(grad, 0)
# Keep in mind that the Variables are redundant because they are shared
# across towers. So .. we will just return the first tower's pointer to
# the Variable.
v = grad_and_vars[0][1]
grad_and_var = (grad, v)
average_grads.append(grad_and_var)
return average_grads
def average_losses(tower_losses):
"""Calculate the average loss among all towers
Args:
tower_losses: List of tf.Tensor scalars denoting the loss at each tower.
Returns:
loss: tf.Tensor scalar which is the mean over all losses
"""
losses = []
for l in tower_losses:
# Add 0 dimension to the gradients to represent the tower.
expanded_l = tf.expand_dims(l, 0)
# Append on a 'tower' dimension which we will average over below.
losses.append(expanded_l)
# Average over the 'tower' dimension.
loss = tf.concat(0, losses)
loss = tf.reduce_mean(loss, 0)
return loss
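# Hedged illustration (added; not part of the original file): a minimal numpy
# sketch of the per-variable averaging that average_gradients/average_losses
# perform across towers. The helper below is hypothetical and only documents
# the intended semantics on plain arrays.
def _demo_average_tower_values(per_tower_values):
    import numpy as np
    # one array per tower, all with the same shape -> stack and mean over the tower axis
    return np.stack(per_tower_values, axis=0).mean(axis=0)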
def create_model(mode=None, train_model_scope=None):
model = None
if mode is "train":
model = TrainModel('train', scope_name='train_model')
elif mode is 'valid':
assert train_model_scope is not None, "train_model_scope is None, valid mode requires a train scope"
model = ValidationModel('valid', reuse_scope=train_model_scope)
elif mode is 'feeding':
assert train_model_scope is not None, "train_model_scope is None, valid mode requires a train scope"
model = FeedingValidationModel(reuse_scope=train_model_scope)
assert model is not None
return model
```
#### File: DeepEpisodicMemory/core/production_op.py
```python
import tensorflow as tf
import numpy as np
from settings import FLAGS
from utils.io_handler import generate_batch_from_dir
def create_batch_and_feed(initializer, feeding_model):
# TODO
"""
:param initializer:
:param feed_model:
:return:
"""
assert FLAGS.pretrained_model
feed_batch = generate_batch_from_dir(FLAGS.feeding_input_dir, suffix='*.jpg')
print("feed batch has shape: " + str(feed_batch.shape))
hidden_repr = feed(feed_batch, initializer=initializer, feeding_model=feeding_model)
return np.array(np.squeeze(hidden_repr))
def feed(feed_batch, initializer, feeding_model):
'''
feeds the videos inherent feed_batch trough the network provided in feed_model
:param feed_batch: 5D Tensor (batch_size, num_frames, width, height, num_channels)
:param initializer:
:param feed_model:
:return:
'''
assert feeding_model is not None and initializer is not None
assert feed_batch.ndim == 5
tf.logging.info(' --- Starting feeding --- ')
feed_dict = {feeding_model.learning_rate: 0.0, feeding_model.feed_batch: feed_batch}
hidden_repr = initializer.sess.run([feeding_model.hidden_repr], feed_dict)
return hidden_repr
```
#### File: DeepEpisodicMemory/data_postp/scores.py
```python
import numpy as np
import pandas as pd
import sklearn
from data_postp import similarity_computations
from tensorflow.python.platform import flags
FLAGS = flags.FLAGS
def compute_mean_average_precision(df_base, df_query, n_closest_matches=5):
"""
This function computes the mean average precision (MAP) for a set of queries specified by df_query. The average precision
scores for each query are hereby computed based on the provided base instances specified in df_base. For each query,
the nearest neighbor instances within the base are determined and used to compute the precision.
:param df_base: the dataframe to be queried, must contain a 'hidden_repr' column that constitutes the hidden_representation vector
:param df_query: the dataframe from which to query, must contain a 'hidden_repr' column
:param n_closest_matches: number of closest matches to the query that goes into the precision score
:return: a scalar value representing the MAP
"""
assert not df_base.empty and not df_query.empty
df = get_query_matching_table(df_base=df_base, df_query=df_query, n_closest_matches=n_closest_matches)
df_pred_classes = df.filter(like="pred_class")
n_relevant_documents = len(df_pred_classes.columns)
matches = df_pred_classes.isin(df.true_class).as_matrix()
P = np.zeros(shape=matches.shape)
for k in range(1, n_relevant_documents):
P[:, k] = np.mean(matches[:, :k], axis=1)
return np.mean(np.multiply(P, matches))
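# Hedged illustration (added; not part of the original file): a self-contained
# numpy version of the precision averaging above, handy for sanity checks on a
# small boolean matrix whose rows are queries and whose columns are the ranked
# retrieved neighbours.
def _demo_mean_average_precision(matches):
    P = np.zeros(matches.shape, dtype=float)
    for k in range(1, matches.shape[1]):
        P[:, k] = np.mean(matches[:, :k], axis=1)
    return np.mean(np.multiply(P, matches))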
def get_query_matching_table(df_base, df_query, class_column='category', n_closest_matches=5, df_true_label="true_class",
df_pred_label="pred_class_", df_query_label="category", df_query_id="id"):
"""
Yields a pandas dataframe in which each row contains the n_closest_matches as a result from querying the df for every single
hidden representation in the df dataframe. In addition, every row contains the true label and the query id.
:param df_base: the df to be queried
:param df_query: the df from which to query
:return: pandas dataframe with columns ("id", "true_label", "pred_class_i" for i=1,...,n_closest_matches) and
number of rows equal to df_query rows
"""
assert df_base is not None and df_query is not None
assert 'hidden_repr' in df_base.columns and class_column in df_base.columns
assert 'hidden_repr' in df_query.columns and df_query_label in df_query.columns and df_query_id in df_query.columns
columns = [[df_query_id + "{}".format(i), df_pred_label+"{}".format(i)] for i in range(1, n_closest_matches + 1)]
columns = [e for entry in columns for e in entry] # flatten list in list
columns[:0] = [df_query_id, df_true_label]
query_matching_df = pd.DataFrame(columns=columns)
query_matching_df.set_index(df_query_id, df_true_label)
for hidden_repr, label, id in zip(df_query['hidden_repr'], df_query[df_query_label], df_query[df_query_id]):
closest_vectors = similarity_computations.find_closest_vectors(df_base, hidden_repr=hidden_repr, class_column=class_column,
n_closest_matches=n_closest_matches)
matching_results = [[tpl[2], tpl[1]] for tpl in closest_vectors]
matching_results = sum(matching_results, []) # flatten
matching_results[:0] = [id, label]
row_data = dict(zip(columns, matching_results))
query_matching_df = query_matching_df.append(row_data, ignore_index=True)
#print(query_matching_df.head())
return query_matching_df
def main():
#2.5% valid_file="/common/homes/students/rothfuss/Documents/selected_trainings/6_actNet_20bn_mse/valid_run/metadata_and_hidden_rep_df_08-04-17_19-28-16_valid.pickle"
#3.6%
#valid_file="/PDFData/rothfuss/selected_trainings/7_20bn_mse/valid_run_backup/metadata_and_hidden_rep_df_07-26-17_16-52-09_valid.pickle"
#2.9% valid_file="/common/homes/students/rothfuss/Documents/selected_trainings/5_actNet_20bn_gdl/valid_run/metadata_and_hidden_rep_df_08-03-17_00-34-25_valid.pickle"
#0.014% valid_file="/common/homes/students/rothfuss/Documents/selected_trainings/9_20bn_vae_no_OF/08-06-18_10-21/metadata_and_hidden_rep_df_08-16-18_23-39-21.pickle"
#2.2% valid_file="/common/homes/students/rothfuss/Documents/selected_trainings/8_20bn_gdl_optical_flow/valid_run/metadata_and_hidden_rep_df_08-09-17_17-00-24_valid.pickle"
valid_file="/common/homes/students/rothfuss/Documents/selected_trainings/9_20bn_vae_no_OF/08-06-18_10-21/metadata_and_hidden_rep_df_08-25-18_15-50-39_mu.pickle"
df = pd.read_pickle(valid_file)
# create own train/test split
msk = np.random.rand(len(df)) < 0.8
test_df = df[~msk]
print("number of test samples: ", np.shape(test_df)[0])
train_df = df[msk]
#print(train_df.head())
print("number of train samples: ", np.shape(train_df)[0])
df, df_val = similarity_computations.transform_vectors_with_inter_class_pca(train_df, test_df, class_column='category', n_components=300)
print(compute_mean_average_precision(df, df_val, n_closest_matches=3))
#print(compute_mean_average_precision(train_df, test_df, n_closest_matches=3))
if __name__ == "__main__":
main()
```
#### File: DeepEpisodicMemory/data_prep/avi2tfrecords.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os, math
from tensorflow.python.platform import gfile
from tensorflow.python.platform import flags
from tensorflow.python.platform import app
import cv2 as cv2
import numpy as np
import tensorflow as tf
FLAGS = None
FILE_FILTER = '*.avi'
NUM_FRAMES_PER_VIDEO = 15
NUM_CHANNELS_VIDEO = 4
WIDTH_VIDEO = 128
HEIGHT_VIDEO = 128
SOURCE = '/insert/source/here'
DESTINATION = '/insert/destination/here'
FLAGS = flags.FLAGS
flags.DEFINE_integer('num_videos', 1000, 'Number of videos stored in one single tfrecords file')
flags.DEFINE_string('image_color_depth', np.uint8, 'Color depth for the images stored in the tfrecords files. '
'Has to correspond to the source video color depth. '
'Specified as np dtype (e.g. ''np.uint8).')
flags.DEFINE_string('source', SOURCE, 'Directory with video files')
flags.DEFINE_string('output_path', DESTINATION, 'Directory for storing tf records')
flags.DEFINE_boolean('optical_flow', True, 'Indicates whether optical flow shall be computed and added as fourth '
'channel. Defaults to True')
def _int64_feature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def _bytes_feature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def get_chunks(l, n):
"""Yield successive n-sized chunks from l.
Used to create n sublists from a list l"""
for i in range(0, len(l), n):
yield l[i:i + n]
def getVideoCapture(path):
cap = None
if path:
cap = cv2.VideoCapture(path)
return cap
def getNextFrame(cap):
ret, frame = cap.read()
if ret == False:
return None
return np.asarray(frame)
def compute_dense_optical_flow(prev_image, current_image):
old_shape = current_image.shape
prev_image_gray = cv2.cvtColor(prev_image, cv2.COLOR_BGR2GRAY)
current_image_gray = cv2.cvtColor(current_image, cv2.COLOR_BGR2GRAY)
assert current_image.shape == old_shape
hsv = np.zeros_like(prev_image)
hsv[..., 1] = 255
flow = cv2.calcOpticalFlowFarneback(prev_image_gray, current_image_gray, 0.8, 15, 5, 10, 5, 1.5, 0)
mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
hsv[..., 0] = ang*180/np.pi/2
hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
return cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
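# Hedged note (added; not part of the original file): the Farneback flow above
# is visualized by mapping the flow angle to hue and the normalized flow
# magnitude to value in HSV space, then converting back to BGR so the flow can
# be handled like an ordinary image further down the pipeline.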
def save_video_to_tfrecords(source_path, destination_path, videos_per_file=FLAGS.num_videos, video_filenames=None,
dense_optical_flow=False):
"""calls sub-functions convert_video_to_numpy and save_numpy_to_tfrecords in order to directly export tfrecords files
:param source_path: directory where the video files are stored
:param destination_path: directory where tfrecords should be stored
:param videos_per_file: specifies the number of videos within one tfrecords file
:param dense_optical_flow: boolean flag that controls if optical flow should be used and added to tfrecords
"""
global NUM_CHANNELS_VIDEO
assert (NUM_CHANNELS_VIDEO == 3 and (not dense_optical_flow)) or (NUM_CHANNELS_VIDEO == 4 and dense_optical_flow), "correct NUM_CHANNELS_VIDEO"
if video_filenames is not None:
filenames = video_filenames
else:
filenames = gfile.Glob(os.path.join(source_path, FILE_FILTER))
if not filenames:
raise RuntimeError('No data files found.')
print('Total videos found: ' + str(len(filenames)))
filenames_split = list(get_chunks(filenames, videos_per_file))
for i, batch in enumerate(filenames_split):
data = convert_video_to_numpy(batch, dense_optical_flow=dense_optical_flow)
total_batch_number = int(math.ceil(len(filenames)/videos_per_file))
print('Batch ' + str(i+1) + '/' + str(total_batch_number))
save_numpy_to_tfrecords(data, destination_path, 'train_blobs_batch_', videos_per_file, i+1,
total_batch_number)
def save_numpy_to_tfrecords(data, destination_path, name, fragmentSize, current_batch_number, total_batch_number):
"""Converts an entire dataset into x tfrecords where x=videos/fragmentSize.
:param data: ndarray(uint32) of shape (v,i,h,w,c) with v=number of videos, i=number of images, c=number of image
channels, h=image height, w=image width
:param name: filename; data samples type (train|valid|test)
:param fragmentSize: specifies how many videos are stored in one tfrecords file
:param current_batch_number: indicates the current batch index (function call within loop)
:param total_batch_number: indicates the total number of batches
"""
num_videos = data.shape[0]
num_images = data.shape[1]
num_channels = data.shape[4]
height = data.shape[2]
width = data.shape[3]
writer = None
feature = {}
for videoCount in range((num_videos)):
if videoCount % fragmentSize == 0:
if writer is not None:
writer.close()
filename = os.path.join(destination_path, name + str(current_batch_number) + '_of_' + str(total_batch_number) + '.tfrecords')
print('Writing', filename)
writer = tf.python_io.TFRecordWriter(filename)
for imageCount in range(num_images):
path = 'blob' + '/' + str(imageCount)
image = data[videoCount, imageCount, :, :, :]
image = image.astype(FLAGS.image_color_depth)
image_raw = image.tostring()
feature[path]= _bytes_feature(image_raw)
feature['height'] = _int64_feature(height)
feature['width'] = _int64_feature(width)
feature['depth'] = _int64_feature(num_channels)
example = tf.train.Example(features=tf.train.Features(feature=feature))
writer.write(example.SerializeToString())
if writer is not None:
writer.close()
def convert_video_to_numpy(filenames, dense_optical_flow=False):
"""Generates an ndarray from multiple video files given by filenames.
Implementation chooses the frame step size automatically for an equal separation of the video frames.
:param filenames: list of video file paths to convert
:param dense_optical_flow: boolean flag that controls whether dense optical flow is computed and appended as an extra channel
:return: ndarray(uint32) of shape (v,i,h,w,c) with v=number of videos, i=number of images,
(h,w)=height and width of image, c=number of channels; if optical flow is used, the shape is (v,i,h,w,c+1)"""
global NUM_CHANNELS_VIDEO
if not filenames:
raise RuntimeError('No data files found.')
number_of_videos = len(filenames)
if dense_optical_flow:
# need an additional channel for the optical flow
NUM_CHANNELS_VIDEO = 4
num_real_image_channel = 3
else:
# if no optical flow, make everything normal:
num_real_image_channel = NUM_CHANNELS_VIDEO
data = []
def video_file_to_ndarray(i, filename):
image = np.zeros((HEIGHT_VIDEO, WIDTH_VIDEO, num_real_image_channel), dtype=FLAGS.image_color_depth)
video = np.zeros((NUM_FRAMES_PER_VIDEO, HEIGHT_VIDEO, WIDTH_VIDEO, NUM_CHANNELS_VIDEO), dtype=np.uint32)
imagePrev = None
assert os.path.isfile(filename), "Couldn't find video file"
cap = getVideoCapture(filename)
assert cap is not None, "Couldn't load video capture:" + filename + ". Moving to next video."
# compute meta data of video
frameCount = cap.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT)
# returns nan; if fps is needed, a measurement must be implemented
# frameRate = cap.get(cv2.cv.CV_CAP_PROP_FPS)
steps = math.floor(frameCount / NUM_FRAMES_PER_VIDEO)
j = 0
prev_frame_none = False
restart = True
assert not (frameCount < 1 or steps < 1), str(filename) + " does not have enough frames. Moving to next video."
while restart:
for f in range(int(frameCount)):
# get next frame after 'steps' iterations:
# floor is used after the modulo operation because rounding before leads to
# an uneven partition of the data (big gap at the end)
if math.floor(f % steps) == 0:
frame = getNextFrame(cap)
# special case handling: opencv's frame count != real frame count, reiterate over same video
if frame is None and j < NUM_FRAMES_PER_VIDEO:
if prev_frame_none: break  # two consecutive missing frames -> give up on this video
prev_frame_none = True
# repeat with smaller step size
steps -= 1
if steps == 0: break
print("reducing step size due to error")
j = 0
cap.release()
cap = getVideoCapture(filenames[i])
# wait for image retrieval to be ready
cv2.waitKey(3000)
video.fill(0)
continue
else:
if j >= NUM_FRAMES_PER_VIDEO:
restart = False
break
# iterate over channels
if frame.ndim == 2:
# cv returns 2 dim array if gray
resizedImage = cv2.resize(frame[:, :], (HEIGHT_VIDEO, WIDTH_VIDEO))
else:
for k in range(num_real_image_channel):
resizedImage = cv2.resize(frame[:, :, k], (HEIGHT_VIDEO, WIDTH_VIDEO))
image[:, :, k] = resizedImage
if dense_optical_flow:
# optical flow requires at least two images
if imagePrev is not None:
frameFlow = np.zeros((HEIGHT_VIDEO, WIDTH_VIDEO))
frameFlow = compute_dense_optical_flow(imagePrev, image)
frameFlow = cv2.cvtColor(frameFlow, cv2.COLOR_BGR2GRAY)
else:
frameFlow = np.zeros((HEIGHT_VIDEO, WIDTH_VIDEO))
imagePrev = image.copy()
if dense_optical_flow:
image_with_flow = image.copy()
image_with_flow = np.concatenate((image_with_flow, np.expand_dims(frameFlow, axis=2)), axis=2)
video[j, :, :, :] = image_with_flow
else:
video[j, :, :, :] = image
j += 1
# print('total frames: ' + str(j) + " frame in video: " + str(f))
else:
getNextFrame(cap)
print(str(i + 1) + " of " + str(number_of_videos) + " videos processed", filenames[i])
v = video.copy()
cap.release()
return v
for i, file in enumerate(filenames):
try:
v = video_file_to_ndarray(i, file)
data.append(v)
except Exception as e:
print(e)
return np.array(data)
def main(argv):
save_video_to_tfrecords(FLAGS.source, FLAGS.output_path, FLAGS.num_videos, dense_optical_flow=FLAGS.optical_flow)
if __name__ == '__main__':
app.run()
```
#### File: DeepEpisodicMemory/models/loss_functions.py
```python
import tensorflow as tf
import numpy as np
def gradient_difference_loss(true, pred, alpha=2.0):
"""
computes gradient difference loss of two images
:param true: ground truth image - Tensor of shape (batch_size, frame_height, frame_width, num_channels)
:param pred: predicted image - Tensor of shape (batch_size, frame_height, frame_width, num_channels)
:param alpha: exponent of the l-norm used for the differences
"""
#tf.assert_equal(tf.shape(true), tf.shape(pred))
# vertical
true_pred_diff_vert = tf.pow(tf.abs(difference_gradient(true, vertical=True) - difference_gradient(pred, vertical=True)), alpha)
# horizontal
true_pred_diff_hor = tf.pow(tf.abs(difference_gradient(true, vertical=False) - difference_gradient(pred, vertical=False)), alpha)
# normalization over all dimensions
return (tf.reduce_mean(true_pred_diff_vert) + tf.reduce_mean(true_pred_diff_hor)) / tf.to_float(2)
def difference_gradient(image, vertical=True):
"""
:param image: Tensor of shape (batch_size, frame_height, frame_width, num_channels)
:param vertical: boolean that indicates whether vertical or horizontal pixel gradient shall be computed
:return: difference_gradient -> Tensor of shape (:, frame_height-1, frame_width, :) if vertical and (:, frame_height, frame_width-1, :) else
"""
s = tf.shape(image)
if vertical:
return tf.abs(image[:, 0:s[1] - 1, :, :] - image[:, 1:s[1], :, :])
else:
return tf.abs(image[:, :, 0:s[2]-1,:] - image[:, :, 1:s[2], :])
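# Hedged illustration (added; not part of the original file): a numpy sketch of
# the gradient difference loss for a single 2D image pair (height x width),
# assuming alpha = 2. It mirrors the tensorflow computation above.
def _demo_gdl_single_image(true_img, pred_img, alpha=2.0):
    # absolute neighbouring-pixel gradients, compared between ground truth and prediction
    diff_vert = np.abs(np.abs(np.diff(true_img, axis=0)) - np.abs(np.diff(pred_img, axis=0))) ** alpha
    diff_hor = np.abs(np.abs(np.diff(true_img, axis=1)) - np.abs(np.diff(pred_img, axis=1))) ** alpha
    return (diff_vert.mean() + diff_hor.mean()) / 2.0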
def mean_squared_error(true, pred):
"""L2 distance between tensors true and pred.
Args:
true: the ground truth image.
pred: the predicted image.
Returns:
mean squared error between ground truth and predicted image.
"""
return tf.reduce_sum(tf.square(true - pred)) / tf.to_float(tf.size(pred))
def peak_signal_to_noise_ratio(true, pred):
"""Image quality metric based on maximal signal power vs. power of the noise.
Args:
true: the ground truth image.
pred: the predicted image.
Returns:
peak signal to noise ratio (PSNR)
"""
return 10.0 * tf.log(1.0 / mean_squared_error(true, pred)) / tf.log(10.0)
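# Hedged note (added; not part of the original file): with pixel values scaled
# to [0, 1] the expression above is the standard PSNR = 10 * log10(1 / MSE).
# A small numpy equivalent for a single image pair:
def _demo_psnr(true_img, pred_img):
    mse = np.mean((true_img - pred_img) ** 2)
    return 10.0 * np.log10(1.0 / mse)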
def kl_penalty(mu, sigma):
x = tf.square(mu) + tf.square(sigma) - tf.log(1e-8 + tf.square(sigma)) - 1
return 0.5 * tf.reduce_sum(x)
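# Hedged note (added; not part of the original file): the expression above is
# the closed-form KL( N(mu, sigma^2) || N(0, 1) ) = 0.5 * sum( mu^2 + sigma^2
# - log(sigma^2) - 1 ), with the 1e-8 term guarding the log against sigma = 0.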
def decoder_loss(frames_gen, frames_original, loss_fun):
"""Sum of parwise loss between frames of frames_gen and frames_original
Args:
frames_gen: array of length=sequence_length of Tensors with each having the shape=(batch size, frame_height, frame_width, num_channels)
frames_original: Tensor with shape=(batch size, sequence_length, frame_height, frame_width, num_channels)
loss_fun: loss function type ['mse',...]
Returns:
loss: sum (specified) loss between ground truth and predicted frames of provided sequence.
"""
loss = 0.0
if loss_fun == 'mse' or loss_fun == 'vae':
for i in range(len(frames_gen)):
loss += mean_squared_error(frames_original[:, i, :, :, :], frames_gen[i])
elif loss_fun == 'gdl':
for i in range(len(frames_gen)):
loss += gradient_difference_loss(frames_original[:, i, :, :, :], frames_gen[i])
elif loss_fun == 'mse_gdl':
for i in range(len(frames_gen)):
loss += 0.4 * gradient_difference_loss(frames_original[:, i, :, :, :], frames_gen[i]) + 0.6 * mean_squared_error(frames_original[:, i, :, :, :], frames_gen[i])
else:
raise Exception('Unknown loss function type')
return loss
def decoder_psnr(frames_gen, frames_original):
"""Sum of peak_signal_to_noise_ratio loss between frames of frames_gen and frames_original
Args:
frames_gen: array of length=sequence_length of Tensors with each having the shape=(batch size, frame_height, frame_width, num_channels)
frames_original: Tensor with shape=(batch size, sequence_length, frame_height, frame_width, num_channels)
Returns:
psnr: sum of the peak signal to noise ratio between ground truth and predicted frames of the provided sequence.
"""
psnr = 0.0
for i in range(len(frames_gen)):
psnr += peak_signal_to_noise_ratio(frames_original[:, i, :, :, :], frames_gen[i])
return psnr
def composite_loss(original_frames, frames_pred, frames_reconst, loss_fun='mse',
encoder_length=5, decoder_future_length=5,
decoder_reconst_length=5, mu_latent=None, sigm_latent=None):
assert encoder_length <= decoder_reconst_length
frames_original_future = original_frames[:, (encoder_length):(encoder_length + decoder_future_length), :, :, :]
frames_original_reconst = original_frames[:, (encoder_length - decoder_reconst_length):encoder_length, :, :, :]
pred_loss = decoder_loss(frames_pred, frames_original_future, loss_fun)
reconst_loss = decoder_loss(frames_reconst, frames_original_reconst, loss_fun)
if loss_fun == 'vae':
assert mu_latent is not None and sigm_latent is not None
loss = pred_loss + reconst_loss + kl_penalty(tf.squeeze(mu_latent), tf.squeeze(sigm_latent))
else:
loss = pred_loss + reconst_loss
return loss
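# Hedged worked example (added; not part of the original file): with
# encoder_length = 5, decoder_future_length = 5 and decoder_reconst_length = 5,
# frames 5..9 of the original sequence are scored against the future
# predictions and frames 0..4 against the reconstructions, i.e. the
# reconstruction decoder is compared against the frames the encoder has seen.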
```
#### File: models/model_zoo/model_conv5_fc_lstm2_1000_deep_64_vae.py
```python
import numpy as np
import tensorflow as tf
import tensorflow.contrib.slim as slim
from tensorflow.contrib.layers.python import layers as tf_layers
from models.conv_lstm import basic_conv_lstm_cell, conv_lstm_cell_no_input
# Amount to use when lower bounding tensors
RELU_SHIFT = 1e-12
FC_LAYER_SIZE = 1000
FC_LSTM_LAYER_SIZE = 1000
VAE_REPR_SIZE = 1000
# kernel size for DNA and CDNA.
DNA_KERN_SIZE = 5
def encoder_model(frames, sequence_length, initializer, keep_prob_dropout=0.9, scope='encoder', fc_conv_layer=False):
"""
Args:
frames: 5D array of batch with videos - shape(batch_size, num_frames, frame_width, frame_height, num_channels)
sequence_length: number of frames that shall be encoded
scope: tensorflow variable scope name
initializer: specifies the initialization type (default: contrib.slim.layers uses Xavier init with uniform data)
fc_conv_layer: adds an fc layer at the end of the encoder
Returns:
z: sampled latent representation (via the reparameterization trick)
mu, sigma: mean and stddev of the latent variable distribution
"""
lstm_state1, lstm_state2, lstm_state3, lstm_state4, lstm_state5, lstm_state6 = None, None, None, None, None, None
for i in range(sequence_length):
frame = frames[:,i,:,:,:]
reuse = (i > 0)
with tf.variable_scope(scope, reuse=reuse):
#LAYER 1: conv1
conv1 = slim.layers.conv2d(frame, 32, [5, 5], stride=2, scope='conv1', normalizer_fn=tf_layers.layer_norm, weights_initializer=initializer,
normalizer_params={'scope': 'layer_norm1'})
conv1 = tf.nn.dropout(conv1, keep_prob_dropout)
#LAYER 2: convLSTM1
hidden1, lstm_state1 = basic_conv_lstm_cell(conv1, lstm_state1, 32, initializer, filter_size=5, scope='convlstm1')
hidden1 = tf_layers.layer_norm(hidden1, scope='layer_norm2')
hidden1 = tf.nn.dropout(hidden1, keep_prob_dropout)
#LAYER 3: conv2
conv2 = slim.layers.conv2d(hidden1, hidden1.get_shape()[3], [5, 5], stride=2, scope='conv2', normalizer_fn=tf_layers.layer_norm, weights_initializer=initializer,
normalizer_params={'scope': 'layer_norm3'})
conv2 = tf.nn.dropout(conv2, keep_prob_dropout)
#LAYER 4: convLSTM2
hidden2, lstm_state2 = basic_conv_lstm_cell(conv2, lstm_state2, 32, initializer, filter_size=5, scope='convlstm2')
hidden2 = tf_layers.layer_norm(hidden2, scope='layer_norm4')
hidden2 = tf.nn.dropout(hidden2, keep_prob_dropout)
#LAYER 5: conv3
conv3 = slim.layers.conv2d(hidden2, hidden2.get_shape()[3], [5, 5], stride=2, scope='conv3', normalizer_fn=tf_layers.layer_norm, weights_initializer=initializer,
normalizer_params={'scope': 'layer_norm5'})
conv3 = tf.nn.dropout(conv3, keep_prob_dropout)
#LAYER 6: convLSTM3
hidden3, lstm_state3 = basic_conv_lstm_cell(conv3, lstm_state3, 32, initializer, filter_size=3, scope='convlstm3')
hidden3 = tf_layers.layer_norm(hidden3, scope='layer_norm6')
hidden3 = tf.nn.dropout(hidden3, keep_prob_dropout)
#LAYER 7: conv4
conv4 = slim.layers.conv2d(hidden3, hidden3.get_shape()[3], [3, 3], stride=2, scope='conv4', normalizer_fn=tf_layers.layer_norm, weights_initializer=initializer,
normalizer_params={'scope': 'layer_norm7'})
conv4 = tf.nn.dropout(conv4, keep_prob_dropout)
#LAYER 8: convLSTM4 (8x8 feature map size)
hidden4, lstm_state4 = basic_conv_lstm_cell(conv4, lstm_state4, 64, initializer, filter_size=3, scope='convlstm4')
hidden4 = tf_layers.layer_norm(hidden4, scope='layer_norm8')
hidden4 = tf.nn.dropout(hidden4, keep_prob_dropout)
#LAYER 8: conv5
conv5 = slim.layers.conv2d(hidden4, hidden4.get_shape()[3], [3, 3], stride=2, scope='conv5', normalizer_fn=tf_layers.layer_norm, weights_initializer=initializer,
normalizer_params={'scope': 'layer_norm9'})
conv5 = tf.nn.dropout(conv5, keep_prob_dropout)
# LAYER 9: convLSTM5 (4x4 feature map size)
hidden5, lstm_state5 = basic_conv_lstm_cell(conv5, lstm_state5, 64, initializer, filter_size=3, scope='convlstm5')
hidden5 = tf_layers.layer_norm(hidden5, scope='layer_norm10')
hidden5 = tf.nn.dropout(hidden5, keep_prob_dropout)
# LAYER 10: Fully Convolutional Layer (4x4x128 --> 1x1xFC_LAYER_SIZE)
# necessary for dimension compatibility with conv lstm cell
fc_conv = slim.layers.conv2d(hidden5, FC_LAYER_SIZE, [4,4], stride=1, scope='fc_conv', padding='VALID', weights_initializer=initializer)
fc_conv = tf.nn.dropout(fc_conv, keep_prob_dropout)
# LAYER 11: Fully Convolutional LSTM (1x1x256 -> 1x1x128)
hidden6, lstm_state6 = basic_conv_lstm_cell(fc_conv, lstm_state6, FC_LSTM_LAYER_SIZE, initializer, filter_size=1, scope='convlstm6')
# no dropout since it's the last encoder layer --> hidden repr should be steady
# mu and sigma for sampling latent variable
sigma = slim.layers.fully_connected(inputs=lstm_state6, num_outputs=VAE_REPR_SIZE, activation_fn=tf.nn.softplus)
mu = slim.layers.fully_connected(inputs=lstm_state6, num_outputs=VAE_REPR_SIZE, activation_fn=None)
# reparameterization trick to allow backprop to flow through the deterministic nodes sigma and mu
z = mu + sigma * tf.random_normal(tf.shape(mu), mean=0., stddev=1.)
return z, mu, sigma
def decoder_model(hidden_repr, sequence_length, initializer, num_channels=3, keep_prob_dropout=0.9, scope='decoder', fc_conv_layer=False):
"""
Args:
hidden_repr: Tensor of latent space representation
sequence_length: number of frames that shall be decoded from the hidden_repr
num_channels: number of channels for generated frames
initializer: specifies the initialization type (default: contrib.slim.layers uses Xavier init with uniform data)
fc_conv_layer: adds an fc layer at the end of the encoder
Returns:
frame_gen: array of generated frames (Tensors)
fc_conv_layer: indicates whether hidden_repr is a 1x1xdepth tensor and a fully convolutional layer shall be added
"""
frame_gen = []
lstm_state1, lstm_state2, lstm_state3, lstm_state4, lstm_state5, lstm_state0 = None, None, None, None, None, None
assert (not fc_conv_layer) or (hidden_repr.get_shape()[1] == hidden_repr.get_shape()[2] == 1)
for i in range(sequence_length):
reuse = (i > 0) #reuse variables (recurrence) after first time step
with tf.variable_scope(scope, reuse=reuse):
hidden0 = tf.nn.dropout(hidden_repr, keep_prob_dropout)
fc_conv = slim.layers.conv2d_transpose(hidden0, 64, [4, 4], stride=1, scope='fc_conv', padding='VALID', weights_initializer=initializer)
fc_conv = tf.nn.dropout(fc_conv, keep_prob_dropout)
#LAYER 1: convLSTM1
hidden1, lstm_state1 = basic_conv_lstm_cell(fc_conv, lstm_state1, 64, initializer, filter_size=3, scope='convlstm1')
hidden1 = tf_layers.layer_norm(hidden1, scope='layer_norm1')
hidden1 = tf.nn.dropout(hidden1, keep_prob_dropout)
#LAYER 2: upconv1 (8x8 -> 16x16)
upconv1 = slim.layers.conv2d_transpose(hidden1, hidden1.get_shape()[3], 3, stride=2, scope='upconv1', weights_initializer=initializer,
normalizer_fn=tf_layers.layer_norm,
normalizer_params={'scope': 'layer_norm2'})
upconv1 = tf.nn.dropout(upconv1, keep_prob_dropout)
#LAYER 3: convLSTM2
hidden2, lstm_state2 = basic_conv_lstm_cell(upconv1, lstm_state2, 64, initializer, filter_size=3, scope='convlstm2')
hidden2 = tf_layers.layer_norm(hidden2, scope='layer_norm3')
hidden2 = tf.nn.dropout(hidden2, keep_prob_dropout)
#LAYER 4: upconv2 (16x16 -> 32x32)
upconv2 = slim.layers.conv2d_transpose(hidden2, hidden2.get_shape()[3], 3, stride=2, scope='upconv2', weights_initializer=initializer,
normalizer_fn=tf_layers.layer_norm,
normalizer_params={'scope': 'layer_norm4'})
upconv2 = tf.nn.dropout(upconv2, keep_prob_dropout)
#LAYER 5: convLSTM3
hidden3, lstm_state3 = basic_conv_lstm_cell(upconv2, lstm_state3, 32, initializer, filter_size=3, scope='convlstm3')
hidden3 = tf_layers.layer_norm(hidden3, scope='layer_norm5')
hidden3 = tf.nn.dropout(hidden3, keep_prob_dropout)
# LAYER 6: upconv3 (32x32 -> 64x64)
upconv3 = slim.layers.conv2d_transpose(hidden3, hidden3.get_shape()[3], 5, stride=2, scope='upconv3', weights_initializer=initializer,
normalizer_fn=tf_layers.layer_norm,
normalizer_params={'scope': 'layer_norm6'})
upconv3 = tf.nn.dropout(upconv3, keep_prob_dropout)
#LAYER 7: convLSTM4
hidden4, lstm_state4 = basic_conv_lstm_cell(upconv3, lstm_state4, 32, initializer, filter_size=5, scope='convlstm4')
hidden4 = tf_layers.layer_norm(hidden4, scope='layer_norm7')
hidden4 = tf.nn.dropout(hidden4, keep_prob_dropout)
#Layer 8: upconv4 (64x64 -> 128x128)
upconv4 = slim.layers.conv2d_transpose(hidden4, hidden4.get_shape()[3], 5, stride=2, scope='upconv4', normalizer_fn=tf_layers.layer_norm, weights_initializer=initializer, normalizer_params={'scope': 'layer_norm8'})
upconv4 = tf.nn.dropout(upconv4, keep_prob_dropout)
#LAYER 9: convLSTM5
hidden5, lstm_state5 = basic_conv_lstm_cell(upconv4, lstm_state5, 32, initializer, filter_size=5, scope='convlstm5')
hidden5 = tf_layers.layer_norm(hidden5, scope='layer_norm9')
hidden5 = tf.nn.dropout(hidden5, keep_prob_dropout)
upconv5 = slim.layers.conv2d_transpose(hidden5, num_channels, 5, stride=2, scope='upconv5', weights_initializer=initializer)
# no dropout since this layer finally generates the output image
frame_gen.append(upconv5)
assert len(frame_gen)==sequence_length
return frame_gen
def composite_model(frames, encoder_len=5, decoder_future_len=5, decoder_reconst_len=5, noise_std=0.0,
uniform_init=True, num_channels=3, keep_prob_dropout=0.9, fc_conv_layer=True):
"""
Args:
frames: 5D array of batch with videos - shape(batch_size, num_frames, frame_width, frame_height, num_channels)
encoder_len: number of frames that shall be encoded
decoder_future_len: number of frames that shall be decoded from the hidden_repr
noise_std: standard deviation of the gaussian noise to be added to the hidden representation
uniform_init: specifies if the weight initialization should be drawn from gaussian or uniform distribution (default:uniform)
num_channels: number of channels for generated frames
fc_conv_layer: indicates whether fully connected layer shall be added between encoder and decoder
Returns:
frames_pred, frames_reconst: arrays of generated future / reconstructed frames (Tensors), plus hidden_repr, mu and sigma of the latent representation
"""
assert all([l > 0 for l in [encoder_len, decoder_future_len, decoder_reconst_len]])
initializer = tf_layers.xavier_initializer(uniform=uniform_init)
hidden_repr, mu, sigma = encoder_model(frames, encoder_len, initializer, fc_conv_layer=fc_conv_layer)
# add noise
if noise_std != 0.0:
hidden_repr = hidden_repr + tf.random_normal(shape=hidden_repr.get_shape(), mean=0.0, stddev=noise_std,
dtype=tf.float32)
frames_pred = decoder_model(hidden_repr, decoder_future_len, initializer, num_channels=num_channels, keep_prob_dropout=keep_prob_dropout,
scope='decoder_pred', fc_conv_layer=fc_conv_layer)
frames_reconst = decoder_model(hidden_repr, decoder_reconst_len, initializer, num_channels=num_channels, keep_prob_dropout=keep_prob_dropout,
scope='decoder_reconst', fc_conv_layer=fc_conv_layer)
return frames_pred, frames_reconst, hidden_repr, mu, sigma
```
#### File: DeepEpisodicMemory/utils/augmentation.py
```python
import itertools
import os
import subprocess
import utils.io_handler as io
AVI_SOURCE_DIR = '/data/rothfuss/data/ArmarExperiences/videos/memory'
TARGET_DIR = '/data/rothfuss/data/ArmarExperiences/videos/memory_augmented'
GAMMA_VALS = [0.7, 1.0]
BRIGHTNESS_VALS = [-0.1, 0.0, 0.2]
SATURATION = [0.6, 1.0]
avi_files = io.file_paths_from_directory(AVI_SOURCE_DIR, '*.avi')
def augment_video(video_path, target_dir, gamma, brightness, saturation, i):
video_id = os.path.basename(video_path).split('.')[0]
new_video_path = os.path.join(target_dir, video_id + '_%i.avi'%i)
ffmpeg_str = "/common/homes/students/rothfuss/Documents/ffmpeg/bin/ffmpeg -i %s -vf eq=gamma=%.1f:brightness=%.1f:saturation=%.1f -c:a copy %s"%(video_path, gamma, brightness, saturation, new_video_path)
print(ffmpeg_str)
subprocess.Popen(ffmpeg_str, shell=True)
for file in avi_files:
for i, (gamma, brightness, saturation) in enumerate(itertools.product(GAMMA_VALS, BRIGHTNESS_VALS, SATURATION)):
try:
augment_video(file, TARGET_DIR, gamma, brightness, saturation, i)
print("%i: "%i + "Sucessfully augmented " + file)
except Exception as e:
print("%i: "%i + "Could not augment " + file + " --- " + str(e))
``` |
{
"source": "jonasrothfuss/f-pacoh-torch",
"score": 2
} |
#### File: meta_bo/models/models.py
```python
import torch
import gpytorch
import math
from collections import OrderedDict
from config import device
from meta_bo.models.util import find_root_by_bounding
""" ----------------------------------------------------"""
""" ------------ Probability Distributions ------------ """
""" ----------------------------------------------------"""
from torch.distributions import Distribution
from torch.distributions import TransformedDistribution, AffineTransform
class AffineTransformedDistribution(TransformedDistribution):
r"""
Implements an affine transformation of a probability distribution p(x)
x_transformed = mean + std * x , x \sim p(x)
Args:
base_dist: (torch.distributions.Distribution) probability distribution to transform
normalization_mean: (np.ndarray) additive factor to add to x
normalization_std: (np.ndarray) multiplicative factor for scaling x
"""
def __init__(self, base_dist, normalization_mean, normalization_std):
self.loc_tensor = torch.tensor(normalization_mean).float().reshape((1,)).to(device)
self.scale_tensor = torch.tensor(normalization_std).float().reshape((1,)).to(device)
normalization_transform = AffineTransform(loc=self.loc_tensor, scale=self.scale_tensor)
super().__init__(base_dist, normalization_transform)
@property
def mean(self):
return self.transforms[0](self.base_dist.mean)
@property
def stddev(self):
return torch.exp(torch.log(self.base_dist.stddev) + torch.log(self.scale_tensor))
@property
def variance(self):
return torch.exp(torch.log(self.base_dist.variance) + 2 * torch.log(self.scale_tensor))
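# Hedged usage sketch (added; not part of the original file; names are
# illustrative only): given a predictive distribution over normalized targets,
#   AffineTransformedDistribution(pred_dist, normalization_mean=y_mean, normalization_std=y_std)
# yields the distribution over the original target scale, with
# mean = y_mean + y_std * pred_dist.mean and stddev = y_std * pred_dist.stddev.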
class UnnormalizedExpDist(Distribution):
r"""
Creates an unnormalized distribution with density
proportional to exp(exponent_fn(value))
Args:
exponent_fn: callable that outputs the exponent
"""
def __init__(self, exponent_fn):
self.exponent_fn = exponent_fn
super().__init__()
@property
def arg_constraints(self):
return {}
def log_prob(self, value):
return self.exponent_fn(value)
class FactorizedNormal(Distribution):
def __init__(self, loc, scale, summation_axis=-1):
self.normal_dist = torch.distributions.Normal(loc, scale)
self.summation_axis = summation_axis
def log_prob(self, value):
return torch.sum(self.normal_dist.log_prob(value), dim=self.summation_axis)
class EqualWeightedMixtureDist(Distribution):
def __init__(self, dists, batched=False, num_dists=None):
self.batched = batched
if batched:
assert isinstance(dists, torch.distributions.Distribution)
self.num_dists = dists.batch_shape if num_dists is None else num_dists
event_shape = dists.event_shape
else:
assert all([isinstance(d, torch.distributions.Distribution) for d in dists])
event_shape = dists[0].event_shape
self.num_dists = len(dists)
self.dists = dists
super().__init__(event_shape=event_shape)
@property
def mean(self):
if self.batched:
return torch.mean(self.dists.mean, dim=0)
else:
return torch.mean(torch.stack([dist.mean for dist in self.dists], dim=0), dim=0)
@property
def stddev(self):
return torch.sqrt(self.variance)
@property
def variance(self):
if self.batched:
means = self.dists.mean
vars = self.dists.variance
else:
means = torch.stack([dist.mean for dist in self.dists], dim=0)
vars = torch.stack([dist.variance for dist in self.dists], dim=0)
var1 = torch.mean((means - torch.mean(means, dim=0))**2, dim=0)
var2 = torch.mean(vars, dim=0)
# check shape
assert var1.shape == var2.shape
return var1 + var2
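    # Hedged note (added; not part of the original file): this is the law of
    # total variance for an equally weighted mixture, Var = Var(E_i) + E[Var_i],
    # i.e. the variance of the component means plus the mean component variance.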
@property
def arg_constraints(self):
return {}
def log_prob(self, value):
if self.batched:
log_probs_dists = self.dists.log_prob(value)
else:
log_probs_dists = torch.stack([dist.log_prob(value) for dist in self.dists])
return torch.logsumexp(log_probs_dists, dim=0) - torch.log(torch.tensor(self.num_dists).float())
def cdf(self, value):
if self.batched:
cum_p = self.dists.cdf(value)
else:
cum_p = torch.stack([dist.cdf(value) for dist in self.dists])
assert cum_p.shape[0] == self.num_dists
return torch.mean(cum_p, dim=0)
def icdf(self, quantile):
left = - 1e8 * torch.ones(quantile.shape)
right = + 1e8 * torch.ones(quantile.shape)
fun = lambda x: self.cdf(x) - quantile
return find_root_by_bounding(fun, left, right)
class CatDist(Distribution):
def __init__(self, dists, reduce_event_dim=True):
assert all([len(dist.event_shape) == 1 for dist in dists])
assert all([len(dist.batch_shape) == 0 for dist in dists])
self.reduce_event_dim = reduce_event_dim
self.dists = dists
self._event_shape = torch.Size((sum([dist.event_shape[0] for dist in self.dists]),))
def sample(self, sample_shape=torch.Size()):
return self._sample(sample_shape, sample_fn='sample')
def rsample(self, sample_shape=torch.Size()):
return self._sample(sample_shape, sample_fn='rsample')
def log_prob(self, value):
idx = 0
log_probs = []
for dist in self.dists:
n = dist.event_shape[0]
if value.ndim == 1:
val = value[idx:idx+n]
elif value.ndim == 2:
val = value[:, idx:idx + n]
elif value.ndim == 3:
val = value[:, :, idx:idx + n]
else:
raise NotImplementedError('Can only handle values up to 3 dimensions')
log_probs.append(dist.log_prob(val))
idx += n
for i in range(len(log_probs)):
if log_probs[i].ndim == 0:
log_probs[i] = log_probs[i].reshape((1,))
if self.reduce_event_dim:
return torch.sum(torch.stack(log_probs, dim=0), dim=0)
return torch.stack(log_probs, dim=0)
def _sample(self, sample_shape, sample_fn='sample'):
return torch.cat([getattr(d, sample_fn)(sample_shape) for d in self.dists], dim=-1)
""" ----------------------------------------------------"""
""" ------------------ Neural Network ------------------"""
""" ----------------------------------------------------"""
class NeuralNetwork(torch.nn.Sequential):
"""Trainable neural network kernel function for GPs."""
def __init__(self, input_dim=2, output_dim=2, layer_sizes=(64, 64), nonlinearlity=torch.tanh,
weight_norm=False, prefix='',):
super(NeuralNetwork, self).__init__()
self.nonlinearlity = nonlinearlity
self.n_layers = len(layer_sizes)
self.prefix = prefix
if weight_norm:
_normalize = torch.nn.utils.weight_norm
else:
_normalize = lambda x: x
self.layers = []
prev_size = input_dim
for i, size in enumerate(layer_sizes):
setattr(self, self.prefix + 'fc_%i'%(i+1), _normalize(torch.nn.Linear(prev_size, size)))
prev_size = size
setattr(self, self.prefix + 'out', _normalize(torch.nn.Linear(prev_size, output_dim)))
def forward(self, x):
output = x
for i in range(1, self.n_layers+1):
output = getattr(self, self.prefix + 'fc_%i'%i)(output)
output = self.nonlinearlity(output)
output = getattr(self, self.prefix + 'out')(output)
return output
def forward_parametrized(self, x, params):
output = x
param_idx = 0
for i in range(1, self.n_layers + 1):
output = F.linear(output, params[param_idx], params[param_idx+1])
output = self.nonlinearlity(output)
param_idx += 2
output = F.linear(output, params[param_idx], params[param_idx+1])
return output
""" ----------------------------------------------------"""
""" ------------ Vectorized Neural Network -------------"""
""" ----------------------------------------------------"""
import torch.nn as nn
import torch.nn.functional as F
class VectorizedModel:
def __init__(self, input_dim, output_dim):
self.input_dim = input_dim
self.output_dim = output_dim
def parameter_shapes(self):
raise NotImplementedError
def named_parameters(self):
raise NotImplementedError
def parameters(self):
return list(self.named_parameters().values())
def set_parameter(self, name, value):
if len(name.split('.')) == 1:
setattr(self, name, value)
else:
remaining_name = ".".join(name.split('.')[1:])
getattr(self, name.split('.')[0]).set_parameter(remaining_name, value)
def set_parameters(self, param_dict):
for name, value in param_dict.items():
self.set_parameter(name, value)
def parameters_as_vector(self):
return torch.cat(self.parameters(), dim=-1)
def set_parameters_as_vector(self, value):
idx = 0
for name, shape in self.parameter_shapes().items():
idx_next = idx + shape[-1]
if value.ndim == 1:
self.set_parameter(name, value[idx:idx_next])
elif value.ndim == 2:
self.set_parameter(name, value[:, idx:idx_next])
else:
raise AssertionError
idx = idx_next
assert idx_next == value.shape[-1]
class LinearVectorized(VectorizedModel):
def __init__(self, input_dim, output_dim):
super().__init__(input_dim, output_dim)
self.weight = torch.normal(0, 1, size=(input_dim * output_dim,), device=device, requires_grad=True)
self.bias = torch.zeros(output_dim, device=device, requires_grad=True)
self.reset_parameters()
def reset_parameters(self):
self.weight = _kaiming_uniform_batched(self.weight, fan=self.input_dim, a=math.sqrt(5), nonlinearity='tanh')
if self.bias is not None:
fan_in = self.output_dim
bound = 1 / math.sqrt(fan_in)
nn.init.uniform_(self.bias, -bound, bound)
def forward(self, x):
if self.weight.ndim == 2 or self.weight.ndim == 3:
model_batch_size = self.weight.shape[0]
# batched computation
if self.weight.ndim == 3:
assert self.weight.shape[-2] == 1 and self.bias.shape[-2] == 1
W = self.weight.view(model_batch_size, self.output_dim, self.input_dim)
b = self.bias.view(model_batch_size, self.output_dim)
if x.ndim == 2:
# introduce new dimension 0
x = torch.reshape(x, (1, x.shape[0], x.shape[1]))
# tile dimension 0 to model_batch size
x = x.repeat(model_batch_size, 1, 1)
else:
assert x.ndim == 3 and x.shape[0] == model_batch_size
# out dimensions correspond to [nn_batch_size, data_batch_size, out_features)
return torch.bmm(x, W.permute(0, 2, 1)) + b[:, None, :]
elif self.weight.ndim == 1:
return F.linear(x, self.weight.view(self.output_dim, self.input_dim), self.bias)
else:
raise NotImplementedError
def parameter_shapes(self):
return OrderedDict(bias=self.bias.shape, weight=self.weight.shape)
def named_parameters(self):
return OrderedDict(bias=self.bias, weight=self.weight)
def __call__(self, *args, **kwargs):
return self.forward( *args, **kwargs)
class NeuralNetworkVectorized(VectorizedModel):
"""Trainable neural network that batches multiple sets of parameters. That is, each
"""
def __init__(self, input_dim, output_dim, layer_sizes=(64, 64), nonlinearlity=torch.tanh):
super().__init__(input_dim, output_dim)
self.nonlinearlity = nonlinearlity
self.n_layers = len(layer_sizes)
prev_size = input_dim
for i, size in enumerate(layer_sizes):
setattr(self, 'fc_%i'%(i+1), LinearVectorized(prev_size, size))
prev_size = size
setattr(self, 'out', LinearVectorized(prev_size, output_dim))
def forward(self, x):
output = x
for i in range(1, self.n_layers + 1):
output = getattr(self, 'fc_%i' % i)(output)
output = self.nonlinearlity(output)
output = getattr(self, 'out')(output)
return output
def parameter_shapes(self):
param_dict = OrderedDict()
# hidden layers
for i in range(1, self.n_layers + 1):
layer_name = 'fc_%i' % i
for name, param in getattr(self, layer_name).parameter_shapes().items():
param_dict[layer_name + '.' + name] = param
# last layer
layer_name = 'out'
for name, param in getattr(self, layer_name).parameter_shapes().items():
param_dict[layer_name + '.' + name] = param
return param_dict
def named_parameters(self):
param_dict = OrderedDict()
# hidden layers
for i in range(1, self.n_layers + 1):
layer_name = 'fc_%i' % i
for name, param in getattr(self, layer_name).named_parameters().items():
param_dict[layer_name + '.' + name] = param
# last layer
layer_name = 'out'
for name, param in getattr(self, layer_name).named_parameters().items():
param_dict[layer_name + '.' + name] = param
return param_dict
def __call__(self, *args, **kwargs):
return self.forward( *args, **kwargs)
""" Initialization Helpers """
def _kaiming_uniform_batched(tensor, fan, a=0.0, nonlinearity='tanh'):
gain = nn.init.calculate_gain(nonlinearity, a)
std = gain / math.sqrt(fan)
bound = math.sqrt(3.0) * std # Calculate uniform bounds from standard deviation
with torch.no_grad():
return tensor.uniform_(-bound, bound)
""" ----------------------------------------------------"""
""" ------------------ GP components -------------------"""
""" ----------------------------------------------------"""
from gpytorch.means import Mean
from gpytorch.kernels import Kernel
from gpytorch.functions import RBFCovariance
from gpytorch.utils.broadcasting import _mul_broadcast_shape
class ConstantMeanLight(gpytorch.means.Mean):
def __init__(self, constant=torch.ones(1), batch_shape=torch.Size()):
super(ConstantMeanLight, self).__init__()
self.batch_shape = batch_shape
self.constant = constant
def forward(self, input):
if input.shape[:-2] == self.batch_shape:
return self.constant.expand(input.shape[:-1])
else:
return self.constant.expand(_mul_broadcast_shape(input.shape[:-1], self.constant.shape))
class SEKernelLight(gpytorch.kernels.Kernel):
def __init__(self, lengthscale=torch.tensor([1.0]), output_scale=torch.tensor(1.0)):
super(SEKernelLight, self).__init__(batch_shape=(lengthscale.shape[0], ))
self.length_scale = lengthscale
self.ard_num_dims = lengthscale.shape[-1]
self.output_scale = output_scale
self.postprocess_rbf = lambda dist_mat: self.output_scale * dist_mat.div_(-2).exp_()
def forward(self, x1, x2, diag=False, **params):
if (
x1.requires_grad
or x2.requires_grad
or (self.ard_num_dims is not None and self.ard_num_dims > 1)
or diag
):
x1_ = x1.div(self.length_scale)
x2_ = x2.div(self.length_scale)
return self.covar_dist(x1_, x2_, square_dist=True, diag=diag,
dist_postprocess_func=self.postprocess_rbf,
postprocess=True, **params)
return self.output_scale * RBFCovariance().apply(x1, x2, self.length_scale,
lambda x1, x2: self.covar_dist(x1, x2,
square_dist=True,
diag=False,
dist_postprocess_func=self.postprocess_rbf,
postprocess=False,
**params))
class HomoskedasticNoiseLight(gpytorch.likelihoods.noise_models._HomoskedasticNoiseBase):
def __init__(self, noise_var, *params, **kwargs):
self.noise_var = noise_var
self._modules = {}
self._parameters = {}
@property
def noise(self):
return self.noise_var
@noise.setter
def noise(self, value):
self.noise_var = value
class GaussianLikelihoodLight(gpytorch.likelihoods._GaussianLikelihoodBase):
def __init__(self, noise_var, batch_shape=torch.Size()):
self.batch_shape = batch_shape
self._modules = {}
self._parameters = {}
noise_covar = HomoskedasticNoiseLight(noise_var)
super().__init__(noise_covar=noise_covar)
@property
def noise(self):
return self.noise_covar.noise
@noise.setter
def noise(self, value):
self.noise_covar.noise = value
def expected_log_prob(self, target, input, *params, **kwargs):
mean, variance = input.mean, input.variance
noise = self.noise_covar.noise
res = ((target - mean) ** 2 + variance) / noise + noise.log() + math.log(2 * math.pi)
return res.mul(-0.5).sum(-1)
class LearnedGPRegressionModel(gpytorch.models.ExactGP):
"""GP model which can take a learned mean and learned kernel function."""
def __init__(self, train_x, train_y, likelihood, learned_kernel=None, learned_mean=None, mean_module=None, covar_module=None):
super(LearnedGPRegressionModel, self).__init__(train_x, train_y, likelihood)
if mean_module is None:
self.mean_module = gpytorch.means.ZeroMean()
else:
self.mean_module = mean_module
self.covar_module = covar_module
self.learned_kernel = learned_kernel
self.learned_mean = learned_mean
self.likelihood = likelihood
def forward(self, x):
# feed through kernel NN
if self.learned_kernel is not None:
projected_x = self.learned_kernel(x)
else:
projected_x = x
# feed through mean module
if self.learned_mean is not None:
mean_x = self.learned_mean(x).squeeze()
else:
mean_x = self.mean_module(projected_x).squeeze()
covar_x = self.covar_module(projected_x)
return gpytorch.distributions.MultivariateNormal(mean_x, covar_x)
def prior(self, x):
self.train()
return self.__call__(x)
def posterior(self, x):
self.eval()
return self.__call__(x)
def kl(self, x):
return torch.distributions.kl.kl_divergence(self.posterior(x), self.prior(x))
def pred_dist(self, x):
self.eval()
return self.likelihood(self.__call__(x))
def pred_ll(self, x, y):
pred_dist = self.pred_dist(x)
return pred_dist.log_prob(y)
from gpytorch.models.approximate_gp import ApproximateGP
from gpytorch.variational import CholeskyVariationalDistribution
from gpytorch.variational import VariationalStrategy
class LearnedGPRegressionModelApproximate(ApproximateGP):
"""GP model which can take a learned mean and learned kernel function."""
def __init__(self, train_x, train_y, likelihood, learned_kernel=None, learned_mean=None, mean_module=None,
covar_module=None, beta=1.0):
self.beta = beta
self.n_train_samples = train_x.shape[0]
variational_distribution = CholeskyVariationalDistribution(self.n_train_samples)
variational_strategy = VariationalStrategy(self, train_x, variational_distribution,
learn_inducing_locations=False)
super().__init__(variational_strategy)
if mean_module is None:
self.mean_module = gpytorch.means.ZeroMean()
else:
self.mean_module = mean_module
self.covar_module = covar_module
self.learned_kernel = learned_kernel
self.learned_mean = learned_mean
self.likelihood = likelihood
def forward(self, x):
# feed through kernel NN
if self.learned_kernel is not None:
projected_x = self.learned_kernel(x)
else:
projected_x = x
# feed through mean module
if self.learned_mean is not None:
mean_x = self.learned_mean(x).squeeze()
else:
mean_x = self.mean_module(projected_x).squeeze()
covar_x = self.covar_module(projected_x)
return gpytorch.distributions.MultivariateNormal(mean_x, covar_x)
def prior(self, x):
return self.forward(x)
def kl(self):
return self.variational_strategy.kl_divergence()
def pred_dist(self, x):
self.eval()
return self.likelihood(self.__call__(x))
def pred_ll(self, x, y):
variational_dist_f = self.__call__(x)
return self.likelihood.expected_log_prob(y, variational_dist_f).sum(-1)
@property
def variational_distribution(self):
return self.variational_strategy._variational_distribution
``` |
{
"source": "jonasrothfuss/meta_learning_pacoh",
"score": 2
} |
#### File: experiments/meta_overfitting_v2/meta-overfitting-nps.py
```python
import os
import sys
import hashlib
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(BASE_DIR)
from absl import flags
from absl import app
flags.DEFINE_integer('n_workers', default=-1, help='number of cpus to use')
flags.DEFINE_boolean('cluster', default=False, help='whether to submit jobs with bsub')
flags.DEFINE_string('datasets', default='sin,cauchy', help='specifies which dataset to use')
FLAGS = flags.FLAGS
N_THREADS = 1
def main(argv):
from experiments.util import AsyncExecutor, generate_launch_commands
import experiments.meta_overfitting_v2.neural_processes_overfitting_base
command_list = []
for dataset in FLAGS.datasets.split(','):
if dataset == 'sin':
n_context_samples = [5]
elif dataset == 'cauchy':
n_context_samples = [20]
else:
raise AssertionError('dataset must be either of [sin, cauchy]')
exp_config = {
'exp_name': ['meta-overfitting-v2-nps-%s'%dataset],
'dataset': [dataset],
'n_threads': [N_THREADS],
'seed': list(range(30, 55)),
'data_seed': [28],
'weight_decay': [0.0, 1e-3, 1e-2, 1e-1, 2e-1, 4e-1],
'r_dim': [256, 512],
'n_iter_fit': [30000],
'lr': [1e-3],
'lr_decay': [0.97],
'n_train_tasks': [2, 4, 8, 16, 32, 64, 128, 256, 512],
'n_test_tasks': [200],
'n_context_samples': n_context_samples,
'n_test_samples': [100],
}
command_list.extend(
generate_launch_commands(experiments.meta_overfitting_v2.neural_processes_overfitting_base, exp_config))
if FLAGS.cluster :
cluster_cmds = []
for python_cmd in command_list:
cmd_hash = hashlib.md5(str.encode(python_cmd)).hexdigest()
bsub_cmd = 'bsub -oo /cluster/project/infk/krause/rojonas/stdout/gp-priors/meta-overfitting/%s.out' \
' -W 03:59'\
' -R "rusage[mem=1048]"' \
' -n %i '% (cmd_hash, N_THREADS)
cluster_cmds.append(bsub_cmd + ' ' + python_cmd)
answer = input("About to submit %i compute jobs to the cluster. Proceed? [yes/no]\n"%len(cluster_cmds))
if answer == 'yes':
for cmd in cluster_cmds:
os.system(cmd)
else:
answer = input("About to run %i compute jobs locally on %i workers. "
"Proceed? [yes/no]\n" %(len(command_list), FLAGS.n_workers))
if answer == 'yes':
exec_fn = lambda cmd: os.system(cmd)
executor = AsyncExecutor(n_jobs=FLAGS.n_workers)
executor.run(exec_fn, command_list)
if __name__ == '__main__':
app.run(main)
``` |
{
"source": "jonasrothfuss/movement_primitives_via_optimization",
"score": 3
} |
#### File: movement_primitives_via_optimization/helpers/math.py
```python
import numpy as np
from scipy.linalg import polar
def is_pos_def(M):
""" checks whether x^T * M * x > 0, M being the matrix to be checked
:param M: the matrix to be checked
:return: True if positive definite, False otherwise
"""
return np.all(np.linalg.eigvals(M) > 0)
def loss_function(traj, traj_j):
"""
indicator loss function for trajectories
:param traj: (ndarray) first trajectory (the one to be minimized) of shape (n,)
:param traj_j: (ndarray) second trajectory (from the demonstrations) of shape (n,)
:return: 0 if trajectories are of the same shape and equal in terms of their elements, 1 otherwise
"""
assert traj.shape == traj_j.shape
if np.linalg.norm(traj - traj_j) < 10**-8:
return 0
return 1
def get_2nd_order_finite_diff_matrix(size):
'''
2nd order finite differencing matrix according to a spring damper system with which new positions are calculated based on
the accelerations in a system.
:param size: size of the quadratic matrix
:return: the differencing matrix of shape (size, size)
'''
return 2 * np.diag(np.ones([size])) + np.diag(-np.ones([size - 1]), k=1) + np.diag(-np.ones([size - 1]), k=-1)
def get_1st_order_finite_diff_matrix(size):
'''
    1st order (forward) finite differencing matrix with which velocities are approximated from differences of
    consecutive positions.
:param size: size of the quadratic matrix
:return: the differencing matrix of shape (size, size)
'''
return np.diag(np.ones([size])) + np.diag(-np.ones([size - 1]), k=1)
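# Illustrative example of the two stencils above for size=3 (doctest-style, values only):
#
# >>> get_2nd_order_finite_diff_matrix(3)
# array([[ 2., -1.,  0.],
#        [-1.,  2., -1.],
#        [ 0., -1.,  2.]])
# >>> get_1st_order_finite_diff_matrix(3)
# array([[ 1., -1.,  0.],
#        [ 0.,  1., -1.],
#        [ 0.,  0.,  1.]])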
# def project_norm_pos_def(M, eps=10**-8):
# """
# Projects a matrix M (norm) onto the cone of pos. (semi) def. matrices
# :param M: a square matrix - numpy array of shape (m,m)
# :return: P, the projection of M on the cone pos. semi-def. matrices
# """
# eigval, eigvec = np.linalg.eigh(M)
# eigval_pos = np.maximum(eigval, eps)
# P = eigvec.dot(np.diag(eigval_pos)).dot(eigvec.T)
# assert P.shape == M.shape
# return P
def project_norm_pos_def(A):
"""
Calculates the nearest (in Frobenius norm) Symmetric Positive Definite matrix to A
https://www.sciencedirect.com/science/article/pii/0024379588902236
:param A: a square matrix
:return A_pd: the projection of A onto the space pf positive definite matrices
"""
assert A.ndim == 2 and A.shape[0] == A.shape[1], "A must be a square matrix"
# symmetrize A into B
B = (A + A.T) / 2
# Compute the symmetric polar factor H of B
_, H = polar(B)
A_pd = (B + H) / 2
# ensure symmetry
A_pd = (A_pd + A_pd.T) / 2
# test that A_pd is indeed PD. If not, then tweak it just a little bit
pd = False
k = 0
while not pd:
eig = np.linalg.eigvals(A_pd)
pd = np.all(eig > 0)
k += 1
if not pd:
mineig = min(eig)
A_pd = A_pd + (-mineig * k ** 2 + 10**-8) * np.eye(A.shape[0])
return A_pd
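# Usage sketch (illustrative values): project an indefinite matrix onto the cone of PD matrices.
#
# A = np.array([[1.0, 2.0],
#               [2.0, 1.0]])          # eigenvalues 3 and -1, hence not positive definite
# A_pd = project_norm_pos_def(A)
# assert is_pos_def(A_pd)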
def ldl_decomp(A):
"""
Computes the LDL decomposition of A
:param A: symmetric matrix A
:return: matrices (L, D) with same shape as A
"""
import numpy as np
A = np.matrix(A)
if not (A.H == A).all():
print("A must be Hermitian!")
return None, None
else:
        Lch = np.linalg.cholesky(A)
        # scale the Cholesky factor by its diagonal so that L has a unit diagonal and A = L * D * L^H
        S = np.diag(np.diag(Lch))
        Sinv = np.diag(1 / np.diag(Lch))
        D = np.matrix(S.dot(S))
        L = np.matrix(Lch.dot(Sinv))
return L, D
def get_d_element(A, i):
"""
    computes the i-th diagonal element of the D matrix of the LDL decomposition of A
    :param A: symmetric matrix
    :param i: index of the diagonal element to return
    :return: the i-th diagonal element of D
"""
assert i < A.shape[0]
_, D = ldl_decomp(A)
print(A)
return np.diag(D)[i]
```
#### File: jonasrothfuss/movement_primitives_via_optimization/norm_learning.py
```python
import numpy as np
from scipy.optimize import minimize
import pandas as pd
from movement_primitives_optimization.helpers import math
import itertools
def inner_minimization(traj_i, traj_j, M):
"""
Applies the right term of eq. 19 in "Movement Primitives via Optimization" (Dragan et al., 2015) via Lagrangian
optimization (SLSQP method with constraints as specified in the paper). Each dimension is optimized separately and
their min-values compose a new vector of shape (# dimensions,).
:param traj_i: First trajectory of shape (time steps of trajectory, dimensions)
:param traj_j: Second trajectory of shape (time steps of trajectory, dimensions)
    :param M: the norm (positive definite matrix) under which the optimization process is executed.
:return: A vector of shape (# dimensions,) that is composed of the min-values of each separate dimension
Lagrangian-optimization
"""
assert traj_i.shape == traj_j.shape
    assert traj_i.ndim == 1 and traj_j.ndim == 1
fun = lambda traj: (traj_i - traj).T.dot(M).dot(traj_i - traj) - math.loss_function(traj, traj_j)
cons = ({'type': 'eq', 'fun': lambda traj: traj[0] - traj_j[0]},
{'type': 'eq', 'fun': lambda traj: traj[-1] - traj_j[-1]})
init_guess = traj_j + np.random.normal(size=(traj_i.shape[0]), scale=0.01)
opt_result = minimize(fun, x0=init_guess, method='SLSQP', constraints=cons,
options={'maxiter': 20000, "disp": False})
return opt_result.x, opt_result.fun
def margin_loss(demonstrations, M):
ndim_traj = demonstrations[0].shape[1]
loss = 0
for traj_i, traj_j, dim in itertools.product(demonstrations, demonstrations, range(ndim_traj)):
_, inner_min_result = inner_minimization(traj_i[:,dim], traj_j[:,dim], M)
loss += (traj_i[:,dim]-traj_j[:,dim]).T.dot(M).dot(traj_i[:,dim]-traj_j[:,dim]) - inner_min_result
return loss
def learn_norm_via_opt(demonstrations, init_norm):
fun = lambda K: margin_loss(demonstrations, K.T.dot(K))
print("init_norm shape", init_norm.shape[0])
#cons = [{'type': 'ineq', 'fun': lambda K: - math.get_d_element(K.T.dot(K),i)} for i in range(init_norm.shape[0])]
opt_result = minimize(fun, x0=init_norm,
options={'maxiter': 20000, "disp": False})
return opt_result.x
def learn_norm(demonstrations, init_norm, alpha=0.01, iterations=1000):
"""
Implementation of norm learning from the paper "Movement Primitives via Optimization" (Dragan et al., 2015)
Specifically, this function learns a norm given that the user provides not only demonstrations but also adaptations
by applying Maximum Margin Planning. The function iteratively applies the following three steps,
given pairs of trajectories (traj_i, traj_j) \in DxD (D being the set of user demonstrations):
1) compute the optimal solution to the "inner minimization problem" (right term in eq. 19)
2) compute the gradient update for the norm with a hyper-parameter alpha, update the norm
3) project the updated norm to the space of pos. def. matrices, repeat
:param demonstrations: the trajectories, can be a pandas DataFrame or a list of ndarrays with shape (time steps,
dimensions)
:param init_norm: the initial norm from where we the norm updates start from
:param alpha: learning rate for the norm update
:param iterations: number of iterations the norm should be updates
:return: the learned norm of the same shape as init_norm
"""
assert demonstrations, "no trajectory given"
assert alpha > 0
assert math.is_pos_def(init_norm)
ndim_traj = demonstrations[0].shape[1]
if isinstance(demonstrations, pd.DataFrame):
# flatten required to convert 2d array to 1d
demonstrations = demonstrations.values.flatten()
M = init_norm
def calculate_gradients(traj_i, traj_j, dim):
traj_ij, _ = inner_minimization(traj_i[:, dim], traj_j[:, dim], M)
        # the gradient of the margin loss w.r.t. M is a difference of outer products of the error vectors
        grad = np.outer(traj_i[:, dim] - traj_j[:, dim], traj_i[:, dim] - traj_j[:, dim]) \
               - np.outer(traj_i[:, dim] - traj_ij, traj_i[:, dim] - traj_ij)
        grads.append(grad)
for k in range(iterations):
grads = []
#Parallel(n_jobs=NUM_CORES)(delayed(calculate_gradients)(traj_i, traj_j, dim)
# for traj_i, traj_j, dim in itertools.product(demonstrations, demonstrations, range(ndim_traj)))
for traj_i, traj_j, dim in itertools.product(demonstrations, demonstrations, range(ndim_traj)):
calculate_gradients(traj_i, traj_j, dim)
        mean_grad = np.mean(grads, axis=0)
M -= alpha * mean_grad
M = math.project_norm_pos_def(M)
print("LOSS :", margin_loss(demonstrations, M))
return M
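# Usage sketch (illustrative shapes and values; real demonstrations are recorded trajectories
# of shape (time steps, dimensions)):
#
# demos = [np.random.randn(50, 2) for _ in range(3)]
# init_norm = math.get_2nd_order_finite_diff_matrix(50)   # positive definite initialization
# M_learned = learn_norm(demos, init_norm, alpha=0.01, iterations=10)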
``` |
{
"source": "JonasRoux/infra-ovh-ansible-module",
"score": 2
} |
#### File: plugins/modules/public_cloud_imageid_info.py
```python
from __future__ import (absolute_import, division, print_function)
from ansible.module_utils.basic import AnsibleModule
__metaclass__ = type
DOCUMENTATION = '''
---
module: public_cloud_imageid_info
short_description: Get image id based on human name
description:
- Get imageid based on human name ("Debian 10", "Ubuntu 21.04","Centos 8", etc)
    - The imageid changes between regions
- The retrieved imageid can be used to spawn a new instance
author: Synthesio SRE Team
requirements:
- ovh >= 0.5.0
options:
name:
required: true
description: The human name of the image ("Debian 10", "Ubuntu 21.04","Centos 8", etc)
region:
required: true
description: The region where to lookup for imageid
service_name:
required: true
description: The service_name
'''
EXAMPLES = '''
- name: Get image id
synthesio.ovh.public_cloud_imageid_info:
service_name: "{{ service_name }}"
region: "GRA7"
name: "Debian 10"
delegate_to: localhost
register: image_id
'''
RETURN = ''' # '''
from ansible_collections.synthesio.ovh.plugins.module_utils.ovh import ovh_api_connect, ovh_argument_spec
try:
from ovh.exceptions import APIError
HAS_OVH = True
except ImportError:
HAS_OVH = False
def run_module():
module_args = ovh_argument_spec()
module_args.update(dict(
service_name=dict(required=True),
name=dict(required=True),
region=dict(required=True)
))
module = AnsibleModule(
argument_spec=module_args,
supports_check_mode=True
)
client = ovh_api_connect(module)
service_name = module.params['service_name']
name = module.params['name']
region = module.params['region']
try:
result = client.get('/cloud/project/%s/image' % (service_name),
region=region
)
for i in result:
if i['name'] == name:
image_id = i['id']
module.exit_json(changed=False, id=image_id)
module.fail_json(msg="Image {} not found in {}".format(name, region), changed=False)
except APIError as api_error:
module.fail_json(msg="Failed to call OVH API: {0}".format(api_error))
def main():
run_module()
if __name__ == '__main__':
main()
``` |
{
"source": "JonasRSV/Friday",
"score": 3
} |
#### File: bulbasaur/architechtures/kaggle.py
```python
import tensorflow as tf
def _kaggle_cnn_v1(x: tf.Tensor,
embedding_dim: int,
mode: tf.estimator.ModeKeys,
regularization: float = 1e-6) -> tf.Tensor:
with tf.variable_scope('kaggle_cnn', reuse=tf.AUTO_REUSE):
x = tf.expand_dims(x, -1)
print("x", x)
x = tf.compat.v1.layers.Conv2D(filters=128,
kernel_size=(6, 6),
strides=(2, 2),
activation=tf.nn.relu,
name="kaggle_cnn_1_c")(x)
# x = tf.compat.v1.layers.MaxPooling2D(pool_size=(1, 3), strides=(1, 3),
# name="kaggle_cnn_1_m")(x)
# x = tf.compat.v1.layers.MaxPooling2D(pool_size=(1, 3), strides=(1, 1), name="kaggle_cnn_1_m")(x)
print("x", x)
x = tf.compat.v1.layers.Conv2D(filters=128,
kernel_size=(1, 7),
activation=tf.nn.relu,
name="kaggle_cnn_2_c")(x)
print("x", x)
x = tf.compat.v1.layers.Conv2D(filters=256,
kernel_size=(2, 9),
# padding="valid",
activation=tf.nn.relu,
name="kaggle_cnn_3_c")(x)
print("x", x)
x = tf.compat.v1.layers.Conv2D(filters=512,
kernel_size=(7, 1),
strides=(2, 1),
activation=tf.nn.relu,
name="kaggle_cnn_4_c")(x)
print("x", x)
x = tf.keras.layers.GlobalMaxPooling2D(name="kaggle_cnn_mp")(x)
print("x", x)
#x = tf.compat.v1.layers.Dropout(rate=0.1, name="kaggle_cnn_dropout")(
# x, training=mode == tf.estimator.ModeKeys.TRAIN,
# )
#print("x", x)
x = tf.compat.v1.layers.Dense(512,
activation=tf.nn.relu,
name="kaggle_cnn_1d")(x)
print("x", x)
embedding = tf.compat.v1.layers.Dense(embedding_dim,
activation=None,
name="kaggle_cnn_2d")(x)
print("x", x)
return embedding
def _kaggle_cnn_v2(x: tf.Tensor,
embedding_dim: int,
mode: tf.estimator.ModeKeys,
regularization: float = 1e-6) -> tf.Tensor:
with tf.variable_scope('kaggle_cnn', reuse=tf.AUTO_REUSE):
x = tf.expand_dims(x, -1)
print("x", x)
x = tf.compat.v1.layers.Conv2D(filters=128,
kernel_size=(6, 6),
strides=(2, 2),
activation=tf.nn.relu,
name="kaggle_cnn_1_c")(x)
# x = tf.compat.v1.layers.MaxPooling2D(pool_size=(1, 3), strides=(1, 3),
# name="kaggle_cnn_1_m")(x)
# x = tf.compat.v1.layers.MaxPooling2D(pool_size=(1, 3), strides=(1, 1), name="kaggle_cnn_1_m")(x)
print("x", x)
x = tf.compat.v1.layers.Conv2D(filters=128,
kernel_size=(1, 7),
activation=tf.nn.relu,
name="kaggle_cnn_2_c")(x)
print("x", x)
x = tf.compat.v1.layers.MaxPooling2D(pool_size=(1, 4),
strides=(1, 1),
name="kaggle_cnn_2_m")(x)
print("x", x)
x = tf.compat.v1.layers.Conv2D(filters=256,
kernel_size=(1, 9),
padding="valid",
activation=tf.nn.relu,
name="kaggle_cnn_3_c")(x)
print("x", x)
x = tf.compat.v1.layers.Conv2D(filters=512,
kernel_size=(7, 1),
strides=(1, 1),
activation=tf.nn.relu,
name="kaggle_cnn_4_c")(x)
print("x", x)
x = tf.keras.layers.GlobalMaxPooling2D(name="kaggle_cnn_mp")(x)
print("x", x)
#x = tf.compat.v1.layers.Dropout(rate=0.1, name="kaggle_cnn_dropout")(
# x, training=mode == tf.estimator.ModeKeys.TRAIN,
# )
#print("x", x)
x = tf.compat.v1.layers.Dense(512,
activation=tf.nn.relu,
name="kaggle_cnn_1d")(x)
print("x", x)
embedding = tf.compat.v1.layers.Dense(embedding_dim,
activation=None,
name="kaggle_cnn_2d")(x)
print("x", x)
return embedding
def _kaggle_cnn_v3(x: tf.Tensor,
embedding_dim: int,
mode: tf.estimator.ModeKeys,
regularization: float = 1e-6) -> tf.Tensor:
with tf.variable_scope('kaggle_cnn', reuse=tf.AUTO_REUSE):
# x = tf.expand_dims(x, -1)
print("x input", x.shape)
# cells = [
# tf.contrib.rnn.LSTMCell(256, name="lstm_1"),
# # tf.contrib.rnn.LSTMCell(256, name="lstm_2"),
# # tf.contrib.rnn.LSTMCell(128),
# ]
# The second output is the last state and we will no use that
# lstm = tf.keras.layers.LSTM(16, name="kaggle_lstm", return_sequences=True)
# x = tf.keras.layers.Bidirectional(lstm, name="kaggle_rnn")(x, training=mode == tf.estimator.ModeKeys.TRAIN)
# print("x", x.shape)
forward_cells = [
tf.contrib.rnn.LSTMCell(256),
]
# The second output is the last state and we will no use that
forward = tf.keras.layers.RNN(forward_cells, return_sequences=False)(
x, training=mode == tf.estimator.ModeKeys.TRAIN)
backward_cells = [
tf.contrib.rnn.LSTMCell(256),
]
backward = tf.keras.layers.RNN(backward_cells, return_sequences=False)(
tf.reverse(x, [1]), training=mode == tf.estimator.ModeKeys.TRAIN)
x = tf.concat([forward, backward], axis=-1)
print("x", x)
x = tf.compat.v1.layers.Dense(512,
activation=tf.nn.relu,
name="kaggle_cnn_1d")(x)
print("x", x)
x = tf.compat.v1.layers.Dense(512,
activation=tf.nn.relu,
name="kaggle_cnn_2d")(x)
print("x", x)
x = tf.compat.v1.layers.Dense(512,
activation=tf.nn.relu,
name="kaggle_cnn_3d")(x)
print("x", x)
embedding = tf.compat.v1.layers.Dense(embedding_dim,
activation=None,
name="kaggle_cnn_4d")(x)
print("x", x)
return embedding
# Taken from https://www.kaggle.com/c/tensorflow-speech-recognition-challenge/discussion/47715
def kaggle_cnn(x: tf.Tensor,
embedding_dim: int,
mode: tf.estimator.ModeKeys,
regularization: float = 1e-6) -> tf.Tensor:
    return _kaggle_cnn_v1(x, embedding_dim, mode, regularization)
```
#### File: models/bulbasaur/bulbasaur.py
```python
import sys
import os
# Some systems don't use the launching directory as root
sys.path.append(os.getcwd())
import pathlib
import tensorflow as tf
import models.shared.audio as audio
import argparse
import models.bulbasaur.architechtures as arch
from enum import Enum
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)
class Mode(Enum):
train_eval = "train_eval"
export = "export"
class Distance(Enum):
COSINE = "cosine"
EUCLIDEAN = "euclidean"
def create_input_fn(mode: tf.estimator.ModeKeys,
input_prefix: str,
audio_length: int,
sample_rate: int,
parallel_reads: int = 5,
batch_size: int = 32):
feature_description = {
'anchor': tf.io.FixedLenFeature([], tf.string),
'positive': tf.io.FixedLenFeature([], tf.string),
'negative': tf.io.FixedLenFeature([], tf.string),
}
def decode_example(x):
x = tf.io.parse_single_example(x, feature_description)
x["anchor"] = tf.reshape(tf.decode_raw(input_bytes=x["anchor"], out_type=tf.int16), [audio_length])
x["positive"] = tf.reshape(tf.decode_raw(input_bytes=x["positive"], out_type=tf.int16), [audio_length])
x["negative"] = tf.reshape(tf.decode_raw(input_bytes=x["negative"], out_type=tf.int16), [audio_length])
return x
def input_fn():
entries = input_prefix.split("/")
path = "/".join(entries[:-1])
prefix = entries[-1]
files = [str(file) for file in pathlib.Path(path).glob(f"{prefix}")]
dataset = tf.data.TFRecordDataset(filenames=files,
num_parallel_reads=parallel_reads)
dataset = dataset.map(decode_example)
if mode == tf.estimator.ModeKeys.TRAIN:
# If we train we do data augmentation
dataset = dataset.shuffle(buffer_size=100)
dataset = dataset.batch(batch_size=batch_size)
if mode == tf.estimator.ModeKeys.TRAIN:
dataset = dataset.repeat()
return dataset
return input_fn
def cosine_distance(a: tf.Tensor, b: tf.Tensor):
return 1 - tf.reduce_sum(a * b, axis=-1)
def euclidean_distance(a: tf.Tensor, b: tf.Tensor):
return tf.sqrt(tf.reduce_sum(tf.square(a - b), axis=-1))
def cosine_triplet_loss(anchor_embeddings: tf.Tensor,
positive_embeddings: tf.Tensor,
negative_embeddings: tf.Tensor,
margin=1.0):
anchor_embeddings = tf.linalg.l2_normalize(anchor_embeddings, axis=1)
positive_embeddings = tf.linalg.l2_normalize(positive_embeddings, axis=1)
negative_embeddings = tf.linalg.l2_normalize(negative_embeddings, axis=1)
# Minimizing absolute distance rather than relative distance works better
triplet = cosine_distance(anchor_embeddings, positive_embeddings) \
+ 10 * tf.nn.relu(margin - cosine_distance(anchor_embeddings, negative_embeddings))
# triplet = tf.nn.relu(cosine_distance(anchor_embeddings, positive_embeddings) -
# cosine_distance(anchor_embeddings, negative_embeddings) + margin)
# return similarity_loss(left_embeddings, right_embeddings)
return tf.reduce_sum(triplet)
def euclidean_triplet_loss(anchor_embeddings: tf.Tensor,
positive_embeddings: tf.Tensor,
negative_embeddings: tf.Tensor,
margin=1.0):
# Minimizing absolute distance rather than relative distance works better
triplet = euclidean_distance(anchor_embeddings, positive_embeddings) \
+ 10 * tf.nn.relu(margin - euclidean_distance(anchor_embeddings, negative_embeddings))
# triplet = tf.nn.relu(euclidean_distance(anchor_embeddings, positive_embeddings) -
# euclidean_distance(anchor_embeddings, negative_embeddings) + margin)
return tf.reduce_sum(triplet)
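# Illustrative numeric check (values are placeholders): with margin=1.0 and well separated
# embeddings the hinge term vanishes, so only the anchor-positive distance is penalized, e.g.
#   anchor = positive = [[1.0, 0.0]], negative = [[0.0, 1.0]]
#   euclidean_triplet_loss = ||a - p|| + 10 * relu(1.0 - ||a - n||)
#                          = 0 + 10 * relu(1.0 - sqrt(2)) = 0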
def get_predict_ops(distance: Distance,
stored_embeddings: tf.Tensor,
signal_embeddings: tf.Tensor):
d = {
distance.COSINE: lambda *args: cosine_distance(*args),
distance.EUCLIDEAN: lambda *args: euclidean_distance(*args)
}[distance](stored_embeddings, signal_embeddings)
predict_op = tf.argmin(d)
return predict_op, d
def get_metric_ops(distance: Distance,
anchor_embeddings: tf.Tensor,
positive_embeddings: tf.Tensor,
negative_embeddings: tf.Tensor,
margin: float):
metric_ops = {}
loss_op = {
distance.COSINE: lambda *args: cosine_triplet_loss(*args),
distance.EUCLIDEAN: lambda *args: euclidean_triplet_loss(*args)
}[distance](anchor_embeddings, positive_embeddings, negative_embeddings, margin)
distance_op = {
distance.COSINE: cosine_distance,
distance.EUCLIDEAN: euclidean_distance
}[distance]
correct = distance_op(anchor_embeddings, positive_embeddings) < distance_op(anchor_embeddings, negative_embeddings)
metric_ops["correct"] = tf.metrics.mean(correct)
return loss_op, metric_ops
def get_train_ops(distance: Distance,
anchor_embeddings: tf.Tensor,
positive_embeddings: tf.Tensor,
negative_embeddings: tf.Tensor,
margin: float,
learning_rate: float,
save_summaries_every: int,
summary_output_dir: str):
loss_op = {
distance.COSINE: lambda *args: cosine_triplet_loss(*args),
distance.EUCLIDEAN: lambda *args: euclidean_triplet_loss(*args)
}[distance](anchor_embeddings, positive_embeddings, negative_embeddings, margin)
#decay_learning_rate = tf.compat.v1.train.cosine_decay_restarts(
# learning_rate=learning_rate,
# global_step=tf.compat.v1.train.get_global_step(),
# first_decay_steps=1000,
# t_mul=2.0,
# m_mul=1.0,
# alpha=0.0,
# name="learning_rate")
# Add to summary
#tf.summary.scalar("learning_rate", decay_learning_rate)
# Add regularization
reg_loss = tf.compat.v1.losses.get_regularization_loss()
tf.summary.scalar("regularization_loss", reg_loss)
total_loss = loss_op + reg_loss
tf.summary.scalar("total_loss", total_loss)
train_op = tf.compat.v1.train.AdamOptimizer(
learning_rate=learning_rate).minimize(
loss=total_loss,
global_step=tf.compat.v1.train.get_global_step())
loss_op = tf.identity(loss_op, name="loss_op")
train_logging_hooks = [
tf.estimator.LoggingTensorHook(
{"loss": "loss_op"},
every_n_iter=20),
tf.estimator.SummarySaverHook(
save_steps=save_summaries_every,
output_dir=summary_output_dir,
summary_op=tf.compat.v1.summary.merge_all())
]
return loss_op, train_op, train_logging_hooks
def extract_audio_feature(signal: tf.Tensor, sample_rate: int):
# TODO(jonasrsv): Try dropping first 2 MFCC features
# To make invariant to loudness (gain)
return audio.mfcc_feature(signal=signal,
coefficients=40,
sample_rate=sample_rate,
frame_length=1024,
frame_step=512,
fft_length=1024,
num_mel_bins=120,
lower_edge_hertz=1,
upper_edge_hertz=sample_rate / 2)
# return audio.mfcc_feature(signal=signal,
# coefficients=20,
# sample_rate=sample_rate,
# frame_length=2048,
# frame_step=256,
# fft_length=2048,
# num_mel_bins=128,
# lower_edge_hertz=1,
# upper_edge_hertz=4000)
# Because https://www.quora.com/What-are-the-advantages-of-using-spectrogram-vs-MFCC-as-feature-extraction-for-speech-recognition-using-deep-neural-network
# Tony says Mel filterbanks is slightly ahead, so we try it! :D
# return audio.mel_spectrogram_feature(signal=signal,
# sample_rate=sample_rate,
# frame_length=512,
# frame_step=512,
# fft_length=512,
# num_mel_bins=120,
# lower_edge_hertz=1,
# upper_edge_hertz=sample_rate / 2)
def get_embedding(audio_signal: tf.Tensor, sample_rate: int, embedding_dim: int, mode: tf.estimator.ModeKeys):
signal = extract_audio_feature(signal=audio.normalize_audio(audio_signal), sample_rate=sample_rate)
return arch.kaggle_cnn(signal, embedding_dim=embedding_dim, mode=mode)
def make_model_fn(distance: Distance,
embedding_dim: int,
summary_output_dir: str,
margin: float = 1.0,
sample_rate: int = 16000,
save_summaries_every: int = 100,
learning_rate: float = 0.001):
def model_fn(features, labels, mode, config, params):
print("features", features)
loss_op, train_op, train_logging_hooks, eval_metric_ops, predict_op = None, None, None, None, None
if mode == tf.estimator.ModeKeys.TRAIN:
anchor_embeddings = get_embedding(features["anchor"],
sample_rate=sample_rate,
embedding_dim=embedding_dim,
mode=mode)
positive_embeddings = get_embedding(features["positive"],
sample_rate=sample_rate,
embedding_dim=embedding_dim,
mode=mode)
negative_embeddings = get_embedding(features["negative"],
sample_rate=sample_rate,
embedding_dim=embedding_dim,
mode=mode)
loss_op, train_op, train_logging_hooks = get_train_ops(
distance=distance,
anchor_embeddings=anchor_embeddings,
positive_embeddings=positive_embeddings,
negative_embeddings=negative_embeddings,
margin=margin,
learning_rate=learning_rate,
save_summaries_every=save_summaries_every,
summary_output_dir=summary_output_dir)
elif mode == tf.estimator.ModeKeys.EVAL:
anchor_embeddings = get_embedding(features["anchor"],
sample_rate=sample_rate,
embedding_dim=embedding_dim,
mode=mode)
positive_embeddings = get_embedding(features["positive"],
sample_rate=sample_rate,
embedding_dim=embedding_dim,
mode=mode)
negative_embeddings = get_embedding(features["negative"],
sample_rate=sample_rate,
embedding_dim=embedding_dim,
mode=mode)
loss_op, eval_metric_ops = get_metric_ops(distance=distance,
anchor_embeddings=anchor_embeddings,
positive_embeddings=positive_embeddings,
negative_embeddings=negative_embeddings,
margin=margin)
elif mode == tf.estimator.ModeKeys.PREDICT:
embeddings = get_embedding(tf.expand_dims(features["audio"], 0),
sample_rate=sample_rate,
embedding_dim=embedding_dim,
mode=mode)
embeddings = tf.linalg.l2_normalize(embeddings, axis=1)
predict_op, distance_op = get_predict_ops(
distance=distance,
stored_embeddings=features["embeddings"],
signal_embeddings=embeddings,
)
project_op = tf.squeeze(embeddings)
tf.identity(project_op, name="project")
tf.identity(tf.shape(project_op), name="project_shape")
tf.identity(predict_op, name="output")
tf.identity(tf.shape(predict_op), name="output_shape")
tf.identity(distance_op, name="distances")
tf.identity(tf.shape(distance_op), name="distances_shape")
min_distance_op = tf.reduce_min(distance_op)
tf.identity(min_distance_op, name="min_distance")
tf.identity(tf.shape(min_distance_op), name="min_distance_shape")
else:
raise Exception(f"Unknown ModeKey {mode}")
for v in tf.all_variables():
print(v.name)
return tf.estimator.EstimatorSpec(mode=mode,
predictions=predict_op,
loss=loss_op,
train_op=train_op,
training_hooks=train_logging_hooks,
eval_metric_ops=eval_metric_ops)
return model_fn
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--mode",
required=True,
choices=[str(x).split(".")[1] for x in Mode],
help="one of (train_eval or export)")
parser.add_argument(
"--model_directory",
required=True,
help="Model directory -- where events & checkpoints is stored")
parser.add_argument(
"--train_prefix",
help="absolute path to prefix of train files",
type=str,
)
parser.add_argument(
"--eval_prefix",
help="absolute path to prefix of eval files",
type=str,
)
parser.add_argument("--embedding_dim",
required=True,
type=int,
help="Dimension of embeddings")
parser.add_argument("--distance",
required=True,
choices=[x.value for x in Distance],
help="Distance to learn representation for")
parser.add_argument("--margin",
required=True,
type=float,
help="Margin for triplet loss")
parser.add_argument("--clip_length",
required=True,
type=float,
help="Length of audio in seconds")
parser.add_argument("--sample_rate",
default=16000,
type=int,
help="Sample rate of the audio")
parser.add_argument("--batch_size",
default=32,
type=int,
help="Batch size of training")
parser.add_argument("--start_learning_rate",
default=0.001,
type=float,
help="Start learning rate")
parser.add_argument("--save_summary_every",
default=10000,
type=int,
help="Same summary every n steps")
parser.add_argument("--eval_every",
default=10000,
type=int,
help="Evaluates on full dataset n steps")
parser.add_argument("--max_steps",
default=10000000,
type=int,
help="Evaluates on full dataset n steps")
parser.add_argument("--parallel_reads",
default=5,
type=int,
help="Parallel reads of dataset")
args = parser.parse_args()
config = tf.estimator.RunConfig(
model_dir=args.model_directory,
save_summary_steps=args.save_summary_every,
log_step_count_steps=10,
save_checkpoints_steps=args.eval_every,
)
estimator = tf.estimator.Estimator(
model_fn=make_model_fn(
distance=Distance(args.distance),
summary_output_dir=args.model_directory,
embedding_dim=args.embedding_dim,
margin=args.margin,
sample_rate=args.sample_rate,
save_summaries_every=args.save_summary_every,
learning_rate=args.start_learning_rate),
model_dir=args.model_directory,
config=config)
if args.mode == Mode.train_eval.value:
train_spec = tf.estimator.TrainSpec(
input_fn=create_input_fn(mode=tf.estimator.ModeKeys.TRAIN,
input_prefix=args.train_prefix,
audio_length=int(args.clip_length * args.sample_rate),
sample_rate=args.sample_rate,
parallel_reads=args.parallel_reads,
batch_size=args.batch_size),
max_steps=args.max_steps,
)
eval_spec = tf.estimator.EvalSpec(
steps=args.eval_every,
input_fn=create_input_fn(mode=tf.estimator.ModeKeys.EVAL,
                                     input_prefix=args.eval_prefix,
audio_length=int(args.clip_length * args.sample_rate),
sample_rate=args.sample_rate,
parallel_reads=args.parallel_reads,
batch_size=args.batch_size),
throttle_secs=5,
)
tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
elif args.mode == Mode.export.value:
def serving_input_receiver_fn():
audio_length = int(args.clip_length * args.sample_rate)
inputs = {
"audio": tf.placeholder(shape=[audio_length], dtype=tf.int16, name="audio"),
"embeddings": tf.placeholder(shape=[None, args.embedding_dim], dtype=tf.float32, name="embeddings"),
}
return tf.estimator.export.ServingInputReceiver(
features=inputs, receiver_tensors=inputs)
estimator.export_saved_model(
export_dir_base=args.model_directory,
serving_input_receiver_fn=serving_input_receiver_fn)
else:
raise NotImplementedError(f"Unknown mode {args.mode}")
if __name__ == "__main__":
main()
```
#### File: shared/augmentations/speed.py
```python
import numpy as np
import pysndfx
from models.shared.augmentations.core import Augmentation
class Speed(Augmentation):
def __init__(self):
self.normalization = 2 ** 15
def apply(self, audio: np.ndarray, sample_rate: int):
self.effect = (pysndfx.AudioEffectsChain().speed(
factor=np.random.uniform(0.8, 1.15)
))
audio = np.array(audio) / self.normalization
audio = self.effect(audio, sample_in=sample_rate)
audio = (audio * self.normalization).astype(np.int16)
return audio
```
#### File: shared/augmentations/time_stretch.py
```python
import numpy as np
import librosa
from models.shared.augmentations.core import Augmentation
class TimeStretch(Augmentation):
def __init__(self, min_rate: float, max_rate: float):
self.normalization = 2 ** 15
self.min_rate = min_rate
self.max_rate = max_rate
def apply(self, audio: np.ndarray, sample_rate: int):
stretched = librosa.effects.time_stretch(audio / self.normalization,
rate=np.random.uniform(self.min_rate, self.max_rate)
) * self.normalization
stretched = np.array(stretched, dtype=np.int16)
if len(stretched) > len(audio):
return stretched[:len(audio)]
padded_audio = np.zeros_like(audio)
padded_audio[:len(stretched)] = stretched
return padded_audio
```
#### File: pipelines/preprocessing/audio_augmentations.py
```python
import os
import numpy as np
import models.shared.augmentation as augmentation
import pathlib
import models.shared.augmentations as a
from typing import List
class AudioAugmentations:
def __init__(self, sample_rate: int):
self.sample_rate = sample_rate
self.augmenter = augmentation.create_audio_augmentations([
#a.TimeStretch(min_rate=0.98, max_rate=0.99),
#a.PitchShift(min_semitones=-1, max_semitones=2),
#a.Shift(min_rate=-500, max_rate=500),
#a.Gain(min_gain=0.8, max_gain=1.3),
#a.Speed()
a.Reverb(),
a.Background(background_noises=pathlib.Path(f"{os.getenv('FRIDAY_DATA', default='data')}/background_noise"),
sample_rate=sample_rate,
min_voice_factor=0.7,
max_voice_factor=0.95),
a.GaussianNoise(loc=0, stddev=100)
],
p=[
#0.3,
#0.3,
#0.25,
0.8,
0.8,
0.3
]
)
def do(self, audio: np.array) -> np.ndarray:
audio = self.augmenter(audio, sample_rate=self.sample_rate)
audio = np.array(audio, dtype=np.int16)
return audio
```
#### File: pipelines/preprocessing/random_bipadding.py
```python
import numpy as np
def bipadding(length: float, audio: np.ndarray, sample_rate: int) -> np.ndarray:
pad_to = int(sample_rate * length)
num_padding = pad_to - len(audio)
padding = list(np.random.normal(0, 10, size=num_padding).astype(np.int64))
padding_split = np.random.randint(0, num_padding)
head_padding = padding[:padding_split]
tail_padding = padding[padding_split:]
head_padding.extend(audio)
head_padding.extend(tail_padding)
    return np.array(head_padding, dtype=np.int16)
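# Usage sketch (illustrative values): pad a 0.5 s clip to exactly 1 s at 16 kHz, with the
# original samples placed at a random offset inside the noise padding.
#
# clip = np.random.randint(-1000, 1000, size=8000).astype(np.int16)
# padded = bipadding(length=1.0, audio=clip, sample_rate=16000)
# assert len(padded) == 16000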
```
#### File: mm/scripts/librispeech_to_mfa.py
```python
import pathlib
import sox
import argparse
import os
from tqdm import tqdm
def locate_transcriptions(chapter_root: pathlib.Path):
transcriptions = list(chapter_root.glob("*.trans.txt"))
if len(transcriptions) > 0:
return transcriptions[0]
else:
return None
def convert_chapter(path: pathlib.Path, sink: pathlib.Path, prefix: str,
transformer: sox.Transformer):
transcriptions = locate_transcriptions(path)
if not sink.is_dir():
os.makedirs(sink)
if transcriptions:
with open(str(transcriptions), "r") as transcriptions:
for line in transcriptions.readlines():
line = line.strip()
end_of_index = line.find(" ")
file_name = line[:end_of_index].strip()
file_name_with_extension = file_name + ".flac"
audio_input_file = path / file_name_with_extension
audio_output_file = sink / f"{prefix}-{file_name}.wav"
label_output_file = sink / f"{prefix}-{file_name}.lab"
label = line[end_of_index:].strip()
transformer.build_file(input_filepath=str(audio_input_file), output_filepath=str(audio_output_file))
with open(str(label_output_file), "w") as label_file:
label_file.write(label)
def convert_speaker(path: pathlib.Path, sink: pathlib.Path, prefix: str,
transformer: sox.Transformer):
for chapter_path in path.glob("*"):
if str(chapter_path.stem).isnumeric():
convert_chapter(chapter_path, sink, prefix, transformer)
def convert_speakers(path: pathlib.Path, sink: pathlib.Path, prefix: str):
transformer = sox.Transformer()
for speaker_path in tqdm(list(path.glob("*"))):
if str(speaker_path.stem).isnumeric():
sub_sink = sink / speaker_path.stem
convert_speaker(speaker_path, sub_sink, prefix, transformer)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--source", type=pathlib.Path, help="source librispeech dataset")
parser.add_argument("--sink", type=pathlib.Path, help="where to write mfa dataset")
parser.add_argument("--prefix", type=str, help="prefix of dataset")
args = parser.parse_args()
convert_speakers(args.source, args.sink, args.prefix)
```
#### File: web/discoverfriday/server.py
```python
from typing import Dict
from flask import Flask
from flask import render_template, send_from_directory
from flask import request, Response
import random
import time
app = Flask(__name__, template_folder=".", static_url_path='')
class Friday:
def __init__(self, name: str, url: str):
self.name = name
self.url = url
def __hash__(self):
return hash(self.url)
def __eq__(self, other):
return self.url == other.url
DB: Dict[str, Dict[str, str]] = {}
@app.route('/')
def home():
remote_addr = None
if "X-Real-IP" in request.headers:
remote_addr = request.headers["X-Real-IP"]
else:
remote_addr = request.remote_addr
items = []
if remote_addr in DB:
items = [(url, name)
for url, name in DB[remote_addr].items()]
return render_template('index.html', items=items)
@app.route('/static/<path:path>')
def send_static(path):
return send_from_directory('static', path)
@app.route('/ping', methods=["PUT"])
def ping():
req = request.json
# This server will be running in dev-mode
# So all quests is executed serially
# No danger of race conditions
# So if this lies behind nginx the remote_addr will just
# be the addr to the nginx reverse proxy
# but the nginx if it is nice will include the remote ip as a header
# we check for that first
remote_addr = None
if "X-Real-IP" in request.headers:
remote_addr = request.headers["X-Real-IP"]
else:
remote_addr = request.remote_addr
print("req", req, "db", DB)
if remote_addr not in DB:
DB[remote_addr] = {}
DB[remote_addr][req["url"]] = req["name"]
return Response(status=200)
app.run(host="0.0.0.0", port="7000")
``` |
{
"source": "jonassagild/Track-to-Track-Fusion",
"score": 3
} |
#### File: Track-to-Track-Fusion/data_association/bar_shalom_hypothesis_associators.py
```python
from data_association.track_to_track_association import test_association_dependent_tracks, \
test_association_independent_tracks
class HypothesisTestIndependenceAssociator:
    """
    Uses the hypothesis test derived by Bar-Shalom to check whether two tracks originate from the same target,
    under the assumption of independent estimation errors
    """
def __init__(self, alpha=0.05):
"""
"""
super().__init__()
self.alpha = alpha
def associate_tracks(self, tracks1, tracks2, **kwargs):
"""
        Performs a hypothesis test to check for association
"""
return test_association_independent_tracks(tracks1[-1], tracks2[-1], alpha=self.alpha)
class HypothesisTestDependenceAssociator:
"""
Uses the hypothesis test derived by Bar-Shalom to check whether two tracks originate from the same target
"""
def __init__(self, alpha=0.05):
"""
"""
super().__init__()
self.alpha = alpha
def associate_tracks(self, track1_mean, track1_cov, track2_mean, track2_cov, **kwargs):
"""
        Performs a hypothesis test to check for association
"""
return test_association_dependent_tracks(track1_mean, track1_cov, track2_mean, track2_cov,
cross_cov_ij=kwargs['cross_cov_ij'][-1],
cross_cov_ji=kwargs['cross_cov_ji'][-1], alpha=self.alpha)
```
#### File: Track-to-Track-Fusion/data_association/counting_technique.py
```python
def associate_tracks(tracks_1, tracks_2, previously_associated,
association_distance_threshold=10,
consecutive_hits_confirm_association=3,
consecutive_misses_end_association=2):
"""
"""
if previously_associated:
# calculate distance between last state of tracks_1 and tracks_2
pass
```
#### File: Track-to-Track-Fusion/data_association/track_to_track_association.py
```python
import numpy as np
from scipy.stats.distributions import chi2
def test_association_independent_tracks(track1, track2, alpha=0.05):
"""
Checks whether the tracks are from the same target, under the independence assumption
:param track1: track to check for association
:param track2: track to check for association
    :param alpha: desired significance level of the test
:return: true if the tracks are from the same target, false else
"""
delta_estimates = track1.state_vector - track2.state_vector
error_delta_estimates = delta_estimates # as the difference of the true states is 0 if it is the same target
error_delta_estimates_covar = track1.covar + track2.covar # under the error independence assumption
d = (error_delta_estimates.transpose() @ np.linalg.inv(error_delta_estimates_covar) @ error_delta_estimates)[0]
# 4 degrees of freedom as we have 4 dimensions in the state vector
d_alpha = chi2.ppf((1 - alpha), df=4)
# Accept H0 if d <= d_alpha
return d <= d_alpha
def test_association_dependent_tracks(track1_mean, track1_cov, track2_mean, track2_cov, cross_cov_ij, cross_cov_ji,
alpha=0.05):
"""
checks whether the tracks are from the same target, when the dependence is accounted for.
    :param track1_mean: state mean of the first track to check for association
    :param track1_cov: state covariance of the first track
    :param track2_mean: state mean of the second track to check for association
    :param track2_cov: state covariance of the second track
    :param cross_cov_ij: cross-covariance of the estimation errors of track i and track j. See article
    :param cross_cov_ji: cross-covariance of the estimation errors of track j and track i
    :param alpha: desired significance level of the test
:return: true if the tracks are from the same target, false else
"""
delta_estimates = track1_mean - track2_mean
error_delta_estimates = delta_estimates # as the difference of the true states is 0 if it is the same target
error_delta_estimates_covar = track1_cov + track2_cov - cross_cov_ij - cross_cov_ji
d = (error_delta_estimates.transpose() @ np.linalg.inv(error_delta_estimates_covar) @ error_delta_estimates)[0]
# 4 degrees of freedom as we have 4 dimensions in the state vector
d_alpha = chi2.ppf((1 - alpha), df=4)
# Accept H0 if d <= d_alpha
return d <= d_alpha
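# Usage sketch (illustrative; track1 and track2 are assumed to expose `state_vector` (4x1)
# and `covar` (4x4), e.g. stonesoup GaussianState instances):
#
# same_target = test_association_independent_tracks(track1, track2, alpha=0.05)
# if not same_target:
#     print("association rejected at the 5% significance level")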
```
#### File: Track-to-Track-Fusion/trackers/calc_cross_cov_estimate_error.py
```python
import numpy as np
def calc_cross_cov_estimate_error(h_i, h_j, kalman_gain_i, kalman_gain_j, f, q, prev_cross_cov):
"""
Calculates the cross-covariance of the estimation error. See report for description of variables and formula.
Assumes same transition model for both trackers
:param prev_cross_cov:
:param kalman_gain_j:
:param kalman_gain_i:
:param h_i:
:param h_j:
:param f: assumes same transition models for both trackers
:param q:
:return:
"""
# TODO needs refactoring when decided whether to use semantics or mathematical characters. (uses both currently)
cross_cov = (np.eye(prev_cross_cov.shape[0]) - kalman_gain_i @ h_i) @ (f @ prev_cross_cov @ f.T + q) @ \
(np.eye(prev_cross_cov.shape[0]) - kalman_gain_j @ h_j).T
return cross_cov
def calc_partial_feedback_cross_cov(track1, track2, cross_covar_ij, cross_covar_ji):
"""
Calculates the updated cross_covariance when the partial feedback is used
"""
P_i = track1.covar
P_j = track2.covar
P_ij = cross_covar_ij
P_ji = cross_covar_ji
    # equivalent to (P_i - P_ij) @ inv(P_i + P_j - P_ij - P_ji), computed via a linear solve for numerical stability
    K_12 = np.linalg.solve((P_i + P_j - P_ij - P_ji).T, (P_i - P_ij).T).T
cross_covar = (np.eye(4) - K_12) @ cross_covar_ij + K_12 @ P_j
return cross_covar
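# Usage sketch (illustrative; h_i, h_j, K_i, K_j, f, q are placeholder names for the measurement
# matrices, Kalman gains, shared transition matrix and process noise covariance): the recursion is
# typically seeded with a zero cross-covariance and propagated once per fusion step.
#
# cross_cov = np.zeros((4, 4))
# cross_cov = calc_cross_cov_estimate_error(h_i, h_j, K_i, K_j, f, q, cross_cov)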
```
#### File: Track-to-Track-Fusion/trackers/kf_independent_fusion_async_sensors.py
```python
import numpy as np
from stonesoup.models.measurement.linear import LinearGaussian
from stonesoup.models.transition.linear import CombinedLinearGaussianTransitionModel, ConstantVelocity
from stonesoup.predictor.kalman import KalmanPredictor
from stonesoup.types.state import GaussianState
from stonesoup.updater.kalman import KalmanUpdater
from stonesoup.types.hypothesis import SingleHypothesis
from stonesoup.types.track import Track
from datetime import timedelta
from data_fusion import track_to_track_fusion
class kalman_filter_independent_fusion:
"""
    Runs two independent Kalman filters (radar and AIS) asynchronously and fuses their predictions
    under the error independence assumption.
"""
def __init__(self, start_time, prior: GaussianState,
sigma_process_radar=0.01, sigma_process_ais=0.01, sigma_meas_radar=3, sigma_meas_ais=1):
"""
:param start_time:
:param prior:
:param sigma_process_radar:
:param sigma_process_ais:
:param sigma_meas_radar:
:param sigma_meas_ais:
"""
self.start_time = start_time
# transition models (process models)
self.transition_model_radar = CombinedLinearGaussianTransitionModel([ConstantVelocity(sigma_process_radar),
ConstantVelocity(sigma_process_radar)])
self.transition_model_ais = CombinedLinearGaussianTransitionModel([ConstantVelocity(sigma_process_ais),
ConstantVelocity(sigma_process_ais)])
# Specify measurement model for radar
self.measurement_model_radar = LinearGaussian(
ndim_state=4, # number of state dimensions
mapping=(0, 2), # mapping measurement vector index to state index
noise_covar=np.array([[sigma_meas_radar, 0], # covariance matrix for Gaussian PDF
[0, sigma_meas_radar]])
)
# Specify measurement model for AIS
self.measurement_model_ais = LinearGaussian(
ndim_state=4,
mapping=(0, 2),
noise_covar=np.array([[sigma_meas_ais, 0],
[0, sigma_meas_ais]])
)
# specify predictors
self.predictor_radar = KalmanPredictor(self.transition_model_radar)
self.predictor_ais = KalmanPredictor(self.transition_model_ais)
# specify updaters
self.updater_radar = KalmanUpdater(self.measurement_model_radar)
self.updater_ais = KalmanUpdater(self.measurement_model_ais)
# create prior, both trackers use the same starting point
self.prior_radar = prior
self.prior_ais = prior
def track(self, start_time, measurements_radar, measurements_ais, fusion_rate=1):
"""
returns fused tracks.
"""
time = start_time
tracks_radar = Track()
tracks_ais = Track()
tracks_fused = []
measurements_radar = measurements_radar.copy()
measurements_ais = measurements_ais.copy()
# loop until there are no more measurements
while measurements_radar or measurements_ais:
# get all new measurements
new_measurements_radar = \
[measurement for measurement in measurements_radar if measurement.timestamp <= time]
new_measurements_ais = \
[measurement for measurement in measurements_ais if measurement.timestamp <= time]
# remove the new measurements from the measurements lists
for new_meas in new_measurements_ais:
measurements_ais.remove(new_meas)
for new_meas in new_measurements_radar:
measurements_radar.remove(new_meas)
# for each new_meas, perform a prediction and an update
for measurement in new_measurements_ais:
prediction = self.predictor_ais.predict(self.prior_ais, timestamp=measurement.timestamp)
hypothesis = SingleHypothesis(prediction, measurement)
post = self.updater_ais.update(hypothesis)
tracks_ais.append(post)
self.prior_ais = tracks_ais[-1]
for measurement in new_measurements_radar:
prediction = self.predictor_radar.predict(self.prior_radar, timestamp=measurement.timestamp)
hypothesis = SingleHypothesis(prediction, measurement)
post = self.updater_radar.update(hypothesis)
tracks_radar.append(post)
self.prior_radar = tracks_radar[-1]
# perform a prediction up until this time (the newest measurement might not be at this exact time)
# note that this "prediction" might be the updated posterior, if the newest measurement was at this time
prediction_radar = self.predictor_radar.predict(self.prior_radar, timestamp=time)
prediction_ais = self.predictor_ais.predict(self.prior_ais, timestamp=time)
# fuse these predictions.
tracks_fused.append(self._fuse_track(prediction_radar, prediction_ais))
time += timedelta(seconds=fusion_rate)
return tracks_fused, tracks_radar, tracks_ais
def _fuse_track(self, track_radar, track_ais):
"""
fuses the two tracks. Assumes that the tracks have the same timestamp
"""
# todo check the track-to-track association
fused_posterior, fused_covar = track_to_track_fusion.fuse_independent_tracks(track_radar,
track_ais)
estimate = GaussianState(fused_posterior, fused_covar, timestamp=track_radar.timestamp)
return estimate
def _fuse_tracks(self, tracks_radar, tracks_ais, fusion_rate=1):
tracks_fused = []
for track_radar in tracks_radar:
# find a track in tracks_radar with the same timestamp
estimate = track_radar
for track_ais in tracks_ais:
if track_ais.timestamp == track_radar.timestamp:
# same_target = track_to_track_association.test_association_independent_tracks(track_radar, track_ais,
# 0.01)
same_target = True # ignore association for now
if same_target:
fused_posterior, fused_covar = track_to_track_fusion.fuse_independent_tracks(track_radar,
track_ais)
estimate = GaussianState(fused_posterior, fused_covar, timestamp=track_radar.timestamp)
break
tracks_fused.append(estimate)
return tracks_fused
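# Usage sketch (illustrative; `prior` is a stonesoup GaussianState and the measurement lists
# contain timestamped stonesoup detections):
#
# tracker = kalman_filter_independent_fusion(start_time, prior,
#                                            sigma_meas_radar=3, sigma_meas_ais=1)
# tracks_fused, tracks_radar, tracks_ais = tracker.track(
#     start_time, measurements_radar, measurements_ais, fusion_rate=1)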
# plot
# fig = plt.figure(figsize=(10, 6))
# ax = fig.add_subplot(1, 1, 1)
# ax.set_xlabel("$x$")
# ax.set_ylabel("$y$")
# ax.axis('equal')
# ax.plot([state.state_vector[0] for state in ground_truth],
# [state.state_vector[2] for state in ground_truth],
# linestyle="--",
# label='Ground truth')
# ax.scatter([state.state_vector[0] for state in measurements_radar],
# [state.state_vector[1] for state in measurements_radar],
# color='b',
# label='Measurements Radar')
# ax.scatter([state.state_vector[0] for state in measurements_ais],
# [state.state_vector[1] for state in measurements_ais],
# color='r',
# label='Measurements AIS')
#
# # add ellipses to the posteriors
# for state in tracks_radar:
# w, v = np.linalg.eig(measurement_model_radar.matrix() @ state.covar @ measurement_model_radar.matrix().T)
# max_ind = np.argmax(w)
# min_ind = np.argmin(w)
# orient = np.arctan2(v[1, max_ind], v[0, max_ind])
# ellipse = Ellipse(xy=(state.state_vector[0], state.state_vector[2]),
# width=2 * np.sqrt(w[max_ind]), height=2 * np.sqrt(w[min_ind]),
# angle=np.rad2deg(orient),
# alpha=0.2,
# color='b')
# ax.add_artist(ellipse)
#
# for state in tracks_ais:
# w, v = np.linalg.eig(measurement_model_ais.matrix() @ state.covar @ measurement_model_ais.matrix().T)
# max_ind = np.argmax(w)
# min_ind = np.argmin(w)
# orient = np.arctan2(v[1, max_ind], v[0, max_ind])
# ellipse = Ellipse(xy=(state.state_vector[0], state.state_vector[2]),
# width=2 * np.sqrt(w[max_ind]), height=2 * np.sqrt(w[min_ind]),
# angle=np.rad2deg(orient),
# alpha=0.2,
# color='r')
# ax.add_patch(ellipse)
#
# for track_fused in tracks_fused:
# w, v = np.linalg.eig(measurement_model_ais.matrix() @ track_fused[1] @ measurement_model_ais.matrix().T)
# max_ind = np.argmax(w)
# min_ind = np.argmin(w)
# orient = np.arctan2(v[1, max_ind], v[0, max_ind])
# ellipse = Ellipse(xy=(track_fused[0][0], track_fused[0][2]),
# width=2 * np.sqrt(w[max_ind]), height=2 * np.sqrt(w[min_ind]),
# angle=np.rad2deg(orient),
# alpha=0.5,
# color='green')
# ax.add_patch(ellipse)
#
# # add ellipses to add legend todo do this less ugly
# ellipse = Ellipse(xy=(0, 0),
# width=0,
# height=0,
# color='r',
# alpha=0.2,
# label='Posterior AIS')
# ax.add_patch(ellipse)
# ellipse = Ellipse(xy=(0, 0),
# width=0,
# height=0,
# color='b',
# alpha=0.2,
# label='Posterior Radar')
# ax.add_patch(ellipse)
# ellipse = Ellipse(xy=(0, 0),
# width=0,
# height=0,
# color='green',
# alpha=0.5,
# label='Posterior Fused')
# ax.add_patch(ellipse)
#
# ax.legend()
# ax.set_title("Kalman filter tracking and fusion under the error independence assumption")
# fig.show()
# fig.savefig("../results/scenario1/KF_tracking_and_fusion_under_error_independence_assumption.svg")
```
#### File: Track-to-Track-Fusion/utils/calc_metrics.py
```python
import numpy as np
from stonesoup.types.track import Track
from stonesoup.types.groundtruth import GroundTruthPath
from stonesoup.types.state import State
from stonesoup.types.groundtruth import GroundTruthState
import scipy.linalg as la
def calc_nees(tracks: Track, ground_truths: GroundTruthPath):
"""
    Calculates the NEES (normalized estimation error squared). Assumes that tracks and ground_truths have the same
    length and that elements at the same index correspond to the same time step.
:param tracks:
:param ground_truths:
:return:
"""
nees = []
for (state, ground_truth) in zip(tracks, ground_truths):
chol_cov = la.cholesky(state.covar, lower=True)
mean_diff = state.state_vector - ground_truth.state_vector
inv_chol_cov_diff = la.solve_triangular(chol_cov, mean_diff, lower=True)
nees.append((inv_chol_cov_diff ** 2).sum())
return nees
def calc_anees(nees):
"""
    Calculates the ANEES (average NEES) over a run
    :param nees: list of per-time-step NEES values
    :return: the scalar ANEES value
"""
return np.array(nees).mean()
def calc_rmse(tracks: Track, ground_truths: GroundTruthPath):
"""
Calculates the root mean square error
:param tracks:
:param ground_truths:
:return: the scalar rmse
"""
errors = [gt.state_vector - track.state_vector for track, gt in zip(tracks, ground_truths)]
squared_errors = np.array([err.T @ err for err in errors]).flatten()[:, None]
mean_squared_error = squared_errors.mean()
rmse = np.sqrt(mean_squared_error)
return rmse.flatten()[0]
def calc_percentage_nees_within_ci(nees, ci):
"""
Calculates the percentage of NEES within the confidence interval
"""
within_ci = [ind_nees < ci[1] and ind_nees > ci[0] for ind_nees in nees]
return sum(within_ci) / len(nees)
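# Usage sketch (illustrative; `tracks` and `ground_truths` come from a tracker run, and the
# interval bounds require scipy.stats.distributions.chi2 for a 4-dimensional state):
#
# nees = calc_nees(tracks, ground_truths)
# anees = calc_anees(nees)
# ci = (chi2.ppf(0.025, df=4), chi2.ppf(0.975, df=4))
# coverage = calc_percentage_nees_within_ci(nees, ci)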
```
#### File: Track-to-Track-Fusion/utils/open_object.py
```python
import pickle
def open_object(filepath):
with open(filepath, 'rb') as inp:
obj = pickle.load(inp)
return obj
```
#### File: Track-to-Track-Fusion/utils/save_figures.py
```python
import os
import matplotlib.pyplot as plt
def save_figure(folder_path, name, fig):
"""
Saves the figure using matplotlibs savefig, and creates the folder in path if it does not exists.
:param folder_path: the path to the folder in which to save the figure. Assumes no trailing '/'
:param name:
:param fig:
:return: nothing
"""
if not os.path.isdir(folder_path):
os.makedirs(folder_path)
fig.savefig(folder_path + "/" + name)
``` |
{
"source": "JonasSchatz/DepixHMM",
"score": 2
} |
#### File: DepixHMM/experiments/experiment_accuracy.py
```python
import logging
import unittest
from PIL import ImageFont
from resources.fonts import DemoFontPaths
from text_depixelizer.depix_hmm import depix_hmm
from text_depixelizer.parameters import PictureParameters, TrainingParameters, LoggingParameters
class PipelineExperiments(unittest.TestCase):
def test_increasing_sample_images(self):
picture_parameters: PictureParameters = PictureParameters(
block_size=6,
pattern=r'\d{8,12}',
font=ImageFont.truetype(str(DemoFontPaths.arial), 50)
)
training_parameters: TrainingParameters = TrainingParameters(
n_img_train=10,
n_img_test=3,
n_clusters=100
)
logging_parameters: LoggingParameters = LoggingParameters(
timer_log_level=logging.INFO
)
depix_hmm(picture_parameters=picture_parameters, training_parameters=training_parameters, logging_parameters=logging_parameters)
```
#### File: DepixHMM/experiments/experiment_generate_sample_images.py
```python
from pathlib import Path
import unittest
from typing import List, Tuple
from PIL import ImageFont
from resources.fonts import DemoFontPaths
from text_depixelizer.parameters import PictureParameters
from text_depixelizer.training_pipeline.training_pipeline import create_training_data
class GenerateSampleImages(unittest.TestCase):
def test_generate_sample_images(self):
# Editable parameters
font_size: int = 30
block_sizes: List[int] = [10] # [1, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20]
offset_ys: List[int] = list(range(8))
font_path: str = str(DemoFontPaths.arial)
text: str = 'v Abjkly 123Bac'
font_color: Tuple[int, int, int] = (255, 255, 255)
background_color: Tuple[int, int, int] = (39, 48, 70)
# Act
folder_name = f'{Path(font_path).stem}_{font_size}'
output_path = Path(__file__).parent.parent / 'resources' / 'images' / folder_name
output_path.mkdir(parents=True, exist_ok=True)
for block_size in block_sizes:
for offset_y in offset_ys:
picture_parameters: PictureParameters = PictureParameters(
pattern=rf'{text}',
block_size=block_size,
randomize_pixelization_origin_x=False,
font=ImageFont.truetype(font_path, font_size),
offset_y=offset_y,
font_color=font_color,
background_color=background_color
)
_, _, pixelized_images, _ = create_training_data(n_img=1, picture_parameters=picture_parameters)
pixelized_images[0].image.save(output_path / f'{text}_blocksize-{block_size}_offset_y-{offset_y}.png')
```
#### File: test/HMM/test_clusterer.py
```python
from unittest import TestCase
from test.utils import demo_picture_parameters
from text_depixelizer.HMM.clusterer import KmeansClusterer
from text_depixelizer.training_pipeline.training_pipeline import create_training_data
class TestKmeansClusterer(TestCase):
def test_kmeans_fit(self):
# Arrange
_, _, _, windows = create_training_data(n_img=1, picture_parameters=demo_picture_parameters)
# Act
kmeans_clusterer: KmeansClusterer = KmeansClusterer(windows=windows[0], k=5)
# Assert
self.assertEqual(kmeans_clusterer.kmeans.n_clusters, 5)
```
#### File: test/HMM/test_depix_hmm.py
```python
import unittest
from pathlib import Path
from typing import List
import numpy as np
from PIL import Image, ImageFont
from resources.fonts import DemoFontPaths
from test.utils import demo_training_parameters, demo_picture_parameters
from text_depixelizer.HMM.depix_hmm import DepixHMM
from text_depixelizer.parameters import PictureParameters, TrainingParameters
from text_depixelizer.training_pipeline.windows import Window
class TestDepixHmm(unittest.TestCase):
demo_picture_parameters: PictureParameters = PictureParameters(
block_size=6,
pattern=r'\d{8,12}',
font=ImageFont.truetype(str(DemoFontPaths.arial), 50)
)
def test_train(self):
# Arrange
training_parameters: TrainingParameters = demo_training_parameters
depix_hmm: DepixHMM = DepixHMM(self.demo_picture_parameters, demo_training_parameters)
# Act
depix_hmm.train()
# Assert
self.assertEqual(depix_hmm.emission_probabilities.shape[1], training_parameters.n_clusters)
self.assertTrue(len(depix_hmm.states) > 5)
self.assertEqual(depix_hmm.emission_probabilities.shape, depix_hmm.log_emission_probabilities.shape)
def test_evaluate(self):
# Arrange
depix_hmm: DepixHMM = DepixHMM(self.demo_picture_parameters, demo_training_parameters)
depix_hmm.train()
# Act
accuracy, average_distance = depix_hmm.evaluate()
# Assert
self.assertGreaterEqual(accuracy, 0)
self.assertLessEqual(accuracy, 1)
self.assertIsInstance(accuracy, float)
self.assertIsInstance(average_distance, float)
def test_get_starting_probabilities(self):
# Arrange
windows: List[Window] = [
            Window(characters=('A', 'b'), values=np.array([1, 2, 3]), window_index=0, k=0),
            Window(characters=('b',), values=np.array([2, 3, 4]), window_index=1, k=0),
            Window(characters=('b',), values=np.array([3, 4, 5]), window_index=2, k=1),
            Window(characters=('b', 'c'), values=np.array([4, 5, 6]), window_index=3, k=1),
            Window(characters=('d',), values=np.array([5, 6, 7]), window_index=4, k=2),
            Window(characters=('X',), values=np.array([6, 7, 8]), window_index=0, k=3)
]
depix_hmm: DepixHMM = DepixHMM(demo_picture_parameters, demo_training_parameters)
# Act
depix_hmm.calculate_hmm_properties(windows_train=windows)
# Assert: Observations
self.assertCountEqual(depix_hmm.observations, (0, 1, 2, 3))
# Assert: States
self.assertCountEqual(depix_hmm.states, (('A', 'b'), ('b',), ('b', 'c'), ('d',), ('X',)))
# Assert: Starting probabilities
self.assertEqual(depix_hmm.starting_probabilities[depix_hmm.states.index(('A', 'b'))], 0.5)
self.assertEqual(depix_hmm.starting_probabilities[depix_hmm.states.index(('b',))], 0.0)
# Assert: Transition Probabilities
self.assertEqual(depix_hmm.transition_probabilities.shape, (len(depix_hmm.states), len(depix_hmm.states)))
self.assertNotEqual(depix_hmm.transition_probabilities[depix_hmm.states.index(('b',)), depix_hmm.states.index(('b',))], 0)
for s in depix_hmm.transition_probabilities.sum(axis=1):
self.assertAlmostEqual(s, 1.0, places=3)
# Assert Emission Probabilities
self.assertEqual(depix_hmm.emission_probabilities.shape, (len(depix_hmm.states), len(depix_hmm.observations)))
for s in depix_hmm.emission_probabilities.sum(axis=1):
self.assertAlmostEqual(s, 1.0, places=3)
def test_test_image(self):
# Arrange
img_path: Path = Path(__file__).parent.parent.parent / 'examples' / 'arial_50_blocksize-8' / 'pixelized_cropped.png'
picture_parameters: PictureParameters = PictureParameters(
pattern=r'\d{9}',
font=ImageFont.truetype(str(DemoFontPaths.arial), 50),
block_size=8,
window_size=4
)
training_parameters: TrainingParameters = TrainingParameters(
n_img_train=100,
n_img_test=1,
n_clusters=150
)
depix_hmm: DepixHMM = DepixHMM(picture_parameters, training_parameters)
depix_hmm.train()
# Act
with Image.open(img_path) as img:
reconstructed_string: str = depix_hmm.test_image(img)
# Assert
self.assertIsInstance(reconstructed_string, str)
```
#### File: test/HMM/test_hmm.py
```python
from typing import List
from unittest import TestCase
import numpy as np
from text_depixelizer.HMM.hmm import HMM
class TestHmm(TestCase):
def create_random_hmm(self, observations, possible_states, possible_observations) -> HMM:
"""
Returns a HMM object with random probabilities
"""
n_possible_states: int = len(possible_states)
n_possible_observations: int = len(possible_observations)
starting_probabilities: np.ndarray = np.random.rand(n_possible_states)
starting_probabilities = starting_probabilities / sum(starting_probabilities)
transition_probabilities: np.ndarray = np.random.rand(n_possible_states, n_possible_states)
transition_probabilities_normalized = transition_probabilities / np.sum(transition_probabilities, axis=1)[:, np.newaxis]
emission_probabilities: np.ndarray = np.random.rand(n_possible_states, n_possible_observations)
emission_probabilities_normalized = emission_probabilities / np.sum(emission_probabilities, axis=1)[:, np.newaxis]
hmm: HMM = HMM(
observations=observations,
states=possible_states,
starting_probabilities=starting_probabilities,
transition_probabilities=transition_probabilities_normalized,
emission_probabilities=emission_probabilities_normalized
)
return hmm
def test_viterbi(self):
# Arrange
hmm: HMM = HMM(
observations=[0, 1, 2],
states=[('A', 'b'), ('b', )],
starting_probabilities=np.array([0.7, 0.3]),
transition_probabilities=np.array([[0.9, 0.1], [0.1, 0.9]]),
emission_probabilities=np.array([[0.1, 0.4, 0.5], [0.3, 0.7, 0.0]])
)
sequence: List[int] = [2, 2, 2, 2, 2, 2, 2]
# Act
result = hmm.viterbi(sequence)
# Assert
self.assertEqual(len(result), len(sequence))
self.assertTrue(all([r in hmm.states for r in result]))
def test_viterbi_fail_for_numerical_underflow(self):
"""
When the observation sequence gets too long, the regular viterbi will fail due to numerical underflow
"""
# Parameters
n_possible_observations: int = 100
n_possible_states: int = 25
observation_length: int = 10000
# Arrange
possible_observations: List[int] = list(range(n_possible_observations))
possible_states: List[int] = list(range(n_possible_states))
observations: List[int] = np.random.choice(possible_observations, size=observation_length)
hmm: HMM = self.create_random_hmm(observations, possible_states, possible_observations)
# Act
result_viterbi = hmm.viterbi(observations)
result_log_viterbi: List[int] = hmm.log_viterbi(observations)
# Assert
self.assertNotEqual(result_viterbi, result_log_viterbi)
def test_compare_viterbi_and_log(self):
"""
Regular viterbi and log-viterbi should return the same values (for shorter sequences)
"""
np.random.seed(0)
# Set parameters
iterations: int = 50
n_possible_observations: int = 100
n_possible_states: int = 25
max_observation_length: int = 100
# Arrange (1)
possible_observations: List[int] = list(range(n_possible_observations))
possible_states: List[int] = list(range(n_possible_states))
for i in range(iterations):
# Arrange (2)
observation_length: int = np.random.randint(1, max_observation_length)
observations: List[int] = np.random.choice(possible_observations, size=observation_length)
hmm: HMM = self.create_random_hmm(observations, possible_states, possible_observations)
# Act
result_viterbi = hmm.viterbi(observations)
result_log_viterbi: List[int] = hmm.log_viterbi(observations)
# Assert
self.assertListEqual(result_viterbi, result_log_viterbi)
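# Note (added for clarity; not part of the original tests): the underflow exploited by
# test_viterbi_fail_for_numerical_underflow comes from multiplying thousands of
# probabilities < 1. A minimal sketch of the effect:
#
#   import math
#   0.5 ** 10000            # underflows to 0.0 in double precision
#   10000 * math.log(0.5)   # approx. -6931.47, still representable
#
# Summing log-probabilities, as log_viterbi presumably does, keeps the Viterbi
# recursion numerically stable for long observation sequences.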
```
#### File: test/training_pipeline/test_original_image.py
```python
from typing import Tuple, List
from unittest import TestCase, skip
from PIL import ImageFont, Image
from resources.fonts import DemoFontPaths
from text_depixelizer.training_pipeline.original_image import ImageCreationOptions, generate_image_from_text, \
OriginalImage, draw_character_bounding_boxes, generate_character_bounding_boxes, CharacterBoundingBox
class TestOriginalImage(TestCase):
default_font_size: int = 30
default_font_color: Tuple[int, int, int] = (255, 255, 255)
default_background_color: Tuple[int, int, int] = (0, 0, 0)
default_font: ImageFont = ImageFont.truetype(str(DemoFontPaths.arial), default_font_size)
default_padding: Tuple[int, int] = (30, 30)
def test_create_image(self):
# Arrange
options: ImageCreationOptions = ImageCreationOptions(
self.default_padding, self.default_font, self.default_font_color, self.default_background_color
)
text: str = '123456789'
# Act
original_image: OriginalImage = generate_image_from_text(text, options)
# Assert: Character bounding boxes are added
self.assertEqual(len(original_image.character_bounding_boxes), len(text))
def test_generate_character_bounding_boxes(self):
# Arrange
options: ImageCreationOptions = ImageCreationOptions(self.default_padding, self.default_font)
text: str = 'Asdf'
# Act
character_bounding_boxes: List[CharacterBoundingBox] = generate_character_bounding_boxes(text, options)
# Assert
self.assertEqual(len(character_bounding_boxes), len(text))
self.assertEqual(character_bounding_boxes[0].left, self.default_padding[0])
self.assertTrue(character_bounding_boxes[0].right > character_bounding_boxes[0].left)
self.assertTrue(character_bounding_boxes[0].top >= self.default_padding[1])
self.assertTrue(character_bounding_boxes[0].bottom <= self.default_padding[1] + self.default_font_size)
#@skip('Only needed for visualization')
def test_draw_character_bounding_boxes(self):
# Arrange
background_color: Tuple[int, int, int] = (255, 255, 255)
font_color: Tuple[int, int, int] = (150, 0, 0)
padding: Tuple[int, int] = (0, 0)
options: ImageCreationOptions = ImageCreationOptions(padding, self.default_font, font_color, background_color)
text: str = 'agagA'
original_image: OriginalImage = generate_image_from_text(text, options)
# Act
image_with_bounding_boxes: Image = draw_character_bounding_boxes(original_image)
# Assert
image_with_bounding_boxes.show()
pass
```
#### File: DepixHMM/text_depixelizer/depix_hmm.py
```python
import itertools
import logging
from pathlib import Path
from typing import Optional
from PIL import ImageFont, Image
from resources.fonts import DemoFontPaths
from text_depixelizer.HMM.depix_hmm import DepixHMM
from text_depixelizer.parameters import PictureParameters, TrainingParameters, LoggingParameters, \
PictureParametersGridSearch, TrainingParametersGridSearch
def init_logging(logging_parameters: LoggingParameters):
logging.basicConfig(level=logging_parameters.module_log_level)
time_logger: logging.Logger = logging.getLogger('time_logger')
time_logger.setLevel(logging_parameters.timer_log_level)
def depix_hmm(picture_parameters: PictureParameters,
training_parameters: TrainingParameters,
logging_parameters: LoggingParameters = None,
img_path: Path = None) -> Optional[str]:
if logging_parameters:
init_logging(logging_parameters)
# Train and evaluate the HMM
hmm: DepixHMM = DepixHMM(picture_parameters, training_parameters)
hmm.train()
accuracy, average_distance = hmm.evaluate()
logging.info(f'Accuracy: {accuracy}, Avg. Distance: {average_distance}')
# If a path to an image was given, analyze the image
if img_path:
with Image.open(img_path) as img:
reconstructed_string: str = hmm.test_image(img)
return reconstructed_string
return None
def depix_hmm_grid_search(picture_parameters_grid_search: PictureParametersGridSearch,
training_parameters_grid_search: TrainingParametersGridSearch,
logging_parameters: LoggingParameters = None,
img_path: Path = None) -> Optional[str]:
if logging_parameters:
init_logging(logging_parameters)
best_hmm: Optional[DepixHMM] = None
best_accuracy: float = 0.0
best_avg_distance: float = 1.0
# Iterate through grid and find best
for window_size, n_clusters, n_img_train, offset_y in itertools.product(
*[picture_parameters_grid_search.window_size,
training_parameters_grid_search.n_clusters,
training_parameters_grid_search.n_img_train,
picture_parameters_grid_search.offset_y]):
picture_parameters: PictureParameters = PictureParameters(
pattern=picture_parameters_grid_search.pattern,
font=picture_parameters_grid_search.font,
block_size=picture_parameters_grid_search.block_size,
window_size=window_size,
offset_y=offset_y
)
training_parameters: TrainingParameters = TrainingParameters(
n_img_test=training_parameters_grid_search.n_img_test,
n_img_train=n_img_train,
n_clusters=n_clusters
)
hmm: DepixHMM = DepixHMM(picture_parameters, training_parameters)
hmm.train()
accuracy, average_distance = hmm.evaluate()
logging.info(f'Window Size: {window_size}, Clusters: {n_clusters}, Training Images: {n_img_train}, Offset Y: {offset_y}')
logging.info(f'Accuracy: {accuracy}, Avg. Distance: {average_distance} \n')
if img_path:
with Image.open(img_path) as img:
reconstructed_string: str = hmm.test_image(img)
logging.warning(f'Reconstructed string: {reconstructed_string}')
if accuracy > best_accuracy:
best_hmm = hmm
best_accuracy = accuracy
best_avg_distance = average_distance
# Finalize
logging.warning(f'Found HMM with accuracy {best_accuracy} and average distance {best_avg_distance}')
logging.warning(f'Associated parameters: ')
logging.warning(f' Window Size: {best_hmm.picture_parameters.window_size}')
logging.warning(f' Clusters: {best_hmm.training_parameters.n_clusters}')
logging.warning(f' Training Images: {best_hmm.training_parameters.n_img_train}')
# If a path to an image was given, analyze the image
if img_path:
with Image.open(img_path) as img:
reconstructed_string: str = best_hmm.test_image(img)
return reconstructed_string
return None
if __name__ == '__main__':
image_path: Path = Path(__file__).parent.parent / 'resources' / 'images' / 'arial_50' / '123456789_blocksize-6.PNG'
picture_parameters: PictureParameters = PictureParameters(
pattern=r'\d{8,12}',
font=ImageFont.truetype(str(DemoFontPaths.arial), 50),
block_size=6
)
training_parameters: TrainingParameters = TrainingParameters(
n_img_train=1000,
n_img_test=100,
n_clusters=300
)
depix_hmm(picture_parameters=picture_parameters, training_parameters=training_parameters, img_path=image_path)
```
#### File: text_depixelizer/HMM/hmm_result_reconstructor.py
```python
from typing import List, Tuple
from PIL import ImageFont
def reconstruct_string_from_window_characters(window_characters: List[Tuple[str]], block_size: int, font: ImageFont) -> str:
"""
Reconstruct the string from the HMM results, e.g.
[('a', 'b'), ('b', 'c')] -> 'abc'
"""
reconstructed_result: List[str] = []
estimated_positions: List[Tuple[int, int]] = []
for index, characters_in_one_window in enumerate(window_characters):
block_start_position: int = index * block_size
possible_overlap_area = [
char
for char, pos
in zip(reconstructed_result, estimated_positions)
if pos[1] >= (block_start_position - font.getsize(characters_in_one_window[0])[0])]
overlap: int = get_overlap(possible_overlap_area, characters_in_one_window)
offset: int = 0
for i in range(overlap, len(list(characters_in_one_window))):
character_to_be_added = characters_in_one_window[i]
estimated_start: int = block_start_position + offset
estimated_end: int = block_start_position + font.getsize(character_to_be_added)[0] + offset
estimated_positions.append((estimated_start, estimated_end))
reconstructed_result.append(characters_in_one_window[i])
offset = offset + font.getsize(character_to_be_added)[0]
reconstructed_string: str = ''.join(reconstructed_result)
return reconstructed_string
def get_overlap(reconstructed_data: List[str], new_characters: Tuple[str, ...]) -> int:
largest_overlap = 0
for possible_overlap in range(1, len(new_characters) + 1):
if reconstructed_data[-possible_overlap:] == list(new_characters)[:possible_overlap]:
largest_overlap = possible_overlap
return largest_overlap
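# Illustrative example (added for clarity): get_overlap(['a', 'b'], ('b', 'c')) returns 1,
# so only 'c' from the new window is appended and [('a', 'b'), ('b', 'c')] reconstructs
# to 'abc', matching the docstring of reconstruct_string_from_window_characters above.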
def string_similarity(original_string: str, recovered_string: str) -> float:
"""
Modified edit distance, normalizing the Levenshtein distance between 0 and 1,
where 1 indicates a perfect match of the recovered string to the original string
"""
return 1 - levenshteinDistance(original_string, recovered_string)/len(original_string)
def levenshteinDistance(s1: str, s2: str) -> int:
"""
https://stackoverflow.com/questions/2460177/edit-distance-in-python
"""
if len(s1) > len(s2):
s1, s2 = s2, s1
distances = range(len(s1) + 1)
for i2, c2 in enumerate(s2):
distances_ = [i2+1]
for i1, c1 in enumerate(s1):
if c1 == c2:
distances_.append(distances[i1])
else:
distances_.append(1 + min((distances[i1], distances[i1 + 1], distances_[-1])))
distances = distances_
return distances[-1]
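# Quick sanity checks (illustrative only):
#   levenshteinDistance("kitten", "sitting") == 3
#   string_similarity("12345", "12395") == 1 - 1/5 == 0.8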
```
#### File: text_depixelizer/training_pipeline/text_generator.py
```python
import random
import string
from abc import ABC, abstractmethod
import rstr
class TextGenerator(ABC):
@abstractmethod
def generate_text(self) -> str:
pass
class RegexTextGenerator(TextGenerator):
def __init__(self, pattern: str):
self.pattern = pattern
def generate_text(self) -> str:
return rstr.xeger(self.pattern)
class NumberTextGenerator(TextGenerator):
def __init__(self, text_length: int):
self.text_length = text_length
def generate_text(self) -> str:
digits = string.digits
return ''.join(random.choice(digits) for i in range(self.text_length))
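# Illustrative usage (outputs are random; the example strings below are made up):
#   RegexTextGenerator(r'\d{8,12}').generate_text()      # e.g. '073418829'
#   NumberTextGenerator(text_length=5).generate_text()   # e.g. '90417'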
``` |
{
"source": "jonasschnelli/gui",
"score": 2
} |
#### File: functional/test_framework/wallet.py
```python
from decimal import Decimal
from test_framework.address import ADDRESS_BCRT1_P2WSH_OP_TRUE
from test_framework.messages import (
COIN,
COutPoint,
CTransaction,
CTxIn,
CTxInWitness,
CTxOut,
)
from test_framework.script import (
CScript,
OP_TRUE,
)
from test_framework.util import (
assert_equal,
hex_str_to_bytes,
satoshi_round,
)
class MiniWallet:
def __init__(self, test_node):
self._test_node = test_node
self._utxos = []
self._address = ADDRESS_BCRT1_P2WSH_OP_TRUE
self._scriptPubKey = hex_str_to_bytes(self._test_node.validateaddress(self._address)['scriptPubKey'])
def generate(self, num_blocks):
"""Generate blocks with coinbase outputs to the internal address, and append the outputs to the internal list"""
blocks = self._test_node.generatetoaddress(num_blocks, self._address)
for b in blocks:
cb_tx = self._test_node.getblock(blockhash=b, verbosity=2)['tx'][0]
self._utxos.append({'txid': cb_tx['txid'], 'vout': 0, 'value': cb_tx['vout'][0]['value']})
return blocks
def send_self_transfer(self, *, fee_rate, from_node):
"""Create and send a tx with the specified fee_rate. Fee may be exact or at most one satoshi higher than needed."""
        self._utxos = sorted(self._utxos, key=lambda k: k['value'])  # ascending, so pop() below returns the largest
largest_utxo = self._utxos.pop() # Pick the largest utxo and hope it covers the fee
vsize = Decimal(96)
send_value = satoshi_round(largest_utxo['value'] - fee_rate * (vsize / 1000))
fee = largest_utxo['value'] - send_value
assert (send_value > 0)
tx = CTransaction()
tx.vin = [CTxIn(COutPoint(int(largest_utxo['txid'], 16), largest_utxo['vout']))]
tx.vout = [CTxOut(int(send_value * COIN), self._scriptPubKey)]
tx.wit.vtxinwit = [CTxInWitness()]
tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([OP_TRUE])]
tx_hex = tx.serialize().hex()
txid = from_node.sendrawtransaction(tx_hex)
self._utxos.append({'txid': txid, 'vout': 0, 'value': send_value})
tx_info = from_node.getmempoolentry(txid)
assert_equal(tx_info['vsize'], vsize)
assert_equal(tx_info['fee'], fee)
return {'txid': txid, 'wtxid': tx_info['wtxid'], 'hex': tx_hex}
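# Illustrative usage from inside a functional test (a sketch; `self.nodes[0]` and the
# surrounding test framework setup are assumed, as in other functional tests):
#
#   wallet = MiniWallet(self.nodes[0])
#   wallet.generate(101)  # mine blocks so the first coinbase output is mature
#   tx = wallet.send_self_transfer(fee_rate=Decimal('0.003'), from_node=self.nodes[0])
#   assert tx['txid'] in self.nodes[0].getrawmempool()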
``` |
{
"source": "jonas-scholz123/boomer-humour-exhumer",
"score": 3
} |
#### File: backend/src/dataloader.py
```python
from __future__ import print_function, division
import os
import pandas as pd
from skimage import io, transform
from skimage.color import rgba2rgb, gray2rgb
from nltk import word_tokenize
import numpy as np
import pickle
import matplotlib.pyplot as plt
import torch
from torch.utils.data import Dataset, DataLoader
from torch.nn.utils.rnn import pad_sequence, pack_padded_sequence
from torchvision import transforms, utils
from dataOrganiser import MetaData
from utils import imshow
import config
def collate_fn(batch):
'''
    Overrides the default collate_fn to include the padding step.
    Pads batches of variable-length token sequences.
'''
x_image, x_text, labels = zip(*batch)
x_image = torch.stack(x_image)
labels = torch.tensor(labels)
lengths = torch.tensor([seq.shape[0] for seq in x_text])
# pad sequences
x_text = pad_sequence(x_text, batch_first=True)
# return lengths for later packing of padded sequences
return x_image, x_text, lengths, labels
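# Shape sketch (illustrative): for a batch of 4 samples with 160x160 RGB images and token
# sequences of lengths [5, 3, 7, 2], collate_fn returns
#   x_image: torch.Size([4, 3, 160, 160])
#   x_text:  torch.Size([4, 7])   (zero-padded to the longest sequence)
#   lengths: tensor([5, 3, 7, 2])
#   labels:  torch.Size([4])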
class BoomerDataset(Dataset):
'''
Custom pyTorch dataset, loads the metadata file and transforms
images into the right format
'''
def __init__(self, meta_frame, transform, is_training):
'''
PARAMS:
pd.DataFrame meta_frame: metadata dataframe
torchvision.transform: transforms images into desired shape
bool is_training: determines whether it loads training/testing data
'''
self.transform = transform
# dataframe of metadata
if is_training is None:
self.meta_frame = meta_frame
else:
training_entries = meta_frame["is_training"] == is_training
self.meta_frame = meta_frame[training_entries].reset_index()
self.len = self.meta_frame.shape[0]
self.word2id_path = config.paths["word2id"]
with open(self.word2id_path, "rb") as f:
self.word2id = pickle.load(f)
def process_image(self, image):
'''
Apply self.transform to image to normalise, scale etc
'''
# turn RGBA to RGB
if image.shape[-1] == 4:
image = rgba2rgb(image)
# turn greyscale into RGB
elif len(image.shape) == 2:
image = gray2rgb(image)
return self.transform(image).float()
def __len__(self):
return self.len
def __getitem__(self, idx):
'''
Retrieves fpath from metadata frame, loads and transforms image
'''
if torch.is_tensor(idx):
idx = idx.tolist()
fpath = self.meta_frame.loc[idx, "fpath"]
label = self.meta_frame.loc[idx, "is_boomer"].astype("int")
text = self.meta_frame.loc[idx, "text"]
if text:
text = text.lower()
text_ids = [self.word2id[tok] for tok in word_tokenize(text)]
# make padding of 0s
else:
# indicates no text
text_ids = [0]
# else:
# print("WARNING: None found in df, should have NO_TEXT_DETECTED!")
# padding = [0 for _ in range(config.params["sentence_length"] - len(text_ids))]
# text_ids = torch.IntTensor(text_ids + padding)
text_ids = torch.IntTensor(text_ids)
image = io.imread(fpath)
image = self.process_image(image)
return image, text_ids, label
class BoomerDatasetContainer(BoomerDataset):
'''
Wrapper that specifies all the details for
boomer memes in particular
'''
def __init__(self, is_training=None):
transform = transforms.Compose([
transforms.ToTensor(),
transforms.Resize((160, 160)),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])
meta_frame = pd.read_pickle(config.paths["metadata"])
super().__init__(meta_frame, transform, is_training)
```
#### File: backend/src/exhume_app.py
```python
import os
from flask import Flask, flash, request, redirect, url_for, jsonify
from flask_cors import CORS, cross_origin
# from flask import Flask, request
from werkzeug.utils import secure_filename
from exhume import ExhumerContainer
UPLOAD_FOLDER = '../data/user_uploads/'
ALLOWED_EXTENSIONS = {'pdf', 'png', 'jpg', 'jpeg', 'gif'}
base_html = '''
<!doctype html>
<title>Upload new File</title>
<h1>Upload new File</h1>
<form method=post enctype=multipart/form-data>
<input type=file name=image>
<input type=submit value=Upload>
</form>
'''
exhumer = ExhumerContainer()
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
app.secret_key = "super secret key"
cors = CORS(app)
app.config['CORS_HEADERS'] = 'Content-Type'
app.config['CORS_HEADERS'] = 'Authorization'
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
@app.route('/api/exhumepath', methods=["POST"])
def exhume_image_at_path():
im_path = request.form["im_path"]
return str(exhumer.exhume(im_path))
@app.route('/api/exhume', methods=['POST'])
@cross_origin()
def exhume_image():
if 'image' not in request.files:
# 415 => Unsupported Media Type
return jsonify({"error": "no image found"}), 415
file = request.files['image']
# if user does not select file, browser also
# submit an empty part without filename
if file.filename == '':
return jsonify({"error": "no filename"}), 415
if file and allowed_file(file.filename):
filename = secure_filename(file.filename)
fpath = os.path.join(app.config['UPLOAD_FOLDER'], filename)
file.save(fpath)
prob, engine, text = exhumer.exhume_with_meta(fpath)
prob = round(prob * 100, 2)
return jsonify({
'boomerness': prob,
'ocr_engine': engine.__class__.__name__,
'text': text.replace('\n', " "),
}), 200
if __name__ == '__main__':
app.run(host='0.0.0.0', debug=True)
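# Illustrative request against the local dev server (the form field must be named 'image';
# Flask's default port 5000 is assumed):
#   curl -F 'image=@meme.png' http://localhost:5000/api/exhume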
```
#### File: backend/src/model.py
```python
import os
import pickle
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
import config
from utils import ConceptNetDict
class CNN(nn.Module):
'''
CNN module processes images. Output is fed into main model.
'''
def __init__(self):
# TODO: pass hyperparameters
#cnn params
kernel_size = 5
pool_size = 2
#cnn channels
in_channels = 3
out_channels_1 = 6
out_channels_2 = 16
#linear params
in_lin1 = 21904 # = 148 ** 2 taken from model.summary()
out_lin1 = 120
super().__init__()
# convolutional layers encode pixels into image features
# 3 in channels, 6 out channels, kernel_size=5
self.conv1 = nn.Conv2d(in_channels, out_channels_1, kernel_size)
# 6 in channels, 16 out channels, kernel_size=5
self.conv2 = nn.Conv2d(out_channels_1, out_channels_2, kernel_size)
# pool
self.pool = nn.MaxPool2d(pool_size, pool_size)
# flatten
self.flat = nn.Flatten()
# fully connected layers make the actual classification
# 148 taken from summary, as there exists no utility
# function for calculating this magic number
self.fc1 = nn.Linear(in_lin1, out_lin1)
def forward(self, x):
'''
INPUTS:
            tensor x: (nr_samples, nr_channels (3), nr_x_pixels (160), nr_y_pixels (160))
        RETURNS:
            image feature vector of shape (nr_samples, 120)
'''
# Max pooling over 2x2 window
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = self.flat(x)
return F.relu(self.fc1(x))
class RNN(nn.Module):
'''
RNN using embeddings to encode wordids to vectors,
then uses a GRU layer to compute outputs of the
sentence. This is used for incorporating text in the
model.
'''
def __init__(self, rebuild_embeddings=False):
self.embedding_matrix_path = config.paths["embedding_matrix"]
self.embed_dict = ConceptNetDict()
self.embed_dim = 300
self.word2id = self._load_word2id()
embedding_matrix = self._load_embedding_matrix(rebuild_embeddings)
super().__init__()
self.embed_layer = nn.Embedding.from_pretrained(embedding_matrix, padding_idx=0)
# input_size = config.params["sentence_length"]
# self.gru = nn.GRU(self.embed_dim, 256, batch_first=True)
self.gru = nn.GRU(self.embed_dim, 256, batch_first=True)
def _load_word2id(self):
path = config.paths["word2id"]
with open(path, "rb") as f:
return pickle.load(f)
def _load_embedding_matrix(self, rebuild=False):
if os.path.exists(self.embedding_matrix_path) and not rebuild:
with open(self.embedding_matrix_path, "rb") as f:
return pickle.load(f)
else:
return self._make_embedding_matrix()
def _make_embedding_matrix(self):
print("making embedding matrix...")
embedding_matrix = np.zeros((len(self.word2id), self.embed_dim))
for word, idx in self.word2id.items():
if word in self.embed_dict:
embedding_matrix[idx] = self.embed_dict[word]
embedding_matrix = torch.FloatTensor(embedding_matrix)
with open(self.embedding_matrix_path, "wb") as f:
pickle.dump(embedding_matrix, f)
print("done")
return embedding_matrix
def forward(self, text_data, text_lengths):
if len(text_data[0]) == 0:
print("No text detected. Any possible text is not taken into account.")
# 256 = final number of outputs
return torch.zeros(1, 256)
x = self.embed_layer(text_data)
x = pack_padded_sequence(x, text_lengths,
batch_first=True, enforce_sorted=False)
#NOTE: last output not 100% sure is really the last output
sequence_outputs, last_output = self.gru(x)
# x, output_lengths = pad_packed_sequence(x, batch_first=True)
        return last_output.view(last_output.shape[1:])  # flatten the leading (num_layers) dim
class Model(nn.Module):
def __init__(self, rebuild_embeddings=False):
#cnn params
kernel_size = 5
pool_size = 2
#cnn channels
in_channels = 3
out_channels_1 = 6
out_channels_2 = 16
#linear params
in_lin1 = 376 # taken from model.summary()
out_lin1 = 120
out_lin2 = 84
final_out = 1 # boomer probability
super(Model, self).__init__()
self.rnn = RNN(rebuild_embeddings=rebuild_embeddings)
self.cnn = CNN()
# convolutional layers encode pixels into image features
# 3 in channels, 6 out channels, kernel_size=5
self.conv1 = nn.Conv2d(in_channels, out_channels_1, kernel_size)
# 6 in channels, 16 out channels, kernel_size=5
self.conv2 = nn.Conv2d(out_channels_1, out_channels_2, kernel_size)
# pool
self.pool = nn.MaxPool2d(pool_size, pool_size)
# flatten
self.flat = nn.Flatten()
# fully connected layers make the actual classification
# 376 taken from summary, as there exists no utility
# function for calculating this magic number
self.fc1 = nn.Linear(in_lin1, out_lin1)
self.fc2 = nn.Linear(out_lin1, out_lin2)
# output: boomer or not boomer
self.fc3 = nn.Linear(out_lin2, final_out)
def forward(self, x_image, x_text, text_lengths):
'''
INPUTS:
            tensor x_image: (nr_samples, nr_channels (3), nr_x_pixels (160), nr_y_pixels (160))
            tensor x_text: (nr_samples, max_sequence_length) padded token ids
            tensor text_lengths: (nr_samples,) original sequence lengths
        RETURNS:
            boomer score (one raw logit per sample)
'''
h_image = self.cnn(x_image)
h_text = self.rnn(x_text, text_lengths)
h = torch.cat((h_image, h_text), dim=1)
#TODO: figure out correct dimensionality of fc1
h = F.relu(self.fc1(h))
h = F.relu(self.fc2(h))
h = self.fc3(h)
return h.view(-1)
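# Illustrative tensor flow through Model.forward (a sketch, assuming the 160x160 inputs
# produced by the dataloader):
#   x_image: (batch, 3, 160, 160) -> CNN -> (batch, 120)
#   x_text:  (batch, max_len) ints -> RNN -> (batch, 256)
#   concat  -> (batch, 376) -> fc1 -> fc2 -> fc3 -> (batch,) raw boomer scores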
``` |
{
"source": "jonas-scholz123/do-my-taxes",
"score": 3
} |
#### File: do-my-taxes/backend/transactions.py
```python
import dataclasses
import sqlite3
import numpy as np
import pandas as pd
from datetime import datetime
from dataclasses import dataclass, fields, astuple, asdict
from dacite import from_dict
import config
from utils import DBHandler
import yfinance as yf
from typing import Optional
@dataclass
class Transaction:
depot: str
name: str
ticker: str
category: str
quantity: float
account_currency: str
investment_currency: str
buy_price: float
buy_date: str
sell_price: Optional[float] = None
sell_date: Optional[str] = None
def __post_init__(self):
self.currency_codes = set(pd.read_csv(config.paths["currency_codes"])["id"])
self.errors = {}
self.validate()
def validate(self):
valid = True
valid = self.date_is_valid(self.buy_date)
valid = valid and self.ticker_is_valid()
if self.sell_date:
valid = valid and self.date_is_valid(self.sell_date)
valid = self.currency_is_valid(self.investment_currency, "investment_currency") and valid
valid = self.currency_is_valid(self.account_currency, "account_currency") and valid
self.valid = valid
def date_is_valid(self, date):
try:
datetime.strptime(date, "%Y-%m-%d")
return True
except:
print("Date is invalid: ", date)
self.errors["date"] = "invalid"
return False
def ticker_is_valid(self):
invalid = yf.download(self.ticker).empty
if invalid:
self.errors["ticker"] = "Not a valid ticker. Please only use Yahoo finance tickers."
return not invalid
def currency_is_valid(self, currency, name):
valid = currency in self.currency_codes
if not valid:
self.errors[name] = "Invalid currency."
return valid
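# Illustrative construction (the field values are made up; note that validation calls
# yfinance over the network to verify the ticker):
#
#   t = Transaction(depot="demo", name="Apple", ticker="AAPL", category="stock",
#                   quantity=2.0, account_currency="EUR", investment_currency="USD",
#                   buy_price=150.0, buy_date="2021-01-15")
#   t.valid, t.errors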
class TransactionHandler(DBHandler):
def __init__(self):
super().__init__(table_name="transactions")
def create_table(self):
create_table_sql = ''' CREATE TABLE IF NOT EXISTS transactions (
id integer PRIMARY KEY,
depot text NOT NULL,
name text NOT NULL,
ticker text NOT NULL,
category text NOT NULL,
quantity float NOT NULL,
account_currency text NOT NULL,
investment_currency text NOT NULL,
buy_price float NOT NULL,
sell_price float,
buy_date text NOT NULL,
sell_date text
)'''
self.cursor.execute(create_table_sql)
def reset_table(self):
self.drop_table()
self.create_table()
def insert(self, transaction: Transaction):
if not transaction.valid:
print("Error: tried to insert invalid transaction")
return
self.insert_valid_transaction(transaction)
def insert_valid_transaction(self, transaction: Transaction):
insert_sql = ''' INSERT INTO transactions (depot, name, ticker, category,
quantity, account_currency, investment_currency,
buy_price, buy_date, sell_price, sell_date)
VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)'''
self.cursor.execute(insert_sql, astuple(transaction))
self.conn.commit()
def insert_csv_transactions(self, path):
# NaNs are turned into "nan" instead of None in transaction parsing
# -> change all nans to Nones
df = pd.read_csv(path).replace({np.nan: None})
for record in df.to_dict(orient="records"):
self.insert(from_dict(data_class=Transaction, data=record))
# df.apply(self.insert_csv_row, axis=1)
def insert_csv_row(self, row):
transaction = Transaction(*row)
self.insert(transaction)
def fetch_by_id(self, transaction_id):
query = f"SELECT * FROM transactions WHERE id={transaction_id}"
return self.query_to_pandas(query)
def fetch_open_transactions(self, after_buy_date="0000-00-00", before_buy_date="9999-99-99"):
query = f'''SELECT * FROM transactions
WHERE sell_date IS NULL
AND buy_date > "{after_buy_date}"
AND buy_date < "{before_buy_date}"
'''
return self.query_to_pandas(query).drop(["sell_date", "sell_price"], axis=1)
def fetch_all_transactions(self):
query = "SELECT * FROM transactions"
return self.query_to_pandas(query)
def fetch_closed_transactions(self, after_buy_date="0000-00-00", before_buy_date="9999-99-99"):
query = f'''SELECT * FROM transactions
WHERE sell_date IS NOT NULL
AND buy_date > "{after_buy_date}"
AND buy_date < "{before_buy_date}"
'''
return self.query_to_pandas(query)
def fetch_open_between(self, start, end):
query = f'''SELECT * FROM transactions
WHERE buy_date < "{end}"
AND (sell_date > "{start}"
OR sell_date IS NULL)
'''
return self.query_to_pandas(query)
def fetch_earliest_transaction_date(self):
query = "SELECT MIN(buy_date) FROM transactions"
return self.cursor.execute(query).fetchall()[0][0]
def backup(self, path):
return self.query_to_pandas("SELECT * FROM transactions").to_csv(path, index=False)
def edit_transaction(self, transaction_id, edited_transaction):
transaction_dict = asdict(edited_transaction)
keys = tuple(transaction_dict.keys())
values = tuple(transaction_dict.values())
set_string = ", ".join([key + " = ?" for key in keys])
query = f'''UPDATE transactions
SET {set_string}
WHERE id = {transaction_id}
'''
self.cursor.execute(query, values)
self.conn.commit()
def delete(self, transaction_id):
query = f"DELETE FROM transactions WHERE id = {transaction_id}"
self.cursor.execute(query)
self.conn.commit()
if __name__ == "__main__":
handler = TransactionHandler()
handler.reset_table()
handler.insert_csv_transactions(config.paths["init_transactions"])
handler.print_all()
df = handler.all_to_pandas()
# %%
``` |
{
"source": "jonassibbesen/hamster-project-scripts",
"score": 3
} |
#### File: hamster-project-scripts/python/compare_hst_sequences.py
```python
import sys
import os
import subprocess
import random
from Bio import SeqIO
from utils import *
def parse_transcripts(filename):
transcripts = {}
for record in SeqIO.parse(filename, "fasta"):
transcript_id = record.id.split("_")[0]
if transcript_id in transcripts:
transcripts[transcript_id].append((record.id, str(record.seq)))
else:
transcripts[transcript_id] = [(record.id, str(record.seq))]
return transcripts
printScriptHeader()
if len(sys.argv) != 4:
print("Usage: python compare_hst_sequences.py <input_name_1> <input_name_2> <output_name>\n")
sys.exit(1)
hts_seqs_1 = parse_transcripts(sys.argv[1])
print("Parsed " + str(len(hts_seqs_1)) + " transcripts")
hts_seqs_2 = parse_transcripts(sys.argv[2])
print("Parsed " + str(len(hts_seqs_2)) + " transcripts")
tsv_out_file = open(sys.argv[3], "w")
tsv_out_file.write("Name1\tName2\n")
for name_1, seqs_1 in hts_seqs_1.items():
if name_1 in hts_seqs_2:
for hts_2 in hts_seqs_2[name_1]:
for hts_1 in seqs_1:
if hts_1[1] == hts_2[1]:
tsv_out_file.write(hts_1[0] + "\t" + hts_2[0] + "\n")
tsv_out_file.close()
print("Done")
```
#### File: hamster-project-scripts/python/convert_vg_sim_to_rpvg.py
```python
import sys
import os
import subprocess
import pickle
import gzip
from Bio.Seq import Seq
from Bio import SeqIO
from utils import *
def parse_path_counts(filename, is_paired):
path_counts = {}
vg_sim_file = gzip.open(filename, "rb")
for line in vg_sim_file:
line_split = line.strip().split("\t")
assert(len(line_split) == 4)
if line_split[0] == "read":
continue
if line_split[1] in path_counts:
if is_paired:
path_counts[line_split[1]] += 0.5
else:
path_counts[line_split[1]] += 1
else:
if is_paired:
path_counts[line_split[1]] = 0.5
else:
path_counts[line_split[1]] = 1
vg_sim_file.close()
return path_counts
def parse_isoforms_lengths(filename):
isoform_lengths = {}
isoforms_file = open(filename, "rb")
for line in isoforms_file:
line_split = line.strip().split("\t")
assert(len(line_split) == 8)
if line_split[0] == "transcript_id":
continue
assert(not line_split[0] in isoform_lengths)
isoform_lengths[line_split[0]] = [int(line_split[2]), float(line_split[3])]
isoforms_file.close()
return isoform_lengths
printScriptHeader()
if len(sys.argv) != 5:
print("Usage: python convert_vg_sim_to_rpvg.py <vg_sim_gz_name> <is_paired (Y|N)> <isoform_length_name> <output_file_name>\n")
sys.exit(1)
assert(sys.argv[2] == "Y" or sys.argv[2] == "N")
path_counts = parse_path_counts(sys.argv[1], sys.argv[2] == "Y")
print(len(path_counts))
isoform_lengths = parse_isoforms_lengths(sys.argv[3])
total_transcript_count = 0
for path, count in path_counts.items():
total_transcript_count += (count / isoform_lengths[path][1])
print(total_transcript_count)
out_file = open(sys.argv[4], "w")
out_file.write("Name\tClusterID\tLength\tEffectiveLength\tHaplotypeProbability\tReadCount\tTPM\n")
for path, count in path_counts.items():
length = isoform_lengths[path]
tpm = (count / length[1]) * 10**6 / total_transcript_count
out_file.write(path + "\t0\t" + str(length[0]) + "\t" + str(length[1]) + "\t1\t" + str(count) + "\t" + str(tpm) + "\n")
out_file.close()
print("Done")
```
#### File: python/imprinting_analysis/imprinting_analysis.py
```python
import sys
import os
import numpy as np
import pandas as pd
import seaborn as sns
import tempfile
import gc
import re
import collections
import gzip
import bisect
import pickle
import itertools
import math
sns.set_style('whitegrid')
# make 2 maps:
# - from transcript ID to row numbers of corresponding haplotype specific transcripts
# - from cluster ID to corresponding transcript IDs
def row_dicts(tab):
tx_rows = {}
cluster_txs = {}
for i in range(tab.shape[0]):
tx_id = tab.Name.values[i].split("_")[0]
clust_id = tab.ClusterID.values[i]
if tx_id not in tx_rows:
tx_rows[tx_id] = []
tx_rows[tx_id].append(i)
if clust_id not in cluster_txs:
cluster_txs[clust_id] = set()
cluster_txs[clust_id].add(tx_id)
for clust_id in cluster_txs:
cluster_txs[clust_id] = sorted(cluster_txs[clust_id])
return tx_rows, cluster_txs
def gene_to_row_dict(tx_rows):
gene_to_tx_rows = {}
for tx_id in tx_id_to_gene:
gene = tx_id_to_gene[tx_id]
if gene not in gene_to_tx_rows:
gene_to_tx_rows[gene] = []
if tx_id in tx_rows:
gene_to_tx_rows[gene].extend(tx_rows[tx_id])
return gene_to_tx_rows
def parse_attr(attr):
attrs = {}
for t in attr.split(";"):
tokens = t.strip().replace("\"", "").split()
if len(tokens) == 0:
continue
tag, val = tokens
attrs[tag] = val
return attrs
def get_haplotypes(chrom, start, end, sample, genotypes):
chrom_start = bisect.bisect_left(genotypes.CHROM.values, chrom)
chrom_end = bisect.bisect_right(genotypes.CHROM.values, chrom)
region_start = bisect.bisect_left(genotypes.POS.values, start, chrom_start, chrom_end)
region_end = bisect.bisect_right(genotypes.POS.values, end, chrom_start, chrom_end)
blocks = []
for i in range(region_start, region_end):
genotype = genotypes[sample].values[i]
phased = "|" in genotype
if len(blocks) == 0 or not phased:
blocks.append({})
        al1, al2 = re.split("[\\|/]", genotype)  # VCF separators: '|' (phased), '/' (unphased)
formatted_alleles = []
for al in (al1, al2):
fal = ""
if al.isdigit():
j = int(al)
if j == 0:
fal = genotypes.REF.values[i]
else:
fal = genotypes.ALT.values[i].split(",")[j - 1]
formatted_alleles.append(fal)
blocks[-1][genotypes.POS.values[i]] = tuple(formatted_alleles)
return blocks
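# Illustrative behaviour (assuming a genotypes table row with REF='A', ALT='G' at a position):
# a phased genotype '0|1' adds {pos: ('A', 'G')} to the current phase block, while an
# unphased genotype starts a new phase block before being recorded.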
if __name__ == "__main__":
assert(len(sys.argv) == 9)
# gencode annotations
gtf = sys.argv[1]
# list of genes we're interested in
focal_genes = sys.argv[2]
# structured string in format SAMPLE1:rpvg_table1,SAMPLE2:rpvg_table2
tab_string = sys.argv[3]
# structured string in format SAMPLE1:sorted_gibbs_table1,SAMPLE2:sorted_gibbs_table2
gibbs_string = sys.argv[4]
    # file containing the list of HST-to-variant files
hst_variant_list = sys.argv[5]
# file containing list of VCFs (probably reduced to these samples)
vcf_list = sys.argv[6]
# variants for the focal genes in one table
variant_table = sys.argv[7]
# directory for output
out_dir = sys.argv[8]
tabs = []
samples = []
for tab_sample in tab_string.split(","):
assert(":" in tab_sample)
samp, tab = tab_sample.split(":")
tabs.append(tab)
samples.append(samp)
gibbs_tabs = []
gibbs_samples = []
for tab_sample in gibbs_string.split(","):
assert(":" in tab_sample)
samp, tab = tab_sample.split(":")
gibbs_tabs.append(tab)
gibbs_samples.append(samp)
assert(samples == gibbs_samples)
assert(os.path.isdir(out_dir))
assert(os.path.exists(gtf))
assert(os.path.exists(focal_genes))
assert(os.path.exists(vcf_list))
for tab in tabs:
assert(os.path.exists(tab))
for tab in gibbs_tabs:
assert(os.path.exists(tab))
vcfs = []
with open(vcf_list) as f:
for line in f:
if type(line) == bytes:
line = line.decode("utf-8")
vcf = line.strip()
assert(os.path.exists(vcf))
vcfs.append(vcf)
# make a look table for the file name by chromosome
hst_variant_files = {}
with open(hst_variant_list) as f:
for line in f:
if type(line) == bytes:
line = line.decode("utf-8")
fname = line.strip()
with open(fname) as hst_f:
#skip the header
next(hst_f)
hst_line = next(hst_f)
if type(hst_line) == bytes:
hst_line = hst_line.decode("utf-8")
hst_variant_files[hst_line.split()[0]] = fname
tmpdir = tempfile.TemporaryDirectory()
tmppref = tmpdir.name
###############
focal_genes_set = set()
for line in open(focal_genes):
if type(line) == bytes:
line = line.decode("utf-8")
focal_genes_set.add(line.strip().split()[0])
###############
# load the GTF
gencode = pd.read_csv(gtf, sep = "\t", header = None, skiprows = list(range(5)))
gencode.columns = ["chr", "src", "type", "start", "end", "score", "strand", "frame", "attr"]
gencode['chr'] = gencode['chr'].apply(str)
###############
print("loading gene annotations...", file = sys.stderr)
# parse the GTF into useful indexes
gene_coords = {}
tx_models = {}
tx_id_to_name = {}
tx_id_to_gene = {}
exonic_regions = {}
for i in range(gencode.shape[0]):
attrs = parse_attr(gencode.attr.values[i])
gene = attrs["gene_id"]
if gene not in tx_models:
tx_models[gene] = {}
chrom = gencode.chr.values[i]
if chrom.startswith("chr"):
chrom = chrom[3:]
if gene in tx_models:
if gencode.type.values[i] == "gene":
gene_coords[gene] = (chrom, gencode.start.values[i], gencode.end.values[i])
elif gencode.type.values[i] == "exon":
tx_id = attrs["transcript_id"]
if tx_id not in tx_models[gene]:
tx_models[gene][tx_id] = []
tx_models[gene][tx_id].append((chrom, gencode.start.values[i], gencode.end.values[i]))
###############
tx_id_to_gene[tx_id] = gene
###############
if "transcript_id" in attrs and "transcript_name" in attrs:
tx_id_to_name[attrs["transcript_id"]] = attrs["transcript_name"]
###############
if gencode.type.values[i] == "exon":
if chrom not in exonic_regions:
exonic_regions[chrom] = []
exonic_regions[chrom].append([gencode.start.values[i], gencode.end.values[i]])
###############
# reverse the transcript gene table
gene_to_tx_ids = {}
for tx_id in tx_id_to_gene:
gene = tx_id_to_gene[tx_id]
if gene not in gene_to_tx_ids:
gene_to_tx_ids[gene] = []
gene_to_tx_ids[gene].append(tx_id)
###############
all_genes = sorted(gene_to_tx_ids)
###############
# collapse the exonic regions that overlap
for chrom in exonic_regions:
i, j = 0, 0
intervals = exonic_regions[chrom]
intervals.sort()
while j < len(intervals):
if intervals[j][0] <= intervals[i][1]:
intervals[i][1] = max(intervals[i][1], intervals[j][1])
else:
i += 1
intervals[i] = intervals[j]
j += 1
while len(intervals) > i + 1:
intervals.pop()
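    # Illustrative effect of the merge above: [[1, 5], [3, 8], [10, 12]] -> [[1, 8], [10, 12]]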
###############
# this is a big table and we don't need it any more, clear it out
del gencode
gc.collect()
###############
print("computing credible intervals...", file = sys.stderr)
sample_tx_cred_intervals = {}
for samp, tab in zip(gibbs_samples, gibbs_tabs):
tx_cred_intervals = []
sample_tx_cred_intervals[samp] = tx_cred_intervals
def record_cred_interval(hst_exprs, credibility):
if len(hst_exprs) == 0:
return
for hst1, hst2 in sorted(set(tuple(sorted(pair)) for pair in itertools.combinations(hst_exprs, 2))):
ratios = []
hst1_expr = hst_exprs[hst1]
hst2_expr = hst_exprs[hst2]
assert(len(hst1_expr) == len(hst2_expr))
for i in range(len(hst1_expr)):
if hst1_expr[i] == 0.0 or hst2_expr[i] == 0.0:
# log ratio undefined if either is 0
continue
ratios.append(math.log(hst1_expr[i] / hst2_expr[i], 2.0))
if len(ratios) == 0:
continue
# find the credible interval
ratios.sort()
i1 = min(int(round(len(ratios) * (1.0 - credibility) / 2.0)), len(ratios) - 1)
i2 = min(int(round(len(ratios) * (1.0 - (1.0 - credibility) / 2.0))), len(ratios) - 1)
r1 = ratios[i1]
r2 = ratios[i2]
tx_cred_intervals.append((hst1, hst2, r1, r2))
# take either gzip or unzipped file
f = None
if tab.endswith(".gz"):
f = gzip.open(tab)
else:
f = open(tab)
# the credibility i'm using
credibility = .9
curr_tx = None
hst_gibbs_exprs = None
txs_seen = set()
for line in f:
if type(line) == bytes:
line = line.decode("utf-8")
if line.startswith("Name"):
# skip the header
continue
tokens = line.split()
hst = tokens[0]
tx = hst.split("_")[0]
if tx != curr_tx:
# were on to a new transcript, make sure we haven't seen it before
assert(tx not in txs_seen)
txs_seen.add(tx)
if curr_tx is not None:
# record the ratios of the HSTs for the previous transcript
record_cred_interval(hst_gibbs_exprs, credibility)
# fresh data structures for this transcript
curr_tx = tx
hst_gibbs_exprs = {}
# record the row of expression values
hst_gibbs_exprs[hst] = [float(tokens[i]) for i in range(2, len(tokens))]
if curr_tx is not None:
# the final transcript
record_cred_interval(hst_gibbs_exprs, credibility)
sample_tx_cred_intervals_output = os.path.join(out_dir, "sample_tx_cred_intervals.pkl")
with open(sample_tx_cred_intervals_output, "wb") as f:
pickle.dump(sample_tx_cred_intervals, f)
###############
print("loading genotypes...", file = sys.stderr)
genotypes = pd.read_csv(variant_table, sep = "\t")
genotypes['CHROM'] = genotypes['CHROM'].apply(str)
genotypes.sort_values(["CHROM", "POS"], inplace = True)
genotypes = genotypes.loc[np.invert(genotypes.duplicated()),:]
#################
print("loading HST variants...", file = sys.stderr)
hst_variants = {}
for hst_file in hst_variant_files.values():
hst_table = pd.read_csv(hst_file, sep = "\t", header = 0)
hst_table['Chrom'] = hst_table['Chrom'].apply(str)
for i in range(hst_table.shape[0]):
if type(hst_table.HSTs.values[i]) == float:
# this seems to happen when the list of HSTs is empty
continue
hsts = hst_table.HSTs.values[i].split(",")
for hst in hsts:
tx = hst.split("_")[0]
gene = tx_id_to_gene[tx]
if not gene in focal_genes_set:
continue
if not hst in hst_variants:
hst_variants[hst] = []
var = (hst_table.Pos.values[i], hst_table.Allele.values[i])
hst_variants[hst].append(var)
del hst_table
gc.collect()
#################
sample_higher_haplo_expr = {}
sample_lower_haplo_expr = {}
sample_informative_expr = {}
sample_haplo_1_is_higher = {}
sample_haplo_hsts = {}
for i in range(len(tabs)):
sample = samples[i]
tab = tabs[i]
print("computing haplotype expression for sample {}...".format(sample), file = sys.stderr)
sample_expr = pd.read_csv(tab, sep = "\t")
sample_tx_rows, sample_cluster_txs = row_dicts(sample_expr)
higher_haplo_expr = {}
lower_haplo_expr = {}
informative_expr = {}
haplo_1_is_higher = {}
haplo_hsts = {}
sample_higher_haplo_expr[sample] = higher_haplo_expr
sample_lower_haplo_expr[sample] = lower_haplo_expr
sample_informative_expr[sample] = informative_expr
sample_haplo_1_is_higher[sample] = haplo_1_is_higher
sample_haplo_hsts[sample] = haplo_hsts
for gene in focal_genes_set:
chrom, start, end = gene_coords[gene]
blocks = get_haplotypes(chrom, start, end, sample, genotypes)
if len(blocks) > 1:
print("sample {} has {} phase blocks on gene {}, skipping".format(sample, len(blocks), gene), file = sys.stderr)
continue
block = blocks[0]
if not gene in higher_haplo_expr:
higher_haplo_expr[gene] = {}
lower_haplo_expr[gene] = {}
informative_expr[gene] = {}
gene_higher_haplo_expr = higher_haplo_expr[gene]
gene_lower_haplo_expr = lower_haplo_expr[gene]
gene_informative_expr = informative_expr[gene]
haplo_1_expr = {}
haplo_2_expr = {}
for tx_id in gene_to_tx_ids[gene]:
haplo_1_expr[tx_id] = 0.0
haplo_2_expr[tx_id] = 0.0
total_informative_expr = 0.0
haplo_hsts[tx_id] = [None, None]
for i in sample_tx_rows[tx_id]:
ex = sample_expr.TPM.values[i]
hst = sample_expr.Name.values[i]
match_1 = True
match_2 = True
for pos, allele in hst_variants[hst]:
hap_1, hap_2 = block[pos]
match_1 = match_1 and allele == hap_1
match_2 = match_2 and allele == hap_2
if match_1 and not match_2:
haplo_hsts[tx_id][0] = hst
haplo_1_expr[tx_id] += ex
elif match_2 and not match_1:
haplo_hsts[tx_id][1] = hst
haplo_2_expr[tx_id] += ex
if not (match_1 and match_2):
total_informative_expr += ex
if not tx_id in gene_informative_expr:
gene_informative_expr[tx_id] = []
gene_informative_expr[tx_id].append(total_informative_expr)
if sum(haplo_1_expr.values()) > sum(haplo_2_expr.values()):
higher = haplo_1_expr
lower = haplo_2_expr
haplo_1_is_higher[gene] = True
else:
lower = haplo_1_expr
higher = haplo_2_expr
haplo_1_is_higher[gene] = False
for tx_id in higher:
if not tx_id in gene_higher_haplo_expr:
gene_higher_haplo_expr[tx_id] = []
gene_lower_haplo_expr[tx_id] = []
gene_higher_haplo_expr[tx_id].append(higher[tx_id])
gene_lower_haplo_expr[tx_id].append(lower[tx_id])
#################
higher_haplo_output = os.path.join(out_dir, "sample_higher_haplo_expr.pkl")
with open(higher_haplo_output, "wb") as f:
pickle.dump(sample_higher_haplo_expr, f)
lower_haplo_output = os.path.join(out_dir, "sample_lower_haplo_expr.pkl")
with open(lower_haplo_output, "wb") as f:
pickle.dump(sample_lower_haplo_expr, f)
informative_output = os.path.join(out_dir, "sample_informative_expr.pkl")
with open(informative_output, "wb") as f:
pickle.dump(sample_informative_expr, f)
which_haplo_output = os.path.join(out_dir, "sample_haplo_1_is_higher.pkl")
with open(which_haplo_output, "wb") as f:
pickle.dump(sample_haplo_1_is_higher, f)
haplo_hsts_output = os.path.join(out_dir, "sample_haplo_hsts.pkl")
with open(haplo_hsts_output, "wb") as f:
pickle.dump(sample_haplo_hsts, f)
###############
print("identifying heterozygous variants...", file = sys.stderr)
inf = 2**62
het_positions = {}
for vcf in vcfs:
with gzip.open(vcf) as f:
samps = None
for line in f:
if type(line) == bytes:
line = line.decode("utf-8")
if line.startswith("##"):
continue
if line.startswith("#"):
samps = line.rstrip().split("\t")[9:]
for sample in samps:
if sample not in het_positions:
het_positions[sample] = set()
else:
tokens = line.rstrip().split("\t")
assert(len(tokens) == len(samps) + 9)
chrom_exonic_regions = exonic_regions[tokens[0]]
chrom = tokens[0]
pos = int(tokens[1])
idx = bisect.bisect(chrom_exonic_regions, [pos, inf])
if idx == 0:
# before the first exon
continue
elif chrom_exonic_regions[idx - 1][1] < pos:
# in between exons
continue
for i in range(9, len(tokens)):
genotype = tokens[i]
samp = samps[i - 9]
if "|" in genotype or "\\" in genotype:
al1, al2 = re.split("[\\|\\\\]", genotype)
if al1 != al2:
het_positions[samp].add((chrom, pos))
gc.collect()
###############
all_gene_intervals = sorted((interval[0], interval[1], interval[2], gene) for gene, interval in gene_coords.items())
sample_het_balance = {}
for i in range(len(tabs)):
tab = tabs[i]
sample = samples[i]
if sample not in sample_het_balance:
sample_het_balance[sample] = {}
het_balance = sample_het_balance[sample]
print("computing balance for sample {}".format(sample), file = sys.stderr)
buffer = collections.deque()
prev_chrom = None
tokens = None
pos = None
filesize = None
hst_file = None
gene_num = 0
sample_expr = pd.read_csv(tab, sep = "\t")
sample_tx_rows, sample_cluster_txs = row_dicts(sample_expr)
for chrom, start, end, gene in all_gene_intervals:
gene_num += 1
if gene_num % 2500 == 0:
print("processing gene {}".format(gene_num), file = sys.stderr)
gene_hst_variants = {}
if prev_chrom != chrom:
# we've switched chromosomes to a new file
if not chrom in hst_variant_files:
continue
hst_table = hst_variant_files[chrom]
#print("starting chrom {}".format(chrom), file = sys.stderr)
hst_file = open(hst_table)
filesize = os.fstat(hst_file.fileno()).st_size
# skip the header
hst_file.readline()
buffer.clear()
tell = hst_file.tell()
prev_pos = -1
tokens = hst_file.readline().strip().split()
var_chrom = tokens[0]
pos = int(tokens[1])
buffer.append((pos, tell))
# advance through rows that are strictly before this gene
while pos < start:
tell = hst_file.tell()
if tell == filesize:
break
prev_pos = pos
tokens = hst_file.readline().strip().split()
pos = int(tokens[1])
if pos != prev_pos:
buffer.append((pos, tell))
# remove any part of the buffer before this gene
while len(buffer) > 0:
buf_pos = buffer[0][0]
if buf_pos < start:
buffer.popleft()
else:
break
if len(buffer) > 0:
# everything before the start has been removed, except the current row
buf_pos, tell = buffer[0]
if buf_pos < pos:
# this occurred strictly before the current row, so we need to seek
# backwards
# reset the part of the buffer to the right of where we're seeking to
while len(buffer) > 1:
buffer.pop()
hst_file.seek(tell)
tokens = hst_file.readline().strip().split()
pos = int(tokens[1])
hst_vars = {}
# iterate over rows in the gene
while pos <= end:
if len(tokens) >= 5:
allele = tokens[3]
pos = int(tokens[1])
hsts = tokens[4].split(",")
for hst in hsts:
if hst not in hst_vars:
hst_vars[hst] = []
hst_vars[hst].append((pos, allele))
tell = hst_file.tell()
if tell == filesize:
# we hit the end of the file
break
prev_pos = pos
tokens = hst_file.readline().strip().split()
pos = int(tokens[1])
if pos != prev_pos:
# this is the first row we've seen with this position, remember
# it in the buffer
buffer.append((pos, tell))
prev_chrom = chrom
if gene not in het_balance:
het_balance[gene] = []
var_expr = {}
if gene not in gene_to_tx_ids:
continue
for tx_id in gene_to_tx_ids[gene]:
#print("looking at expression for tx " + tx_id, file = sys.stderr)
if tx_id not in sample_tx_rows:
continue
for i in sample_tx_rows[tx_id]:
ex = sample_expr.TPM.values[i]
if ex == 0.0:
continue
hst = sample_expr.Name.values[i]
#print("\thst " + hst + " has positive expression " + str(ex), file = sys.stderr)
if hst not in hst_vars:
# must not overlap any variants
continue
for var in hst_vars[hst]:
if var not in var_expr:
var_expr[var] = 0.0
var_expr[var] += ex
alleles = {}
for pos, allele in var_expr:
if pos not in alleles:
alleles[pos] = []
alleles[pos].append(allele)
for pos in alleles:
if (chrom, pos) not in het_positions[sample]:
continue
#print("looking at expression for pos " + chrom + " " + str(pos), file = sys.stderr)
total_expr = sum(var_expr[(pos, allele)] for allele in alleles[pos])
highest_expr = max(var_expr[(pos, allele)] for allele in alleles[pos])
#print("highest expr " + str(highest_expr) + ", total " + str(total_expr), file = sys.stderr)
het_balance[gene].append((highest_expr, total_expr))
del sample_expr
del sample_tx_rows
del sample_cluster_txs
gc.collect()
#################
balance_output = os.path.join(out_dir, "sample_het_balance.pkl")
with open(balance_output, "wb") as f:
pickle.dump(sample_het_balance, f)
tx_models_output = os.path.join(out_dir, "tx_models.pkl")
with open(tx_models_output, "wb") as f:
pickle.dump(tx_models, f)
tx_id_to_name_output = os.path.join(out_dir, "tx_id_to_name.pkl")
with open(tx_id_to_name_output, "wb") as f:
pickle.dump(tx_id_to_name, f)
```
#### File: hamster-project-scripts/python/reference_pad_cds_alleles.py
```python
import sys
import os
import subprocess
from Bio.Seq import Seq
from Bio import SeqIO
from utils import *
def parse_chromosome(filename, chrom_name):
sequence = ""
for record in SeqIO.parse(filename, "fasta"):
if record.id == chrom_name:
sequence += str(record.seq)
return sequence
def parse_gene_coords(filename, gene_name):
gene_coords = ["", "", -1, -1, -1, -1]
transcript_file = open(filename, "r")
for line in transcript_file:
if line[0] == "#":
continue
line_split = line.split("\t")
attributes_split = line_split[8].split(";")
cur_gene_name = ""
tags = []
transcript_type = ""
for attribute in attributes_split:
attribute = attribute.strip()
if attribute[:9] == "gene_name":
assert(cur_gene_name == "")
cur_gene_name = attribute.split('"')[1]
if attribute[:3] == "tag":
tags.append(attribute.split('"')[1])
if attribute[:15] == "transcript_type":
assert(transcript_type == "")
transcript_type = attribute.split('"')[1]
assert(cur_gene_name != "")
if cur_gene_name != gene_name:
continue
if gene_coords[0] == "":
gene_coords[0] = line_split[0]
assert(gene_coords[0] == line_split[0])
if gene_coords[1] == "":
gene_coords[1] = line_split[6]
assert(gene_coords[1] == line_split[6])
if line_split[2] == "transcript":
if gene_coords[2] == -1:
assert(gene_coords[5] == -1)
gene_coords[2] = int(line_split[3])
gene_coords[5] = int(line_split[4])
else:
gene_coords[2] = min(gene_coords[2], int(line_split[3]))
gene_coords[5] = max(gene_coords[5], int(line_split[4]))
elif line_split[2] == "start_codon" and "basic" in tags and transcript_type == "protein_coding":
if gene_coords[1] == "-":
line_split[3] = line_split[4]
if gene_coords[3] == -1:
gene_coords[3] = int(line_split[3])
elif gene_coords[3] != int(line_split[3]):
print("Warning different start codon:")
print(gene_coords[3])
print(int(line_split[3]))
elif line_split[2] == "stop_codon" and "basic" in tags and transcript_type == "protein_coding":
if gene_coords[1] == "-":
line_split[4] = line_split[3]
if gene_coords[4] == -1:
gene_coords[4] = int(line_split[4])
elif gene_coords[4] != int(line_split[4]):
print("Warning different stop codon:")
print(gene_coords[4])
print(int(line_split[4]))
assert(gene_coords[0] != "")
assert(gene_coords[1] != "")
assert(not -1 in gene_coords[2:])
if gene_coords[1] == "+":
assert(gene_coords[2] <= gene_coords[3])
assert(gene_coords[3] < gene_coords[4])
assert(gene_coords[4] <= gene_coords[5])
else:
assert(gene_coords[1] == "-")
gene_coords[2], gene_coords[5] = gene_coords[5], gene_coords[2]
assert(gene_coords[2] >= gene_coords[3])
assert(gene_coords[3] > gene_coords[4])
assert(gene_coords[4] >= gene_coords[5])
return gene_coords
printScriptHeader()
if len(sys.argv) != 7:
print("Usage: python reference_pad_cds_alleles.py <cds_alleles_input_name> <genome_fasta_name> <transcripts_gtf_name> <gene_name> <gene_flank_size> <output_fasta_name>\n")
sys.exit(1)
gene_coords = parse_gene_coords(sys.argv[3], sys.argv[4])
print(gene_coords)
chrom_seq = parse_chromosome(sys.argv[2], gene_coords[0])
print(len(chrom_seq))
cds_file = open(sys.argv[1], "r")
out_file = open(sys.argv[6], "w")
gene_flank_size = int(sys.argv[5])
for line in cds_file:
line = line.strip()
line_split = line.split("\t")
assert(len(line_split) == 2)
if line_split[0] == "allele":
continue
if gene_coords[1] == "+":
left_flank = chrom_seq[(gene_coords[2] - gene_flank_size - 1):(gene_coords[3] - 1)]
right_flank = chrom_seq[gene_coords[4]:(gene_coords[5] + gene_flank_size - 1)]
else:
assert(gene_coords[1] == "-")
left_flank = chrom_seq[gene_coords[3]:(gene_coords[2] + gene_flank_size - 1)]
right_flank = chrom_seq[(gene_coords[5] - gene_flank_size - 1):(gene_coords[4] - 1)]
left_flank = Seq(left_flank)
left_flank = str(left_flank.reverse_complement())
right_flank = Seq(right_flank)
right_flank = str(right_flank.reverse_complement())
out_file.write(">" + line_split[0] + "\n")
out_file.write(left_flank + line_split[1] + right_flank + "\n")
cds_file.close()
out_file.close()
print("Done")
``` |
{
"source": "jonassignoreti/Python-CursoemVideo",
"score": 4
} |
#### File: lib/interface/__init__.py
```python
from pacotes.colors import *
def line(size=0):
print('-' * size)
def double_line(size=0):
print('=' * size)
def title(txt, size=0):
"""
--------------------------------------------------
'txt'
--------------------------------------------------
:param txt: the text in the center
:param size: size of the lines
:return: a title like the one above
"""
    if size == 0:
        size = len(txt) + 10
    double_line(size)
    print(f'{txt:^{size}}')
    double_line(size)
def error(msg):
"""
    :param msg: the error message to display
    :return: prints the message in white text on a red background
"""
print(f'{txt_white(back_red(msg))}')
def readInt(msg, errormsg):
while True:
print(msg, end='')
ans = str(input('')).strip()
if ans.isnumeric():
ans = int(ans)
return ans
else:
error(errormsg)
def menu(list, size):
title('MAIN MENU', size)
c = 1
back_blue(f=1)
for i in list:
print(f'{txt_yellow(back_aqua(c))}', end='')
back_blue(f=1)
txt_white(f=1)
print(f' - {i}')
c += 1
back_red(f=1)
double_line(size)
style_none(f=1)
while True:
        opt = readInt('your option: ', 'ERROR!, is not an integer number.')
if 0 < opt <= len(list):
break
else:
error('ERROR!, select a valid option.')
continue
return opt
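# --- Illustrative usage sketch (added; not part of the original module) ---
# Shows how menu() and title() above are meant to be combined; the option labels
# are made up for the example. The function is never called, so importing the
# module is unaffected.
def _example_menu_usage():
    options = ['Register', 'List', 'Exit']
    choice = menu(options, 40)  # draws the menu and validates the typed option
    title(f'You picked option {choice}', 40)
    return choice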
```
#### File: venv/Classes/aula21c.py
```python
print('--PARÂMETROS OPCIONAIS--')
def somar(a, b, c=0):  # (c) is an optional parameter; if no value is passed for it, it defaults to 0
s = a + b + c
print(f'A soma vale {s}')
somar(3, 2, 5)
somar(8, 4)
```
#### File: venv/Exercises/ex113.py
```python
def leiaInt():
while True:
try:
r = int(input('Digite um Número Inteiro: '))
return r
except:
print(f'\033[31mERRO: por favor, digite um número inteiro válido\033[m')
continue
def leiaFloat():
while True:
try:
r = float(input('Digite um Número Real: '))
return r
except:
print(f'\033[31mERRO: por favor, digite um número real válido\033[m')
continue
n_int = leiaInt()
n_float = leiaFloat()
print(f'O valor inteiro digitado foi {n_int} e o real foi {n_float}')
``` |
{
"source": "jonassjoh/modsim",
"score": 4
} |
#### File: modsim/SIR/simulation.py
```python
import random
import numpy as np
import matplotlib.pyplot as plt
from random import uniform
def w_choice(seq):
"""
    Takes an input of the form [('A', 30), ('B', 40), ('C', 30)] and returns
    one of the items based on the probability given by the second value in each tuple.
    E.g. B will get chosen 40% of the time while A and C will each get chosen 30% of
    the time.
"""
total_prob = sum(item[1] for item in seq)
chosen = random.uniform(0, total_prob)
cumulative = 0
    for item, probability in seq:
        cumulative += probability
if cumulative > chosen:
return item
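# --- Illustrative usage sketch (added; not part of the original script) ---
# Rough demonstration of w_choice with the weights from its docstring; 'B' should
# be drawn close to 40% of the time over many samples. Defined as a function and
# never called, so module behaviour is unchanged.
def _w_choice_example(samples=10000):
    seq = [('A', 30), ('B', 40), ('C', 30)]
    picks = [w_choice(seq) for _ in range(samples)]
    return picks.count('B') / samples  # expected to be roughly 0.4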
class Disease:
"""
Uses the SIR model to model the spread of a certain disease.
At each time t, s(t) + i(t) + r(t) = 1
"""
def __init__(self, N, b, k, susceptible, infected, recovered):
"""
params:
N = The total population
b = The average _FIXED_ number of contacts per day per person.
k = The average _FIXED_ fraction of infected individuals that will recover per day.
susceptible = The initial amount of susceptible individuals
infected = The initial amount of infected individuals
recovered = The initial amount of recovered individuals
"""
self.time = 0
self._N = N
self._b = b
self._k = k
self._susceptible = susceptible
self._infected = infected
self._recovered = recovered
def N(self):
return self._N
def vaccinate(self, amount):
"""
Vaccinates the specified amount of the population.
"""
self.susceptible(self.susceptible() - amount)
def susceptible(self, v=None):
"""
The number of susceptible individuals.
S = S(t)
        No one is added to this group since births and immigration are ignored.
An individual will leave this group once infected.
"""
if v is None:
return self._susceptible
self._susceptible = v
def susceptible_dt(self):
"""
The rate of change for susceptible individuals.
dS/dt = -b*s(t)*I(t)
"""
return -1 * self._b * self.susceptible_fraction() * self.infected()
def infected(self, v=None):
"""
The number of infected individuals.
I = I(t)
Each infected individual infects b * s(t) new individuals.
"""
if v is None:
return self._infected
self._infected = v
def recovered(self, v=None):
"""
The number of recovered individuals.
R = R(t)
"""
if v is None:
return self._recovered
self._recovered = v
def susceptible_fraction(self):
"""
The susceptible fraction of the population.
s(t)
"""
return self.fraction(self.susceptible())
def susceptible_fraction_dt(self):
"""
The rate of change for the susceptible fraction.
ds/dt = -b*s(t)*i(t)
"""
return -1 * self._b * self.susceptible_fraction() * self.infected_fraction()
def recovered_fraction(self):
"""
The recovered fraction of the population.
r(t)
"""
return self.fraction(self.recovered())
def recovered_fraction_dt(self):
"""
The rate of change for the recovered fraction of the population.
dr/dt = k*i(t)
"""
return self._k * self.infected_fraction()
def infected_fraction(self):
"""
The infected fraction of the population.
i(t)
"""
return self.fraction(self.infected())
def infected_fraction_dt(self):
"""
The rate of change for the infected fraction of the population.
di/dt = b * s(t) * i(t) - k * i(t)
di/dt = -ds/dt - dr/dt
"""
return -1 * (self.susceptible_fraction_dt() + self.recovered_fraction_dt())
def fraction(self, nr):
"""
Returns the fraction of the total population.
nr / N
"""
return nr / self._N
def number(self, fraction):
"""
Returns the number of the total population
fraction * N
"""
return fraction * self._N
def step(self):
"""
Steps forward one unit in time.
"""
i_dt = self.number(self.infected_fraction_dt())
r_dt = self.number(self.recovered_fraction_dt())
s_dt = self.number(self.susceptible_fraction_dt())
self.susceptible(self.susceptible() + s_dt)
self.recovered(self.recovered() + r_dt)
self.infected(self.infected() + i_dt)
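# --- Illustrative worked example (added; not part of the original module) ---
# One explicit Euler step of the SIR fractions, mirroring what Disease.step()
# does internally: ds = -b*s*i, dr = k*i and di = -(ds + dr) = b*s*i - k*i.
# The values of b, k, s and i are arbitrary. Defined as a function and never
# called, so module behaviour is unchanged.
def _sir_euler_step_example():
    b, k = 1 / 2, 1 / 3
    s, i, r = 0.99, 0.01, 0.0
    ds = -b * s * i
    dr = k * i
    di = -(ds + dr)  # same identity the class uses: di/dt = -ds/dt - dr/dt
    return s + ds, i + di, r + dr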
class City:
"""
City.
"""
def __init__(self, name, position, N, b, k, susceptible, infected, recovered):
"""
params:
name = The name of the city.
population = The population of the city
position = The position of the city
"""
self._position = position
self._name = name
self._disease = Disease(N=N, b=b, k=k, susceptible=susceptible, infected=infected, recovered=recovered)
self.Y_susceptible = []
self.Y_infected = []
self.Y_recovered = []
def population(self):
"""
Returns the population of the city.
"""
return self.disease().N()
def position(self):
"""
Returns the position of the city.
"""
        return self._position
def disease(self):
return self._disease
def vaccinate(self, n):
"""
Vaccinates n people of the population.
"""
self.disease().vaccinate(n)
def name(self):
return self._name
def step(self, t, _print=False):
"""
Steps forward one step in time.
params:
t = The current timestamp (Used when printing).
_print = If output should be printed to the console
"""
d = self.disease()
self.log(d, t, _print)
d.step()
def log(self, d, t, _print):
"""
Logs the current state of the disease for later plotting and/or dumping
to the console.
"""
self.Y_susceptible.append(d.susceptible_fraction())
self.Y_infected.append(d.infected_fraction())
self.Y_recovered.append(d.recovered_fraction())
if _print:
print("S("+str(t)+")", "%0.2f" % d.susceptible(), "\ts("+str(t)+")=", "%0.2f" % d.susceptible_fraction())
print("I("+str(t)+")", "%0.2f" % d.infected(), "\ti("+str(t)+")=", "%0.2f" % d.infected_fraction())
print("R("+str(t)+")", "%0.2f" % d.recovered(), "\tr("+str(t)+")=", "%0.2f" % d.recovered_fraction())
print()
def plot(self):
"""
        Plots the progress of the disease for this City.
"""
_ys, = plt.plot(self.Y_susceptible, label='s(t)')
_yi, = plt.plot(self.Y_infected, label="i(t)")
_yr, = plt.plot(self.Y_recovered, label="r(t)")
plt.legend(handles=[_ys, _yi, _yr])
plt.title(self.name())
plt.show()
def plot_get_infected(self):
_yi, = plt.plot(self.Y_infected, label="i(t) - "+str(self.name()))
return _yi
def example_graph():
"""
Example graph that is the same as the one found at the link:
https://www.maa.org/press/periodicals/loci/joma/the-sir-model-for-spread-of-disease-the-differential-equation-model
"""
c = City(name="Sweden", position=(0,0), N=7900000, b=1/2, k=1/3, susceptible=7900000, infected=10, recovered=0)
for t in range(140):
c.step(t)
c.plot()
def example_cities():
b = 0.99
k = 1/10
cities = [
City(name="Stockholm", position=(0,0), N=1400000, b=b, k=k, susceptible=1400000, infected=1, recovered=0),
City(name="Göteborg", position=(-50, -50), N=1000000, b=b, k=k, susceptible=1000000, infected=0, recovered=0),
City(name="Umeå", position=(-4, 100), N=100000, b=b, k=k, susceptible=100000, infected=0, recovered=0),
City(name="Örebro", position=(-10, 5), N=350000, b=b, k=k, susceptible=350000, infected=0, recovered=0)
]
print(cities)
for c in cities:
        print([other for other in cities if other is not c])  # the other cities; list.remove would return None
for t in range(140):
for c in cities:
c.step(t)
plt.legend(handles=[c.plot_get_infected() for c in cities])
plt.show()
example_cities()
``` |
{
"source": "jonassjuul/FartherFasterBroaderDeeper",
"score": 3
} |
#### File: SIR_data/Network_simulations/SIR_depth_breadth_and_SV.py
```python
import networkx as nx
import math
import numpy as np
from collections import Counter
def max_depth(G,root_node) :
p=nx.shortest_path_length(G,source=root_node)
return max(p.values())
def max_breadth(G,root_node) :
p=nx.shortest_path_length(G,source=root_node)
count = Counter(list(p.values()))
most_common = count.most_common(1)
return most_common[0][1]
def Structural_virality(G):
size = len(G.nodes())
if size==1:
return 0 ##virality is not defined for cascades of size 1,
sv=nx.average_shortest_path_length(G) #Note: this is very time-consuming for larger cascades
return sv
def depth_breadth_SV(G,root_node) :
# Depth
p = nx.shortest_path_length(G,source=root_node)
depth = max(p.values())
# Breadth
count = Counter(list(p.values()))
most_common = count.most_common(1)
breadth = most_common[0][1]
# Structural Virality
SV = Structural_virality(G)
return [depth, breadth, SV]
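# --- Illustrative usage sketch (added; not part of the original script) ---
# Computes the three cascade measures on a tiny hand-built cascade; the edges are
# made up for the example. With root 0, two children and one grandchild the
# expected result is depth 2, breadth 2 and structural virality 10/6 ~= 1.67.
# Defined as a function and never called.
def _example_measures():
    G = nx.Graph()
    G.add_edges_from([(0, 1), (0, 2), (1, 3)])
    return depth_breadth_SV(G, 0)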
Rspread=0.550000
cascade_size = 1
# Make file for results
filenames = ['Cascades_on_networkathletes_edges_Rspread0.550000_MinimumCascadeSize1.txt',
'Cascades_on_networkathletes_edges_Rspread0.650000_MinimumCascadeSize1.txt',
'Cascades_on_networkCornell5_Rspread0.550000_MinimumCascadeSize1.txt',
'Cascades_on_networkCornell5_Rspread0.650000_MinimumCascadeSize1.txt',
]
for filename in filenames :
print('hello')
f = open('Measures_'+filename,'w')
#f.write('\n%i\t%i\t%.4f'%(results[0],results[1],results[2]))
    f.write('Depth\tBreadth\tStructural Virality')
f.close()
# Open file and make analysis
f = open(filename,'r')
f.readline()
line_num = -1
for line in f :
#if (line_num == 30) :
# break
line_num +=1
#if (line_num/100 == line_num//100) :
print("Doing line number",line_num)
        # Import only nodes that get neighbors
line.strip()
columns = line.split(' ')
columns1 = [x for x in columns if not '\n' in x]
columns2 = [x for x in columns1 if not 'k' in x]
growth_array = [[],[]]
entry_num = -1
for entry in columns2 :
entry_num +=1
new_columns = entry.split('s')
if (entry_num == 0) :
seed = int(new_columns[1])
G = nx.Graph()
G.add_node(seed)
else :
growth_array[0].append(int(new_columns[0]))
growth_array[1].append(int(new_columns[1]))
#growth_array = [node.replace('s','') for node in columns2]
# Create graph
node_num = 0
for turn in range (len(growth_array[0])) :
#node_num+=1
G.add_edge(growth_array[0][turn],growth_array[1][turn])
# Calculate results
results = depth_breadth_SV(G,seed)
# Save results
f = open('Measures_'+filename,'a')
f.write('\n%i\t%i\t%.4f'%(results[0],results[1],results[2]))
f.close()
``` |
{
"source": "jonassoebro/Deep-Learning-in-Computer-Vision",
"score": 3
} |
#### File: project_1_1/src/model.py
```python
import torchvision.models as models
from torch.nn import Module, Sequential, Linear
class Model(Module):
def __init__(self, pretrained: bool = False, in_dim: int = 2048, out_dim: int = 256):
super(Model, self).__init__()
self.resnet = Sequential(*list(models.resnet50(pretrained=pretrained).children())[:-1])
self.linear = Linear(in_features=in_dim, out_features=out_dim, bias=True)
def forward(self, x):
x = self.resnet(x)
x = x.view(x.size(0), -1)
x = self.linear(x)
return x
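# --- Illustrative usage sketch (added; not part of the original file) ---
# Shows the tensor shapes implied by the ResNet-50 backbone plus linear head
# above; the batch size and image size are arbitrary. Defined as a function and
# never called, so importing the module is unaffected.
def _example_forward_pass():
    import torch
    model = Model(pretrained=False, in_dim=2048, out_dim=256)
    images = torch.randn(4, 3, 224, 224)  # (batch, channels, height, width)
    embeddings = model(images)  # shape (4, 256)
    return embeddings.shape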
```
#### File: Deep-Learning-in-Computer-Vision/project_1_1/train.py
```python
import sys
import os
sys.path.append('git_repo')
import hydra
import wandb
from omegaconf import DictConfig, OmegaConf
from project_1_1.src.data import get_data, download_data
from project_1_1.src.engine import EngineModule
from project_1_1.src.trainer import get_trainer
wandb.init(project='p1', entity='dlcv')
@hydra.main(config_path='config', config_name="default")
def run_training(cfg: DictConfig):
print(OmegaConf.to_yaml(cfg))
cfg_file = os.path.join(wandb.run.dir, 'config.yaml')
with open(cfg_file, 'w') as fh:
fh.write(OmegaConf.to_yaml(cfg))
wandb.save(cfg_file) # this will force sync it
download_data(cfg.data.path)
train_dataloader, test_dataloader = get_data(cfg.data.size, cfg.data.train_augmentation, cfg.training.batch_size,
base_path=cfg.data.path)
engine = EngineModule(cfg)
wandb.save('*.ckpt') # should keep it up to date
trainer = get_trainer(cfg, engine)
trainer.fit(engine, train_dataloader=train_dataloader, val_dataloaders=test_dataloader)
# TODO: visualizations
if __name__ == '__main__':
run_training()
``` |
{
"source": "jonassoenen/noise_robust_cobras",
"score": 3
} |
#### File: noise_robust_cobras/noise_robust_cobras/cluster.py
```python
import itertools
from collections.abc import Sequence
class Cluster:
def __init__(self, super_instances: Sequence):
self.super_instances = super_instances
# in the visual querier, the user can indicate that the entire cluster is pure
self.is_pure = False
# is set to True whenever splitting the super-instance fails i.e. if there is only one training instance
self.is_finished = False
def distance_to(self, other_cluster):
# calculates the distance between 2 clusters by calculating the distance between the closest pair of super-instances
super_instance_pairs = itertools.product(
self.super_instances, other_cluster.super_instances
)
return min([x[0].distance_to(x[1]) for x in super_instance_pairs])
def get_comparison_points(self, other_cluster):
# any super-instance should do, no need to find closest ones!
return self.super_instances[0], other_cluster.super_instances[0]
def get_all_points_per_superinstance(self):
all_pts = []
for superinstance in self.super_instances:
all_pts.append(superinstance.indices)
return all_pts
def get_all_points(self):
all_pts = []
for super_instance in self.super_instances:
all_pts.extend(super_instance.indices)
return all_pts
```
#### File: noise_robust_cobras/noise_robust_cobras/cobras_logger.py
```python
import copy
import time
from typing import List
import numpy as np
from noise_robust_cobras.noise_robust.datastructures.constraint import Constraint
class NopLogger(object):
def nop(*args, **kw):
pass
def __getattr__(self, _):
return self.nop
class ClusteringLogger:
def __init__(self):
# start time
self.start_time = None
# basic logging of intermediate results
self.intermediate_results = []
# all constraints obtained from the user
self.all_user_constraints = []
# algorithm phases
self.current_phase = None
self.algorithm_phases = []
# (detected) noisy constraints
self.corrected_constraint_sets = []
self.noisy_constraint_data = []
self.detected_noisy_constraint_data = []
# clustering to store
self.clustering_to_store = None
# execution time
self.execution_time = None
#########################
# information retrieval #
#########################
def get_all_clusterings(self):
return [cluster for cluster, _, _ in self.intermediate_results]
def get_runtimes(self):
return [runtime for _, runtime, _ in self.intermediate_results]
def get_ml_cl_constraint_lists(self):
ml = []
cl = []
for constraint in self.all_user_constraints:
if constraint.is_ML():
ml.append(constraint.get_instance_tuple())
else:
cl.append(constraint.get_instance_tuple())
return ml, cl
def add_mistake_information(self, ground_truth_querier):
for i, (constraint_number, constraint_copy) in enumerate(
self.corrected_constraint_sets
):
mistakes = []
for con in constraint_copy:
if (
ground_truth_querier.query(*con.get_instance_tuple()).is_ML()
!= con.is_ML()
):
mistakes.append(con)
self.corrected_constraint_sets[i] = (
constraint_number,
constraint_copy,
mistakes,
)
###################
# log constraints #
###################
def log_new_user_query(self, constraint):
# add the constraint to all_user_constraints
self.all_user_constraints.append(constraint)
# keep algorithm phases up to date
self.algorithm_phases.append(self.current_phase)
# intermediate clustering results
self.intermediate_results.append(
(
self.clustering_to_store,
time.time() - self.start_time,
len(self.all_user_constraints),
)
)
##################
# execution time #
##################
def log_start_clustering(self):
self.start_time = time.time()
def log_end_clustering(self):
self.execution_time = time.time() - self.start_time
##############
# phase data #
##############
def log_entering_phase(self, phase):
self.current_phase = phase
###############
# clusterings #
###############
def update_clustering_to_store(self, clustering):
if isinstance(clustering, np.ndarray):
self.clustering_to_store = clustering.tolist()
elif isinstance(clustering, list):
self.clustering_to_store = list(clustering)
else:
self.clustering_to_store = clustering.construct_cluster_labeling()
def update_last_intermediate_result(self, clustering):
if len(self.intermediate_results) == 0:
return
if not isinstance(clustering, np.ndarray):
self.intermediate_results[-1] = (
clustering.construct_cluster_labeling(),
time.time() - self.start_time,
len(self.all_user_constraints),
)
else:
self.intermediate_results[-1] = (
clustering.tolist(),
time.time() - self.start_time,
len(self.all_user_constraints),
)
#####################
# noisy constraints #
#####################
def log_corrected_constraint_set(self, constraints):
constraint_copy: List[Constraint] = [copy.copy(con) for con in constraints]
current_constraint_number = len(self.all_user_constraints)
self.corrected_constraint_sets.append(
(current_constraint_number, constraint_copy)
)
def log_detected_noisy_constraints(self, constraints):
con_length = len(self.all_user_constraints)
for con in constraints:
self.detected_noisy_constraint_data.append((con_length, copy.copy(con)))
```
#### File: noise_robust_cobras/noise_robust_cobras/cobras.py
```python
import copy
import gc
import itertools
import logging
from enum import Enum
from typing import Union
import numpy as np
from noise_robust_cobras.cluster import Cluster
from noise_robust_cobras.clustering import Clustering
from noise_robust_cobras.clustering_algorithms.clustering_algorithms import (
KMeansClusterAlgorithm,
ClusterAlgorithm,
)
from noise_robust_cobras.cobras_logger import ClusteringLogger
from noise_robust_cobras.strategies.splitlevel_estimation import (
StandardSplitLevelEstimationStrategy,
)
from noise_robust_cobras.strategies.superinstance_selection import (
SuperinstanceSelectionHeuristic,
MostInstancesSelectionHeuristic,
LeastInstancesSelectionHeuristic,
)
from noise_robust_cobras.superinstance import SuperInstance, SuperInstanceBuilder
from noise_robust_cobras.superinstance_kmeans import KMeans_SuperinstanceBuilder
from noise_robust_cobras.noise_robust.datastructures.certainty_constraint_set import (
NewCertaintyConstraintSet,
)
from noise_robust_cobras.noise_robust.datastructures.constraint import Constraint
from noise_robust_cobras.noise_robust.datastructures.constraint_index import (
ConstraintIndex,
)
from noise_robust_cobras.noise_robust.noise_robust_possible_worlds import (
gather_extra_evidence,
)
from noise_robust_cobras.querier.querier import MaximumQueriesExceeded
class SplitResult(Enum):
SUCCESS = 1
NO_SPLIT_POSSIBLE = 2
SPLIT_FAILED = 3
class COBRAS:
certainty_constraint_set: NewCertaintyConstraintSet
clustering: Union[Clustering, None]
def __init__(
self,
cluster_algo: ClusterAlgorithm = KMeansClusterAlgorithm(),
superinstance_builder: SuperInstanceBuilder = KMeans_SuperinstanceBuilder(),
split_superinstance_selection_heur: SuperinstanceSelectionHeuristic = None,
splitlevel_strategy=None,
noise_probability=0.10,
minimum_approximation_order=2,
maximum_approximation_order=3,
certainty_threshold=0.95,
seed=None,
correct_noise=True,
logger=None,
cobras_logger=None,
):
self.seed = seed
# init data, querier, max_questions, train_indices and store_intermediate results
# already initialised so object size does not change during execution
# python can optimize
self.data = None
self.querier = None
self.train_indices = None
# init cobras_cluster_algo
self.cluster_algo = cluster_algo
self.superinstance_builder = superinstance_builder
# init split superinstance selection heuristic
if split_superinstance_selection_heur is None:
self.split_superinstance_selection_heur = MostInstancesSelectionHeuristic()
else:
self.split_superinstance_selection_heur = split_superinstance_selection_heur
# init splitlevel_heuristic
if splitlevel_strategy is None:
self.splitlevel_strategy = StandardSplitLevelEstimationStrategy(
LeastInstancesSelectionHeuristic()
)
else:
self.splitlevel_strategy = splitlevel_strategy
# variables used during execution
self.clustering_to_store = None
self.clustering = None
self.random_generator = None
# logging
self._log = logging.getLogger(__name__) if logger is None else logger
self._cobras_log = (
ClusteringLogger() if cobras_logger is None else cobras_logger
)
# certainty_constraint_set
if correct_noise:
self.certainty_constraint_set: NewCertaintyConstraintSet = NewCertaintyConstraintSet(
minimum_approximation_order,
maximum_approximation_order,
noise_probability,
self._cobras_log,
)
self.constraint_index = self.certainty_constraint_set.constraint_index
else:
self.certainty_constraint_set = None
self.constraint_index = ConstraintIndex()
self.certainty_threshold = certainty_threshold
self.correct_noise = correct_noise
@property
def clustering_logger(self):
return self._cobras_log
def fit(self, X, nb_clusters, train_indices, querier):
"""
Perform clustering.
The number of clusters (nb_clusters) is not used in COBRAS but is added as a parameter to have a consistent
interface over all clustering algorithms
:param X: numpy array that where each row is an instance
:param nb_clusters: IGNORED, COBRAS determines the amount of clusters dynamically
        :param train_indices: the indices for which COBRAS can ask constraints; if there is no separate training set, use None
:param querier: a Querier object that can answer queries about the data X
:return: a tuple(all_clusters, runtimes, ml, cl) where all_clusters are the intermediate clusterings (for each query there is an intermediate clustering stored)
runtimes is the time the algorithm has been executing after each query
ml and cl are both lists of tuples representing the must-link and cannot-link constraints
note: these are the constraints that we got from the user! So there might be noisy constraints in these lists!
"""
self.random_generator = np.random.default_rng(self.seed)
self._cobras_log.log_start_clustering()
self.data = X
self.train_indices = (
train_indices if train_indices is not None else range(len(X))
)
self.split_superinstance_selection_heur.set_clusterer(self)
self.splitlevel_strategy.set_clusterer(self)
self.querier = querier
# initial clustering: all instances in one superinstance in one cluster
initial_superinstance = self.create_superinstance(
list(range(self.data.shape[0]))
)
initial_clustering = Clustering([Cluster([initial_superinstance])])
self.clustering = initial_clustering
# last valid clustering keeps the last completely merged clustering
last_valid_clustering = None
while not self.querier.query_limit_reached():
# during this iteration store the current clustering
self._cobras_log.update_clustering_to_store(self.clustering)
self.clustering_to_store = self.clustering.construct_cluster_labeling()
# splitting phase
self._cobras_log.log_entering_phase("splitting")
statuscode = self.split_next_superinstance()
if statuscode == SplitResult.NO_SPLIT_POSSIBLE:
# there is no split left to be done
# we have produced the best clustering
break
elif statuscode == SplitResult.SPLIT_FAILED:
# tried to split a superinstance but failed to split it
# this is recorded in the superinstance
# we will split another superinstance in the next iteration
continue
# merging phase
self._cobras_log.log_entering_phase("merging")
if self.correct_noise:
# make a copy of the current clustering and perform the merging phase on it
clustering_copy = copy.deepcopy(self.clustering)
fully_merged, new_user_constraints = self.merge_containing_clusters(
clustering_copy
)
corrected_clustering = None
if fully_merged:
# if we fully merged we can confirm and correct the clustering
# if not the query limit is reached so we have to stop
try:
fully_merged, corrected_clustering = self.confirm_and_correct(
new_user_constraints, clustering_copy
)
except MaximumQueriesExceeded:
# if during the confirm and correct the query limit is reached fully_merged is false
fully_merged = False
self.clustering = corrected_clustering
# explicit call to garbage collector to avoid memory problems
gc.collect()
else:
fully_merged, _ = self.merge_containing_clusters(self.clustering)
# correctly log intermediate results
if fully_merged:
self._cobras_log.update_last_intermediate_result(self.clustering)
# fill in the last_valid_clustering whenever appropriate
# after initialisation or after that the current clustering is fully merged
if fully_merged or last_valid_clustering is None:
last_valid_clustering = copy.deepcopy(self.clustering)
self.clustering = last_valid_clustering
self._cobras_log.log_end_clustering()
# collect results and return
all_clusters = self._cobras_log.get_all_clusterings()
runtimes = self._cobras_log.get_runtimes()
ml, cl = self._cobras_log.get_ml_cl_constraint_lists()
return all_clusters, runtimes, ml, cl
###########################
# SPLITTING #
###########################
def split_next_superinstance(self):
"""
Execute the splitting phase:
1) select the next super-instance to split
2) split the super-instance into multiple smaller super-instances
:return:
"""
# identify the next superinstance to split
to_split, originating_cluster = self.identify_superinstance_to_split()
if to_split is None:
return SplitResult.NO_SPLIT_POSSIBLE
# remove to_split from the clustering
originating_cluster.super_instances.remove(to_split)
if len(originating_cluster.super_instances) == 0:
self.clustering.clusters.remove(originating_cluster)
# split to_split into new clusters
split_level = self.determine_split_level(to_split)
new_super_instances = self.split_superinstance(to_split, split_level)
self._log.info(
f"Splitted super-instance {to_split.representative_idx} in {split_level} new super-instances {list(si.representative_idx for si in new_super_instances)}"
)
new_clusters = self.add_new_clusters_from_split(new_super_instances)
if not new_clusters:
# it is possible that splitting a super-instance does not lead to a new cluster:
# e.g. a super-instance constains 2 points, of which one is in the test set
# in this case, the super-instance can be split into two new ones, but these will be joined
# again immediately, as we cannot have super-instances containing only test points (these cannot be
# queried)
# this case handles this, we simply add the super-instance back to its originating cluster,
# and set the already_tried flag to make sure we do not keep trying to split this superinstance
self._log.info("Split failed! restoring original state")
originating_cluster.super_instances.append(to_split)
to_split.tried_splitting = True
to_split.children = None
if originating_cluster not in self.clustering.clusters:
self.clustering.clusters.append(originating_cluster)
return SplitResult.SPLIT_FAILED
else:
self.clustering.clusters.extend(new_clusters)
return SplitResult.SUCCESS
def identify_superinstance_to_split(self):
"""
Identify the next super-instance that needs to be split using the split superinstance selection heuristic
:return: (the super instance to split, the cluster from which the super instance originates)
"""
# if there is only one superinstance return that superinstance as superinstance to split
if (
len(self.clustering.clusters) == 1
and len(self.clustering.clusters[0].super_instances) == 1
):
return (
self.clustering.clusters[0].super_instances[0],
self.clustering.clusters[0],
)
options = []
for cluster in self.clustering.clusters:
if cluster.is_pure:
continue
if cluster.is_finished:
continue
for superinstance in cluster.super_instances:
if superinstance.tried_splitting:
continue
if len(superinstance.indices) == 1:
continue
if len(superinstance.train_indices) < 2:
continue
else:
options.append(superinstance)
if len(options) == 0:
return None, None
superinstance_to_split = self.split_superinstance_selection_heur.choose_superinstance(
options
)
originating_cluster = [
cluster
for cluster in self.clustering.clusters
if superinstance_to_split in cluster.super_instances
][0]
if superinstance_to_split is None:
return None, None
return superinstance_to_split, originating_cluster
def determine_split_level(self, superinstance):
"""
Determine the splitting level to split the given super-instance
"""
return self.splitlevel_strategy.estimate_splitting_level(superinstance)
def split_superinstance(self, si, k):
"""
Actually split the given super-instance si in k (the splitlevel) new super-instances
note: if splitting with self.cluster_algo results in a super-instance that has no training_instances,
this super-instance is merged with another super-instance that does still have training instances
:param si: the super-instance to be split
:param k: the splitlevel to be used
:return: A list with the resulting super-instances
:rtype List[Superinstance]
"""
# cluster the instances of the superinstance
clusters = self.cluster_algo.cluster(
self.data, si.indices, k, [], [], seed=self.random_generator.integers(1,1000000)
)
# based on the resulting clusters make new superinstances
# superinstances with no training instances are assigned to the closest superinstance with training instances
training = []
no_training = []
for new_si_idx in set(clusters):
cur_indices = [
si.indices[idx] for idx, c in enumerate(clusters) if c == new_si_idx
]
si_train_indices = [x for x in cur_indices if x in self.train_indices]
if len(si_train_indices) != 0:
training.append(self.create_superinstance(cur_indices, si))
else:
no_training.append(
(cur_indices, np.mean(self.data[cur_indices, :], axis=0))
)
for indices, centroid in no_training:
closest_train = min(
training,
key=lambda x: np.linalg.norm(
self.data[x.representative_idx, :] - centroid
),
)
closest_train.indices.extend(indices)
si.children = training
return training
@staticmethod
def add_new_clusters_from_split(si):
"""
small helper function: adds the new super-instances to the current clustering each in their own cluster
"""
new_clusters = []
for x in si:
new_clusters.append(Cluster([x]))
if len(new_clusters) == 1:
return None
else:
return new_clusters
###########################
# MERGING #
###########################
def merge_containing_clusters(self, clustering_to_merge):
"""
Perform the merging step to merge the clustering together
:param clustering_to_merge:
:return:
"""
query_limit_reached = False
merged = True
# the set of new user constraints that are used during merging
new_user_constraints = set()
while merged and not self.querier.query_limit_reached():
clusters_to_consider = [
cluster
for cluster in clustering_to_merge.clusters
if not cluster.is_finished
]
cluster_pairs = itertools.combinations(clusters_to_consider, 2)
cluster_pairs = [
x
for x in cluster_pairs
if not self.cannot_link_between_clusters(
x[0], x[1], new_user_constraints
)
]
cluster_pairs = sorted(cluster_pairs, key=lambda x: x[0].distance_to(x[1]))
merged = False
for x, y in cluster_pairs:
if self.querier.query_limit_reached():
query_limit_reached = True
break
# we will reuse or get a new constraint
constraint = self.get_constraint_between_clusters(x, y, "merging")
new_user_constraints.add(constraint)
if constraint.is_ML():
x.super_instances.extend(y.super_instances)
clustering_to_merge.clusters.remove(y)
merged = True
break
fully_merged = not query_limit_reached and not merged
return fully_merged, new_user_constraints
def cannot_link_between_clusters(self, c1, c2, new_constraints):
# first check if we can reuse from the constraint_structure itself
reused = self.check_constraint_reuse_clusters(c1, c2)
if reused is not None:
if reused.is_CL():
new_constraints.add(reused)
return True
return False
# otherwise check if we can reuse from new_constraints
for s1, s2 in itertools.product(c1.super_instances, c2.super_instances):
if (
Constraint(s1.representative_idx, s2.representative_idx, False)
in new_constraints
):
return True
return False
def must_link_between_clusters(self, c1, c2, new_constraints):
# first check if we can reuse from the constraint_structure itself
reused = self.check_constraint_reuse_clusters(c1, c2)
if reused is not None:
return reused.is_ML()
        # otherwise check if we can reuse from new_constraints
for s1, s2 in itertools.product(c1.super_instances, c2.super_instances):
if (
Constraint(s1.representative_idx, s2.representative_idx, True)
in new_constraints
):
return True
return False
######################################################
########### handling noisy constraints ###############
######################################################
def confirm_and_correct(self, new_user_constraints, clustering_copy):
"""
Confirm and correct the relevant user constraints
:param new_user_constraints:
:param clustering_copy:
:return:
"""
fully_merged = True
while len(new_user_constraints) > 0 and fully_merged:
# gather extra evidence for the uncertain userconstraints used during merging
relevant_instances = self.clustering.get_si_representatives()
all_relevant_constraints = self.constraint_index.find_constraints_between_instance_set(
relevant_instances
)
noisy_detected = gather_extra_evidence(
self.certainty_constraint_set,
all_relevant_constraints,
self.certainty_threshold,
self.querier,
self._cobras_log,
)
# if no noise detected continue with the next iteration of COBRAS
if not noisy_detected:
break
# there is noise but this could also be noise in userconstraints from previous iterations!
# so start from a clustering where each super-instance is in its own cluster!
self._cobras_log.log_entering_phase("merging")
all_sis = self.clustering.get_superinstances()
clusters = [Cluster([si]) for si in all_sis]
clustering_copy = Clustering(clusters)
fully_merged, new_user_constraints = self.merge_containing_clusters(
clustering_copy
)
if fully_merged:
# log constraints used during clustering
relevant_instances = self.clustering.get_si_representatives()
all_relevant_constraints = self.constraint_index.find_constraints_between_instance_set(
relevant_instances
)
self._cobras_log.log_corrected_constraint_set(all_relevant_constraints)
return fully_merged, clustering_copy
########
# util #
########
def get_constraint_length(self):
return self.constraint_index.get_number_of_constraints()
def create_superinstance(self, indices, parent=None) -> SuperInstance:
return self.superinstance_builder.makeSuperInstance(
self.data, indices, self.train_indices, parent
)
############################################
# constraint querying and constraint reuse #
############################################
def get_constraint_between_clusters(self, c1: Cluster, c2: Cluster, purpose):
"""
Gets a constraint between clusters c1 and c2
If there is already a known constraint between these two clusters it is reused
otherwise a new constraint between the 2 clusters is queried
:param c1: the first cluster
:param c2: the second cluster
:param purpose: the purpose of this constraint
:return: the reused or new constraint
"""
reused_constraint = self.check_constraint_reuse_clusters(c1, c2)
if reused_constraint is not None:
return reused_constraint
si1, si2 = c1.get_comparison_points(c2)
return self.query_querier(
si1.representative_idx, si2.representative_idx, purpose
)
def get_constraint_between_superinstances(
self, s1: SuperInstance, s2: SuperInstance, purpose
):
"""
Gets a constraint between the representatives of superinstances s1 and s2
If there is already a known constraint this constraint is reused
otherwise a new constraint between the super-instance representatives is queried
:param s1: the first super-instance
:param s2: the second super-instance
:param purpose: the purpose of this constraint
:return: the reused or new constraint
"""
reused_constraint = self.check_constraint_reuse_between_representatives(s1, s2)
if reused_constraint is not None:
return reused_constraint
return self.query_querier(s1.representative_idx, s2.representative_idx, purpose)
def get_constraint_between_instances(self, instance1, instance2, purpose):
"""
Gets a constraint between the instances instance1 and instance 2
If there is already a known constraint between these instances that constraint is reused
otherwise a new constraint between the instances is queried
:param instance1: the first instance
:param instance2: the second instance
:param purpose: the purpose of this constraint
:return: the reused or new constraint
"""
reused_constraint = self.check_constraint_reuse_between_instances(
instance1, instance2
)
if reused_constraint is not None:
return reused_constraint
min_instance = min(instance1, instance2)
max_instance = max(instance1, instance2)
return self.query_querier(min_instance, max_instance, purpose)
def check_constraint_reuse_clusters(self, c1: Cluster, c2: Cluster):
"""
Checks whether or not there is a known constraint between clusters c1 and c2
if there is return this constraint otherwise return None
:param c1: the first cluster
:param c2: the second cluster
:return: the existing constraint if there is one, none otherwise
:rtype Union[Constraint, None]
"""
superinstances1 = c1.super_instances
superinstances2 = c2.super_instances
for si1, si2 in itertools.product(superinstances1, superinstances2):
reused_constraint = self.check_constraint_reuse_between_representatives(
si1, si2
)
if reused_constraint is not None:
return reused_constraint
return None
def check_constraint_reuse_superinstances(self, si1, si2):
"""
Checks whether or not there is a known constraint between the representatives of si1 and si2
if there is return this constraint otherwise return None
:param si1: the first super-instance
:param si2: the second super-instance
:return: the existing constraint if there is one, none otherwise
:rtype Union[Constraint, None]
"""
reused_constraint = self.check_constraint_reuse_between_representatives(
si1, si2
)
return reused_constraint
def check_constraint_reuse_between_representatives(self, si1, si2):
"""
Checks whether or not there is a known constraint between the representatives of si1 and si2
if there is return this constraint otherwise return None
:param si1: the first super-instance
:param si2: the second super-instance
:return: the existing constraint if there is one, none otherwise
:rtype Union[Constraint, None]
"""
return self.check_constraint_reuse_between_instances(
si1.representative_idx, si2.representative_idx
)
def check_constraint_reuse_between_instances(self, i1, i2):
"""
Checks whether or not there is a known constraint between the instances i1 and i2
        if there is return this constraint otherwise return None
:param i1: the first instance
:param i2: the second instance
:return: the existing constraint if there is one, none otherwise
:rtype Union[Constraint, None]
"""
reused_constraint = None
ml_constraint = Constraint(i1, i2, True)
cl_constraint = Constraint(i1, i2, False)
constraint_index = self.constraint_index
if ml_constraint in constraint_index:
reused_constraint = ml_constraint
elif cl_constraint in constraint_index:
reused_constraint = cl_constraint
# if reused_constraint is not None:
# self._cobras_log.log_reused_constraint_instances(reused_constraint.is_ML(), i1, i2)
return reused_constraint
def query_querier(self, instance1, instance2, purpose):
"""
Function to query the querier
The constraint obtained from the querier is stored in
the certainty_constraint set or constraint_index (depending on whether correct noise is true or false)
This method should not be called if check_constraint_reuse_between_instances(i1,i2) returns a constraint
:param instance1: the first instance
:param instance2: the second instance
:param purpose: the purpose of this query
:return:
"""
if self.querier.query_limit_reached():
print("going over query limit! ", self.get_constraint_length())
# print("query ",self.get_constraint_length())
min_instance = min(instance1, instance2)
max_instance = max(instance1, instance2)
constraint_type = self.querier._query_points(min_instance, max_instance)
if self.correct_noise:
# add the new constraint to the certainty constraint set
self.certainty_constraint_set.add_constraint(
Constraint(min_instance, max_instance, constraint_type, purpose=purpose)
)
new_constraint = next(
self.certainty_constraint_set.constraint_index.find_constraints_between_instances(
min_instance, max_instance
).__iter__()
)
else:
self.constraint_index.add_constraint(
Constraint(min_instance, max_instance, constraint_type, purpose=purpose)
)
new_constraint = next(
self.constraint_index.find_constraints_between_instances(
min_instance, max_instance
).__iter__()
)
self._cobras_log.log_new_user_query(
Constraint(min_instance, max_instance, constraint_type, purpose=purpose)
)
return new_constraint
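# --- Illustrative usage sketch (added; not part of the original module) ---
# Rough shape of a COBRAS.fit call, based only on the docstring above. `X` is a
# (n_samples, n_features) numpy array and `querier` must implement the Querier
# interface used in query_querier (query_limit_reached / _query_points); both
# arguments are assumptions here, not objects shipped with this file. Defined as
# a function and never called.
def _example_cobras_usage(X, querier):
    clusterer = COBRAS(correct_noise=True, seed=42)
    all_clusterings, runtimes, ml, cl = clusterer.fit(
        X, nb_clusters=None, train_indices=None, querier=querier
    )
    return all_clusterings[-1]  # the last intermediate clustering (a cluster label list)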
```
#### File: noise_robust/datastructures/cycle.py
```python
from collections import defaultdict
from noise_robust_cobras.noise_robust.datastructures.constraint import Constraint
from noise_robust_cobras.noise_robust.datastructures.constraint_index import (
ConstraintIndex,
)
class Cycle:
"""
A class that represents a valid constraint cycle
attributes:
- constraints: a list of constraints the way they appear in the cycle (starts at a random point in the cycle)
- sorted_constraints: a tuple of constraints that is sorted for __eq__ and __hash__
- number_of_CLs: the number of CL constraints in this cycle
"""
def __init__(self, constraints, composed_from=None, number_of_CLs=None):
assert Cycle.is_valid_constraint_set_for_cycle(constraints)
self.constraints = set(constraints)
self.sorted_constraints = Cycle.sort_constraints(constraints)
self.composed_from = set(composed_from) if composed_from is not None else {self}
if number_of_CLs is None:
self.number_of_CLs = sum(
1 for constraint in constraints if constraint.is_CL()
)
else:
self.number_of_CLs = number_of_CLs
@staticmethod
def compose_multiple_cycles_ordered(cycles):
composed_cycle = cycles[0]
for to_compose in cycles[1:]:
composed_cycle = composed_cycle.compose_with(to_compose)
if composed_cycle is None:
break
return composed_cycle
@staticmethod
def compose_multiple_cycles(cycles):
composed_constraints = set(cycles[0].constraints)
composed_from = set(cycles[0].composed_from)
for to_compose in cycles[1:]:
composed_constraints.symmetric_difference_update(to_compose.constraints)
composed_from.symmetric_difference_update(to_compose.composed_from)
if not Cycle.is_valid_constraint_set_for_cycle(composed_constraints):
return None
return Cycle(composed_constraints, composed_from=composed_from)
@staticmethod
def make_cycle_from_raw_cons(raw_constraints):
constraints = Constraint.raw_constraints_to_constraints(raw_constraints)
return Cycle(constraints)
@staticmethod
def cycle_from_instances(instances):
instances = [int(i) for i in instances]
raw_constraints = list(zip(instances[:-1], instances[1:])) + [
(instances[0], instances[-1])
]
return Cycle.make_cycle_from_raw_cons(raw_constraints)
@staticmethod
def cycle_from_instances_constraint_index(instances, constraint_index):
instances = [int(i) for i in instances]
raw_constraints = list(zip(instances[:-1], instances[1:])) + [
(instances[0], instances[-1])
]
return Cycle(constraint_index.instance_tuples_to_constraints(raw_constraints))
@staticmethod
def is_valid_constraint_set_for_cycle(constraints):
if len(constraints) == 0:
return False
# check if each instance occurs twice
count = defaultdict(lambda: 0)
for constraint in constraints:
count[constraint.i1] += 1
count[constraint.i2] += 1
for key, value in count.items():
if value != 2:
return False
# check if all constraints are connected
all_sets = []
for constraint in constraints:
found_sets = [
s for s in all_sets if constraint.i1 in s or constraint.i2 in s
]
if len(found_sets) == 0:
all_sets.append({constraint.i1, constraint.i2})
elif len(found_sets) == 1:
found_sets[0].update(constraint.get_instance_tuple())
elif len(found_sets) == 2:
found_sets[0].update(found_sets[1])
all_sets.remove(found_sets[1])
return len(all_sets) == 1
def is_valid_cycle(self):
return Cycle.is_valid_constraint_set_for_cycle(self.constraints)
def get_sorted_constraint_list(self):
"""
:return: a list of all constraints in the order by which they appear in the cycle with an arbitrary starting constraints
"""
all_constraints = list(self.constraints)
start_constraint = all_constraints[0]
temp_index = ConstraintIndex()
for constraint in all_constraints[1:]:
temp_index.add_constraint(constraint)
current_list = [(start_constraint.get_instance_tuple(), start_constraint)]
current_instance = start_constraint.i2
while len(temp_index.constraints) > 0:
matching_constraints = temp_index.find_constraints_for_instance(
current_instance
)
if len(matching_constraints) == 1:
matching_constraint = list(matching_constraints)[0]
else:
raise Exception("Not a valid cycle!")
other_instance = matching_constraint.get_other_instance(current_instance)
current_list.append(
((current_instance, other_instance), matching_constraint)
)
current_instance = other_instance
temp_index.remove_constraint(matching_constraint)
# check if the cycle is complete
if start_constraint.i1 != current_instance:
raise Exception("Not a valid cycle!")
return current_list
def compose_with(self, other_cycle):
if len(self.constraints.intersection(other_cycle.constraints)) == 0:
return None
new_constraints = set(self.constraints).symmetric_difference(
other_cycle.constraints
)
if len(new_constraints) == 0:
return None
if not Cycle.is_valid_constraint_set_for_cycle(new_constraints):
return None
new_cycle = Cycle(
new_constraints,
other_cycle.composed_from.symmetric_difference(self.composed_from),
)
return new_cycle
def replace_constraint(self, old_constraint, new_constraint):
assert old_constraint in self.constraints
new_constraints = set(self.constraints)
new_constraints.remove(old_constraint)
new_constraints.add(new_constraint)
return Cycle(new_constraints)
@staticmethod
def sort_constraints(constraints):
return tuple(sorted(constraints))
def is_useful(self):
return self.number_of_CLs <= 2
def is_inconsistent(self):
return self.number_of_CLs == 1
def __iter__(self):
return self.constraints.__iter__()
def __len__(self):
return len(self.constraints)
def __eq__(self, other):
if other == None:
return False
return self.sorted_constraints == other.sorted_constraints
def __contains__(self, item):
return item in self.constraints
def __hash__(self):
return hash(self.sorted_constraints)
def __repr__(self):
return str(self)
def __str__(self):
# return ",".join([str(constraint) for constraint in self.constraints])
return ",".join([str(con) for _, con in self.get_sorted_constraint_list()])
```
#### File: noise_robust/datastructures/minimal_cycle_index.py
```python
import itertools
from collections import defaultdict
from noise_robust_cobras.noise_robust import find_cycles
from noise_robust_cobras.noise_robust.datastructures.cycle import Cycle
class CycleIndex:
"""
Cycle index is a class that keeps track of a set of cycles
Cycles are added through add_cycle_to_index and removed with remove_cycle
attributes:
- cycle-index a dictionary that maps a constraint to all cycles that involve this constraint
- all consistent cycles: all cycles in this cycle index that are consistent (#CL's != 1)
- all inconsistent cycles: all cycles in this cycle index that are inconsistent (#CL == 1)
Specific subclasses are provided to keep track of specific classes of cycles
"""
def __init__(self, constraint_index):
self.constraint_index = constraint_index
self.cycle_index = defaultdict(CycleIndex.set_tuple)
self.all_consistent_cycles = set()
self.all_inconsistent_cycles = set()
def replace_constraint(self, old_constraint, new_constraint):
all_cycles_with_constraint = self.get_all_cycles_for_constraint(old_constraint)
new_cycles_with_constraint = [
cycle.replace_constraint(old_constraint, new_constraint)
for cycle in all_cycles_with_constraint
]
for cycle_to_remove in all_cycles_with_constraint:
self.remove_cycle(cycle_to_remove)
for cycle in new_cycles_with_constraint:
self.add_cycle_to_index(cycle)
@staticmethod
def set_tuple():
return (set(), set())
def is_inconsistent(self):
return len(self.all_inconsistent_cycles) > 0
def __contains__(self, item):
return (
item in self.all_consistent_cycles or item in self.all_inconsistent_cycles
)
def all_cycles(self):
return self.all_inconsistent_cycles.union(self.all_consistent_cycles)
def get_all_cycles_for_constraint(self, constraint):
con_cycles, incon_cycles = self.cycle_index[constraint]
return con_cycles.union(incon_cycles)
def get_inconsistent_cycles_for_constraint(self, constraint):
_, incon_cycles = self.cycle_index[constraint]
return incon_cycles
def get_consistent_cycles_for_constraint(self, constraint):
con_cycles, _ = self.cycle_index[constraint]
return con_cycles
def add_cycle_to_index_entry(self, cycle, constraint):
consistent_cycles, inconsistent_cycles = self.cycle_index[constraint]
if cycle.is_inconsistent():
inconsistent_cycles.add(cycle)
else:
consistent_cycles.add(cycle)
def add_cycle_to_index(self, cycle):
"""
- inconsistent cycles are added to all_inconsistent_cycles and the inconsistent_cycle_index
- consistent cycles are added to all_cycles and the cycle_index
"""
assert cycle
# add cycle to cycle_index
for constraint in cycle.constraints:
self.add_cycle_to_index_entry(cycle, constraint)
# add cycle to all_inconsistent_cycles or all_consistent_cycles
if cycle.is_inconsistent():
self.all_inconsistent_cycles.add(cycle)
else:
# the cycle is consistent
self.all_consistent_cycles.add(cycle)
def remove_cycle(self, cycle_to_remove):
self.all_consistent_cycles.discard(cycle_to_remove)
self.all_inconsistent_cycles.discard(cycle_to_remove)
for con in cycle_to_remove:
consistent, inconsistent = self.cycle_index[con]
consistent.discard(cycle_to_remove)
inconsistent.discard(cycle_to_remove)
def remove_cycles_with_constraint(self, constraint_to_remove):
con_cycles, incon_cycles = self.cycle_index[constraint_to_remove]
self.all_consistent_cycles.difference_update(con_cycles)
self.all_inconsistent_cycles.difference_update(incon_cycles)
self.cycle_index.pop(constraint_to_remove)
class MinimalCycleIndex(CycleIndex):
"""
Through add constraint keeps track of all the minimal cycles in the graph
(for each constraint only the cycles are kept with the minimal length)
note: old cycles that are not minimal are not removed from this datastructure
constraints should be added through add_constraint and removed through remove_cycles_with_constraint to ensure consistency of the data structure
"""
def __init__(self, constraint_index):
super().__init__(constraint_index)
# minimal cycles dict is a dictionary from a constraint to a set of cycles
# it keeps the cycles that need to be retained for this constraint
self.minimal_cycles_dict = defaultdict(set)
def add_constraint(self, constraint):
all_cycles = find_cycles.find_all_cycles_with_minimal_length(
self.constraint_index, constraint
)
if all_cycles is not None:
self.add_minimal_cycles_to_index(all_cycles)
def add_minimal_cycles_to_index(self, cycles):
minimal_length = len(cycles[0])
assert all(len(cycle) == minimal_length for cycle in cycles)
# add the cycles to the index
for cycle in cycles:
self.add_cycle_to_index(cycle)
minimal_cycle: Cycle = cycles[0]
# remove longer cycles and add smaller minimal cycles
# this does nothing!
constraints_that_occur_in_short_cycle = minimal_cycle.constraints
cycles_to_check = {
cycle
for con in constraints_that_occur_in_short_cycle
for cycle in self.get_all_cycles_for_constraint(con)
if len(cycle) > minimal_length
}
for old_cycle in cycles_to_check:
composition = minimal_cycle.compose_with(old_cycle)
if composition is not None and len(composition) < len(old_cycle):
if composition not in self:
self.add_cycle_to_index(composition)
def add_cycle_to_index(self, cycle):
super(MinimalCycleIndex, self).add_cycle_to_index(cycle)
self.add_cycle_to_minimal_cycle_dict(cycle)
def check_cycles_for_removal(self, cycles):
for cycle in cycles:
if not self.is_minimal_cycle(cycle):
self.remove_cycle(cycle)
def add_cycle_to_minimal_cycle_dict(self, cycle):
for constraint in cycle.constraints:
existing_entry = self.minimal_cycles_dict[constraint]
if len(existing_entry) == 0:
self.minimal_cycles_dict[constraint].add(cycle)
else:
# you should keep the old cycle to ensure you have an inconsistent cycle
some_cycle = list(existing_entry)[0]
old_length = len(some_cycle)
new_length = len(cycle)
if new_length < old_length:
old_cycles = self.minimal_cycles_dict[constraint]
self.minimal_cycles_dict[constraint] = {cycle}
self.check_cycles_for_removal(old_cycles)
elif new_length == old_length:
self.minimal_cycles_dict[constraint].add(cycle)
else:
# new_length > old_length
pass
def is_minimal_cycle(self, cycle):
for constraint in cycle.constraints:
if cycle in self.minimal_cycles_dict[constraint]:
return True
return False
def remove_cycles_with_constraint(self, constraint_to_remove):
involved_cycles = self.get_all_cycles_for_constraint(constraint_to_remove)
new_cycles = []
for cycle1, cycle2 in itertools.combinations(involved_cycles, 2):
new_cycle = cycle1.compose_with(cycle2)
if new_cycle is None:
continue
new_cycles.append(new_cycle)
for cycle in involved_cycles:
self.remove_cycle(cycle)
for new_cycle in new_cycles:
self.add_cycle_to_index(new_cycle)
self.cycle_index.pop(constraint_to_remove)
def remove_cycle(self, cycle_to_remove):
super(MinimalCycleIndex, self).remove_cycle(cycle_to_remove)
self.remove_cycle_from_minimal_cycle_dict(cycle_to_remove)
def remove_cycle_from_minimal_cycle_dict(self, cycle_to_remove):
for con in cycle_to_remove:
entry = self.minimal_cycles_dict[con]
entry.discard(cycle_to_remove)
```
#### File: noise_robust_cobras/strategies/splitlevel_estimation.py
```python
from abc import ABC, abstractmethod
class SplitLevelEstimationStrategy(ABC):
def __init__(self):
self.cobras_clusterer = None
@abstractmethod
def estimate_splitting_level(self, superinstance):
pass
@abstractmethod
def get_name(self):
pass
def set_clusterer(self, cobras_clusterer):
self.cobras_clusterer = cobras_clusterer
class ConstantSplitLevelEstimationStrategy(SplitLevelEstimationStrategy):
def __init__(self, constant_split_level):
super().__init__()
        self.constant_split_level = constant_split_level
    def estimate_splitting_level(self, superinstance):
        return min(len(superinstance.train_indices), self.constant_split_level)
    def get_name(self):
        return "ConstantSplittingLevel({})".format(self.constant_split_level)
class StandardSplitLevelEstimationStrategy(SplitLevelEstimationStrategy):
def __init__(self, superinstance_selection_strategy):
super().__init__()
self.superinstance_selection_strategy = superinstance_selection_strategy
def get_name(self):
return "StandardSplitLevel({})".format(
type(self.superinstance_selection_strategy).__name__
)
def estimate_splitting_level(self, superinstance):
si_copy = superinstance.copy()
must_link_found = False
max_split = len(si_copy.indices)
split_level = 0
while (
not must_link_found
and not self.cobras_clusterer.querier.query_limit_reached()
):
new_sis = self.cobras_clusterer.split_superinstance(si_copy, 2)
if len(new_sis) == 1:
# we cannot split any further along this branch, we reached the splitting level
break
s1 = new_sis[0]
s2 = new_sis[1]
if self.cobras_clusterer.get_constraint_between_superinstances(
s1, s2, "determine splitlevel"
).is_ML():
must_link_found = True
continue
else:
# the constraint is a cannot link
split_level += 1
si_to_choose = []
if len(s1.train_indices) >= 2:
si_to_choose.append(s1)
if len(s2.train_indices) >= 2:
si_to_choose.append(s2)
if len(si_to_choose) == 0:
                    # neither of the superinstances has enough training instances
break
# continue with the superinstance chosen by the heuristic
si_copy = self.superinstance_selection_strategy.choose_superinstance(
si_to_choose
)
split_level = max(split_level, 1)
split_n = 2 ** int(split_level)
return min(max_split, split_n)
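# --- Illustrative note (added; not part of the original module) ---
# Worked example of the doubling rule above: if three successive pairs of newly
# split super-instances all answer cannot-link before a must-link (or another
# stopping condition) is reached, split_level is 3 and the returned split level
# is min(len(superinstance.indices), 2 ** 3) = 8, capped by the instance count.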
``` |
{
"source": "jonassoleil/swag",
"score": 3
} |
#### File: src/modules/base_model_iterator.py
```python
class BaseModelIterator:
def __init__(self):
self.length = 0
self.reset()
def __iter__(self):
return self
def reset(self):
self.i = -1
def __len__(self):
return self.length
def get_next_model(self):
raise NotImplementedError
def __next__(self):
self.i += 1
if self.i >= self.length:
raise StopIteration
return self.get_next_model()
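# --- Illustrative subclass sketch (added; not part of the original module) ---
# BaseModelIterator leaves get_next_model abstract; a concrete iterator only has
# to set self.length and read self.i as the cursor. The subclass below is
# hypothetical and exists purely to show the contract.
class _ListModelIterator(BaseModelIterator):
    def __init__(self, models):
        self.models = list(models)
        super().__init__()
        self.length = len(self.models)
    def get_next_model(self):
        return self.models[self.i]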
``` |
{
"source": "jonasspenger/paciofspython",
"score": 2
} |
#### File: paciofspython/paciofs/helper.py
```python
import collections
import threading
import retrying
import logging
import pickle
import socket
import random
import time
import os
import module
logging.config.fileConfig(os.path.join(os.path.dirname(__file__), "logging.conf"))
logger = logging.getLogger("helper")
class DictServer(module.Module):
def __init__(self):
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.bind((socket.gethostname(), 0))
self.sock.listen(128)
self.servers = {}
self.dict = {}
self.lock = threading.Lock()
self.stop_event = threading.Event()
def get_address(self):
return self.sock.getsockname()
def add_server(self, pubkey, address):
self.servers[pubkey] = address
def remove_server(self, pubkey, address):
del self.servers[pubkey]
def get(self, key):
with self.lock:
return self.dict.get(key)
def put(self, key, value):
with self.lock:
self.dict[key] = value
@retrying.retry(wait_random_min=10, wait_random_max=100, stop_max_delay=5000)
def get_remote(self, key):
serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
serversocket.connect(self.servers[random.choice(list(self.servers))])
serversocket.sendall(pickle.dumps(key))
value = bytearray()
buf = serversocket.recv(4096)
value.extend(buf)
while len(buf) >= 4096:
buf = serversocket.recv(4096)
value.extend(buf)
value = pickle.loads(value)
serversocket.close()
if value == None:
raise Exception("could not find key: %s" % (key))
return value
def _listen(self):
while not self.stop_event.is_set():
try:
(clientsocket, address) = self.sock.accept()
key = pickle.loads(clientsocket.recv(4096))
value = self.dict.get(key)
clientsocket.sendall(pickle.dumps(value))
clientsocket.close()
except ConnectionAbortedError as e:
logger.error("error: %s" % e)
def _start(self):
self.stop_event.clear()
self._handle_exit(self._stop)
threading.Thread(target=self._listen, daemon=True).start()
def _stop(self):
self.stop_event.set()
self.sock.close()
```
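A small local sketch of the `DictServer` key-value API. The import path is an assumption, no remote server is contacted, and the constructor is assumed to succeed in binding a socket to the local hostname.
```python
# Hypothetical local usage of DictServer; only the in-process put/get API is
# exercised here, and the import path is an assumption.
from paciofs.helper import DictServer

store = DictServer()                 # binds a listening socket on construction
store.put("/foo.txt", b"contents")
print(store.get("/foo.txt"))         # b'contents'
print(store.get_address())           # (hostname, ephemeral port) of the socket
store._stop()                        # close the socket again
```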
#### File: paciofs/tpb/tamperproofbroadcast.py
```python
import logging.config
import threading
import argparse
import logging
import inspect
import sys
import os
import tpb.multichain as multichain
import tpb.protocols as protocols
import tpb.module as module
import time
logging.config.fileConfig(os.path.join(os.path.dirname(__file__), "logging.conf"))
logger = logging.getLogger("tamperproofbroadcast")
class TamperProofBroadcast(module.Module):
@classmethod
def _Parser(cls):
parser = argparse.ArgumentParser(add_help=False)
subparsers = parser.add_subparsers(dest="protocol", required=True)
fotb = subparsers.add_parser(
"fotb",
help="FIFO-order reliable tamper-proof broadcast",
description="FIFO-order reliable tamper-proof broadcast",
parents=[protocols.FOTB._Parser(), multichain.MultiChain._Parser(),],
)
totb = subparsers.add_parser(
"totb",
help="uniform causal-order total-order reliable tamper-proof broadcast",
description="uniform causal-order total-order reliable tamper-proof broadcast",
parents=[protocols.TOTB._Parser(), multichain.MultiChain._Parser(),],
)
return parser
@classmethod
def _Init(cls, args):
protocol = args.protocol
if protocol == "fotb":
fotb = protocols.FOTB._Init(args)
mc = multichain.MultiChain._Init(args)
mc._register_northbound(fotb)
fotb._register_southbound(mc)
return fotb
if protocol == "totb":
totb = protocols.TOTB._Init(args)
mc = multichain.MultiChain._Init(args)
mc._register_northbound(totb)
totb._register_southbound(mc)
return totb
if __name__ == "__main__":
parser = argparse.ArgumentParser(parents=[TamperProofBroadcast._Parser(),])
parser.add_argument("--logginglevel", default="INFO")
args = parser.parse_args()
logging.getLogger().setLevel(args.logginglevel)
tpb = TamperProofBroadcast._Init(args)
tpb._create()
tpb._start()
def receive():
while True:
print(tpb.deliver(blocking=True))
threading.Thread(target=receive, daemon=True).start()
while True:
tpb.broadcast(sys.stdin.readline())
tpb._stop()
tpb._uncreate()
``` |
{
"source": "jonasspenger/snakemake.ml",
"score": 3
} |
#### File: snakemake.ml/scripts/generate_report.py
```python
import jinja2
import glob
import os
import pandas
import datetime
def generate_report(input):
""" Generate report of csv and png files in input directory.
Args:
input (str): Input directory.
Returns:
None
"""
# HTML template for the report
tmplt = """
<!DOCTYPE html>
<html>
<head lang="en">
<meta charset="UTF-8">
<title>{{ title }}</title>
</head>
<body>
<h1>{{ title }}</h1>
<br/>
{{ datetime }}
<br/>
<h2>Results:</h2>
{% for result in results %}
<br/>
<br/>
{{ result }}
{% endfor %}
<h2>Figures:</h2>
{% for figure in figures %}
<br/>
<br/>
{{ figure }}
<br/>
<img src="{{ figure }}">
{% endfor %}
</body>
</html>
"""
# change directory to the folder
owd = os.getcwd()
os.chdir(input)
# collect figures and results
figures = sorted(glob.glob("**/*.png", recursive=True))
csv_files = sorted(glob.glob("**/*.csv", recursive=True))
csv_df = [pandas.read_csv(r) for r in csv_files]
csv_html = [r.to_html() for r in csv_df]
# set template variables
template_vars = {"title" : "Snakemake ML Report",
"figures": figures,
"results": csv_html,
"datetime": datetime.datetime.now().strftime("%I:%M%p, %B %d, %Y")}
# make html and save to file
jinja2.Template(tmplt).stream(template_vars).dump('report.html')
os.chdir(owd)
``` |
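A hypothetical invocation of the report generator above; the module path and the `results/` directory (assumed to already contain `*.csv` tables and `*.png` figures) are assumptions.
```python
# Hypothetical call; generate_report collects *.csv and *.png files under the
# given directory and writes report.html next to them.
from generate_report import generate_report  # assumed import path

generate_report("results/")   # writes results/report.html
```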
{
"source": "jonasspenger/tamperproofbroadcast",
"score": 2
} |
#### File: tamperproofbroadcast/src/etcd.py
```python
import multiprocessing
import logging.config
import threading
import tempfile
import binascii
import retrying
import port_for
import logging
import pickle
import shutil
import queue
import etcd3
import time
import os
import module
logging.config.fileConfig(os.path.join(os.path.dirname(__file__), "logging.conf"))
logger = logging.getLogger("etcd")
class _ETCDBroadcast(module.Module):
def __init__(self, host="localhost", port="2379", queuesize=128):
self.host = host
self.port = port
self.queuesize = queuesize
self.queue = queue.Queue(maxsize=queuesize)
self.cancel = None
def broadcast(self, message):
try:
self.etcdclient.put("broadcast", self._pack(message))
except Exception as e:
time.sleep(0.1)
raise Exception({"error": "failed to broadcast message"})
def deliver(self):
try:
return self.queue.get_nowait()
except:
raise Exception("nothing to deliver")
def _deliver(self, start_revision=1):
try:
try:
iter, self.cancel = self.etcdclient.watch(
"broadcast", start_revision=start_revision
)
for i, message in enumerate(iter):
self.queue.put(self._unpack(message._event.kv.value))
start_revision = message._event.kv.mod_revision + 1
self.cancel()
except etcd3.exceptions.RevisionCompactedError as e:
iter, self.cancel = self.etcdclient.watch(
"broadcast", start_revision=e.compacted_revision
)
for i, message in enumerate(iter):
self.queue.put(self._unpack(message._event.kv.value))
self.cancel()
except etcd3.exceptions.ConnectionFailedError as e:
time.sleep(1)
self._deliver(start_revision=start_revision)
def _start(self):
self.etcdclient = etcd3.client(
host=self.host,
port=self.port,
grpc_options={
"grpc.max_send_message_length": -1,
"grpc.max_receive_message_length": -1,
}.items(),
)
self.etcdclient.status()
threading.Thread(target=self._deliver, daemon=True).start()
def _stop(self):
del self.etcdclient
def _create(self):
datadir = tempfile.mkdtemp()
self._handle_exit(lambda: shutil.rmtree(datadir, ignore_errors=True))
self._execute_command(
"etcd --listen-client-urls=http://%s:%s --advertise-client-urls=http://%s:%s --data-dir=%s --listen-peer-urls=http://localhost:%s"
% (
self.host,
self.port,
self.host,
self.port,
datadir,
port_for.select_random(),
),
daemon=True,
)
self._execute_command(
"etcdctl --endpoints=http://%s:%s endpoint status" % (self.host, self.port),
)
class _BatchingBroadcast(module.Module):
def __init__(self, batchsize=128):
self.batch = [None] * batchsize
self.nextpos = 0
self.batchsize = batchsize
self.deliverbatch = [None] * batchsize
self.delivernextpos = batchsize
self.queue = queue.Queue(maxsize=batchsize)
self.stop_event = threading.Event()
def broadcast(self, message):
self.batch[self.nextpos] = message
if self.nextpos == self.batchsize - 1:
self.southbound.broadcast(self.batch)
self.nextpos = 0
else:
self.nextpos = self.nextpos + 1
def deliver(self, blocking=False):
if blocking == False:
try:
return self.queue.get_nowait()
except:
raise Exception({"error": "no message to deliver"})
else:
return self.queue.get()
def _deliver(self):
while not self.stop_event.is_set():
try:
for message in self.southbound.deliver():
self.queue.put(message)
except:
pass
def _start(self):
threading.Thread(target=self._deliver, daemon=True).start()
def _stop(self):
self.stop_event.set()
time.sleep(1)
class ETCD(module.Module):
def __init__(
self, host=None, port=None, queuesize=128, batchsize=128, create=False
):
self.port = port
self.host = host
self.queuesize = int(queuesize)
self.batchsize = int(batchsize)
self.create = bool(create)
if self.host == None:
self.host = "localhost"
if self.port == None:
self.port = port_for.select_random()
self.etcdbroadcast = _ETCDBroadcast(
host=self.host, port=self.port, queuesize=self.queuesize
)
self.batchingbroadcast = _BatchingBroadcast(batchsize=self.batchsize)
self.etcdbroadcast._register_northbound(self.batchingbroadcast)
self.batchingbroadcast._register_southbound(self.etcdbroadcast)
def broadcast(self, message):
return self.batchingbroadcast.broadcast(message)
def deliver(self, blocking=False):
message = self.batchingbroadcast.deliver(blocking)
return message
def _create(self):
if self.create:
logger.info("creating etcd at %s:%s" % (self.host, self.port))
self.etcdbroadcast._create()
self.batchingbroadcast._create()
logger.info("finished creating etcd at %s:%s" % (self.host, self.port))
def _uncreate(self):
if self.create:
logger.info("uncreating etcd")
self.batchingbroadcast._uncreate()
self.etcdbroadcast._uncreate()
def _start(self):
logger.info("starting etcd at %s:%s" % (self.host, self.port))
self.etcdbroadcast._start()
self.batchingbroadcast._start()
logger.info("finished starting etcd at %s:%s" % (self.host, self.port))
def _stop(self):
logger.info("stopping etcd")
self.batchingbroadcast._stop()
self.etcdbroadcast._stop()
``` |
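A hypothetical end-to-end sketch of the `ETCD` wrapper above. It assumes the `etcd` and `etcdctl` binaries are on `PATH`, that `module.Module` provides the `_pack`/`_unpack` helpers used internally, and that the import path matches the repository layout.
```python
# Hypothetical usage: spawn a local etcd, broadcast one full batch of messages,
# then deliver them back. All paths and ports here are assumptions.
from etcd import ETCD

bcast = ETCD(batchsize=4, create=True)
bcast._create()                       # starts a local etcd on a random port
bcast._start()
for i in range(4):                    # the 4th call flushes the batch to etcd
    bcast.broadcast(("msg", i))
for _ in range(4):
    print(bcast.deliver(blocking=True))
bcast._stop()
bcast._uncreate()
```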
{
"source": "jonasspinner/weighted-f-free-edge-editing",
"score": 3
} |
#### File: weighted-f-free-edge-editing/scripts/download_dataset.py
```python
import urllib.request
import yaml
from tempfile import NamedTemporaryFile, TemporaryDirectory
from pathlib import Path
import zipfile
import re
from shutil import rmtree
import argparse
from typing import Optional
def output(text: str):
print(text)
def transform(input_path: Path, output_path: Path):
with input_path.open('r') as input_file:
lines = input_file.read().split('\n')
n = int(lines[0])
m = n * (n - 1) // 2
fmt = 1
M = [[0.0] + [float(value) for value in line.split('\t')] for line in lines[n + 1:-2]] + [[0.0]]
S = [[M[min([i, j])][max([i, j]) - min([i, j])] for j in range(n)] for i in range(n)]
output(f"\twriting {output_path.name}")
with output_path.open('w') as output_file:
output_file.write(f"{n} {m} {fmt}\n")
for i in range(n):
output_file.write(" ".join([f"{j+1} {S[i][j]}" for j in range(n) if i != j]))
output_file.write("\n")
def download_bio_dataset(output_dir: Path, config_file: Path, max_size: Optional[int]):
if output_dir.exists():
output(f"{output_dir.name} already exists")
pass
output("loading config")
with config_file.open('r') as file:
config = yaml.safe_load(file)
output("downloading data")
response = urllib.request.urlopen(config['download_url'])
CHUNK = 16 * 1024
with NamedTemporaryFile() as file:
for chunk in iter(lambda: response.read(CHUNK), b''):
file.write(chunk)
with zipfile.ZipFile(file.name, 'r') as zip_ref:
zip_ref.extractall(output_dir)
output("transforming files")
for file_path in output_dir.glob("**/*.cm"):
match = re.match(r"cost_matrix_component_nr_(\d+)_size_(\d+)_cutoff_10.0.cm", file_path.name)
number, size = match.group(1, 2)
if max_size is not None and int(size) > max_size:
continue
output_path = output_dir / f"bio-nr-{number}-size-{size}.graph"
try:
transform(file_path, output_path)
except MemoryError:
output(f"\t{output_path.name} encountered memory error")
output("deleting original data")
rmtree(output_dir / "biological")
def main():
parser = argparse.ArgumentParser(
description="Downloads the specified dataset.")
parser.add_argument("--dir", type=str, default=".",
help="Path for output directory.")
parser.add_argument("--config", type=str, required=True,
help="Path for config file.")
parser.add_argument("--max-size", type=int, default=None,
help="Maximum instance size (default: unrestricted).")
data_group = parser.add_mutually_exclusive_group(required=True)
data_group.add_argument("--biological", action='store_true',
help=f"Download instances derived from COG protein similarity data.")
options = parser.parse_args()
if options.biological:
download_bio_dataset(Path(options.dir), Path(options.config), options.max_size)
if __name__ == '__main__':
main()
```
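A hypothetical programmatic use of `download_bio_dataset`; the directory, the YAML config file (which must provide a `download_url` entry) and the size cap are assumptions. The equivalent command-line call is shown in the comment.
```python
# Equivalent to: python download_dataset.py --dir data --config config.yaml --biological --max-size 100
from pathlib import Path
from download_dataset import download_bio_dataset  # assumed import path

download_bio_dataset(Path("data"), Path("config.yaml"), max_size=100)
```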
#### File: scripts/figures/lower_bound.py
```python
import numpy as np
import matplotlib.pyplot as plt
import subprocess
import yaml
from itertools import product
from pathlib import Path
from typing import List, Dict, Any
def run(path: Path, selectors: List[str] = None, lower_bounds: List[str] = None, search_strategies: List[str] = None, timelimit=10):
if search_strategies is None:
search_strategies = ["Exponential"]
if lower_bounds is None:
lower_bounds = ["SortedGreedy"]
if selectors is None:
selectors = ["MostAdjacentSubgraphs"]
for selector, lower_bound, search_strategy in product(selectors, lower_bounds, search_strategies):
try:
out = subprocess.run([
"../../cmake-build-release/fpt",
"--input", path,
"--search-strategy", search_strategy, "--all", "1",
"--permutation", str(0),
"--multiplier", str(100),
"--selector", selector,
"--lower-bound", lower_bound,
"--F", "C4P4",
"--verbosity", str(0),
"--timelimit", str(timelimit)], capture_output=True) # timeout=4*timelimit
doc = yaml.safe_load(out.stdout.decode())
if doc is None:
print(out)
continue
yield doc
except subprocess.TimeoutExpired:
print(f"timeout {selector}, {lower_bound}, {search_strategy}")
continue
except RuntimeError as e:
print(e)
def plot_experiment_docs(docs: List[Dict[str, Any]], output_path: Path) -> None:
doc = docs[0]
name = doc["instance"]["name"].split("/")[-1]
k_final = doc["solution_cost"]
fig, ax = plt.subplots(figsize=(15, 5))
ax.grid(True)
ax.set_yscale("log")
for doc in docs:
k = np.array(doc["stats"]["k"])
calls = np.array(doc["stats"]["calls"])
time = np.array(doc["stats"]["time"]) / 10**9
config = doc["config"]
x = k
y = time
        ax.plot(x, y, "o", label="{0} {1} {2}".format(config["selector"], config["lower_bound"], config["search_strategy"]))
if k_final != -1:
ax.axvline(x=k_final, c="black")
ax.set_title(name)
ax.set_xlim((0, None))
ax.set_ylabel("Time [s]")
ax.set_xlabel("Editing cost $k$")
#ax.set_ylim((1, None))
ax.legend()
plt.savefig(output_path)
plt.show()
def main():
paths = [Path("../../data/bio/bio-nr-1590-size-56.graph")]
for path in paths:
print(path)
docs = []
selectors = ["MostAdjacentSubgraphs"] # + ["MostMarkedPairs"]
lower_bounds = ["SortedGreedy", "LocalSearch", "Greedy"] + ["Trivial"]
search_strategies = ["IncrementByMultiplier"] # + ["Exponential"] # + ["PrunedDelta"]
for doc in run(path, selectors=selectors, lower_bounds=lower_bounds, search_strategies=search_strategies):
docs += [doc]
if len(docs) > 0:
plot_experiment_docs(docs, Path(path.stem + ".pdf"))
if __name__ == '__main__':
main()
``` |
{
"source": "JonasStankevicius/CenterPoint",
"score": 2
} |
#### File: JonasStankevicius/CenterPoint/dataset_curbs_test.py
```python
import open3d as o3d
import torch
import numpy as np
from dataTool import BoundingBoxFromVoxel, DataTool
from tqdm import tqdm
from time import time
import os
from det3d.torchie import Config
from det3d.datasets import build_dataset, build_dataloader
from scipy.spatial import ConvexHull
from shapely.geometry import LineString
cfg = Config.fromfile("/mnt/c/git_repos/CenterPoint/configs/curbs/curbs_centerpoint_pp_02voxel_two_pfn.py")
# cfg = Config.fromfile("/mnt/c/git_repos/CenterPoint/configs/curbs/curbs_centerpoint_voxelnet_01voxel.py")
test_cfg = cfg['test_cfg']
dataset = build_dataset(cfg.data.train)
val_dataset = build_dataset(cfg.data.val)
# [str(data) for data in tqdm(dataset)]
def calculate_stats(dataset):
all_volume = []
all_point_count = 0
all_line_length = 0
for i in tqdm(range(len(dataset.points))):
points = dataset.points[i]
lines = dataset.original_lines[i]
scenes = dataset.scenes[i]
hull = ConvexHull(points[:,:2])
# hull_points = np.array(hull.simplices)
# print(hull_points)
all_volume.append(hull.volume)
all_point_count += points.shape[0]
lines_length = sum([LineString(line).length for line in lines])
all_line_length += lines_length
print(f"{os.path.basename(dataset.scenes[i]['pc_file'])} hull.volume: {hull.volume}")
print(f"{os.path.basename(dataset.scenes[i]['pc_file'])} all_point_count: {all_point_count}")
print(f"{os.path.basename(dataset.scenes[i]['pc_file'])} lines_length: {lines_length}")
print('--------')
print(f'all_volume : {sum(all_volume)}')
print(f'all_point_count : {all_point_count}')
print(f'all_line_length : {all_line_length}')
calculate_stats(dataset)
calculate_stats(val_dataset)
dataset = build_dataloader(dataset, cfg.data.samples_per_gpu, 0)
# [str(data) for data in tqdm(dataset)]
for _ in tqdm(range(10)):
# for _ in range(len(dataset)):
t = time()
# for data in tqdm(dataset):
for data in tqdm(dataset):
# data = next(iterator)
# print(f"Data time: {time()-t}")
# t = time()
# if(len(data['voxels']) == 0 or any([len(pt) == 0 for pt in data['points']])):
# print("its bad")
# if(any([bool(n == 0) for n in data['num_voxels']])):
# print("not good")
# continue
nums = data['num_voxels'].cpu().data.numpy().cumsum()
vox_xyz = data['voxels'].cpu().data.numpy()[:,:,:3]
vox_rgb = data['voxels'].cpu().data.numpy()[:,:,3:]
vox_z_min = data['voxel_z_min'].cpu().data.numpy()
for i in range(cfg.data.samples_per_gpu):
# for i in range(len(data['points'])):
# example = collate_kitti(data)
# example = example_to_device(data, torch.device('cuda'))
# print(data['points'][i].shape)
# xyz = data['points'][i][:,:3]
# rgb = data['points'][i][:,3:]
# # cars_boxes = data["annotations"][0]['gt_boxes'][0]
# lines = data['lines'][i]
# # boxes = [BoundingBoxFromVoxel(pt, 1) for pt in objects[:, :3]]
# DataTool().VisualizePointCloud([xyz], [rgb], lines= lines, downSample=True)
start = nums[i-1] if i > 0 else 0
xyz = vox_xyz[start:nums[i],:,:3].reshape([-1,3])
rgb = vox_rgb[start:nums[i],:,3:].reshape([-1,3])
H, W, num_cls = data['hm'][0][i].size()
pts = data['gt_line_points'][0][i]
batch_reg = pts[:, :2]
batch_hei = pts[:, 2:]
ys, xs = torch.meshgrid([torch.arange(0, H), torch.arange(0, W)])
# idx = y * H + x
recreated_point = []
# height_points = []
indices = data['ind'][0][i].data.cpu().numpy()
voxel_pts = []
for j in range(len(indices)):
idx = indices[j]
if(idx == 0):
continue
y = (idx // W)
x = idx - (y * W)
xs = x + pts[j,0]
ys = y + pts[j,1]
xs = xs * test_cfg.out_size_factor * test_cfg.voxel_size[0] + test_cfg.pc_range[0]
ys = ys * test_cfg.out_size_factor * test_cfg.voxel_size[1] + test_cfg.pc_range[1]
voxel_pts.append([
x * test_cfg.out_size_factor * test_cfg.voxel_size[0] + test_cfg.pc_range[0] + test_cfg.voxel_size[0]/2,
y * test_cfg.out_size_factor * test_cfg.voxel_size[1] + test_cfg.pc_range[1] + test_cfg.voxel_size[1]/2,
pts[j,2],
])
recreated_point.append([xs, ys, pts[j,2]])
# height_points.append([xs, ys, vox_z_min[i, y, x]])
# ys = ys.view(1, H, W).repeat(batch, 1, 1)
# xs = xs.view(1, H, W).repeat(batch, 1, 1)
# xs = xs.view(batch, -1, 1) + batch_reg[:, :, 0:1]
# ys = ys.view(batch, -1, 1) + batch_reg[:, :, 1:2]
# xs = xs * test_cfg.out_size_factor * test_cfg.voxel_size[0] + test_cfg.pc_range[0]
# ys = ys * test_cfg.out_size_factor * test_cfg.voxel_size[1] + test_cfg.pc_range[1]
# batch_box_preds = torch.cat([xs, ys, batch_hei], dim=2)
boxes = [BoundingBoxFromVoxel(vxl, [test_cfg.voxel_size[0], test_cfg.voxel_size[1], 1]) for vxl in voxel_pts]
# DataTool().VisualizePointCloud([xyz, np.array(recreated_point), np.array(height_points)], [rgb if len(rgb) > 0 else None, [1,0,0], [0,1,0]], bBoxes=boxes)
DataTool().VisualizePointCloud([xyz, np.array(recreated_point)], [rgb if len(rgb) > 0 else None, [1,0,0]], bBoxes=boxes)
DataTool().VisualizePointCloud([xyz, np.array(recreated_point)], [None, [1,0,0]], bBoxes=boxes)
# DataTool().VisualizePointCloud([np.array(recreated_point), data['line_points'][i]], [[1,0,0], [0,1,0]])
# DataTool().VisualizePointCloud([xyz, data['line_points'][i]], [rgb if len(rgb) > 0 else None, [1,0,0]])
``` |
{
"source": "JonasStankevicius/ConvPoint_Keras",
"score": 2
} |
#### File: ConvPoint_Keras/configs/Config.py
```python
from dataTool import ReadXYZRGBLBL, ProcessFeatures, RotatePointCloud
from imports import *
from enum import Enum
class DataPipeline(Enum):
Sequence = 1
tfData = 2
class Config:
@staticmethod
def IsLocalMachine():
if os.path.isdir("C:/Program Files") or os.path.isdir("/home/jonas"):
return True
else:
return False
@staticmethod
def IsWindowsMachine():
if os.path.isdir("C:/"):
return True
else:
return False
def BatchSize(self):
if Config.IsLocalMachine():
size = 8
else:
size = 32
return ceil(size / self.input_tile_count / (self.npoints / Config.npoints))
#Placeholders
classCount = Label.Semantic3D.Count-1
classNames = Label.Semantic3D.Names[1:]
validClasses = []
class_weights = np.ones([Label.Semantic3D.Count])
class_color = np.random.uniform(0, 1, size = [classCount, 3])
validation_split = 0.3
trainFiles = validationFiles = None
testFiles = []
excludeFiles = []
Paths = Paths.Semantic3D
dataSummaryFile = None
testsAreFixed = True
oneSamplePerFile = True
epochs = 100
pointComponents = 3
featureComponents = 3 #rgb
classCount = 0
npoints = 8192
# npoints = 100000
blocksize = 8
test_step = 0.5
name = ""
storeFilesInRAM = False
oneSamplePerFile = True
dataFileExtension = ""
rawDataExtension = ""
classBalanceTrain = False
classBalanceTest = False
input_tile_count = 1
#Algorithm configuration
noFeature = False
Fusion = False
Scale = False
Rotate = False
Mirror = False
Jitter = False
FtrAugment = False
ValidationInterval = 1 # validate model after each N epochs
TestInterval = None # test model after each N epochs
ValidateOnOtherData = None
DataPipeline = DataPipeline.Sequence
logsPath = "./logs"
### MODEL CONFIG
pl = 64
### MODEL CONFIG
def BuildSpecDict(self):
return {
"MultiScale": True if self.input_tile_count > 1 else False,
"noFeature" : self.noFeature,
"Fusion" : self.Fusion,
"Scale" : self.Scale,
"Rotate" : self.Rotate,
"Mirror" : self.Mirror,
"Jitter" : self.Jitter,
"FtrAugment" : False if self.noFeature else self.FtrAugment,
}
dataSummary = None
datasets = []
def __init__(self, skipInitialization):
return
def __init__(self, validationSplit = None, skipAdding = False):
if not(validationSplit is None):
self.validation_split = validationSplit
self.dataSummaryFile = self.Paths.pointCloudPath + '/summary.csv'
import pandas as pd
if not(self.dataSummaryFile is None) and self.dataSummaryFile.endswith(".csv") and os.path.exists(self.dataSummaryFile):
self.dataSummary = pd.read_csv(self.dataSummaryFile)
self.train_files_indices, self.validation_files_indices = self.SplitData(self.AllFiles())
elif self.trainFiles is None and self.validationFiles is None:
self.dataSummary = pd.DataFrame.from_dict({ "File" : Paths.GetFiles(self.Paths.processedTrain, onlyNames=True) })
self.train_files_indices, self.validation_files_indices = self.SplitData(self.AllFiles())
else:
self.dataSummary = pd.DataFrame.from_dict({ "File" : self.trainFiles+self.validationFiles })
self.train_files_indices = list(range(len(self.trainFiles)))
self.validation_files_indices = list(range(len(self.trainFiles), len(self.trainFiles)+len(self.validationFiles)))
self.RAMfiles = {}
if(self.storeFilesInRAM and self.DataPipeline == DataPipeline.Sequence):
self.RAMFilesPointCount = {}
for fileName in np.concatenate([self.TrainFiles(), self.ValidationFiles()]):
file = os.path.join(self.Paths.processedTrain, fileName+self.dataFileExtension)
self.RAMfiles[file] = self.ReadFile(file)
self.RAMFilesPointCount[fileName] = len(self.RAMfiles[file][0])
if(not skipAdding):
self.datasets.append(self)
def ReadFile(self, filePath):
if(not filePath.endswith(self.dataFileExtension)):
filePath += self.dataFileExtension
if(filePath in self.RAMfiles):
return self.RAMfiles[filePath]
else:
return ProcessFeatures(self, ReadXYZRGBLBL(filePath))
def ConcatDataForTraining(self, config_type):
cts = config_type(skipAdding = True)
self.datasets.append(cts)
def GetRandomDataset(self, validation = False):
datasets = np.array([[i, len(dt.ValidationFiles()) if validation else len(dt.TrainFiles())] for i, dt in enumerate(self.datasets)])
datasets = datasets[datasets[:, 1].astype(np.int64).argsort()] # sort files by point count
datasets[:, 1] = datasets[:, 1].astype(np.int64).cumsum() # accumulate point count
pt = np.random.randint(0, datasets[-1, 1], dtype=np.int64) if (len(datasets) > 1) else 0 #random points
index = np.argmax(datasets[:, 1] > pt)
return self.datasets[index]
def ChooseRandomFileByNumberOfPoints(self, files):
files = files[files[:, 2].astype(np.int64).argsort()] # sort files by point count
files[:, 2] = files[:, 2].astype(np.int64).cumsum() # accumulate point count
pt = np.random.randint(0, files[-1, 2], dtype=np.int64) if (len(files) > 1) else 0 #random point
index = np.argmax(files[:, 2].astype(np.int64) > pt)
return files[index]
def RotateClouds(self, validation):
configs = self.datasets if len(self.datasets) > 0 else [self]
for config in configs:
files = config.ValidationFiles() if validation else config.TrainFiles()
for file in files:
file = os.path.join(self.Paths.processedTrain, file)
if(file in config.RAMfiles):
pts, fts, lbl = config.RAMfiles[file]
pts = RotatePointCloud(pts)
config.RAMfiles[file] = (pts, fts, lbl)
def GetData(self, validation):
configs = self.datasets if len(self.datasets) > 0 else [self]
file_lists = [[i, data.ValidationFiles() if validation else data.TrainFiles() ] for i, data in enumerate(configs)]
files = []
for i, file_list in file_lists:
if(configs[i].storeFilesInRAM):
files.extend([[i, file, configs[i].RAMFilesPointCount[file]] for file in file_list])
else:
files.extend([[i, file, int(self.dataSummary.loc[self.dataSummary['File'] == file]['Points'])] for file in file_list])
file = self.ChooseRandomFileByNumberOfPoints(np.array(files))
cts = configs[file[0].astype(int)]
fileName = str(file[1])
filePath = os.path.join(cts.Paths.processedTrain, fileName)
pts, fts, lbs = cts.ReadFile(filePath)
return pts, fts, lbs
def GetClassFile(self, label : int, validation = False):
"""
Input: class number
Output: sample file that contains this class
"""
file_indices = self.validation_files_indices if validation else self.train_files_indices
if(len(self.dataSummary.columns) == 1):
allFiles = np.array(self.dataSummary['File'])[file_indices]
rand = np.random.randint(len(allFiles))
fileName = allFiles[rand]
elif(label != -1):
files = np.array(self.dataSummary[['File', str(self.validClasses[label])]])[file_indices]
# files = np.array(self.dataSummary[['File', str(self.validClasses[label])]])[self.train_files_indices]
files = files[files[:, 1].astype(int).argsort()] # sort files by point count
files[:, 1] = files[:, 1].astype(int).cumsum() # accumulate point count
pt = np.random.randint(0, files[-1, 1], dtype=np.int64) if (len(files) > 1) else 0 #random points
index = np.argmax(files[:, 1] > pt)
fileName = files[index, 0]
else:
# find files that contain all classes
            idx = np.where(np.sum(np.array(self.dataSummary[[str(cls) for cls in self.validClasses]])[file_indices] > 0, -1) == len(self.validClasses))
fileName = self.dataSummary.loc[file_indices[idx[np.random.randint(0, len(idx))]][0]]['File']
return os.path.join(self.Paths.processedTrain, fileName)
def DataPath(self):
return self.Paths.processedTrain
def RawDataPath(self):
return self.Paths.rawTrain
def AllFiles(self):
return np.array(self.dataSummary['File'])
def TrainFiles(self):
return np.array(self.dataSummary['File'])[self.train_files_indices]
def ValidationFiles(self):
return np.array(self.dataSummary['File'])[self.validation_files_indices]
def TestFiles(self):
return self.testFiles
def SplitData(self, files):
# np.random.seed(0) #reproduce same random choice for files
train_files_indices = np.array([i for i in range(len(files))
if os.path.exists(os.path.join(self.Paths.processedTrain, files[i]+self.dataFileExtension))
and not (os.path.splitext(os.path.basename(files[i]))[0] in self.testFiles)])
random_indices = np.random.choice(range(len(train_files_indices)), int(len(train_files_indices)*self.validation_split), replace=False).astype(int)
validation_files_indices = train_files_indices[random_indices]
train_files_indices = np.delete(train_files_indices, random_indices, axis=0)
return train_files_indices, validation_files_indices
def Name(self, UID = ""):
modelName = self.name
trainfiles = np.sum([len(cts.TrainFiles()) for cts in self.datasets]) if (len(self.datasets) > 0) else len(self.TrainFiles())
validationfiles = np.sum([len(cts.ValidationFiles()) for cts in self.datasets]) if (len(self.datasets) > 0) else len(self.ValidationFiles())
modelName += f"({trainfiles}-{validationfiles}-{len(self.TestFiles())})"
for spec, value in self.BuildSpecDict().items():
if(value == True):
modelName += f"({spec})"
if(UID != ""):
modelName += f"_{UID}"
return modelName
def NormalizeData(self, points, features = None, labels = None, validation = False):
if(labels is None):
return points, features, labels
if(not validation):
mask = np.where(labels != 0)[0]
if(len(mask) > 0):
points = points[mask]
labels = labels[mask]
if(not features is None):
features = features[mask]
# labels = self.MapLabels(labels, mapLabels)
labels -= 1
return points, features, labels
def RevertLabels(self, labels):
return labels
def MapLabels(self, labels, type):
return labels
def RevertData(self, points, features = None, labels = None):
return points, features, labels
def GetMetaInfo(self):
feature = 'none'
if not self.noFeature and self.featureComponents == 1:
feature = 'intensity'
elif not self.noFeature and self.featureComponents == 3:
feature = 'RGB'
metaInfo = {
"feature": feature,
"classes": self.classNames
}
return metaInfo
@staticmethod
def RemoveUID(name : str):
return name.replace(f"_{Config.ParseModelUID(name)}", "")
@staticmethod
def UID():
import uuid
return uuid.uuid4().hex[:10]
@staticmethod
def ParseModelConfig(file):
config = Paths.FileName(file).split("_")[0].replace("("," ").replace(")","").replace("vox ","").split(" ")
const = None
if(config[0] == NPM3D.name):
const = NPM3D()
if(config[0] == Semantic3D.name):
const = Semantic3D()
for conf in config[1:]:
if conf == "noFeature" or conf == "NOCOL":
const.noFeature = True
elif conf == "Fusion":
const.Fusion = True
elif conf == "Scale":
const.Scale = True
elif conf == "Rotate":
const.Rotate = True
elif conf == "Mirror":
const.Mirror = True
elif conf == "Jitter":
const.Jitter = True
elif conf == "FtrAugment":
const.FtrAugment = True
return const
@staticmethod
def ParseModelUID(file):
parts = Paths.FileName(file).split("_")
if(len(parts) >= 2):
return parts[1]
else:
return None
@staticmethod
def ParseModelName(file, withUID = True):
parts = Paths.FileName(file, withoutExt = False).split("_")
name = parts[0]
if(withUID and len(parts) > 1):
name += "_"+parts[1]
return name
# def TestFiles(self):
# return Paths.JoinPaths(self.Paths.processedTrain, self.testFiles)
# def TrainFiles(self):
# return Paths.GetFiles(self.Paths.processedTrain, excludeFiles = self.TestFiles()+self.excludeFiles)
# def ValidationFiles(self):
# return []
def GetClassColors(self):
return np.random.uniform(0, 1, size = [self.classCount, 3])
def GetDataPipeline(self, train_pipeline = True, batch_count = None):
return None
```
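A hypothetical sketch of the static model-name helpers defined above; the file name is invented, the import assumes the package layout of this repository, and it is assumed that `Paths.FileName` strips the directory and file extension.
```python
# Hypothetical use of the model-name helpers; the name below is invented.
from configs.Config import Config  # assumed import path

name = "Semantic3D(100-30-5)(Rotate)(Mirror)_ab12cd34ef.h5"
print(Config.ParseModelUID(name))   # -> "ab12cd34ef" (the part after the first "_")
print(Config.RemoveUID(name))       # -> the same name with "_ab12cd34ef" removed
```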
#### File: ConvPoint_Keras/configs/LaneMarkings.py
```python
from configs import Config, Aerial
from imports import *
class LaneMarkings(Aerial):
pointComponents = 3
noFeature = False
featureComponents = 1
classCount = 2
classNames = Label.LaneMarkings.Names
blocksize = 1
test_step = 0.5
name = "LaneMarkings"
def NormalizeData(self, points, features = None, labels = None):
return points, features * 255, labels
class LaneMarkingsSmallDense(LaneMarkings):
blocksize = 1
test_step = 0.9
ValidationInterval = 1
TestInterval = 3
name = "LaneMarkingsSmallDense"
Paths = Paths.LaneMarkingsSmallDense
def ValidationFiles(self):
return Paths.GetFiles(self.Paths.processedTest)
class LaneMarkingsLargeSparse(LaneMarkings):
blocksize = 6
test_step = 3
ValidationInterval = 1
TestInterval = 3
test_split = 0
name = "LaneMarkingsLargeSparse"
Paths = Paths.LaneMarkingsLargeSparse
def TestFiles(self):
return self.train_files
def ValidationFiles(self):
return Paths.GetFiles(self.Paths.processedTest)
``` |
{
"source": "jonasstein/nikola",
"score": 2
} |
#### File: plugins/command/subtheme.py
```python
import configparser
import os
import requests
from nikola import utils
from nikola.plugin_categories import Command
LOGGER = utils.get_logger('subtheme')
def _check_for_theme(theme, themes):
for t in themes:
if t.endswith(os.sep + theme):
return True
return False
class CommandSubTheme(Command):
"""Given a swatch name from bootswatch.com and a parent theme, creates a custom theme."""
name = "subtheme"
doc_usage = "[options]"
doc_purpose = "given a swatch name from bootswatch.com or hackerthemes.com and a parent theme, creates a custom"\
" theme"
cmd_options = [
{
'name': 'name',
'short': 'n',
'long': 'name',
'default': 'custom',
'type': str,
'help': 'New theme name (default: custom)',
},
{
'name': 'swatch',
'short': 's',
'default': '',
'type': str,
'help': 'Name of the swatch from bootswatch.com.'
},
{
'name': 'parent',
'short': 'p',
'long': 'parent',
'default': 'bootstrap4',
'help': 'Parent theme name (default: bootstrap4)',
},
]
def _execute(self, options, args):
"""Given a swatch name and a parent theme, creates a custom theme."""
name = options['name']
swatch = options['swatch']
if not swatch:
LOGGER.error('The -s option is mandatory')
return 1
parent = options['parent']
version = '4'
# Check which Bootstrap version to use
themes = utils.get_theme_chain(parent, self.site.themes_dirs)
if _check_for_theme('bootstrap', themes) or _check_for_theme('bootstrap-jinja', themes):
version = '2'
elif _check_for_theme('bootstrap3', themes) or _check_for_theme('bootstrap3-jinja', themes):
version = '3'
elif _check_for_theme('bootstrap4', themes) or _check_for_theme('bootstrap4-jinja', themes):
version = '4'
elif not _check_for_theme('bootstrap4', themes) and not _check_for_theme('bootstrap4-jinja', themes):
LOGGER.warn(
'"subtheme" only makes sense for themes that use bootstrap')
elif _check_for_theme('bootstrap3-gradients', themes) or _check_for_theme('bootstrap3-gradients-jinja', themes):
LOGGER.warn(
'"subtheme" doesn\'t work well with the bootstrap3-gradients family')
LOGGER.info("Creating '{0}' theme from '{1}' and '{2}'".format(
name, swatch, parent))
utils.makedirs(os.path.join('themes', name, 'assets', 'css'))
for fname in ('bootstrap.min.css', 'bootstrap.css'):
if swatch in [
'bubblegum', 'business-tycoon', 'charming', 'daydream',
'executive-suite', 'good-news', 'growth', 'harbor', 'hello-world',
'neon-glow', 'pleasant', 'retro', 'vibrant-sea', 'wizardry']: # Hackerthemes
LOGGER.info(
'Hackertheme-based subthemes often require you use a custom font for full effect.')
if version != '4':
LOGGER.error(
'The hackertheme subthemes are only available for Bootstrap 4.')
return 1
if fname == 'bootstrap.css':
url = 'https://raw.githubusercontent.com/HackerThemes/theme-machine/master/dist/{swatch}/css/bootstrap4-{swatch}.css'.format(
swatch=swatch)
else:
url = 'https://raw.githubusercontent.com/HackerThemes/theme-machine/master/dist/{swatch}/css/bootstrap4-{swatch}.min.css'.format(
swatch=swatch)
else: # Bootswatch
url = 'https://bootswatch.com'
if version:
url += '/' + version
url = '/'.join((url, swatch, fname))
LOGGER.info("Downloading: " + url)
r = requests.get(url)
if r.status_code > 299:
LOGGER.error('Error {} getting {}', r.status_code, url)
return 1
data = r.text
with open(os.path.join('themes', name, 'assets', 'css', fname),
'w+') as output:
output.write(data)
with open(os.path.join('themes', name, '%s.theme' % name), 'w+') as output:
parent_theme_data_path = utils.get_asset_path(
'%s.theme' % parent, themes)
cp = configparser.ConfigParser()
cp.read(parent_theme_data_path)
cp['Theme']['parent'] = parent
cp['Family'] = {'family': cp['Family']['family']}
cp.write(output)
LOGGER.notice(
'Theme created. Change the THEME setting to "{0}" to use it.'.format(name))
``` |
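A hypothetical way to drive the plugin above from the root of a Nikola site; the swatch name `flatly` and the theme name `mytheme` are placeholders.
```python
# Equivalent to running "nikola subtheme -n mytheme -s flatly -p bootstrap4";
# the swatch and theme names are placeholders.
import subprocess

subprocess.run(
    ["nikola", "subtheme", "-n", "mytheme", "-s", "flatly", "-p", "bootstrap4"],
    check=True,
)
```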
{
"source": "jonasstenling/bumper",
"score": 3
} |
#### File: jonasstenling/bumper/method.py
```python
from eval import EvalResult
class PluginMount(type):
'''
Metaclass used by MethodProvider to create plugin architecture.
'''
def __init__(cls, name, bases, attrs):
if not hasattr(cls, 'plugins'):
# This branch only executes when processing the mount point itself.
# So, since this is a new plugin type, not an implementation, this
# class shouldn't be registered as a plugin. Instead, it sets up a
# list where plugins can be registered later.
cls.plugins = []
else:
# This must be a plugin implementation, which should be registered.
# Simply appending it to the list is all that's needed to keep
# track of it later.
cls.plugins.append(cls)
def get_plugin(cls, method, *args, **kwargs):
'''Fetches plugin for *method* and returns the result.'''
for plugin in cls.plugins:
if plugin.method_name == method:
return plugin(*args, **kwargs)
class MethodProvider:
'''
Mount point for plugins which refer to Rule evaluation to be performed.
'''
def __init__(self):
pass
__metaclass__ = PluginMount
class StringMatch(MethodProvider):
'''
Implements a string matching rule for CiscoConfParse objects.
'''
method_name = 'string_match'
def __init__(self):
MethodProvider.__init__(self)
def __call__(self, rule, config):
'''
Returns: List of EvalResult objects.
Parameters:
rule Rule object to be evaluated.
config CiscoConfParse object containing the configuration to
evaluate.
'''
mandatory = rule.params.get('mandatory')
objs = config.find_objects(rule.selection)
evaluation = []
for obj in objs:
if obj.children:
for i in mandatory:
match = obj.re_search_children(i)
if match:
evaluation.append(EvalResult(result=True, cfgline=obj,
rule=i))
else:
evaluation.append(EvalResult(result=False, cfgline=obj,
rule=i))
else:
for i in mandatory:
match = obj.re_search(i, default=None)
if match:
evaluation.append(EvalResult(result=True, cfgline=obj,
rule=i))
else:
evaluation.append(EvalResult(result=False,
cfgline=obj, rule=i))
return evaluation
```
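A hypothetical lookup sketch for the plugin mount above (this repository targets Python 2, where the `__metaclass__` attribute takes effect): importing the module registers `StringMatch`, after which a checker can be fetched by its `method_name`. The import path is an assumption.
```python
# Hypothetical plugin lookup (Python 2 semantics assumed for the metaclass).
from method import MethodProvider  # importing the module registers StringMatch

checker = MethodProvider.get_plugin('string_match')
print(type(checker).__name__)  # StringMatch
```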
#### File: jonasstenling/bumper/run.py
```python
from ruleset import RuleSet
from ciscoconfparse import CiscoConfParse
def load_config(config_file):
    '''Returns the parsed configuration as a CiscoConfParse object.'''
return CiscoConfParse(config_file)
def main():
'''Main entry point for module.'''
config = load_config('test.conf')
myrules = RuleSet()
myrules.load_rules('syntax.yml')
myresult = []
for rule in myrules.rules:
result = rule.apply(config)
for element in result:
if element.result == False:
print "Rule evaluation failed:\n Cfgline: '{}'\n Rule: {}".format(
element.cfgline.text,
element.rule)
myresult.append(result)
return myresult
if __name__ == '__main__':
main()
``` |