metadata (dict) | text (string, lengths 60–3.49M)
---|---
{
"source": "JohnDoee/wampire",
"score": 2
} |
#### File: wampyre/tests/test_session.py
```python
import pytest
from ..opcodes import OP
from ..pattern import Pattern
from ..realm import realm_manager
from ..session import STATE_CLOSED, STATE_UNAUTHENTICATED
from ..transports.base import TransportBase
def transport_base():
class TestTransportBase(TransportBase):
def __init__(self):
self._sends = []
self._closed = False
self._last_id = 0
self._method_uri_allowed = lambda method, uri: True
super().__init__()
def send(self, opcode, *args):
self._sends.append((opcode, args))
def realm_allowed(self, realm):
return "realm_deny" not in realm
def close_session(self):
self._closed = True
def set_state(self, state):
self.session.state = state
def get_reply(self):
return self._sends.pop()
def is_empty(self):
return not self._sends
def generate_id(self):
self._last_id += 1
return self._last_id
def connect(self, realm):
self.receive(OP.HELLO, realm, {})
return self.get_reply()
def disconnect(self):
self.session.close_session()
def method_uri_allowed(self, method, uri):
return self._method_uri_allowed(method, uri)
return TestTransportBase()
@pytest.fixture
def transport():
yield transport_base()
realm_manager.realms = {}
@pytest.fixture
def transport2():
yield transport_base()
realm_manager.realms = {}
@pytest.fixture
def transport3():
yield transport_base()
realm_manager.realms = {}
def test_hello_goodbye(transport):
transport.receive(OP.HELLO, "a.realm", {})
opcode, args = transport.get_reply()
assert opcode == OP.WELCOME
assert Pattern("id", "dict")(*args)
transport.receive(OP.GOODBYE, {}, "wamp.close.goodbye_and_out")
opcode, args = transport.get_reply()
assert opcode == OP.GOODBYE
assert Pattern("dict", "uri!")(*args)
def test_subscribe_unsubscribe(transport, transport2, transport3):
transport.connect("a.realm")
transport2.connect("a.realm")
transport3.connect("a.realm")
transport.receive(
OP.PUBLISH, transport.generate_id(), {}, "a.topic", ["a"], {"b": "c"}
)
assert transport.is_empty()
assert transport2.is_empty()
assert transport3.is_empty()
transport2.receive(OP.SUBSCRIBE, transport2.generate_id(), {}, "a.topic")
opcode, args = transport2.get_reply()
assert opcode == OP.SUBSCRIBED
assert Pattern("id", "id")(*args)
assert transport2._last_id == args[0]
transport2_a_topic_subscription_id = args[1]
transport.receive(
OP.PUBLISH, transport.generate_id(), {}, "a.topic", ["a"], {"b": "c"}
)
opcode, args = transport2.get_reply()
assert opcode == OP.EVENT
assert Pattern("id", "id", "dict", "list", "dict")(*args)
assert args[0] == transport2_a_topic_subscription_id
assert args[3] == ["a"]
assert args[4] == {"b": "c"}
assert transport.is_empty()
assert transport2.is_empty()
assert transport3.is_empty()
transport3.receive(OP.SUBSCRIBE, transport3.generate_id(), {}, "a.topic")
opcode, args = transport3.get_reply()
assert opcode == OP.SUBSCRIBED
transport3_a_topic_subscription_id = args[1]
transport2.receive(
OP.PUBLISH, transport2.generate_id(), {}, "a.topic", ["b"], {"c": "d"}
)
opcode, args = transport2.get_reply()
assert opcode == OP.EVENT
assert Pattern("id", "id", "dict", "list", "dict")(*args)
assert args[0] == transport2_a_topic_subscription_id
assert args[3] == ["b"]
assert args[4] == {"c": "d"}
opcode, args = transport3.get_reply()
assert opcode == OP.EVENT
assert Pattern("id", "id", "dict", "list", "dict")(*args)
assert args[0] == transport3_a_topic_subscription_id
assert args[3] == ["b"]
assert args[4] == {"c": "d"}
assert transport.is_empty()
assert transport2.is_empty()
assert transport3.is_empty()
transport2.receive(
OP.UNSUBSCRIBE, transport2.generate_id(), transport2_a_topic_subscription_id
)
opcode, args = transport2.get_reply()
assert opcode == OP.UNSUBSCRIBED
assert Pattern("id")(*args)
assert transport2._last_id == args[0]
transport2.receive(
OP.PUBLISH, transport2.generate_id(), {}, "a.topic", ["b"], {"c": "d"}
)
opcode, args = transport3.get_reply()
assert transport.is_empty()
assert transport2.is_empty()
assert transport3.is_empty()
transport3.receive(
OP.UNSUBSCRIBE, transport3.generate_id(), transport3_a_topic_subscription_id
)
opcode, args = transport3.get_reply()
assert opcode == OP.UNSUBSCRIBED
assert Pattern("id")(*args)
assert transport3._last_id == args[0]
transport2.receive(
OP.PUBLISH, transport2.generate_id(), {}, "a.topic", ["b"], {"c": "d"}
)
assert transport.is_empty()
assert transport2.is_empty()
assert transport3.is_empty()
transport3.receive(OP.SUBSCRIBE, transport3.generate_id(), {}, "a.topic")
opcode, args = transport3.get_reply()
assert opcode == OP.SUBSCRIBED
transport3_a_topic_subscription_id = args[1]
transport.receive(
OP.PUBLISH, transport.generate_id(), {"acknowledge": True}, "a.topic", ["b"]
)
opcode, args = transport.get_reply()
assert opcode == OP.PUBLISHED
assert Pattern("id", "id")(*args)
assert transport._last_id == args[0]
opcode, args = transport3.get_reply()
assert opcode == OP.EVENT
assert Pattern("id", "id", "dict", "list")(*args)
assert args[0] == transport3_a_topic_subscription_id
assert args[3] == ["b"]
assert transport.is_empty()
assert transport2.is_empty()
assert transport3.is_empty()
def test_register_call_yield(transport, transport2, transport3):
transport.connect("a.realm")
transport2.connect("a.realm")
transport3.connect("a.realm")
transport.receive(OP.REGISTER, transport.generate_id(), {}, "a.procedure")
opcode, args = transport.get_reply()
assert opcode == OP.REGISTERED
assert Pattern("id", "id")(*args)
assert transport._last_id == args[0]
assert transport.is_empty()
transport_register_id = args[1]
transport3.receive(OP.REGISTER, transport.generate_id(), {}, "a.procedure.2")
opcode, args = transport3.get_reply()
transport2.receive(
OP.CALL, transport2.generate_id(), {}, "a.procedure", ["a"], {"b": "c"}
)
assert transport2.is_empty()
opcode, args = transport.get_reply()
assert opcode == OP.INVOCATION
assert Pattern("id", "id", "dict", "list", "dict")(*args)
assert transport.is_empty()
assert args[1] == transport_register_id
assert args[3] == ["a"]
assert args[4] == {"b": "c"}
transport.receive(OP.YIELD, args[0], {}, ["c"], {"d": "e"})
assert transport.is_empty()
assert transport3.is_empty()
opcode, args = transport2.get_reply()
assert opcode == OP.RESULT
assert transport2._last_id == args[0]
assert args[2] == ["c"]
assert args[3] == {"d": "e"}
assert transport.is_empty()
assert transport2.is_empty()
assert transport3.is_empty()
def test_inter_realm_communication(transport, transport2):
transport.connect("a.realm")
transport2.connect("another.realm")
transport2.receive(OP.SUBSCRIBE, transport2.generate_id(), {}, "a.topic")
opcode, args = transport2.get_reply()
transport.receive(
OP.PUBLISH, transport.generate_id(), {}, "a.topic", ["a"], {"b": "c"}
)
assert transport.is_empty()
assert transport2.is_empty()
def test_failed_register_unregister(transport, transport2):
transport.connect("a.realm")
transport2.connect("a.realm")
transport.receive(OP.REGISTER, transport.generate_id(), {}, "a.procedure")
opcode, args = transport.get_reply()
assert opcode == OP.REGISTERED
assert Pattern("id", "id")(*args)
assert transport._last_id == args[0]
assert transport.is_empty()
transport_register_id = args[1]
transport.receive(OP.REGISTER, transport.generate_id(), {}, "a.procedure")
opcode, args = transport.get_reply()
assert opcode == OP.ERROR
assert Pattern("opcode", "id", "dict", "uri!")(*args)
assert args[0] == OP.REGISTER
assert args[3] == "wamp.error.procedure_already_exists"
assert transport.is_empty()
transport2.receive(OP.REGISTER, transport2.generate_id(), {}, "a.procedure")
opcode, args = transport2.get_reply()
assert opcode == OP.ERROR
assert Pattern("opcode", "id", "dict", "uri!")(*args)
assert args[0] == OP.REGISTER
assert args[3] == "wamp.error.procedure_already_exists"
assert transport2.is_empty()
transport2.receive(OP.UNREGISTER, transport2.generate_id(), transport_register_id)
opcode, args = transport2.get_reply()
assert opcode == OP.ERROR
assert Pattern("opcode", "id", "dict", "uri!")(*args)
assert args[0] == OP.UNREGISTER
assert args[3] == "wamp.error.no_such_registration"
assert transport2.is_empty()
transport.receive(OP.UNREGISTER, transport.generate_id(), transport_register_id)
opcode, args = transport.get_reply()
assert opcode == OP.UNREGISTERED
assert Pattern("id")(*args)
assert args[0] == transport._last_id
assert transport.is_empty()
transport.receive(OP.UNREGISTER, transport.generate_id(), transport_register_id)
opcode, args = transport.get_reply()
assert opcode == OP.ERROR
assert Pattern("opcode", "id", "dict", "uri!")(*args)
assert args[0] == OP.UNREGISTER
assert args[3] == "wamp.error.no_such_registration"
assert transport.is_empty()
def test_failed_mixed_unsubscribe(transport, transport2):
transport.connect("a.realm")
transport2.connect("a.realm")
transport.receive(OP.SUBSCRIBE, transport.generate_id(), {}, "a.topic")
opcode, args = transport.get_reply()
transport_a_topic_subscription_id = args[1]
transport2.receive(
OP.UNSUBSCRIBE, transport2.generate_id(), transport_a_topic_subscription_id
)
opcode, args = transport2.get_reply()
assert opcode == OP.ERROR
assert Pattern("opcode", "id", "dict", "uri!")(*args)
assert args[0] == OP.UNSUBSCRIBE
assert args[3] == "wamp.error.no_such_subscription"
assert transport2.is_empty()
transport.receive(
OP.UNSUBSCRIBE, transport.generate_id(), transport_a_topic_subscription_id
)
opcode, args = transport.get_reply()
assert opcode == OP.UNSUBSCRIBED
assert Pattern("id")(*args)
assert transport.is_empty()
transport.receive(
OP.UNSUBSCRIBE, transport.generate_id(), transport_a_topic_subscription_id
)
opcode, args = transport.get_reply()
assert opcode == OP.ERROR
assert Pattern("opcode", "id", "dict", "uri!")(*args)
assert args[0] == OP.UNSUBSCRIBE
assert args[3] == "wamp.error.no_such_subscription"
assert transport.is_empty()
def test_call_invocation_error(transport, transport2):
transport.connect("a.realm")
transport2.connect("a.realm")
transport.receive(OP.REGISTER, transport.generate_id(), {}, "a.procedure")
opcode, args = transport.get_reply()
transport2.receive(
OP.CALL, transport2.generate_id(), {}, "a.procedure", ["a"], {"b": "c"}
)
opcode, args = transport.get_reply()
transport.receive(
OP.ERROR,
OP.INVOCATION,
args[0],
{},
"a.procedure.error.no_happy_time",
["a"],
{"b": "c"},
)
opcode, args = transport2.get_reply()
assert opcode == OP.ERROR
assert Pattern("opcode", "id", "dict", "uri", "list", "dict")(*args)
assert args[0] == OP.CALL
assert args[1] == transport2._last_id
assert args[3] == "a.procedure.error.no_happy_time"
assert transport2.is_empty()
def test_call_unknown(transport):
transport.connect("a.realm")
transport.receive(
OP.CALL, transport.generate_id(), {}, "a.procedure", ["a"], {"b": "c"}
)
opcode, args = transport.get_reply()
assert opcode == OP.ERROR
assert Pattern("opcode", "id", "dict", "uri!")(*args)
assert args[0] == OP.CALL
assert args[1] == transport._last_id
assert args[3] == "wamp.error.no_such_procedure"
assert transport.is_empty()
def test_call_connection_lost(transport, transport2):
transport.connect("a.realm")
transport2.connect("a.realm")
transport.receive(OP.REGISTER, transport.generate_id(), {}, "a.procedure")
opcode, args = transport.get_reply()
transport2.receive(
OP.CALL, transport2.generate_id(), {}, "a.procedure", ["a"], {"b": "c"}
)
transport.disconnect()
opcode, args = transport2.get_reply()
assert opcode == OP.ERROR
assert Pattern("opcode", "id", "dict", "uri!")(*args)
assert args[0] == OP.CALL
assert args[1] == transport._last_id
assert args[3] == "wamp.error.callee_lost"
assert transport2.is_empty()
def test_connection_lost_unregister_disable_calls(transport, transport2):
transport.connect("a.realm")
transport2.connect("a.realm")
transport2.receive(OP.REGISTER, transport2.generate_id(), {}, "a.procedure")
opcode, args = transport2.get_reply()
transport2.receive(OP.SUBSCRIBE, transport2.generate_id(), {}, "a.topic")
opcode, args = transport2.get_reply()
transport2.disconnect()
transport.receive(
OP.CALL, transport.generate_id(), {}, "a.procedure", ["a"], {"b": "c"}
)
opcode, args = transport.get_reply()
assert opcode == OP.ERROR
assert Pattern("opcode", "id", "dict", "uri!")(*args)
assert args[0] == OP.CALL
assert args[1] == transport._last_id
assert args[3] == "wamp.error.no_such_procedure"
assert transport.is_empty()
transport.receive(
OP.PUBLISH, transport.generate_id(), {}, "a.topic", ["b"], {"c": "d"}
)
def test_invalid_opcodes_syntaxes(transport):
assert transport.session.state == STATE_UNAUTHENTICATED
transport.connect("a.realm")
transport.receive(OP.REGISTER, transport.generate_id(), "a.bogus.procedure")
opcode, args = transport.get_reply()
assert opcode == OP.ABORT
assert Pattern("dict", "uri!")(*args)
assert args[1] == "wamp.error.protocol_violation"
assert transport.is_empty()
assert transport.session.state == STATE_CLOSED
transport.connect("a.realm")
transport.receive(500000, transport.generate_id(), "a.bogus.procedure")
opcode, args = transport.get_reply()
assert opcode == OP.ABORT
assert Pattern("dict", "uri!")(*args)
assert args[1] == "wamp.error.protocol_violation"
assert transport.is_empty()
assert transport.session.state == STATE_CLOSED
transport.connect("a.realm")
transport.receive(OP.HELLO, "a.realm", {})
opcode, args = transport.get_reply()
assert opcode == OP.ABORT
assert Pattern("dict", "uri!")(*args)
assert args[1] == "wamp.error.protocol_violation"
assert transport.is_empty()
assert transport.session.state == STATE_CLOSED
def test_inaccessible_realm(transport):
opcode, args = transport.connect("a.realm_deny")
assert opcode == OP.ABORT
assert Pattern("dict", "uri!")(*args)
assert args[1] == "wamp.error.no_such_realm"
assert transport.is_empty()
assert transport.session.state == STATE_CLOSED
def test_uri_denied(transport):
transport.connect("a.realm")
transport._method_uri_allowed = lambda method, uri: uri == "b.topic"
transport.receive(OP.SUBSCRIBE, transport.generate_id(), {}, "a.topic")
opcode, args = transport.get_reply()
assert opcode == OP.ERROR
assert Pattern("opcode", "id", "dict", "uri!")(*args)
assert args[0] == OP.SUBSCRIBE
assert args[3] == "wamp.error.not_authorized"
assert transport.is_empty()
transport.receive(OP.SUBSCRIBE, transport.generate_id(), {}, "b.topic")
opcode, args = transport.get_reply()
assert opcode == OP.SUBSCRIBED
assert Pattern("id", "id")(*args)
assert transport._last_id == args[0]
def test_subscribe_wildcard(transport, transport2, transport3):
transport.connect("a.realm")
transport2.connect("a.realm")
transport3.connect("a.realm")
transport.receive(
OP.SUBSCRIBE, transport.generate_id(), {"match": "wildcard"}, "a..topic"
)
opcode, args = transport.get_reply()
assert opcode == OP.SUBSCRIBED
assert Pattern("id", "id")(*args)
assert transport._last_id == args[0]
transport_a_topic_subscription_id = args[1]
transport2.receive(
OP.PUBLISH, transport.generate_id(), {}, "a.good.topic", ["a"], {"b": "c"}
)
opcode, args = transport.get_reply()
assert opcode == OP.EVENT
assert Pattern("id", "id", "dict", "list", "dict")(*args)
assert args[0] == transport_a_topic_subscription_id
assert args[2] == {"topic": "a.good.topic"}
assert args[3] == ["a"]
assert args[4] == {"b": "c"}
transport.receive(
OP.UNSUBSCRIBE, transport.generate_id(), transport_a_topic_subscription_id
)
opcode, args = transport.get_reply()
assert opcode == OP.UNSUBSCRIBED
assert Pattern("id")(*args)
assert transport._last_id == args[0]
transport2.receive(
OP.PUBLISH, transport.generate_id(), {}, "a.good.topic", ["a"], {"b": "c"}
)
assert transport.is_empty()
def test_register_wildcard(transport, transport2, transport3):
transport.connect("a.realm")
transport2.connect("a.realm")
transport3.connect("a.realm")
transport.receive(
OP.REGISTER, transport.generate_id(), {"match": "wildcard"}, "a..procedure"
)
opcode, args = transport.get_reply()
assert opcode == OP.REGISTERED
assert Pattern("id", "id")(*args)
assert transport._last_id == args[0]
assert transport.is_empty()
transport_register_id = args[1]
transport3.receive(OP.REGISTER, transport.generate_id(), {"match": "prefix"}, "a")
opcode, args = transport3.get_reply()
transport2.receive(
OP.CALL, transport2.generate_id(), {}, "a.cool.procedure", ["a"], {"b": "c"}
)
assert transport2.is_empty()
opcode, args = transport.get_reply()
assert opcode == OP.INVOCATION
assert Pattern("id", "id", "dict", "list", "dict")(*args)
assert transport.is_empty()
assert args[1] == transport_register_id
assert args[2] == {"procedure": "a.cool.procedure"}
assert args[3] == ["a"]
assert args[4] == {"b": "c"}
assert transport3.is_empty()
```
#### File: wampyre/transports/base.py
```python
from abc import ABC, abstractmethod
from ..session import Session
class TransportBase(ABC):
def __init__(self):
self.session = Session(self)
@abstractmethod
def send(self, opcode, *args):
"""Send a command to a client"""
@abstractmethod
def realm_allowed(self, realm):
"""Check if a transport can access a realm"""
@abstractmethod
def close_session(self):
"""Close a session"""
def receive(self, *args):
self.session.handle_command(*args)
def session_lost(self):
self.session.close_session()
@abstractmethod
def method_uri_allowed(self, method, uri):
"""Check if method and uri call is allowed by this transport"""
``` |
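A concrete transport only has to fill in the four abstract methods; a minimal in-memory sketch along the lines of the test double in test_session.py above (the class name, the permissive policies, and the absolute import path are illustrative assumptions):
```python
from wampyre.transports.base import TransportBase


class InMemoryTransport(TransportBase):
    """Toy transport that simply buffers outgoing frames."""

    def __init__(self):
        self.sent = []
        super().__init__()  # TransportBase attaches self.session = Session(self)

    def send(self, opcode, *args):
        # Deliver a frame to the client; here it is just recorded.
        self.sent.append((opcode, args))

    def realm_allowed(self, realm):
        # Permissive placeholder policy: allow every realm.
        return True

    def close_session(self):
        # Nothing to tear down for an in-memory transport.
        pass

    def method_uri_allowed(self, method, uri):
        # Permissive placeholder policy: allow every method/URI pair.
        return True
```
Incoming frames would then be pushed through `receive()`, exactly as the fixtures above do with `OP.HELLO`.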
{
"source": "JohnDoee/weeblife",
"score": 2
} |
#### File: weeblife/weeblife/models.py
```python
import io
import logging
import requests
from django.conf import settings
from django.core.files import File
from django.db import models
from django.db.models import options
from django.utils.timezone import now
logger = logging.getLogger(__name__)
options.DEFAULT_NAMES = options.DEFAULT_NAMES + ("settings_prefix",)
class ImageManager(models.Manager):
def get_image(self):
"""
Find an image that we can use. Returns None if
the cache is empty.
"""
images = self.filter(last_consumed__isnull=True).order_by("?")
if images:
image = images[0]
image.last_consumed = now()
image.save(update_fields=["last_consumed"])
return image.image
images = self.all().order_by("?")
if images:
image = images[0]
image.last_consumed = now()
image.save(update_fields=["last_consumed"])
return image.image
return None
def _download_file(self, source, source_id, data_url):
if self.filter(source=source, source_id=source_id):
return False
ext = data_url.split("?")[0].split(".")[-1]
if ext == "zip":
return False
logger.debug(f"Fetching {data_url}")
image = requests.get(data_url)
filename = f"{source}-{source_id}.{ext}"
self.create(
source=source,
source_id=source_id,
image=File(io.BytesIO(image.content), name=filename),
)
return True
def preload_images(self):
"""
        Add some images to the local cache if there aren't
        many unused ones left.
Can take some time to run.
"""
prefix = f"WEEBLIFE_{self.model._meta.settings_prefix}"
if len(self.all()) >= getattr(settings, f"{prefix}_CAP"):
return
for source in getattr(settings, f"{prefix}_ENABLED"):
source_prefix = f"{prefix}_{source.upper()}"
prefetch_count = getattr(settings, f"{source_prefix}_PREFETCH_COUNT")
if (
len(self.filter(last_consumed__isnull=True, source=source))
>= prefetch_count
):
continue
fetched = 0
if source == "danbooru":
r = requests.get(
"https://danbooru.donmai.us/posts.json",
params={
"limit": "30",
"random": "true",
"tags": getattr(settings, f"{source_prefix}_TAGS"),
},
)
for entry in r.json():
source_id = str(entry["id"])
data_url = entry["file_url"]
if self._download_file(source, source_id, data_url):
fetched += 1
if fetched >= prefetch_count:
break
elif source == "wallheaven":
r = requests.get(
"https://wallhaven.cc/api/v1/search",
params={
"sorting": "random",
"q": getattr(settings, f"{source_prefix}_Q"),
},
)
for entry in r.json()["data"]:
source_id = entry["id"]
data_url = entry["path"]
if self._download_file(source, source_id, data_url):
fetched += 1
if fetched >= prefetch_count:
break
def preload_and_get_image(self):
"""
The easy way out when you're not in a hurry.
"""
self.preload_images()
return self.get_image()
class ImageAbstract(models.Model):
source = models.CharField(max_length=100)
source_id = models.CharField(max_length=100)
last_consumed = models.DateTimeField(null=True)
created = models.DateTimeField(auto_now_add=True)
objects = ImageManager()
class Meta:
abstract = True
unique_together = ("source", "source_id")
class LoadingAnimation(ImageAbstract):
image = models.FileField(upload_to="weeblife_loading")
class Meta(ImageAbstract.Meta):
settings_prefix = "LOADING"
class Wallpaper(ImageAbstract):
image = models.FileField(upload_to="weeblife_wallpaper")
class Meta(ImageAbstract.Meta):
settings_prefix = "WALLPAPER"
``` |
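The manager pulls its configuration from Django settings named `WEEBLIFE_<settings_prefix>_*`; going by the `getattr` calls in `preload_images`, a settings module for the `Wallpaper` model might look roughly like this (only the key names follow from the code, the values are illustrative assumptions):
```python
# settings.py -- illustrative values; key names mirror ImageManager.preload_images
WEEBLIFE_WALLPAPER_CAP = 100                      # stop preloading once this many rows exist
WEEBLIFE_WALLPAPER_ENABLED = ["danbooru", "wallheaven"]
WEEBLIFE_WALLPAPER_DANBOORU_PREFETCH_COUNT = 10
WEEBLIFE_WALLPAPER_DANBOORU_TAGS = "scenery"
WEEBLIFE_WALLPAPER_WALLHEAVEN_PREFETCH_COUNT = 10
WEEBLIFE_WALLPAPER_WALLHEAVEN_Q = "landscape"
```
With those in place, a view can call `Wallpaper.objects.preload_and_get_image()` to top up the cache and pull a random image in one step.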
{
"source": "JohnDoenut/NeuroKit",
"score": 3
} |
#### File: NeuroKit/tests/tests_events.py
```python
import numpy as np
import pandas as pd
import neurokit2 as nk
# =============================================================================
# Events
# =============================================================================
def test_events_find():
signal = np.cos(np.linspace(start=0, stop=20, num=1000))
events = nk.events_find(signal)
assert list(events["Onset"]) == [0, 236, 550, 864]
events = nk.events_find(signal, duration_min = 150)
assert list(events["Onset"]) == [236, 550]
events = nk.events_find(signal, inter_min = 300)
assert list(events["Onset"]) == [0, 550, 864]
def test_events_to_mne():
signal = np.cos(np.linspace(start=0, stop=20, num=1000))
events = nk.events_find(signal)
events, event_id = nk.events_to_mne(events)
assert event_id == {'Event': 0}
def test_plot_events_in_signal():
signal = np.cos(np.linspace(start=0, stop=20, num=1000))
events = nk.events_find(signal)
data = nk.plot_events_in_signal(signal, events, show=False)
assert len(data['Event_Onset']) == 1000
``` |
{
"source": "johndoknjas/Engine-Analysis-Guider",
"score": 3
} |
#### File: johndoknjas/Engine-Analysis-Guider/main.py
```python
from functools import cmp_to_key
from models import Stockfish
MAX_INTEGER = 1000000000
MIN_INTEGER = -1 * MAX_INTEGER
#stockfish = Stockfish(path = r"C:\Users\johnd\Documents\Fun Coding Projects\Stockfish Guider\stockfish-10-win\Windows\stockfish_10_x64.exe",
# depth = 20, parameters = {"MultiPV": 3, "Threads": 4})
stockfish13 = None
# CONTINUE HERE - looks like the app is working. So now it's time to optimize things, and of course
# do some more tests with the output tree. Go to the various CONTINUE HERE tags in the program
# for things to do, and also browse the IDEAS section above. Maybe even check Issues on GitHub
# in case you wrote anything unique there.
# CONTINUE HERE - Play around with testing more positions with the app, and examine the output tree.
# Try to check if it works with positions where both sides can mate, or positions where one
# side has multiple ways to mate, etc. Seems good with what I've tested so far. Still a little
# slow though (2-3x slower than what I'd expect sometimes, although maybe my calculations for expectations
# are flawed).
class Node:
def __init__(self, parent_node, FEN, search_depth, node_depth, node_move, white_to_move):
global stockfish13
# Note that FEN is the FEN of the parent, and node_move still has to be
# made on the FEN. The only exception to this is if this is the initialization
# of the root node, in which case the FEN is the root node's FEN and
# node_move equals None.
        # Meanwhile, the other constructor arguments are all up to date with what
# this Node's fields should be, and they will be set equal to them below.
if node_depth > search_depth:
raise ValueError("node_depth > search_depth")
self.children = [] # CONTINUE HERE - consider maybe changing to a dictionary?
self.evaluation = None
self.white_to_move = white_to_move
self.parent_node = parent_node
self.search_depth = search_depth
self.node_depth = node_depth
self.node_move = node_move
stockfish13.set_fen_position(FEN, self.node_depth == 0)
if self.node_move is not None:
stockfish13.make_moves_from_current_position([self.node_move])
parameters = stockfish13.get_parameters()
self.FEN = stockfish13.get_fen_position()
self.is_leaf_node = (self.node_depth == self.search_depth)
self.PVs = stockfish13.get_top_moves(1 if self.is_leaf_node else int(parameters["MultiPV"]))
self.check_PVs_sorted()
# self.PVs is a list whose elements are dictionaries (see what's returned
# from get_top_moves in models.py).
# CONTINUE HERE - also stop the search and declare this node a leaf node
# if the evaluation is very big, where it's way past the goal evaluation.
# On the topic of this, you should get the goal evaluation from the user
# and pass it in to the constructor.
if not self.PVs:
# There are no moves in this position, so set self.evaluation
# to the evaluation stockfish directly gives in this position.
# It will either be a mate or a stalemate.
evaluation_dict = stockfish13.get_evaluation()
assert(evaluation_dict["value"] == 0)
if evaluation_dict["type"] == "mate":
self.evaluation = MIN_INTEGER if self.white_to_move else MAX_INTEGER
else:
assert(evaluation_dict["type"] == "cp")
self.evaluation = 0
elif self.is_leaf_node:
if self.PVs[0]["Centipawn"] is not None:
self.evaluation = self.PVs[0]["Centipawn"]
elif (self.PVs[0]["Mate"] > 0):
self.evaluation = MAX_INTEGER
else:
self.evaluation = MIN_INTEGER
else:
for current_PV in self.PVs:
new_move = current_PV["Move"]
child_node = Node(self, self.FEN, self.search_depth, self.node_depth + 1,
new_move, not(self.white_to_move))
stockfish13.set_fen_position(self.FEN, False)
# Note that the self arg above will be the parent_node param
# for the child_node.
# The False arg says to not send the "ucinewgame" token to stockfish; the most important
# effect of this is that it preserves the TT. This may help a little, even though it's going
# from child --> parent. You could try benchmarking tests experimenting with this
# arg being False and True, to see which is faster.
# I asked in the SF discord and basically the "ucinewgame" token should only be used
# when starting a new game from scratch, or a position that's completely different (even then
                # I don't think it HAS to be used). So for going to a similar position, it makes sense (and is safe)
# to not call it. But again, benchmarking should be done to see if it actually helps.
self.children.append(child_node)
self.children = sorted(self.children, key=cmp_to_key(self.compare_nodes))
if (self.evaluation is None or
(self.white_to_move and child_node.evaluation > self.evaluation) or
(not(self.white_to_move) and child_node.evaluation < self.evaluation)):
self.evaluation = child_node.evaluation
def compare_nodes(self, first, second):
if first.evaluation is None or second.evaluation is None:
raise ValueError("first.evaluation or second.evaluation has no value.")
return (second.evaluation - first.evaluation) * (1 if self.white_to_move else -1)
def check_PVs_sorted(self):
if len(self.PVs) <= 1:
return
for i in range(1, len(self.PVs)):
first_var = self.PVs[i-1]
second_var = self.PVs[i]
assert first_var["Mate"] is None or first_var["Mate"] != 0
assert second_var["Mate"] is None or second_var["Mate"] != 0
if first_var["Mate"] is None:
if second_var["Mate"] is None:
assert (first_var["Centipawn"] == second_var["Centipawn"] or
(self.white_to_move and first_var["Centipawn"] > second_var["Centipawn"]) or
(not(self.white_to_move) and first_var["Centipawn"] < second_var["Centipawn"]))
else:
assert (second_var["Mate"] < 0) == self.white_to_move
else:
# first_var["Mate"] isn't None
if second_var["Mate"] is None:
if (first_var["Mate"] > 0) != self.white_to_move:
print(first_var["Mate"])
print("white to move" if self.white_to_move else "black to move")
assert (first_var["Mate"] > 0) == self.white_to_move
else:
# second_var["Mate"] isn't None
if first_var["Mate"] == second_var["Mate"]:
continue
elif self.white_to_move:
assert not(first_var["Mate"] < 0 and second_var["Mate"] > 0)
assert ((first_var["Mate"] > 0 and second_var["Mate"] < 0) or
second_var["Mate"] > first_var["Mate"])
else:
# Black to move
assert not(first_var["Mate"] > 0 and second_var["Mate"] < 0)
assert ((first_var["Mate"] < 0 and second_var["Mate"] > 0) or
second_var["Mate"] < first_var["Mate"])
def output_tree(node):
print("node evaluation: " + str(node.evaluation))
if node.children:
print("Child nodes:")
counter = 1
for child_node in node.children:
print(str(counter) + ". Node move: " + child_node.node_move + ". Evaluation: " + str(child_node.evaluation))
counter += 1
print("To inspect a node in the above list, enter its corresponding number:")
if node.parent_node is not None:
print("Or, enter P to return to the parent node.")
print("Or, enter Q to quit.")
while True:
user_input = input()
if user_input == "q" or user_input == "Q":
break
elif user_input == "p" or user_input == "P":
if node.parent_node is not None:
output_tree(node.parent_node)
break
else:
print("This node is the root node, please try again:")
elif (user_input.isdigit()):
user_input_num_form = int(user_input)
if user_input_num_form <= len(node.children) and user_input_num_form > 0:
output_tree(node.children[user_input_num_form - 1])
break
else:
print("The number you entered is out of bounds, please try again:")
else:
print("You did not enter P, Q, or a valid number, please try again:")
def is_whites_turn(FEN):
assert FEN.count(' w ') + FEN.count(' b ') == 1
return ' w ' in FEN
def main():
global stockfish13
FEN = input("Enter the FEN for the position: ")
search_depth = int(input("Enter the max depth to search in the tree: "))
multiPV_num = int(input("Enter the MultiPV number: "))
stockfish_depth = int(input("Enter the search depth for SF: "))
stockfish13 = Stockfish(path = r"C:\Users\johnd\Documents\Coding Projects\stockfish_13_win_x64_bmi2.exe",
depth = stockfish_depth, parameters = {"Threads": 4, "MultiPV": multiPV_num})
# CONTINUE HERE - Have some way for the user to enter a path on their own. If they don't enter a path
# (e.g., if you're the user), then it could default to the path you have here.
root_node = Node(None, FEN, search_depth, 0, None, is_whites_turn(FEN))
print(root_node.white_to_move)
print(root_node.evaluation)
user_input = input("To see the search tree, enter tree: ")
if user_input == "tree":
output_tree(root_node)
if __name__ == '__main__':
main()
``` |
{
"source": "johndolotko/pynet_course",
"score": 3
} |
#### File: pynet_course/Week4/test3.py
```python
def main():
print("Main program here")
# Executable section will contain code that is executed with this program
if __name__ == "__main__":
main()
``` |
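The guard only fires when the file is executed directly; importing the module does not run `main()`. A quick illustration, assuming the file is importable as `test3` from the same directory:
```python
import test3     # importing alone prints nothing

test3.main()     # calling it explicitly prints "Main program here"
```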
{
"source": "johndonggyu/EverytimeProject",
"score": 2
} |
#### File: EverytimeProject/old_modules/saenaegi_drawc2.py
```python
import re
import numpy as np
import pandas as pd
import os
import time
from operator import eq
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "home.settings")
import django
django.setup()
from backend.parsed_data.models import board_keyword
import matplotlib.pyplot as plt
import matplotlib
from wordcloud import WordCloud
from PIL import Image
from datetime import datetime
import sys
#===========================================#
# global variables #
#===========================================#
dir_font = './raw_data/fonts/'
dir_mask = './raw_data/mask/'
dir_excpt = './raw_data/except_dic/'
dir_static = './frontend/static/'
#===========================================#
def get_word(_date):
word = dict()
for w in board_keyword.objects.order_by('-count'):
#print(w.keyword)
word[w.keyword] = w.count
return word
def draw_wordcloud(match_, _date):
global dir_excpt
global dir_font
global dir_mask
global dir_static
wordInfo = get_word(_date)
#a_mask = np.array(Image.open(dir_mask + "prof2.png"))
wc = WordCloud(font_path=dir_font + "NanumGothic.ttf", background_color='white').generate_from_frequencies(wordInfo) #, mask=a_mask
plt.figure(figsize=(30, 30))
#plt.imshow(wc, interpolation="bilinear")
plt.imshow(wc, interpolation='lanczos')
plt.axis("off")
outputfile_name = dir_static + "wc/"+match_+".png"
plt.savefig(outputfile_name)
################################################################
if __name__ == '__main__':
start_time = time.time()
if(len(sys.argv) == 1):
raise Exception("날짜를 입력해주세요.")
else:
_date = sys.argv[1] #2019-07
if(len(_date) == 7):
            opt = 1  # monthly (YYYY-MM)
elif(len(_date) == 10):
            opt = 2  # daily (YYYY-MM-DD)
else:
raise Exception('잘못된 날짜 형식입니다.')
temp = 0
if(board_keyword.objects.count() == 0):
print("키워드 테이블이 비었습니다. saenaegi_drawc.py를 실행해주세요.")
#draw_wordcloud('jagae'+_date, _date)
else:
tmp = board_keyword.objects.order_by('-word_date').first()
if(str(tmp.word_date)[0:7] == _date[0:7]):
print("해당 달의 데이터가 DB에 있네요. 진행하시겠습니까? 예[1] 아니오[2]")
temp = input()
if(temp == 1 or temp == "1"):
                # delete that month's data from the DB, then redo it
start = datetime(int(_date[0:4]),int(_date[5:7]),1)
                # TODO: later, special-case months that have 31 days.
end = datetime(int(_date[0:4]),int(_date[5:7]),30)
draw_wordcloud('saenaegi'+_date, _date)
#draw_wordcloud(one_list('370450'), 'jagae')
#draw_wordcloud(one_list(1, bc.saenaegi), 'saenaegi')
if(temp == 1 or temp == "1"):
print("--- %s seconds ---" % (time.time() - start_time))
``` |
{
"source": "JohnDorian00/db_kurs",
"score": 3
} |
#### File: JohnDorian00/db_kurs/db_ctrl.py
```python
import pyodbc
import logging.config
import os
from user import User, hash_password
from tkinter import messagebox
logging.config.fileConfig("conf/logging.conf")
log = logging.getLogger("db_ctrl")
print("db path:" + os.getcwd() + "\\data.accdb")
def connect_db():
try:
path = os.getcwd() + "\\data.accdb"
conn = pyodbc.connect(r'Driver={Microsoft Access Driver (*.mdb, '
r'*.accdb)};DBQ=' + path + ';')
return conn.cursor()
except pyodbc.Error as e:
log.error("Ошибка подключения к базе данных" + ", err = " + ' '.join(str(e).split()))
messagebox.showerror("Неудача", "Ошибка подключения к базе данных")
return None
def get_user(login):
login = str(login)
cursor = connect_db()
if cursor is None:
return
if login != "":
sql = "SELECT * FROM users WHERE login = '" + login + "'"
print("Выбран пользователь " + login)
else:
log.info("Не введен логин")
cursor.close()
return User()
    cursor.execute(sql, login)
u = cursor.fetchone()
cursor.close()
if u is not None:
return User(u[0], u[1], u[2], u[3])
else:
log.info("Попытка поиска пользователя с несуществующим логином - " + login)
return User()
def get_all_users():
cursor = connect_db()
if cursor is None:
return
try:
cursor.execute("SELECT * FROM users")
except Exception as e:
log.error("Ошибка чтения базы данных пользователей" + ", err = " + ' '.join(str(e).split()))
cursor.close()
return None
d = cursor.fetchall()
cursor.close()
return d
def add_user(user):
cursor = connect_db()
if cursor is None:
return
try:
cursor.execute("INSERT INTO users ( login, pass_hash, user_type ) VALUES ( ?, ?, ?)",
                       (user.login, hash_password(user.pass_hash), user.user_type))
cursor.commit()
log.info("Добавлен пользователь " + user.login + " c правами " + user.user_type)
except Exception as e:
log.error("Ошибка добавления пользователя" + ", err = " + ' '.join(str(e).split()))
cursor.close()
def del_user(login):
login = str(login)
cursor = connect_db()
if cursor is None:
return
try:
cursor.execute("DELETE FROM users WHERE login = ?", login)
cursor.commit()
log.info("Пользователь " + login + " удален")
except Exception as e:
log.error("Ошибка удаления пользователя" + ", err = " + ' '.join(str(e).split()))
cursor.close()
def get_seller(login):
login = str(login)
cursor = connect_db()
if cursor is None:
return
if login:
cursor.execute("SELECT * FROM sellers WHERE login = ?", login)
print("Выбран продавец " + login)
else:
log.info("Не введен логин продавца")
cursor.close()
return
s = cursor.fetchone()
if s is not None:
cursor.close()
return s
else:
log.info("Попытка поиска продавца с несуществующим логином - " + login)
cursor.close()
return None
def get_all_sellers(city=None):
cursor = connect_db()
if cursor is None:
return
try:
if city is None:
cursor.execute("SELECT * FROM sellers")
else:
cursor.execute("SELECT * FROM sellers WHERE city = ?", city)
print("Выбраны все продавцы по городу " + city)
except Exception as e:
log.error("Ошибка чтения базы данных продавцов" + ", err = " + ' '.join(str(e).split()))
cursor.close()
return None
d = cursor.fetchall()
cursor.close()
return d
def update_seller(login, fio, city, avg_price):
login = str(login)
avg_price = str(avg_price)
cursor = connect_db()
if cursor is None:
return
s = get_seller(login)
if s is None:
cursor.execute("INSERT INTO sellers (login) VALUES (?)", login)
cursor.commit()
print("Продавец " + login + "записан в базу данных")
s = get_seller(login)
if fio == "":
fio = s[2]
if city == "":
city = s[3]
if avg_price == "":
avg_price = s[4]
cursor.execute("UPDATE sellers SET FIO = ?, city = ?, avg_price = ? WHERE ID = ?", fio, city, avg_price, s[0])
cursor.commit()
log.info("Обновлен продавец " + s[1])
cursor.close()
def get_buyer(login):
login = str(login)
cursor = connect_db()
if cursor is None:
return
cursor.execute("SELECT * FROM buyers WHERE login = ?", login)
print("Выбран покупатель " + login)
s = cursor.fetchone()
if s is not None:
cursor.close()
return s
else:
log.info("Попытка поиска покупателя с несуществующим логином - " + login)
cursor.close()
return None
def update_buyer(login, fio, city):
login = str(login)
cursor = connect_db()
if cursor is None:
return
b = get_buyer(login)
if b is None:
cursor.execute("INSERT INTO buyers (login) VALUES (?)", login)
cursor.commit()
b = get_buyer(login)
if fio == "":
fio = b[2]
if city == "":
city = b[3]
cursor.execute("UPDATE buyers SET FIO = ?, city = ? WHERE ID = ?", fio, city, b[0])
cursor.commit()
log.info("Обновлен покупатель " + b[1])
cursor.close()
def get_fav_list(buyer_login):
buyer_login = str(buyer_login)
cursor = connect_db()
if cursor is None:
return
cursor.execute("SELECT * FROM fav_sellers WHERE buyer_login = ?", buyer_login)
fav_list_all = cursor.fetchall()
fav_list = []
for v in fav_list_all:
fav_list.append(v[2])
cursor.close()
return fav_list
def add_fav_seller(buyer_login, seller_login):
buyer_login = str(buyer_login)
seller_login = str(seller_login)
cursor = connect_db()
if cursor is None:
return
if get_buyer(buyer_login) is None:
cursor.execute("INSERT INTO buyers (login) VALUES (?)", buyer_login)
cursor.commit()
cursor.execute("SELECT * FROM fav_sellers WHERE buyer_login = ? and seller_login = ?", buyer_login, seller_login)
if cursor.fetchone() is None:
cursor.execute("INSERT INTO fav_sellers ( buyer_login, seller_login) VALUES (?, ?)",
(buyer_login, seller_login))
cursor.commit()
log.info("Продавец " + seller_login + " добавлен в список избранных покупателя " + buyer_login)
cursor.close()
else:
log.warning("Продавец " + seller_login + " уже в списке избранных покупателя " + buyer_login)
cursor.close()
return None
def del_fav_seller(buyer_login, seller_login):
buyer_login = str(buyer_login)
seller_login = str(seller_login)
cursor = connect_db()
if cursor is None:
return
try:
cursor.execute("DELETE FROM fav_sellers WHERE buyer_login = ? AND seller_login = ?", buyer_login, seller_login)
cursor.commit()
log.info("Пользователь " + get_seller(login=seller_login)[1] + " удален из списка покупателя " + get_buyer(login=buyer_login)[1])
except Exception:
log.error("Ошибка удаления продавца " + get_seller(login=seller_login)[1] + " из списка покупателя " + get_buyer(login=buyer_login)[1])
cursor.close()
```
#### File: JohnDorian00/db_kurs/dlg_add_user.py
```python
import logging.config
import os
import tkinter
from tkinter import messagebox
import sign
from user import User
from db_ctrl import add_user, get_user
import pygubu
logging.config.fileConfig("conf/logging.conf")
log = logging.getLogger("win_dlg_add_user")
class WinDlgAddUser(pygubu.TkApplication):
def _create_ui(self):
self.builder = builder = pygubu.Builder()
builder.add_from_file('conf/win_ui/dlg_add_user.ui')
self.mainwindow = builder.get_object('dlg_add_user', self.master)
self.set_title("Добавить пользователя")
builder.connect_callbacks(self)
def add_user(self):
if sign.take_user() is None:
log.warning("Попытка добавить пользователя при неактивном аккаунте")
self.quit()
return
login = self.builder.get_object("inp_login")
pass1 = self.builder.get_object("inp_pass")
pass2 = self.builder.get_object("inp_pass2")
user_type = self.builder.get_object("inp_type")
u = get_user(login=login.get())
if login.get() == "":
print("None")
if u.id is None:
if login.get() != "":
if pass1.get() == pass2.get():
if user_type.get() != "":
add_user(User("", login.get(), pass1.get(), user_type.get()))
messagebox.showinfo("Успех!",
"Добавлен пользователь " + login.get() + " с правами " + user_type.get())
self.quit()
else:
messagebox.showerror("Неудача", "Укажите тип пользователя!")
else:
messagebox.showerror("Неудача", "Пароли не совпадают!")
else:
messagebox.showerror("Неудача", "Не введен логин!")
else:
messagebox.showerror("Неудача!", "Пользователь с логином " + login.get() + " уже существует ")
def quit(self, event=None):
self.mainwindow.master.destroy()
def run(self):
self.mainwindow.mainloop()
def startUI():
root = tkinter.Tk()
icon = os.getcwd() + "/conf/icon.ico"
root.iconbitmap(icon)
app = WinDlgAddUser(root)
app.run()
return
```
#### File: JohnDorian00/db_kurs/sign.py
```python
import hashlib
import os
import tkinter
import buyer
import seller
import manager
import pygubu
import logging.config
# import ee
# from main import for_pygubu, hex
from db_ctrl import get_user
from tkinter import messagebox
start_win = {
"MANAGER": manager.startUI,
"BUYER": buyer.startUI,
"SELLER": seller.startUI,
}
logging.config.fileConfig("conf/logging.conf")
log = logging.getLogger("win_sign")
class WinSign(pygubu.TkApplication):
def _create_ui(self):
self.builder = builder = pygubu.Builder()
builder.add_from_file('conf/win_ui/sign.ui')
self.mainwindow = builder.get_object('frame_sign', self.master)
self.set_title("Система управления базой данных. Окно аутентификации пользователя.")
builder.connect_callbacks(self)
global u
u = None
def open_win_main(self):
login = self.builder.get_object("inp_login")
password = self.builder.get_object("inp_pass")
# if check_stamp(for_pygubu, login.get()) and check_stamp(hex, password.get()):
# self.quit()
# ee.startUI()
# return
global u
u = get_user(login=login.get())
if u is not None and u.id is not None:
if u.auth(password.get()):
self.quit()
start_win[u.user_type]()
else:
log.info("Неверно введен пароль, логин - " + login.get() + ", пароль - " + password.get())
messagebox.showerror("Неудача", "Неверный пароль!")
else:
messagebox.showinfo("Неудача", "Пользователя с таким именем не существует")
def quit(self, event=None):
self.mainwindow.master.destroy()
def run(self):
self.mainwindow.mainloop()
def take_user():
return u
# def check_stamp(data, verified_data):
# data, salt = data.split(':')
# return data == hashlib.sha512(salt.encode() + verified_data.encode()).hexdigest()
def startUI():
root = tkinter.Tk()
icon = os.getcwd() + "/conf/icon.ico"
root.iconbitmap(icon)
app = WinSign(root)
app.run()
return
``` |
{
"source": "johndouglascm/torchtrace",
"score": 3
} |
#### File: johndouglascm/torchtrace/arraytojson.py
```python
import json
import numpy as np
def jsonFromNumpyArray(ndarray):
    # Map each index tuple to the corresponding value, e.g. {(0, 0): 1.0, ...}.
    array_dict = dict(np.ndenumerate(ndarray))
return array_dict
```
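The dict returned above is keyed by index tuples, which `json.dumps` cannot serialize directly; a minimal sketch of one way to get an actual JSON string out of an array (the "row,col" key format is an arbitrary choice):
```python
import json

import numpy as np


def json_string_from_numpy_array(ndarray):
    # Stringify each index tuple (e.g. (0, 1) -> "0,1") and unwrap the numpy
    # scalars with .item() so the mapping becomes JSON-serializable.
    return json.dumps({",".join(map(str, idx)): value.item()
                       for idx, value in np.ndenumerate(ndarray)})


# json_string_from_numpy_array(np.array([[1, 2], [3, 4]]))
# -> '{"0,0": 1, "0,1": 2, "1,0": 3, "1,1": 4}'
```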
#### File: johndouglascm/torchtrace/checksize.py
```python
import os
def get_size(path):
total_size = 0
    seen = set()  # inodes already counted, so hard-linked files aren't counted twice
for dirpath, dirnames, filenames in os.walk(path):
for f in filenames:
fp = os.path.join(dirpath, f)
try:
stat = os.stat(fp)
except OSError:
continue
            if stat.st_ino in seen:
                continue
            seen.add(stat.st_ino)
total_size += stat.st_size
return total_size
```
#### File: johndouglascm/torchtrace/utils.py
```python
def bytes_2_human_readable(number_of_bytes):
if number_of_bytes < 0:
raise ValueError("!!! number_of_bytes can't be smaller than 0 !!!")
step_to_greater_unit = 1024.
number_of_bytes = float(number_of_bytes)
print('bytes:')
print(number_of_bytes)
unit = 'bytes'
if (number_of_bytes / step_to_greater_unit) >= 1:
number_of_bytes /= step_to_greater_unit
unit = 'KB'
if (number_of_bytes / step_to_greater_unit) >= 1:
number_of_bytes /= step_to_greater_unit
unit = 'MB'
if (number_of_bytes / step_to_greater_unit) >= 1:
number_of_bytes /= step_to_greater_unit
unit = 'GB'
if (number_of_bytes / step_to_greater_unit) >= 1:
number_of_bytes /= step_to_greater_unit
unit = 'TB'
precision = 1
number_of_bytes = round(number_of_bytes, precision)
return str(number_of_bytes) + ' ' + unit
``` |
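A quick worked check of the unit stepping above, assuming the function is importable as `utils.bytes_2_human_readable` per the file path:
```python
from utils import bytes_2_human_readable

# 123456789 / 1024 ≈ 120563.3 (KB), / 1024 ≈ 117.7 (MB)
print(bytes_2_human_readable(123456789))  # returns "117.7 MB"
print(bytes_2_human_readable(2048))       # returns "2.0 KB"
print(bytes_2_human_readable(512))        # under one step, stays "512.0 bytes"
```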
{
"source": "johndpope/echo",
"score": 2
} |
#### File: editor/win64/build.py
```python
import os
import subprocess
import argparse
import shutil
import time
# root directory
root_dir = os.getcwd()
# config res
def copy_res() :
# dirs
src_dir = root_dir + '/../../Resource/'
des_debug_dir = root_dir + '/../../Bin/Windows/Debug/res/'
des_release_dir = root_dir + '/../../Bin/Windows/Release/res/'
# copy res
shutil.rmtree( des_debug_dir, True)
shutil.copytree( src_dir, des_debug_dir)
print('copy resource from [' + src_dir + '] to [' + des_debug_dir + ']')
shutil.rmtree( des_release_dir, True)
shutil.copytree( src_dir, des_release_dir)
print('copy resource from [' + src_dir + '] to [' + des_release_dir + ']')
return
# cmake vs project
def cmake_project(version, platform) :
vsconfig = " -G\"Visual Studio 15 2017 Win64\" ../"
solution_dir = root_dir + "/../../../solution/"
# create dir
try:
shutil.rmtree( solution_dir, True)
os.makedirs( solution_dir)
except:
        # not fatal: report the failure and continue
        print("remove dir [%s] failed " % solution_dir)
os.chdir(solution_dir)
# cmake the project
subprocess.call("cmake" + vsconfig, shell=True)
# copy resource
# copy_res()
# open direcotry
os.system( "start " + solution_dir)
return
# compile vs project debug
def compile_debug() :
# change working directory
solution_dir = root_dir + "/../../../solution/"
os.chdir( solution_dir)
# Compile
vs_env = os.environ.get('VS150COMNTOOLS') + "../IDE/devenv.com"
subprocess.call( vs_env + " echo.sln /Build \"Debug|Win32\"")
return
# compile vs project release
def compile_release() :
# change working directory
solution_dir = root_dir + "/../../../solution/"
os.chdir( solution_dir)
# Compile
vs_env = os.environ.get('VS150COMNTOOLS') + "../IDE/devenv.com"
subprocess.call( vs_env + " echo.sln /Build \"Release\"")
return
# function parse args
def run_parse_args() :
# create an ArgumentParser object
parser = argparse.ArgumentParser(description='build LordEngine project on Windows...')
# add arguments
#parser.add_argument('-build', help='[debug|release]', choices=['debug', 'release'])
parser.add_argument('-make', help='build make', default='')
parser.add_argument('-platform', help='build platform', default='x86', choices=['x86', 'x64'])
parser.add_argument('-build', help='build type', default='', choices=['debug', 'release'])
parser.add_argument('-version', help='build version', default='#1')
parser.add_argument('-nsis', help='nsis build', choices=['echo'])
parser.add_argument('-lightmass', help='build lightmass', default='win32', choices=['win32', 'x64'])
# parse instruction
args = parser.parse_args()
if args.make=='cmake' :
cmake_project(args.version, args.platform)
if args.nsis=='echo':
release_echo(args.version)
if args.build=='debug' :
compile_debug()
elif args.build=='release' :
compile_release()
# end of the function parse_args
return
def release_echo(version):
# dirs
src_dir = root_dir + '/../../../'
des_dir = root_dir + '/nsis/echo/'
nsis_dir= root_dir + '/nsis/'
# remove des dir
shutil.rmtree( des_dir, True)
# define copy list
copy_dir_list = [
"app/",
"bin/editor/Win32/Release/",
"build/windows/",
"build/ios/",
"build/android/",
"engine/",
"thirdparty/",
"CMakeLists.txt",
]
# copy files
for sub_dir in copy_dir_list:
full_dir = src_dir + sub_dir
if os.path.isdir(full_dir):
shutil.copytree( full_dir, des_dir + sub_dir)
print('copy resource from [' + src_dir + sub_dir + '] to [' + des_dir + sub_dir + ']')
else:
shutil.copyfile( full_dir, des_dir + sub_dir)
print('copy resource from [' + src_dir + sub_dir + '] to [' + des_dir + sub_dir + ']')
# generate installer
astudio_version_name = 'echo-setup-' + time.strftime('%Y.%m.%d',time.localtime(time.time())) + '.exe'
os.chdir( nsis_dir)
subprocess.call('makensis.exe echo.nsi')
os.rename('echo-setup.exe', astudio_version_name)
#shutil.move(astudio_version_name, astudio_des_dir+astudio_version_name)
return
if __name__ == '__main__' :
args = run_parse_args()
``` |
{
"source": "johndpope/ext3DLBP",
"score": 2
} |
#### File: examples/convert_3d_texture_python/main.py
```python
import sys
import os
sys.path.insert(0,'../../python_wrapper')
import numpy as np
from PIL import Image
import time
import matplotlib.pyplot as plt
import ext3DLBPpy
def from_images_to_3D_array(directory, size):
img_3d = np.empty(size, dtype=np.int)
for dirName, subdirList, fileList in os.walk(directory):
if len(subdirList) > 1:
raise AssertionError('There are multiple directories in this path!!')
for filename in fileList:
if ".bmp" in filename.lower():
index_2d_image = int(filename.split('\\')[-1].split('.')[0])
img_array = np.array(Image.open(os.path.join(dirName,filename)).convert(mode='L'))
img_3d[:,:,index_2d_image] = img_array
return img_3d
def construct_histograms(img_3d_NI, img_3d_RD, img_3d_CI, bins):
(hist1, edges) = np.histogram(img_3d_NI, bins=np.arange(0,bins+1), density=False)
(hist2, edges) = np.histogram(img_3d_RD, bins=np.arange(0,bins+1), density=False)
(hist3, edges) = np.histogram(img_3d_CI, bins=np.arange(0,2+1), density=False)
joint_2d = np.outer(hist1,hist2)
joint_3d = np.repeat(joint_2d[:, :, np.newaxis], len(hist3), axis=2)*hist3
concat = np.concatenate([hist1,hist2,hist3])
return joint_3d, concat, hist1, hist2, hist3
if __name__ == "__main__":
start = time.time()
size = (64,64,64)
img3D = from_images_to_3D_array('../img/Blobs01/', size)
mur = np.mean(img3D)
V = 3
lbp = ext3DLBPpy.NI_RD_CI_LBP_P42g_R2(mur, V)
# this produces the same results as lbp.convert_3d_image(img3D) here below
'''
img_3d_NI = np.zeros((size[0]-2*lbp.R, size[1]-2*lbp.R, size[2]-2*lbp.R), dtype=np.int)
img_3d_RD = np.zeros((size[0]-2*lbp.R, size[1]-2*lbp.R, size[2]-2*lbp.R), dtype=np.int)
img_3d_CI = np.zeros((size[0]-2*lbp.R, size[1]-2*lbp.R, size[2]-2*lbp.R), dtype=np.int)
for dep in xrange(size[2]-lbp.K+1):
for row in xrange(size[1]-lbp.K+1):
for col in xrange(size[0]-lbp.K+1):
(NI,RD,CI) = lbp.convert(img3D[row:row+lbp.K, col:col+lbp.K, dep:dep+lbp.K])
img_3d_NI[row, col, dep] = NI
img_3d_RD[row, col, dep] = RD
img_3d_CI[row, col, dep] = CI
'''
# calculate the LBP for each pixel in the image
(img_3d_NI,img_3d_RD,img_3d_CI) = lbp.convert_3d_image(img3D)
joint, concat, hist_NI, hist_RD, hist_CI = construct_histograms(img_3d_NI, img_3d_RD, img_3d_CI, lbp.bins)
print "Elapsed time: {} [s]".format(time.time()-start)
plt.subplot(231)
plt.plot(hist_NI)
plt.title("hist_NI")
plt.subplot(232)
plt.plot(hist_RD)
plt.title("hist_RD")
plt.subplot(233)
plt.plot(hist_CI)
plt.title("hist_CI")
plt.subplot(234)
plt.plot(joint.ravel())
plt.title("Joint NI/RD/CI")
plt.subplot(235)
plt.plot(concat)
plt.title("Concat NI+RD+CI")
plt.show()
``` |
{
"source": "johndpope/face-swap_ss",
"score": 2
} |
#### File: johndpope/face-swap_ss/infer_combination.py
```python
import os
import subprocess
from itertools import product
from os.path import dirname, join
def main(photos_dir: str, videos_dir: str):
photos_paths = map(lambda x: join(dirname(__file__), photos_dir, x), os.listdir(photos_dir))
videos_paths = map(lambda x: join(dirname(__file__), videos_dir, x), os.listdir(videos_dir))
combinations = list(product(photos_paths, videos_paths))
for photo_path, video_path in combinations:
command = f'python3 inference.py "{photo_path}" "{video_path}"'
subprocess.call(command, shell=True)
if __name__ == "__main__":
path_to_anton_photos = join('demo_file', 'anton_4', 'faces')
path_to_anton_videos = join('demo_file', 'anton_4', 'videos')
main(path_to_anton_photos, path_to_anton_videos)
```
#### File: johndpope/face-swap_ss/model.py
```python
import mimetypes
import os
from uuid import uuid4
import numpy as np
import triton_python_backend_utils as pb_utils
from inference import infer, initialize
class TritonPythonModel:
def initialize(self, args):
os.chdir(os.path.dirname(__file__))
initialize()
def execute(self, requests):
responses = []
for request in requests:
source_inp_tensor = pb_utils.get_input_tensor_by_name(request, "SOURCE_INPUT")
source_inp_bytes = b''.join(source_inp_tensor.as_numpy())
source_mime_tensor = pb_utils.get_input_tensor_by_name(request, "SOURCE_MIME")
source_mime_str = b''.join(source_mime_tensor.as_numpy()).decode('utf-8')
target_inp_tensor = pb_utils.get_input_tensor_by_name(request, "TARGET_INPUT")
target_inp_bytes = b''.join(target_inp_tensor.as_numpy())
target_mime_tensor = pb_utils.get_input_tensor_by_name(request, "TARGET_MIME")
target_mime_str = b''.join(target_mime_tensor.as_numpy()).decode('utf-8')
source_extension = mimetypes.guess_extension(source_mime_str)
target_extension = mimetypes.guess_extension(target_mime_str)
if source_extension == None or target_extension == None:
raise ValueError('Cannot map input mime types to extensions')
source_filename = str(uuid4()) + source_extension
target_filename = str(uuid4()) + target_extension
with open(source_filename, 'wb') as source_file:
source_file.write(source_inp_bytes)
with open(target_filename, 'wb') as target_file:
target_file.write(target_inp_bytes)
output_path = infer(source_filename, target_filename)
# Create output tensors. You need pb_utils.Tensor
# objects to create pb_utils.InferenceResponse.
with open(output_path, 'rb') as output_file:
output_bytes = output_file.read()
output_arr = np.array(output_bytes, dtype=np.bytes_)
output_tensor = pb_utils.Tensor("OUTPUT", output_arr.reshape([1]))
output_mime_tensor = pb_utils.Tensor("OUTPUT_MIME", target_mime_tensor.as_numpy())
# Clean up after inference
os.remove(source_filename)
os.remove(target_filename)
os.remove(output_path)
# Create InferenceResponse. You can set an error here in case
# there was a problem with handling this inference request.
# Below is an example of how you can set errors in inference
# response:
#
# pb_utils.InferenceResponse(
# output_tensors=..., TritonError("An error occured"))
inference_response = pb_utils.InferenceResponse(
output_tensors=[output_tensor, output_mime_tensor])
responses.append(inference_response)
# You should return a list of pb_utils.InferenceResponse. Length
# of this list must match the length of `requests` list.
return responses
def finalize(self):
"""`finalize` is called only once when the model is being unloaded.
Implementing `finalize` function is OPTIONAL. This function allows
the model to perform any necessary clean ups before exit.
"""
print('Cleaning up...')
```
#### File: face-swap_ss/util/videoswap.py
```python
import mimetypes
import os
import shutil
import subprocess
from datetime import datetime
from os.path import basename, dirname, exists, isfile, join, normpath, splitext
from time import gmtime, perf_counter, strftime
import cv2
import numpy as np
import torch
from tqdm import tqdm
from util.reverse2original import reverse2wholeimage
def get_media_type(path: str) -> str:
try:
media_type = mimetypes.guess_type(path)[0].split('/')[0]
except:
raise Exception(f'Unable to parse media type of {path} ({datetime.now()}).')
assert media_type in ['image', 'video'], f'Unable to recognize media type of {path} ({datetime.now()}).'
return media_type
def timer(func_name):
def actual_decorator(func):
def wrapper(*args, **kwargs):
script_time_start = perf_counter()
result = func(*args, **kwargs)
elapsed = strftime("%H:%M:%S", gmtime(perf_counter() - script_time_start))
print(f'> Time elapsed on `{func_name}`: {elapsed} ({datetime.now()}).')
return result
return wrapper
return actual_decorator
def execute_command(command: str, error_message: str,
print_on_error: bool = False,
raise_on_error: bool = False) -> None:
exitcode = subprocess.call(command, shell=True)
if exitcode == 1:
if print_on_error:
print(error_message)
if raise_on_error:
raise Exception(error_message)
return exitcode
@timer('Extracting audio')
def extract_audio(video_path: str, audio_path: str) -> None:
print(f'=> Extracting audio from video "{basename(video_path)}"...')
command = f'ffmpeg -hide_banner -loglevel error -i "{video_path}" -vn -ar 44100 -ac 2 -ab 192K -f wav -y "{audio_path}"'
execute_command(command, f'> > > > > Unable to extract audio of {video_path} ({datetime.now()}).', print_on_error=True)
@timer('Creating video')
def create_video(save_path: str, audio_path: str, frames_path: str, fps: float) -> str:
print(f'=> Creating video from frames at "{frames_path}"...')
os.makedirs(dirname(save_path), exist_ok=True)
if isfile(audio_path):
command = f'ffmpeg -hide_banner -loglevel warning -pattern_type glob -v 8 -r "{fps}" -i "{normpath(frames_path)}/*.jpg" -i "{audio_path}" -c:v libx264 -pix_fmt yuv420p -y "{save_path}"'
execute_command(command, f'> > > > > Error while creating the video from the frames of {frames_path} and audio from {audio_path} ({datetime.now()}).',
raise_on_error=True)
else:
command = f'ffmpeg -hide_banner -loglevel warning -pattern_type glob -v 8 -r "{fps}" -i "{normpath(frames_path)}/*.jpg" -c:v libx264 -pix_fmt yuv420p -y "{save_path}"'
execute_command(command, f'> > > > > Error while creating the video from the frames of {frames_path} ({datetime.now()}).',
raise_on_error=True)
@timer('Getting number of frames')
def get_frames_n(video_path: str) -> int:
def _manual_count(handler):
frames_n = 0
while True:
status, _ = handler.read()
if not status:
break
frames_n += 1
return frames_n
cap = cv2.VideoCapture(video_path)
frames_n = _manual_count(cap)
cap.release()
return frames_n
@timer('Lowering resolution')
def lower_video_resolution(video_path: str) -> str:
M = 1080
vidcap = cv2.VideoCapture(video_path)
width, height = vidcap.get(cv2.CAP_PROP_FRAME_WIDTH), vidcap.get(cv2.CAP_PROP_FRAME_HEIGHT)
if width > M and height > M:
print(f'=> Lowering resolution of the video "{basename(video_path)}" (smallest side is {M})...')
video_temp_path = splitext(video_path)[0] + '_TEMP' + splitext(video_path)[1]
os.rename(video_path, video_temp_path)
scale = f'-vf scale="-2:{M}"' if width > height else f'-vf scale="{M}:-2"'
command = f'ffmpeg -hide_banner -loglevel error -i "{video_temp_path}" {scale} -y "{video_path}"'
execute_command(command, f'Unable to lower the resolution of the {video_path} ({datetime.now()}).', raise_on_error=True)
os.remove(video_temp_path)
return video_path
def _totensor(array):
tensor = torch.from_numpy(array)
img = tensor.transpose(0, 1).transpose(0, 2).contiguous()
return img.float().div(255)
@timer('Swapping Face in video')
def video_swap(video_path, source_latend_id, face_swap_model, face_detector, seg_model, sr_model, apply_sr, output_path, is_prod, temp_results_dir='./temp_results', crop_size=224):
lower_video_resolution(video_path)
print(f'=> Swapping face in "{video_path}"...')
if exists(temp_results_dir):
shutil.rmtree(temp_results_dir)
os.makedirs(temp_results_dir)
audio_path = join(temp_results_dir, splitext(basename(video_path))[0] + '.wav')
extract_audio(video_path, audio_path)
frame_count = get_frames_n(video_path)
video = cv2.VideoCapture(video_path)
fps = video.get(cv2.CAP_PROP_FPS)
for frame_index in tqdm(range(frame_count), disable=is_prod):
_, frame = video.read()
swap_frame(source_latend_id, face_swap_model, face_detector, seg_model, sr_model, apply_sr, temp_results_dir, crop_size, frame_index, frame)
video.release()
create_video(output_path, audio_path, temp_results_dir, fps)
shutil.rmtree(temp_results_dir)
@timer('Swapping Face in photo')
def photo_swap(photo_path, source_latend_id, face_swap_model, face_detector, seg_model, sr_model, apply_sr, output_path, is_prod, temp_results_dir='./temp_results', crop_size=224):
# lower_photo_resolution(photo_path)
print(f'=> Swapping face in "{photo_path}"...')
if exists(temp_results_dir):
shutil.rmtree(temp_results_dir)
os.makedirs(temp_results_dir)
photo = cv2.imread(photo_path)
swap_frame(source_latend_id, face_swap_model, face_detector, seg_model, sr_model, apply_sr, temp_results_dir, crop_size, 0, photo, ext=splitext(output_path)[1])
photo_infer_path = os.listdir(temp_results_dir)[0]
os.makedirs(dirname(output_path), exist_ok=True)
os.rename(join(temp_results_dir, photo_infer_path), output_path)
shutil.rmtree(temp_results_dir)
def swap_frame(source_latend_id, face_swap_model, face_detector, seg_model, sr_model, apply_sr, temp_results_dir, crop_size, frame_index, frame, ext='.jpg'):
with torch.no_grad():
detect_results = face_detector.get(frame, crop_size)
        if detect_results not in [None, (None, None)]:
frame_align_crop_list = detect_results[0]
frame_mat_list = detect_results[1]
swap_result_list = []
for frame_align_crop in frame_align_crop_list:
frame_align_crop_tensor = _totensor(cv2.cvtColor(frame_align_crop,cv2.COLOR_BGR2RGB))[None,...].cuda()
swap_result = face_swap_model(None, frame_align_crop_tensor, source_latend_id, None, True)[0]
swap_result_list.append(swap_result)
reverse2wholeimage(swap_result_list, frame_mat_list, crop_size, frame, seg_model, sr_model, apply_sr,
join(temp_results_dir, f'frame_{frame_index:0>7d}{ext}'))
else:
frame = frame.astype(np.uint8)
cv2.imwrite(join(temp_results_dir, 'frame_{:0>7d}.jpg'.format(frame_index)), frame)
``` |
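A quick usage sketch of the helpers above (the decorated function name and the commands are illustrative assumptions, not part of the module):
```python
# Hypothetical usage of the videoswap.py helpers shown above.
@timer('Probing media')                  # label is arbitrary, used only for the printout
def probe(path: str) -> str:
    return get_media_type(path)          # returns 'image' or 'video'

if __name__ == '__main__':
    print(probe('input.mp4'))            # -> 'video'
    # the exit code is returned; a non-zero code triggers the error message
    execute_command('ffmpeg -version', 'ffmpeg does not appear to be installed.', print_on_error=True)
```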
{
"source": "johndpope/FacialRetargeting",
"score": 3
} |
#### File: FacialRetargeting/src/compute_trust_values.py
```python
import numpy as np
from src.compute_corr_coef import compute_corr_coef
from utils.plotting import plot_similarities
def compute_trust_values(dsk, do_plot=False):
"""
Compute trust values following formula 6
k:= number of blendshapes
n:= num_features (num_markers*3)
:param dsk: delta_sk vector (k, n)
:param do_plot: decide if we want to plot the between-correlation matrix
:return: trust values vector (k,)
"""
if len(np.shape(dsk)) != 2:
raise ValueError("[COMPUTE TRUST VALUE] dsk dimensions not supported ({}) instead of 2".format(len(np.shape(dsk))))
# compute between-blendshape correlation
ckl = compute_corr_coef(dsk, dsk)
ckl = np.maximum(ckl, np.zeros(np.shape(ckl)))
if do_plot:
plot_similarities(ckl, "Between blendshapes correlation", vmin=0, vmax=1)
# compute lower triangle
num_k = np.shape(ckl)[0]
low_trig = np.zeros(num_k)
for k in range(num_k):
val = 0
for l in range(k):
val += ckl[k, l]
low_trig[k] = val
max_low_trig = np.max(low_trig)
# compute trust values (formula 6)
tk = np.zeros(num_k)
for k in range(len(tk)):
tk[k] = 1 - low_trig[k]/max_low_trig
return tk
if __name__ == '__main__':
"""
test compute_trust_values function
run: python -m src.compute_trust_values
"""
np.random.seed(0)
from utils.re_order_delta import re_order_delta
# test compute trust values
sk = np.random.rand(6, 3) # (k, n)
sorted_sk = re_order_delta(sk)
tk = compute_trust_values(sorted_sk, do_plot=False)
print("tk")
print(tk)
```
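For reference, the trust value implemented above corresponds to the following expression, reconstructed directly from the code (the paper's formula 6 may use slightly different notation):
```latex
c_{kl} = \max\big(\operatorname{corr}(\delta s_k, \delta s_l),\, 0\big),
\qquad
t_k = 1 - \frac{\sum_{l<k} c_{kl}}{\max_{k'} \sum_{l<k'} c_{k'l}}
```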
#### File: FacialRetargeting/src/EMatch.py
```python
import numpy as np
class EMatch:
"""
Construct a class to compute E_Match as in formula 10 using a function to pass directly the personalized blendshapes
in delta space delta_p (dp)
k:= num_of_blendshapes
f:= num_frames
n:= num_features
"""
def __init__(self, tckf, uk, daf):
self.tilda_ckf = tckf
self.uk = uk
self.delta_af = daf
self.F = np.shape(self.delta_af)[0]
self.K = np.shape(self.uk)[0]
self.N = np.shape(self.uk)[1]
def _ematch(self, dp):
"""
Compute E_Match as in formula 10
:param dp: delta p (k, n)
:return: e_match
"""
# reshape dp in case it comes as a 1D array
if len(np.shape(dp)) < 2:
dp = np.reshape(dp, (self.K, self.N))
# diagonalize uk
diag_uk = np.array([np.diag(uk) for uk in self.uk]) # using diag(self.uk) would result of getting only the diagonal elements
# compute weighted mask
w_mask = diag_uk @ self.delta_af.T
# duplicate dp
dup_dp = np.repeat(np.expand_dims(dp, axis=2), self.F, axis=2)
# compute norm
norm = np.power(np.linalg.norm(dup_dp - w_mask, axis=1), 2)
# compute e_match
return np.sum(np.multiply(self.tilda_ckf, norm)) / self.F
def get_eMatch(self):
"""
return ematch as a function
:return:
"""
print("[Warning] Using this function for optimization may be very slow ")
return self._ematch
def get_dEmatch(self):
"""
Compute the derivative of E_Match (formula 10) at delta_p as to minimize delta_p -> E_match' = 0
equation: (2/F) * sum_f(c_{k,f}) * delta_p_k - (2/F) * sum_f[(c_{k,f}) * diag(u_k) * delta_a_f]
        It splits the equation into a diagonal matrix A and a vector b so as to solve the system Ax = b, with x = delta_p
        Since the equations are separable in xyz, the function splits the data and returns a system of equations for each
        dimension, resulting in three (kM x kM) systems instead of one (3kM x 3kM) -> section 4.6 of the paper
M:= num_markers = self.N / 3
A*:= (kM x kM) diag matrix with coef = (2/F) * sum_f(c_{k,f})
b*:= (kM,) vector with value =(2/F) * sum_f[(c_{k,f}) * diag(u_k) * delta_a_f]
:return: AX, AY, AZ, bX, bY, bZ
"""
# test if data are separable into xyz
if self.N % 3 != 0:
raise ValueError("Number of features ({}) is not a multiple of 3 (xyz)".format(self.N))
M = int(self.N / 3) # num markers
# split data into xyz coordinates
x_indices = np.arange(start=0, stop=self.N, step=3)
y_indices = np.arange(start=1, stop=self.N, step=3)
z_indices = np.arange(start=2, stop=self.N, step=3)
# split self.uk
ukX = self.uk[:, x_indices]
ukY = self.uk[:, y_indices]
ukZ = self.uk[:, z_indices]
# split self.delta_af
afX = self.delta_af[:, x_indices]
afY = self.delta_af[:, y_indices]
afZ = self.delta_af[:, z_indices]
# declare variables
bX = np.zeros((self.K, M))
bY = np.zeros((self.K, M))
bZ = np.zeros((self.K, M))
# build A (kM x kM) diagonal matrix
A = (2/self.F) * np.diag(np.repeat(np.sum(self.tilda_ckf, axis=1), M))
# there's probably an even better way to make it all in a matrix form :)
for k in range(self.K):
# compute the term: tilda_c[k,:] * diag(u[k]) * delta_af[:]
bX[k] = (2 / self.F) * self.tilda_ckf[k] @ (np.diag(ukX[k]) @ afX.T).T
bY[k] = (2 / self.F) * self.tilda_ckf[k] @ (np.diag(ukY[k]) @ afY.T).T
bZ[k] = (2 / self.F) * self.tilda_ckf[k] @ (np.diag(ukZ[k]) @ afZ.T).T
bX = bX.reshape(-1)
bY = bY.reshape(-1)
bZ = bZ.reshape(-1)
# A = Ax = Ay = Az
return A, A, A, bX, bY, bZ
if __name__ == '__main__':
"""
test E_Match function
    1) test that E_Match is computed correctly
2) test optimization of the E_Match function
run: python -m src.EMatch
"""
np.random.seed(0)
np.set_printoptions(precision=4, linewidth=200)
# declare variables
n_k = 2
n_f = 3
n_n = 12 # = 4 markers
tckf = np.random.rand(n_k, n_f) # (k, f)
uk = np.random.rand(n_k, n_n)
da = np.random.rand(n_f, n_n)
dp = np.random.rand(n_k, n_n)
print("----- EMatch Function -----")
# control compute e_match
ematch_ctrl = 0
for f in range(n_f):
for k in range(n_k):
norm = np.linalg.norm(dp[k] - np.diag(uk[k]) @ da[f])
ematch_ctrl += tckf[k, f] * norm**2
ematch_ctrl /= n_f
print("ematch_ctrl")
print(ematch_ctrl)
# compute e_match
e_match_fn = EMatch(tckf, uk, da).get_eMatch()
ematch = e_match_fn(dp)
print("ematch")
print(ematch)
# test if value matches (up to 6 decimals)
assert np.around(ematch, 6) == np.around(ematch_ctrl, 6)
print("ematch values are equal")
print()
print("----- Minimization ------")
import time as time
print("try optimizer")
from scipy import optimize
start = time.time()
opt = optimize.minimize(e_match_fn, dp, method="BFGS")
print("solved in:", time.time() - start)
print(opt.x[:10]) # print only 10 first
from scipy.linalg import solve
print("try solver")
AX, AY, AZ, bX, bY, bZ = EMatch(tckf, uk, da).get_dEmatch()
start = time.time()
solX = solve(AX, bX)
solY = solve(AY, bY)
solZ = solve(AZ, bZ)
sol = np.vstack((solX, solY, solZ)).reshape(-1, order='F')
print("solved in:", time.time() - start)
print(sol[:10]) # print only 10 first
    # test if values match (up to a small tolerance)
    assert np.allclose(opt.x, sol, atol=1e-4)
print("Reached same value!")
```
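Written out, the energy evaluated by `_ematch` above (formula 10) and the normal equation used by `get_dEmatch` are, as implemented:
```latex
E_{\text{match}}(\delta p) = \frac{1}{F} \sum_{f=1}^{F} \sum_{k=1}^{K}
\tilde{c}_{k,f}\, \big\lVert \delta p_k - \operatorname{diag}(u_k)\, \delta a_f \big\rVert^2,
\qquad
\frac{2}{F}\Big(\sum_{f} \tilde{c}_{k,f}\Big)\, \delta p_k
= \frac{2}{F} \sum_{f} \tilde{c}_{k,f}\, \operatorname{diag}(u_k)\, \delta a_f
```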
#### File: FacialRetargeting/src/EMesh.py
```python
import numpy as np
from src.mesh import triangulate_vertices
from src.mesh import build_Laplacian
class EMesh:
"""
Construct a class to compute E_Mesh as in formula 11 using a function to pass directly the personalized blendshapes
in delta space delta_p (dp)
k:= num_of_blendshapes
f:= num_frames
m:= num_markers
n:= num_features
"""
def __init__(self, delta_gk):
self.delta_gk = delta_gk
self.K = np.shape(self.delta_gk)[0]
self.M = np.shape(self.delta_gk)[1]
self.L = []
for k in range(self.K):
mesh = triangulate_vertices(delta_gk[k])
self.L.append(build_Laplacian(mesh, self.M))
self.L = np.array(self.L)
def _emesh(self, dp):
"""
Compute E_Mesh as in formula 11
:param dp: delta p (k, n)
:return: e_mesh
"""
# reshape dp in case it comes as a 1D array
if len(np.shape(dp)) < 2:
dp = np.reshape(dp, (self.K, self.M * 3))
e_list = []
for k in range(self.K):
e = np.linalg.norm(self.L[k].dot(np.reshape(dp[k], (-1, 3)) - self.delta_gk[k]), axis=1)**2
e_list.append(e)
return np.sum(e_list) / self.M
def get_eMesh(self):
"""
return the function emesh
:return:
"""
return self._emesh
def get_dEmesh(self):
"""
Compute the derivative of E_Mesh (formula 11) at delta_p as to minimize delta_p -> E_mesh' = 0
        equation: (2/M) * sum_i(L^{m, i}_k) * delta_p^m_k - (2/M) * sum_i(L^{m, i}_k) * delta_g^m_k
        with L^i the Laplacian coefficients
        It splits the equation into a diagonal matrix A and a vector b so as to solve the system Ax = b, with x = delta_p
        Since the equations are separable in xyz, the function splits the data and returns a system of equations for each
        dimension, resulting in three (kM x kM) systems instead of one (3kM x 3kM) -> section 4.6 of the paper
        M:= num_markers
        A*:= (kM x kM) diag matrix with coef = (2/M) * sum_i(L^{m, i}_k)
        b*:= (kM,) vector with value = (2/M) * sum_i(L^{m, i}_k) * delta_g^m_k
        :return: AX, AY, AZ, bX, bY, bZ
"""
# test if delta_gk is separable into 3
if len(np.shape(self.delta_gk)) < 3:
if np.shape(self.delta_gk)[1] % 3 != 0:
raise ValueError("Number of features delta_gk ({}) is not a multiple of 3 (xyz)".format(np.shape(self.delta_gk)))
else:
self.delta_gk = self.delta_gk.reshape(self.K, self.M, 3)
print("[EMesh] Warning! self.delta_gk has been reshaped to: {}".format(np.shape(self.delta_gk)))
# split delta_gk
dgkX = self.delta_gk[:, :, 0]
dgkY = self.delta_gk[:, :, 1]
dgkZ = self.delta_gk[:, :, 2]
# declare variables
A = np.zeros((self.K, self.M)) # get reshaped afterward into (kMxkM)
bX = np.zeros((self.K, self.M)) # get reshaped afterward into (kM,)
bY = np.zeros((self.K, self.M)) # get reshaped afterward into (kM,)
bZ = np.zeros((self.K, self.M)) # get reshaped afterward into (kM,)
# build A (kM x kM) diagonal matrix and b(kM) vector
for k in range(self.K):
# build coef.: sum_m'(L^{m, m'}_k)
sum_lapl = np.sum(np.power(self.L[k].todense(), 2), axis=0)
# build A coef. as sum_m'(L^{m, m'}_k)
A[k] = sum_lapl
# build b coef. as sum_m'(L^{m, m'}_k) * g^m_k
bX[k] = np.multiply(sum_lapl, np.expand_dims(dgkX[k], axis=1).T)
bY[k] = np.multiply(sum_lapl, np.expand_dims(dgkY[k], axis=1).T)
bZ[k] = np.multiply(sum_lapl, np.expand_dims(dgkZ[k], axis=1).T)
# reshape matrix A into diagonal of (kMxkM) and b into vector of (kM,)
A = (2/self.M) * np.diag(A.flatten())
bX = (2/self.M) * bX.flatten()
bY = (2/self.M) * bY.flatten()
bZ = (2/self.M) * bZ.flatten()
# A = Ax = Ay = Az
return A, A, A, bX, bY, bZ
if __name__ == '__main__':
"""
test e_mesh functions
    1st part builds a random array
    2nd part triangulates a set of markers from a Vicon recording into a mesh
run: python -m src.EMesh
"""
np.random.seed(1)
np.set_printoptions(precision=4, linewidth=250, suppress=True)
print("--------- test toy example ----------")
# declare variables
n_k = 1 # num_blendshapes
n_m = 5 # num markers
n_n = n_m * 3 # num_features (num_markers * 3)
dgk = np.random.rand(n_k, n_m, 3)
dp = np.random.rand(n_k, n_n)
print("dgk")
print(dgk)
print("dp")
print(dp)
# create EMesh object
e_mesh = EMesh(dgk)
# control compute e_mesh
print("compute control e_mesh")
emesh_list = []
for k in range(n_k):
mesh = triangulate_vertices(dgk[k])
L = build_Laplacian(mesh, n_m)
dv = np.reshape(dp[k], (-1, 3)) - dgk[k]
l_op = L.dot(dv)
norm = np.linalg.norm(l_op, axis=1)**2
emesh_list.append(norm)
emesh_ctrl = np.sum(emesh_list) / n_m
print("emesh_ctrl =", emesh_ctrl)
# compute e_mesh
print("compute e_mesh")
e_mesh_fn = e_mesh.get_eMesh()
emesh = e_mesh_fn(dp)
print("emesh =", emesh)
assert emesh == emesh_ctrl
print("emesh values are equal")
print()
print("----- Minimization ------")
import time as time
print("try optimizer")
from scipy import optimize
start = time.time()
opt = optimize.minimize(e_mesh_fn, np.reshape(dgk, (n_k, n_n)), method="BFGS") # todo: confirm that delta_p_k = delta_g_k when solving only for EMesh
# print(opt)
print("solved in:", time.time() - start)
print("shape opt.x", np.shape(opt.x))
print(opt.x)
from scipy.linalg import solve
print("try solver")
AX, AY, AZ, bX, bY, bZ = e_mesh.get_dEmesh()
start = time.time()
solX = solve(AX, bX)
solY = solve(AY, bY)
solZ = solve(AZ, bZ)
sol = np.vstack((solX, solY, solZ)).reshape(-1, order='F')
print("solved in:", time.time() - start)
print("shape sol", np.shape(sol))
print(sol)
print("dgk")
print(np.reshape(dgk, (n_k, n_n)))
    # test if values match
np.testing.assert_array_equal(np.around(opt.x, 5), np.round(sol, 5))
print("Reached same value!")
```
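As implemented by `_emesh` above, the energy of formula 11 reads (with `L_k` the Laplacian built from the triangulated markers of blendshape `k` and `[.]_m` the row of marker `m`):
```latex
E_{\text{mesh}}(\delta p) = \frac{1}{M} \sum_{k=1}^{K} \sum_{m=1}^{M}
\big\lVert \big[ L_k \, (\delta p_k - \delta g_k) \big]_m \big\rVert^2
```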
#### File: FacialRetargeting/utils/compute_delta.py
```python
import numpy as np
def compute_delta(data, ref):
"""
    compute the delta between each vector in data and a reference vector ref
    :param data: iterable of vectors, each with the same shape as ref
    :param ref: reference vector
    :return: np.array of deltas, with all-zero deltas (vectors equal to ref) removed
"""
deltas = []
for d in data:
delta = d - ref
        # keep only deltas that differ from ref (skip all-zero deltas)
if np.any(delta):
deltas.append(d - ref)
return np.array(deltas)
```
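A tiny usage sketch (the arrays are made up for illustration); note that deltas identical to the reference are dropped:
```python
import numpy as np
# from utils.compute_delta import compute_delta  # assumed import path

ref = np.array([1.0, 1.0, 1.0])
data = np.array([[1.0, 1.0, 1.0],   # equal to ref -> all-zero delta, dropped
                 [2.0, 3.0, 4.0]])
print(compute_delta(data, ref))      # [[1. 2. 3.]]
```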
#### File: FacialRetargeting/utils/get_key_expressions.py
```python
import numpy as np
from scipy.signal import find_peaks
import matplotlib.pyplot as plt
from utils.plotting import plot_cumulative_correlations
def low_pass_filter(signal, ksize=3, theta=1):
"""
apply low pass filter with a gaussian kernel of size ksize and theta
t:= length of signal
:param signal: input 1D signal (t,)
:param ksize: size of Gaussian kernel
:param theta: variance of Gaussian
:return: filtered signal
"""
# built 1D gaussian filter
x = np.arange(ksize) - int(ksize/2)
filt = np.exp(-x**2/(2*theta**2))/np.sqrt(2*np.pi*theta**2)
# filter signal
return np.convolve(signal, filt, mode='same')
def get_filt_signals(signals, ksize=3, theta=1):
"""
Apply a 1D low pass filter over each row of the input: signals
k:= number of different signals
t:= length of signals
:param signals: input 2D signals (k, t)
:param ksize: size of 1D Gaussian kernel
:param theta: variance of Gaussian
:return:
"""
filt_signals = np.zeros(np.shape(signals))
for i in range(np.shape(signals)[0]):
signal = signals[i]
filt_signal = low_pass_filter(signal, ksize, theta)
filt_signals[i] = filt_signal
return filt_signals
def get_key_expressions(sequence, ksize=3, theta=1, do_plot=False):
"""
Extract key expressions as in 4.2 Key Expression Extraction
k:= number of blendshapes
f:= number of frames
:param sequence: input data (k, f)
:param ksize: int parameter to define the size of the 1D Gaussian kernel size
:param theta: float parameter to define the Gaussian filter
:param do_plot: option to plot the cumulative correlation as in Fig. 8
:return: key expressions within sequence
"""
# apply low band filtering over each row
filtered_seq = get_filt_signals(sequence, ksize, theta)
# sum filtered correlations coefficients over column
cumul_seq = np.sum(filtered_seq, axis=0)
# extract local peaks
key_expressions, _ = find_peaks(cumul_seq)
if do_plot:
plot_cumulative_correlations(cumul_seq, key_expressions)
return key_expressions
if __name__ == '__main__':
"""
Built several test cases to try and plot the effect of the three functions:
- low_pass_filter
- get_filt_signals
- get_key_expressions
run: python -m utils.get_key_expressions
"""
np.random.seed(0)
print("--------- test 1D filtering ----------")
# test low_pass_filter
# create random noise signals
f = 5
sample = 800
n_features = 2
x = np.linspace(0, 1, sample)
noise = 0.08 * np.random.normal(0, 1, size=sample)
signal = np.sin(2 * np.pi * f * x) + noise
# apply low pass
filt_signal = low_pass_filter(signal, ksize=3, theta=3)
# plot signals
plt.figure()
plt.title("1D signals")
plt.plot(x, signal, '-b', label='sig 0')
plt.plot(x, filt_signal, '-r', label='sig 0 filt')
plt.legend()
print()
print("--------- test k-size 1D filtering ----------")
# test get_filt_signals function
k = 5
assert k >= 3 # make sure k is bigger than 3 for plotting
# build k phase-shifted signals
    signals = np.zeros((k, sample))
for i in range(k):
signals[i] = np.sin(2 * np.pi * f * x + (i*2*np.pi)/k) + noise
# apply 1D low pass filter over each k signals
filt_signals = get_filt_signals(signals, ksize=3, theta=3)
# plot signals
x = np.repeat(np.expand_dims(x, axis=1), k, axis=1).T
plt.figure()
plt.title("k={0} phase-shifted signals".format(str(k)))
plt.plot(x[0], signals[0], '-b', label='signals 0')
plt.plot(x[0], filt_signals[0], '--r', label='signals 0')
plt.plot(x[1], signals[1], '-y', label='signals 1')
plt.plot(x[1], filt_signals[1], '--r', label='signals 1')
plt.plot(x[2], signals[2], '-g', label='signals 2')
plt.plot(x[2], filt_signals[2], '--r', label='signals 2')
plt.legend()
print()
print("--------- test cumuluative extraction ----------")
# test get_key_expressions function
sequence = signals
get_key_expressions(sequence, ksize=3, theta=2, do_plot=1)
plt.show()
``` |
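As a quick sanity check (not part of the module), the 1D Gaussian kernel built inside `low_pass_filter` for `ksize=3, theta=1` is:
```python
import numpy as np

# Kernel exactly as constructed in low_pass_filter for ksize=3, theta=1.
x = np.arange(3) - 1
filt = np.exp(-x**2 / (2 * 1**2)) / np.sqrt(2 * np.pi * 1**2)
print(filt.round(3))   # [0.242 0.399 0.242] -- note it is not normalized to sum to 1
```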
{
"source": "johndpope/finetune",
"score": 3
} |
#### File: finetune/finetune/download.py
```python
import os
from pathlib import Path
import requests
import finetune
def download_data_if_required():
base_url = "https://raw.githubusercontent.com/IndicoDataSolutions/finetune/master/model/"
file_list = [
"encoder_bpe_40000.json",
"params_0.npy",
"params_1.npy",
"params_2.npy",
"params_3.npy",
"params_4.npy",
"params_5.npy",
"params_6.npy",
"params_7.npy",
"params_8.npy",
"params_9.npy",
"params_shapes.json",
"vocab_40000.bpe",
]
for filename in file_list:
folder = os.path.join(
os.path.dirname(finetune.__file__),
'model'
)
if not os.path.exists(folder):
os.mkdir(folder)
local_filepath = os.path.join(folder, filename)
if not Path(local_filepath).exists():
data = requests.get(base_url + filename).content
            with open(local_filepath, 'wb') as fd:
                fd.write(data)
if __name__ == "__main__":
download_data_if_required()
```
#### File: finetune/finetune/optimizers.py
```python
import math
import tensorflow as tf
def warmup_cosine(x, warmup=0.002):
s = tf.cast(x <= warmup, tf.float32)
return s*(x/warmup) + (1-s)*(0.5 * (1 + tf.cos(math.pi * x)))
def warmup_constant(x, warmup=0.002):
s = tf.cast(x <= warmup, tf.float32)
return s*(x/warmup) + (1-s)*1
def warmup_linear(x, warmup=0.002):
s = tf.cast(x <= warmup, tf.float32)
return (s*(x/warmup) + (1-s))*(1-x)
schedules = {
'warmup_cosine': warmup_cosine,
'warmup_constant': warmup_constant,
'warmup_linear': warmup_linear,
}
def AdamWeightDecay(params, grads, lr, schedule, t_total, b1=0.9, b2=0.999, e=1e-8, l2=0, vector_l2=False, max_grad_norm=-1, **kwargs):
"""
Adam with weight decay fix
"""
t = tf.Variable(0, dtype=tf.float32, trainable=False)
tt = t + 1
updates = [t.assign(tt)]
if max_grad_norm > 0:
grads, _ = tf.clip_by_global_norm(grads, max_grad_norm)
for p, g in zip(params, grads):
if p is None or g is None:
print("can't train", p.name, g)
else:
if isinstance(g, tf.IndexedSlices):
g = tf.convert_to_tensor(g)
m = tf.Variable(p * 0, dtype=tf.float32, trainable=False)
v = tf.Variable(p * 0, dtype=tf.float32, trainable=False)
lrt = lr * tf.sqrt(1 - b2 ** tt) / (1 - b1 ** tt)
lrt *= schedule(t / t_total)
mt = b1 * m + (1 - b1) * g
vt = b2 * v + (1 - b2) * g * g
if (len(p.get_shape()) > 1 or vector_l2) and l2 > 0:
pt = p - lrt * (mt / (tf.sqrt(vt) + e) + l2 * p)
else:
pt = p - lrt * (mt / (tf.sqrt(vt) + e))
updates.extend([m.assign(mt), v.assign(vt), p.assign(pt)])
return tf.group(*updates)
```
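A minimal TF1-style sketch of wiring one of the schedules above into `AdamWeightDecay` (the toy loss, learning rate and `t_total` are assumptions, not values prescribed by the library):
```python
import tensorflow as tf
# from finetune.optimizers import AdamWeightDecay, schedules  # assumed import path

X = tf.placeholder(tf.float32, [None, 4])
w = tf.get_variable('w', [4, 1])
loss = tf.reduce_mean(tf.square(tf.matmul(X, w)))

params = tf.trainable_variables()
grads = tf.gradients(loss, params)
train_op = AdamWeightDecay(
    params, grads,
    lr=6.25e-5,                           # assumed value
    schedule=schedules['warmup_linear'],
    t_total=1000,                         # total number of optimizer steps (assumed)
    l2=0.01,
    max_grad_norm=1.0,
)
```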
#### File: finetune/finetune/transformer.py
```python
import numpy as np
import tensorflow as tf
from finetune.utils import get_ema_vars, convert_gradient_to_tensor, shape_list, assign_to_gpu, average_grads, make_path
from finetune.activations import act_fns
def _norm(x, g=None, b=None, e=1e-5, axis=[1]):
u = tf.reduce_mean(x, axis=axis, keepdims=True)
s = tf.reduce_mean(tf.square(x-u), axis=axis, keepdims=True)
x = (x - u) * tf.rsqrt(s + e)
if g is not None and b is not None:
x = x*g + b
return x
def norm(x, scope, axis=[-1]):
with tf.variable_scope(scope):
n_state = shape_list(x)[-1]
g = tf.get_variable("g", [n_state], initializer=tf.constant_initializer(1))
b = tf.get_variable("b", [n_state], initializer=tf.constant_initializer(0))
g, b = get_ema_vars(g, b)
return _norm(x, g, b, axis=axis)
def dropout(x, pdrop, train, dropout_placeholder):
if train and pdrop > 0:
x = tf.nn.dropout(x, 1 - (pdrop * dropout_placeholder))
return x
def mask_attn_weights(w):
n = shape_list(w)[-1]
b = tf.matrix_band_part(tf.ones([n, n]), -1, 0)
b = tf.reshape(b, [1, 1, n, n])
w = w * b + -1e9 * (1-b)
return w
def _attn(q, k, v, attn_pdrop, dropout_placeholder, train=False, scale=False):
w = tf.matmul(q, k)
if scale:
n_state = shape_list(v)[-1]
w = w * tf.rsqrt(tf.cast(n_state, tf.float32))
w = mask_attn_weights(w)
w = tf.nn.softmax(w)
w = dropout(w, attn_pdrop, train, dropout_placeholder)
a = tf.matmul(w, v)
return a
def split_states(x, n):
x_shape = shape_list(x)
m = x_shape[-1]
new_x_shape = x_shape[:-1] + [n, m // n]
return tf.reshape(x, new_x_shape)
def merge_states(x):
x_shape = shape_list(x)
new_x_shape = x_shape[:-2] + [np.prod(x_shape[-2:])]
return tf.reshape(x, new_x_shape)
def split_heads(x, n, k=False):
if k:
return tf.transpose(split_states(x, n), [0, 2, 3, 1])
else:
return tf.transpose(split_states(x, n), [0, 2, 1, 3])
def merge_heads(x):
return merge_states(tf.transpose(x, [0, 2, 1, 3]))
def conv1d(x, scope, nf, rf, w_init=tf.random_normal_initializer(stddev=0.02), b_init=tf.constant_initializer(0), pad='VALID', train=False):
with tf.variable_scope(scope):
nx = shape_list(x)[-1]
w = tf.get_variable("w", [rf, nx, nf], initializer=w_init)
b = tf.get_variable("b", [nf], initializer=b_init)
if rf == 1: # faster 1x1 conv
c = tf.reshape(tf.matmul(tf.reshape(x, [-1, nx]), tf.reshape(w, [-1, nf]))+b, shape_list(x)[:-1]+[nf])
else: # was used to train LM
c = tf.nn.conv1d(x, w, stride=1, padding=pad)+b
return c
def attn(x, scope, n_state, n_head, resid_pdrop, attn_pdrop, dropout_placeholder, train=False, scale=False):
assert n_state % n_head == 0
with tf.variable_scope(scope):
c = conv1d(x, 'c_attn', n_state * 3, 1, train=train)
q, k, v = tf.split(c, 3, 2)
q = split_heads(q, n_head)
k = split_heads(k, n_head, k=True)
v = split_heads(v, n_head)
a = _attn(q, k, v, attn_pdrop=attn_pdrop, dropout_placeholder=dropout_placeholder, train=train, scale=scale)
a = merge_heads(a)
a = conv1d(a, 'c_proj', n_state, 1, train=train)
a = dropout(a, resid_pdrop, train, dropout_placeholder)
return a
def mlp(x, scope, n_state, act_fn, resid_pdrop, dropout_placeholder, train=False):
with tf.variable_scope(scope):
nx = shape_list(x)[-1]
act = act_fns[act_fn]
h = act(conv1d(x, 'c_fc', n_state, 1, train=train))
h2 = conv1d(h, 'c_proj', nx, 1, train=train)
h2 = dropout(h2, resid_pdrop, train, dropout_placeholder)
return h2
def block(x, n_head, act_fn, resid_pdrop, attn_pdrop, scope, dropout_placeholder, train=False, scale=False):
with tf.variable_scope(scope):
nx = shape_list(x)[-1]
a = attn(x, 'attn', nx, n_head, resid_pdrop, attn_pdrop, dropout_placeholder, train=train, scale=scale)
n = norm(x + a, 'ln_1')
m = mlp(n, 'mlp', nx * 4, act_fn, resid_pdrop, dropout_placeholder, train=train)
h = norm(n + m, 'ln_2')
return h
def embed(X, we):
we = convert_gradient_to_tensor(we)
e = tf.gather(we, X)
h = tf.reduce_sum(e, 2)
return h
```
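A rough sketch (the hyperparameters and the `'gelu'` key in `act_fns` are assumptions) of how `embed` and `block` compose into a transformer stack, in the same TF1 style as the module above:
```python
import tensorflow as tf
# embed and block are the functions defined above; 'gelu' is assumed to be in act_fns.

n_vocab, n_ctx, n_embd, n_head, n_layer = 40000, 512, 768, 12, 12

X = tf.placeholder(tf.int32, [None, n_ctx, 2])        # token ids and position ids
dropout_placeholder = tf.placeholder(tf.float32, [])  # 1.0 while training, 0.0 at eval

we = tf.get_variable('we', [n_vocab + n_ctx, n_embd],
                     initializer=tf.random_normal_initializer(stddev=0.02))
h = embed(X, we)                                      # [batch, n_ctx, n_embd]
for layer in range(n_layer):
    h = block(h, n_head, 'gelu', resid_pdrop=0.1, attn_pdrop=0.1,
              scope='h%d' % layer, dropout_placeholder=dropout_placeholder,
              train=True, scale=True)
```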
#### File: finetune/tests/test_classifier.py
```python
import os
import unittest
import logging
from copy import copy
from pathlib import Path
# required for tensorflow logging control
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import tensorflow as tf
import pandas as pd
import numpy as np
import enso
from enso.download import generic_download
from sklearn.metrics import accuracy_score
from finetune import config
from finetune import LanguageModelClassifier
SST_FILENAME = "SST-binary.csv"
class TestLanguageModelClassifier(unittest.TestCase):
n_sample = 100
n_hidden = 768
dataset_path = os.path.join(
enso.config.DATA_DIRECTORY, 'Classify', 'SST-binary.csv'
)
@classmethod
def _download_sst(cls):
"""
Download Stanford Sentiment Treebank to enso `data` directory
"""
path = Path(cls.dataset_path)
if path.exists():
return
path.parent.mkdir(parents=True, exist_ok=True)
generic_download(
url="https://s3.amazonaws.com/enso-data/SST-binary.csv",
text_column="Text",
target_column="Target",
filename=SST_FILENAME
)
@classmethod
def setUpClass(cls):
cls._download_sst()
def setUp(self):
self.dataset = pd.read_csv(self.dataset_path, nrows=self.n_sample*3)
tf.reset_default_graph()
def test_fit_predict(self):
"""
Ensure model training does not error out
Ensure model returns predictions of the right type
"""
save_file_autosave = 'tests/saved-models/autosave_path'
model = LanguageModelClassifier(verbose=False, autosave_path=save_file_autosave)
train_sample = self.dataset.sample(n=self.n_sample)
valid_sample = self.dataset.sample(n=self.n_sample)
model.fit(train_sample.Text, train_sample.Target)
predictions = model.predict(valid_sample.Text)
for prediction in predictions:
            self.assertIsInstance(prediction, (int, np.integer))
probabilities = model.predict_proba(valid_sample.Text)
for proba in probabilities:
self.assertIsInstance(proba, dict)
def test_save_load(self):
"""
Ensure saving + loading does not cause errors
Ensure saving + loading does not change predictions
"""
save_file_autosave = 'tests/saved-models/autosave_path'
save_file = 'tests/saved-models/test-save-load'
model = LanguageModelClassifier(verbose=False, autosave_path=save_file_autosave)
train_sample = self.dataset.sample(n=self.n_sample)
valid_sample = self.dataset.sample(n=self.n_sample)
model.fit(train_sample.Text, train_sample.Target)
predictions = model.predict(valid_sample.Text)
model.save(save_file)
model = LanguageModelClassifier.load(save_file)
new_predictions = model.predict(valid_sample.Text)
for i, prediction in enumerate(predictions):
self.assertEqual(prediction, new_predictions[i])
def test_featurize(self):
"""
Ensure featurization returns an array of the right shape
Ensure featurization is still possible after fit
"""
save_file_autosave = 'tests/saved-models/autosave_path'
model = LanguageModelClassifier(verbose=False, autosave_path=save_file_autosave)
train_sample = self.dataset.sample(n=self.n_sample)
features = model.featurize(train_sample.Text)
self.assertEqual(features.shape, (self.n_sample, self.n_hidden))
model.fit(train_sample.Text, train_sample.Target)
features = model.featurize(train_sample.Text)
self.assertEqual(features.shape, (self.n_sample, self.n_hidden))
def test_reasonable_predictions(self):
save_file_autosave = 'tests/saved-models/autosave_path'
model = LanguageModelClassifier(verbose=False, autosave_path=save_file_autosave)
n_per_class = self.n_sample // 2
trX = ['cat'] * n_per_class + ['finance'] * n_per_class
trY = copy(trX)
teX = ['feline'] * n_per_class + ['investment'] * n_per_class
teY = ['cat'] * n_per_class + ['finance'] * n_per_class
model.fit(trX, trY)
predY = model.predict(teX)
self.assertEqual(accuracy_score(teY, predY), 1.00)
``` |
{
"source": "johndpope/GenEdi",
"score": 2
} |
#### File: johndpope/GenEdi/run_pkl_to_image.py
```python
import os
import glob
import pickle
import numpy as np
import matplotlib.pyplot as plt
import h5py
import torch
# path to model generated results
path_gan_sample_pkl = './assert_results/pkl/'
path_gan_sample_img = './sample_jpg/'
# if not os.path.exists(path_gan_sample_pkl):
# os.mkdir(path_gan_sample_pkl)
# if not os.path.exists(path_gan_sample_img):
# os.mkdir(path_gan_sample_img)
# name of new data files
def get_filename_from_idx(idx):
return 'sample_{:0>6}'.format(idx)
filename_sample_z = './sample_z.h5'
# get the pkl file list
list_pathfile_pkl = glob.glob(os.path.join(path_gan_sample_pkl, '*.pkl'))
list_pathfile_pkl.sort()
# loop to transform data and save image
list_z = []
i_counter = 0
for pathfile_pkl in list_pathfile_pkl:
print(pathfile_pkl)
with open(pathfile_pkl, 'rb') as f:
pkl_content = pickle.load(f)
x = pkl_content['x'] # x.shape = [B, (3, 1024, 1024)]
z = pkl_content['z']
num_cur = x.shape[0]
for i in range(num_cur):
        # output filename for this sample
pathfile_cur = os.path.join(path_gan_sample_img, get_filename_from_idx(i_counter))
plt.imsave(path_gan_sample_img + "sample_{:0>6}.png".format(i_counter), x[i])
np.save(pathfile_cur + '_z.npy', z[i].cpu())
i_counter += 1
list_z.append(z)
# save z (latent variables)
z_concat = torch.cat(list_z, axis=0).cpu().numpy()
pathfile_sample_z = os.path.join(path_gan_sample_img, filename_sample_z)
with h5py.File(pathfile_sample_z, 'w') as f:
f.create_dataset('z', data=z_concat)
```
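A short follow-up sketch (using the same paths as the script above) for reading the saved latents back:
```python
import os
import h5py

path_gan_sample_img = './sample_jpg/'
filename_sample_z = './sample_z.h5'

with h5py.File(os.path.join(path_gan_sample_img, filename_sample_z), 'r') as f:
    z = f['z'][:]        # the concatenated latent vectors written above
print(z.shape)
```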
#### File: GenEdi/stylegan2/train.py
```python
import warnings
import functools
import os
import time
import sys
import json
import numpy as np
import torch
import torch.utils.tensorboard
from torch import nn
import torchvision
try:
import apex
from apex import amp
except ImportError:
pass
from . import models, utils, loss_fns
class Trainer:
"""
Class that handles training and logging for stylegan2.
For distributed training, the arguments `rank`, `world_size`,
    `master_addr`, `master_port` can all be given as environment variables
(only difference is that the keys should be capital cased).
Environment variables if available will override any python
value for the same argument.
Arguments:
G (Generator): The generator model.
D (Discriminator): The discriminator model.
latent_size (int): The size of the latent inputs.
dataset (indexable object): The dataset. Has to implement
'__getitem__' and '__len__'. If `label_size` > 0, this
dataset object has to return both a data entry and its
label when calling '__getitem__'.
device (str, int, list, torch.device): The device to run training on.
Can be a list of integers for parallel training in the same
process. Parallel training can also be achieved by spawning
            separate processes and using the `rank` argument for each
process. In that case, only one device should be specified
per process.
Gs (Generator, optional): A generator copy with the current
moving average of the training generator. If not specified,
a copy of the generator is made for the moving average of
weights.
Gs_beta (float): The beta value for the moving average weights.
Default value is 1 / (2 ^(32 / 10000)).
Gs_device (str, int, torch.device, optional): The device to store
the moving average weights on. If using a different device
than what is specified for the `device` argument, updating
the moving average weights will take longer as the data
will have to be transfered over different devices. If
this argument is not specified, the same device is used
as specified in the `device` argument.
batch_size (int): The total batch size to average gradients
over. This should be the combined batch size of all used
devices (it is later divided by world size for distributed
training).
Example: We want to average gradients over 32 data
entries. To do this we just set `batch_size=32`.
Even if we train on 8 GPUs we still use the same
batch size (each GPU will take 4 data entries per
batch).
Default value is 32.
device_batch_size (int): The number of data entries that can
fit on the specified device at a time.
Example: We want to average gradients over 32 data
entries. To do this we just set `batch_size=32`.
However, our device can only handle a batch of
4 at a time before running out of memory. We
therefor set `device_batch_size=4`. With a
single device (no distributed training), each
batch is split into 32 / 4 parts and gradients
are averaged over all these parts.
Default value is 4.
label_size (int, optional): Number of possible class labels.
This is required for conditioning the GAN with labels.
If not specified it is assumed that no labels are used.
data_workers (int): The number of spawned processes that
handle data loading. Default value is 4.
G_loss (str, callable): The loss function to use
for the generator. If string, it can be one of the
following: 'logistic', 'logistic_ns' or 'wgan'.
If not a string, the callable has to follow
the format of functions found in `stylegan2.loss`.
Default value is 'logistic_ns' (non-saturating logistic).
D_loss (str, callable): The loss function to use
for the discriminator. If string, it can be one of the
following: 'logistic' or 'wgan'.
If not a string, same restriction follows as for `G_loss`.
Default value is 'logistic'.
G_reg (str, callable, None): The regularizer function to use
for the generator. If string, it can only be 'pathreg'
(pathlength regularization). A weight for the regularizer
can be passed after the string name like the following:
G_reg='pathreg:5'
This will assign a weight of 5 to the regularization loss.
            If set to None, no generator regularization is performed.
Default value is 'pathreg:2'.
G_reg_interval (int): The interval at which to regularize the
generator. If set to 0, the regularization and loss gradients
are combined in a single optimization step every iteration.
If set to 1, the gradients for the regularization and loss
are used separately for two optimization steps. Any value
higher than 1 indicates that regularization should only
be performed at this interval (lazy regularization).
Default value is 4.
G_opt_class (str, class): The optimizer class for the generator.
Default value is 'Adam'.
G_opt_kwargs (dict): Keyword arguments for the generator optimizer
constructor. Default value is {'lr': 2e-3, 'betas': (0, 0.99)}.
G_reg_batch_size (int): Same as `batch_size` but only for
the regularization loss of the generator. Default value
is 16.
G_reg_device_batch_size (int): Same as `device_batch_size`
but only for the regularization loss of the generator.
Default value is 2.
D_reg (str, callable, None): The regularizer function to use
for the discriminator. If string, the following values
can be used: 'r1', 'r2', 'gp'. See doc for `G_reg` for
rest of info on regularizer format.
Default value is 'r1:10'.
        D_reg_interval (int): Same as `G_reg_interval` but for the
discriminator. Default value is 16.
D_opt_class (str, class): The optimizer class for the discriminator.
Default value is 'Adam'.
D_opt_kwargs (dict): Keyword arguments for the discriminator optimizer
constructor. Default value is {'lr': 2e-3, 'betas': (0, 0.99)}.
style_mix_prob (float): The probability of passing 2 latents instead of 1
to the generator during training. Default value is 0.9.
G_iter (int): Number of generator iterations for every full training
iteration. Default value is 1.
D_iter (int): Number of discriminator iterations for every full training
iteration. Default value is 1.
pl_avg (float, torch.Tensor): The average pathlength starting value for
pathlength regularization of the generator. Default value is 0.
tensorboard_log_dir (str, optional): A path to a directory to log training values
in for tensorboard. Only used without distributed training or when
distributed training is enabled and the rank of this trainer is 0.
checkpoint_dir (str, optional): A path to a directory to save training
checkpoints to. If not specified, not checkpoints are automatically
saved during training.
checkpoint_interval (int): The interval at which to save training checkpoints.
Default value is 10000.
seen (int): The number of previously trained iterations. Used for logging.
Default value is 0.
half (bool): Use mixed precision training. Default value is False.
rank (int, optional): If set, use distributed training. Expects that
this object has been constructed with the same arguments except
for `rank` in different processes.
world_size (int, optional): If using distributed training, this specifies
the number of nodes in the training.
master_addr (str): The master address for distributed training.
Default value is '127.0.0.1'.
master_port (str): The master port for distributed training.
Default value is '23456'.
"""
def __init__(self,
G,
D,
latent_size,
dataset,
device,
Gs=None,
Gs_beta=0.5 ** (32 / 10000),
Gs_device=None,
batch_size=32,
device_batch_size=4,
label_size=0,
data_workers=4,
G_loss='logistic_ns',
D_loss='logistic',
G_reg='pathreg:2',
G_reg_interval=4,
G_opt_class='Adam',
G_opt_kwargs={'lr': 2e-3, 'betas': (0, 0.99)},
G_reg_batch_size=None,
G_reg_device_batch_size=None,
D_reg='r1:10',
D_reg_interval=16,
D_opt_class='Adam',
D_opt_kwargs={'lr': 2e-3, 'betas': (0, 0.99)},
style_mix_prob=0.9,
G_iter=1,
D_iter=1,
pl_avg=0.,
tensorboard_log_dir=None,
checkpoint_dir=None,
checkpoint_interval=10000,
seen=0,
half=False,
rank=None,
world_size=None,
master_addr='127.0.0.1',
master_port='23456'):
assert not isinstance(G, nn.parallel.DistributedDataParallel) and \
not isinstance(D, nn.parallel.DistributedDataParallel), \
'Encountered a model wrapped in `DistributedDataParallel`. ' + \
'Distributed parallelism is handled by this class and can ' + \
'not be initialized before.'
# We store the training settings in a dict that can be saved as a json file.
kwargs = locals()
# First we remove the arguments that can not be turned into json.
kwargs.pop('self')
kwargs.pop('G')
kwargs.pop('D')
kwargs.pop('Gs')
kwargs.pop('dataset')
# Some arguments may have to be turned into strings to be compatible with json.
kwargs.update(pl_avg=float(pl_avg))
if isinstance(device, torch.device):
kwargs.update(device=str(device))
if isinstance(Gs_device, torch.device):
            kwargs.update(Gs_device=str(Gs_device))
self.kwargs = kwargs
if device or device == 0:
if isinstance(device, (tuple, list)):
self.device = torch.device(device[0])
else:
self.device = torch.device(device)
else:
self.device = torch.device('cpu')
if self.device.index is not None:
torch.cuda.set_device(self.device.index)
else:
assert not half, 'Mixed precision training only available ' + \
'for CUDA devices.'
# Set up the models
self.G = G.train().to(self.device)
self.D = D.train().to(self.device)
if isinstance(device, (tuple, list)) and len(device) > 1:
assert all(isinstance(dev, int) for dev in device), \
'Multiple devices have to be specified as a list ' + \
'or tuple of integers corresponding to device indices.'
# TODO: Look into bug with torch.autograd.grad and nn.DataParallel
# In the meanwhile just prohibit its use together.
assert G_reg is None and D_reg is None, 'Regularization ' + \
'currently not supported for multi-gpu training in single process. ' + \
'Please use distributed training with one device per process instead.'
device_batch_size *= len(device)
def to_data_parallel(model):
if not isinstance(model, nn.DataParallel):
return nn.DataParallel(model, device_ids=device)
return model
self.G = to_data_parallel(self.G)
self.D = to_data_parallel(self.D)
# Default generator reg batch size is the global batch size
# unless it has been specified otherwise.
G_reg_batch_size = G_reg_batch_size or batch_size
G_reg_device_batch_size = G_reg_device_batch_size or device_batch_size
# Set up distributed training
rank = os.environ.get('RANK', rank)
if rank is not None:
rank = int(rank)
addr = os.environ.get('MASTER_ADDR', master_addr)
port = os.environ.get('MASTER_PORT', master_port)
world_size = os.environ.get('WORLD_SIZE', world_size)
assert world_size is not None, 'Distributed training ' + \
'requires specifying world size.'
world_size = int(world_size)
assert self.device.index is not None, \
'Distributed training is only supported for CUDA.'
assert batch_size % world_size == 0, 'Batch size has to be ' + \
'evenly divisible by world size.'
assert G_reg_batch_size % world_size == 0, 'G reg batch size has to be ' + \
'evenly divisible by world size.'
batch_size = batch_size // world_size
G_reg_batch_size = G_reg_batch_size // world_size
init_method = 'tcp://{}:{}'.format(addr, port)
torch.distributed.init_process_group(
backend='nccl', init_method=init_method, rank=rank, world_size=world_size)
else:
world_size = 1
self.rank = rank
self.world_size = world_size
# Set up variable to keep track of moving average of path lengths
self.pl_avg = torch.tensor(
pl_avg, dtype=torch.float16 if half else torch.float32, device=self.device)
# Broadcast parameters from rank 0 if running distributed
self._sync_distributed(G=self.G, D=self.D, broadcast_weights=True)
# Set up moving average of generator
# Only for non-distributed training or
# if rank is 0
if not self.rank:
# Values for `rank`: None -> not distributed, 0 -> distributed and 'main' node
self.Gs = Gs
if not isinstance(Gs, utils.MovingAverageModule):
self.Gs = utils.MovingAverageModule(
from_module=self.G,
to_module=Gs,
param_beta=Gs_beta,
device=self.device if Gs_device is None else Gs_device
)
else:
self.Gs = None
# Set up loss and regularization functions
self.G_loss = get_loss_fn('G', G_loss)
self.D_loss = get_loss_fn('D', D_loss)
self.G_reg = get_reg_fn('G', G_reg, pl_avg=self.pl_avg)
self.D_reg = get_reg_fn('D', D_reg)
self.G_reg_interval = G_reg_interval
self.D_reg_interval = D_reg_interval
self.G_iter = G_iter
self.D_iter = D_iter
# Set up optimizers (adjust hyperparameters if lazy regularization is active)
self.G_opt = build_opt(self.G, G_opt_class, G_opt_kwargs, self.G_reg, self.G_reg_interval)
self.D_opt = build_opt(self.D, D_opt_class, D_opt_kwargs, self.D_reg, self.D_reg_interval)
# Set up mixed precision training
if half:
assert 'apex' in sys.modules, 'Can not run mixed precision ' + \
'training (`half=True`) without the apex module.'
(self.G, self.D), (self.G_opt, self.D_opt) = amp.initialize(
[self.G, self.D], [self.G_opt, self.D_opt], opt_level='O1')
self.half = half
# Data
sampler = None
if self.rank is not None:
sampler = torch.utils.data.distributed.DistributedSampler(dataset, shuffle=True)
self.dataloader = torch.utils.data.DataLoader(
dataset,
batch_size=device_batch_size,
num_workers=data_workers,
shuffle=sampler is None,
pin_memory=self.device.index is not None,
drop_last=True,
sampler=sampler
)
self.dataloader_iter = None
self.prior_generator = utils.PriorGenerator(
latent_size=latent_size,
label_size=label_size,
batch_size=device_batch_size,
device=self.device
)
assert batch_size % device_batch_size == 0, \
'Batch size has to be evenly divisible by the product of ' + \
'device batch size and world size.'
self.subdivisions = batch_size // device_batch_size
assert G_reg_batch_size % G_reg_device_batch_size == 0, \
'G reg batch size has to be evenly divisible by the product of ' + \
'G reg device batch size and world size.'
self.G_reg_subdivisions = G_reg_batch_size // G_reg_device_batch_size
self.G_reg_device_batch_size = G_reg_device_batch_size
self.tb_writer = None
if tensorboard_log_dir and not self.rank:
self.tb_writer = torch.utils.tensorboard.SummaryWriter(tensorboard_log_dir)
self.label_size = label_size
self.style_mix_prob = style_mix_prob
self.checkpoint_dir = checkpoint_dir
self.checkpoint_interval = checkpoint_interval
self.seen = seen
self.metrics = {}
self.callbacks = []
def _get_batch(self):
"""
Fetch a batch and its labels. If no labels are
available the returned labels will be `None`.
Returns:
data
labels
"""
if self.dataloader_iter is None:
self.dataloader_iter = iter(self.dataloader)
try:
batch = next(self.dataloader_iter)
except StopIteration:
self.dataloader_iter = None
return self._get_batch()
if isinstance(batch, (tuple, list)):
if len(batch) > 1:
data, label = batch[:2]
else:
data, label = batch[0], None
else:
data, label = batch, None
if not self.label_size:
label = None
if torch.is_tensor(data):
data = data.to(self.device)
if torch.is_tensor(label):
label = label.to(self.device)
return data, label
def _sync_distributed(self, G=None, D=None, broadcast_weights=False):
"""
Sync the gradients (and alternatively the weights) of
the specified networks over the distributed training
nodes. Varying buffers are broadcasted from rank 0.
        If distributed training is not enabled, no action
is taken and this is a no-op function.
Arguments:
G (Generator, optional)
D (Discriminator, optional)
broadcast_weights (bool): Broadcast the weights from
node of rank 0 to all other ranks. Default
value is False.
"""
if self.rank is None:
return
for net in [G, D]:
if net is None:
continue
for p in net.parameters():
if p.grad is not None:
torch.distributed.all_reduce(p.grad, async_op=True)
if broadcast_weights:
torch.distributed.broadcast(p.data, src=0, async_op=True)
if G is not None:
if G.dlatent_avg is not None:
torch.distributed.broadcast(G.dlatent_avg, src=0, async_op=True)
if self.pl_avg is not None:
torch.distributed.broadcast(self.pl_avg, src=0, async_op=True)
if G is not None or D is not None:
torch.distributed.barrier(async_op=False)
def _backward(self, loss, opt, mul=1, subdivisions=None):
"""
Reduce loss by world size and subdivisions before
calling backward for the loss. Loss scaling is
performed when mixed precision training is
enabled.
Arguments:
loss (torch.Tensor)
opt (torch.optim.Optimizer)
mul (float): Loss weight. Default value is 1.
subdivisions (int, optional): The number of
subdivisions to divide by. If this is
                not specified, the subdivisions from
the specified batch and device size
at construction is used.
Returns:
loss (torch.Tensor): The loss scaled by mul
and subdivisions but not by world size.
"""
if loss is None:
return 0
mul /= subdivisions or self.subdivisions
mul /= self.world_size or 1
if mul != 1:
loss *= mul
if self.half:
with amp.scale_loss(loss, opt) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
return loss * (self.world_size or 1)
def train(self, iterations, callbacks=None, verbose=True):
"""
Train the models for a specific number of iterations.
Arguments:
iterations (int): Number of iterations to train for.
callbacks (callable, list, optional): One
or more callbacks to call at the end of each
iteration. The function is given the total
number of batches that have been processed since
this trainer object was initialized (not reset
when loading a saved checkpoint).
Default value is None (unused).
verbose (bool): Write progress to stdout.
Default value is True.
"""
evaluated_metrics = {}
if self.rank:
verbose = False
if verbose:
progress = utils.ProgressWriter(iterations)
value_tracker = utils.ValueTracker()
for _ in range(iterations):
            # Figure out if G and/or D should be
# regularized this iteration
G_reg = self.G_reg is not None
if self.G_reg_interval and G_reg:
G_reg = self.seen % self.G_reg_interval == 0
D_reg = self.D_reg is not None
if self.D_reg_interval and D_reg:
D_reg = self.seen % self.D_reg_interval == 0
# -----| Train G |----- #
# Disable gradients for D while training G
self.D.requires_grad_(False)
for _ in range(self.G_iter):
self.G_opt.zero_grad()
G_loss = 0
for i in range(self.subdivisions):
latents, latent_labels = self.prior_generator(
multi_latent_prob=self.style_mix_prob)
loss, _ = self.G_loss(
G=self.G,
D=self.D,
latents=latents,
latent_labels=latent_labels
)
G_loss += self._backward(loss, self.G_opt)
if G_reg:
if self.G_reg_interval:
# For lazy regularization, even if the interval
# is set to 1, the optimization step is taken
# before the gradients of the regularization is gathered.
self._sync_distributed(G=self.G)
self.G_opt.step()
self.G_opt.zero_grad()
G_reg_loss = 0
# Pathreg is expensive to compute which
# is why G regularization has its own settings
# for subdivisions and batch size.
for i in range(self.G_reg_subdivisions):
latents, latent_labels = self.prior_generator(
batch_size=self.G_reg_device_batch_size,
multi_latent_prob=self.style_mix_prob
)
_, reg_loss = self.G_reg(
G=self.G,
latents=latents,
latent_labels=latent_labels
)
G_reg_loss += self._backward(
reg_loss,
self.G_opt, mul=self.G_reg_interval or 1,
subdivisions=self.G_reg_subdivisions
)
self._sync_distributed(G=self.G)
self.G_opt.step()
# Update moving average of weights after
# each G training subiteration
if self.Gs is not None:
self.Gs.update()
# Re-enable gradients for D
self.D.requires_grad_(True)
# -----| Train D |----- #
# Disable gradients for G while training D
self.G.requires_grad_(False)
for _ in range(self.D_iter):
self.D_opt.zero_grad()
D_loss = 0
for i in range(self.subdivisions):
latents, latent_labels = self.prior_generator(
multi_latent_prob=self.style_mix_prob)
reals, real_labels = self._get_batch()
loss, _ = self.D_loss(
G=self.G,
D=self.D,
latents=latents,
latent_labels=latent_labels,
reals=reals,
real_labels=real_labels
)
                    D_loss += self._backward(loss, self.D_opt)
if D_reg:
if self.D_reg_interval:
# For lazy regularization, even if the interval
# is set to 1, the optimization step is taken
# before the gradients of the regularization is gathered.
self._sync_distributed(D=self.D)
self.D_opt.step()
self.D_opt.zero_grad()
D_reg_loss = 0
for i in range(self.subdivisions):
latents, latent_labels = self.prior_generator(
multi_latent_prob=self.style_mix_prob)
reals, real_labels = self._get_batch()
_, reg_loss = self.D_reg(
G=self.G,
D=self.D,
latents=latents,
latent_labels=latent_labels,
reals=reals,
real_labels=real_labels
)
D_reg_loss += self._backward(
reg_loss, self.D_opt, mul=self.D_reg_interval or 1)
self._sync_distributed(D=self.D)
self.D_opt.step()
# Re-enable grads for G
self.G.requires_grad_(True)
if self.tb_writer is not None or verbose:
# In case verbose is true and tensorboard logging enabled
# we calculate grad norm here to only do it once as well
# as making sure we do it before any metrics that may
# possibly zero the grads.
G_grad_norm = utils.get_grad_norm_from_optimizer(self.G_opt)
D_grad_norm = utils.get_grad_norm_from_optimizer(self.D_opt)
for name, metric in self.metrics.items():
if not metric['interval'] or self.seen % metric['interval'] == 0:
evaluated_metrics[name] = metric['eval_fn']()
# Printing and logging
# Tensorboard logging
if self.tb_writer is not None:
self.tb_writer.add_scalar('Loss/G_loss', G_loss, self.seen)
if G_reg:
self.tb_writer.add_scalar('Loss/G_reg', G_reg_loss, self.seen)
self.tb_writer.add_scalar('Grad_norm/G_reg', G_grad_norm, self.seen)
self.tb_writer.add_scalar('Params/pl_avg', self.pl_avg, self.seen)
else:
self.tb_writer.add_scalar('Grad_norm/G_loss', G_grad_norm, self.seen)
self.tb_writer.add_scalar('Loss/D_loss', D_loss, self.seen)
if D_reg:
self.tb_writer.add_scalar('Loss/D_reg', D_reg_loss, self.seen)
self.tb_writer.add_scalar('Grad_norm/D_reg', D_grad_norm, self.seen)
else:
self.tb_writer.add_scalar('Grad_norm/D_loss', D_grad_norm, self.seen)
for name, value in evaluated_metrics.items():
self.tb_writer.add_scalar('Metrics/{}'.format(name), value, self.seen)
# Printing
if verbose:
value_tracker.add('seen', self.seen + 1, beta=0)
value_tracker.add('G_lr', self.G_opt.param_groups[0]['lr'], beta=0)
value_tracker.add('G_loss', G_loss)
if G_reg:
value_tracker.add('G_reg', G_reg_loss)
value_tracker.add('G_reg_grad_norm', G_grad_norm)
value_tracker.add('pl_avg', self.pl_avg, beta=0)
else:
value_tracker.add('G_loss_grad_norm', G_grad_norm)
value_tracker.add('D_lr', self.D_opt.param_groups[0]['lr'], beta=0)
value_tracker.add('D_loss', D_loss)
if D_reg:
value_tracker.add('D_reg', D_reg_loss)
value_tracker.add('D_reg_grad_norm', D_grad_norm)
else:
value_tracker.add('D_loss_grad_norm', D_grad_norm)
for name, value in evaluated_metrics.items():
value_tracker.add(name, value, beta=0)
progress.write(str(value_tracker))
# Callback
for callback in utils.to_list(callbacks) + self.callbacks:
callback(self.seen)
self.seen += 1
# Handle checkpointing
if not self.rank and self.checkpoint_dir and self.checkpoint_interval:
if self.seen % self.checkpoint_interval == 0:
checkpoint_path = os.path.join(
self.checkpoint_dir,
'{}_{}'.format(self.seen, time.strftime('%Y-%m-%d_%H-%M-%S'))
)
self.save_checkpoint(checkpoint_path)
if verbose:
progress.close()
def register_metric(self, name, eval_fn, interval):
"""
Add a metric. This will be evaluated every `interval`
training iteration. Used by tensorboard and progress
updates written to stdout while training.
Arguments:
name (str): A name for the metric. If a metric with
this name already exists it will be overwritten.
eval_fn (callable): A function that evaluates the metric
and returns a python number.
interval (int): The interval to evaluate at.
"""
self.metrics[name] = {'eval_fn': eval_fn, 'interval': interval}
def remove_metric(self, name):
"""
Remove a metric that was previously registered.
Arguments:
name (str): Name of the metric.
"""
if name in self.metrics:
del self.metrics[name]
else:
warnings.warn(
'Attempting to remove metric {} '.format(name) + \
'which does not exist.'
)
def generate_images(self,
num_images,
seed=None,
truncation_psi=None,
truncation_cutoff=None,
label=None,
pixel_min=-1,
pixel_max=1):
"""
Generate some images with the generator and transform them into PIL
images and return them as a list.
Arguments:
num_images (int): Number of images to generate.
seed (int, optional): The seed for the random generation
of input latent values.
truncation_psi (float): See stylegan2.model.Generator.set_truncation()
Default value is None.
truncation_cutoff (int): See stylegan2.model.Generator.set_truncation()
label (int, list, optional): Label to condition all generated images with
or multiple labels, one for each generated image.
pixel_min (float): The min value in the pixel range of the generator.
Default value is -1.
            pixel_max (float): The max value in the pixel range of the generator.
Default value is 1.
Returns:
images (list): List of PIL images.
"""
if seed is None:
seed = int(10000 * time.time())
latents, latent_labels = self.prior_generator(num_images, seed=seed)
if label:
assert latent_labels is not None, 'Can not specify label when no labels ' + \
'are used by this model.'
label = utils.to_list(label)
assert all(isinstance(l, int) for l in label), '`label` can only consist of ' + \
'one or more python integers.'
assert len(label) == 1 or len(label) == num_images, '`label` can either ' + \
'specify one label to use for all images or a list of labels of the ' + \
'same length as number of images. Received {} labels '.format(
len(label)) + \
'but {} images are to be generated.'.format(num_images)
if len(label) == 1:
latent_labels.fill_(label[0])
else:
latent_labels = torch.tensor(label).to(latent_labels)
self.Gs.set_truncation(
truncation_psi=truncation_psi, truncation_cutoff=truncation_cutoff)
with torch.no_grad():
generated = self.Gs(latents=latents, labels=latent_labels)
        assert generated.dim() == 4, 'Can only generate images when using a ' + \
            'network built for 2-dimensional data.'
return utils.tensor_to_PIL(generated, pixel_min=pixel_min, pixel_max=pixel_max)
def log_images_tensorboard(self, images, name, resize=256):
"""
Log a list of images to tensorboard by first turning
them into a grid. Can not be performed if rank > 0
or tensorboard_log_dir was not given at construction.
Arguments:
images (list): List of PIL images.
name (str): The name to log images for.
resize (int, tuple): The height and width to use for
each image in the grid. Default value is 256.
"""
assert self.tb_writer is not None, \
'No tensorboard log dir was specified ' + \
'when constructing this object.'
image = utils.stack_images_PIL(images, individual_img_size=resize)
image = torchvision.transforms.ToTensor()(image)
self.tb_writer.add_image(name, image, self.seen)
def add_tensorboard_image_logging(self,
name,
interval,
num_images,
resize=256,
seed=None,
truncation_psi=None,
truncation_cutoff=None,
label=None,
pixel_min=-1,
pixel_max=1):
"""
Set up tensorboard logging of generated images to be performed
at a certain training interval. If distributed training is set up
and this object does not have the rank 0, no logging will be performed
by this object.
All arguments except the ones mentioned below have their description
in the docstring of `generate_images()` and `log_images_tensorboard()`.
Arguments:
interval (int): The interval at which to log generated images.
"""
if self.rank:
return
def callback(seen):
if seen % interval == 0:
images = self.generate_images(
num_images=num_images,
seed=seed,
truncation_psi=truncation_psi,
truncation_cutoff=truncation_cutoff,
label=label,
pixel_min=pixel_min,
pixel_max=pixel_max
)
self.log_images_tensorboard(
images=images,
name=name,
resize=resize
)
self.callbacks.append(callback)
def save_checkpoint(self, dir_path):
"""
Save the current state of this trainer as a checkpoint.
NOTE: The dataset can not be serialized and saved so this
has to be reconstructed and given when loading this checkpoint.
Arguments:
dir_path (str): The checkpoint path.
"""
if not os.path.exists(dir_path):
os.makedirs(dir_path)
else:
assert os.path.isdir(dir_path), '`dir_path` points to a file.'
kwargs = self.kwargs.copy()
# Update arguments that may have changed since construction
kwargs.update(
seen=self.seen,
pl_avg=float(self.pl_avg)
)
with open(os.path.join(dir_path, 'kwargs.json'), 'w') as fp:
json.dump(kwargs, fp)
torch.save(self.G_opt.state_dict(), os.path.join(dir_path, 'G_opt.pth'))
torch.save(self.D_opt.state_dict(), os.path.join(dir_path, 'D_opt.pth'))
models.save(self.G, os.path.join(dir_path, 'G.pth'))
models.save(self.D, os.path.join(dir_path, 'D.pth'))
if self.Gs is not None:
models.save(self.Gs, os.path.join(dir_path, 'Gs.pth'))
@classmethod
def load_checkpoint(cls, checkpoint_path, dataset, **kwargs):
"""
Load a checkpoint into a new Trainer object and return that
object. If the path specified points at a folder containing
multiple checkpoints, the latest one will be used.
The dataset can not be serialized and saved so it is required
to be explicitly given when loading a checkpoint.
Arguments:
checkpoint_path (str): Path to a checkpoint or to a folder
containing one or more checkpoints.
dataset (indexable): The dataset to use.
**kwargs (keyword arguments): Any other arguments to override
the ones saved in the checkpoint. Useful for when training
is continued on a different device or when distributed training
is changed.
"""
checkpoint_path = _find_checkpoint(checkpoint_path)
_is_checkpoint(checkpoint_path, enforce=True)
with open(os.path.join(checkpoint_path, 'kwargs.json'), 'r') as fp:
loaded_kwargs = json.load(fp)
loaded_kwargs.update(**kwargs)
device = torch.device('cpu')
if isinstance(loaded_kwargs['device'], (list, tuple)):
device = torch.device(loaded_kwargs['device'][0])
for name in ['G', 'D']:
fpath = os.path.join(checkpoint_path, name + '.pth')
loaded_kwargs[name] = models.load(fpath, map_location=device)
if os.path.exists(os.path.join(checkpoint_path, 'Gs.pth')):
loaded_kwargs['Gs'] = models.load(
os.path.join(checkpoint_path, 'Gs.pth'),
map_location=device if loaded_kwargs['Gs_device'] is None \
else torch.device(loaded_kwargs['Gs_device'])
)
obj = cls(dataset=dataset, **loaded_kwargs)
for name in ['G_opt', 'D_opt']:
fpath = os.path.join(checkpoint_path, name + '.pth')
            state_dict = torch.load(fpath, map_location=device)
getattr(obj, name).load_state_dict(state_dict)
return obj
# ----------------------------------------------------------------------------
# Checkpoint helper functions
def _is_checkpoint(dir_path, enforce=False):
if not dir_path:
if enforce:
raise ValueError('Not a checkpoint.')
return False
if not os.path.exists(dir_path):
if enforce:
raise FileNotFoundError('{} could not be found.'.format(dir_path))
return False
if not os.path.isdir(dir_path):
if enforce:
raise NotADirectoryError('{} is not a directory.'.format(dir_path))
return False
fnames = os.listdir(dir_path)
for fname in ['G.pth', 'D.pth', 'G_opt.pth', 'D_opt.pth', 'kwargs.json']:
if fname not in fnames:
if enforce:
raise FileNotFoundError(
'Could not find {} in {}.'.format(fname, dir_path))
return False
return True
def _find_checkpoint(dir_path):
if not dir_path:
return None
if not os.path.exists(dir_path) or not os.path.isdir(dir_path):
return None
if _is_checkpoint(dir_path):
return dir_path
checkpoint_names = []
for name in os.listdir(dir_path):
if _is_checkpoint(os.path.join(dir_path, name)):
checkpoint_names.append(name)
if not checkpoint_names:
return None
def get_iteration(name):
return int(name.split('_')[0])
def get_timestamp(name):
return '_'.join(name.split('_')[1:])
# Python sort is stable, meaning that this sort operation
# will guarantee that the order of values after the first
# sort will stay for a set of values that have the same
# key value.
checkpoint_names = sorted(
sorted(checkpoint_names, key=get_iteration), key=get_timestamp)
return os.path.join(dir_path, checkpoint_names[-1])
# ----------------------------------------------------------------------------
# Reg and loss function fetchers
def build_opt(net, opt_class, opt_kwargs, reg, reg_interval):
opt_kwargs['lr'] = opt_kwargs.get('lr', 1e-3)
if reg not in [None, False] and reg_interval:
mb_ratio = reg_interval / (reg_interval + 1.)
opt_kwargs['lr'] *= mb_ratio
if 'momentum' in opt_kwargs:
opt_kwargs['momentum'] = opt_kwargs['momentum'] ** mb_ratio
if 'betas' in opt_kwargs:
betas = opt_kwargs['betas']
opt_kwargs['betas'] = (betas[0] ** mb_ratio, betas[1] ** mb_ratio)
if isinstance(opt_class, str):
opt_class = getattr(torch.optim, opt_class.title())
return opt_class(net.parameters(), **opt_kwargs)
# ----------------------------------------------------------------------------
# Reg and loss function fetchers
_LOSS_FNS = {
'G': {
'logistic': loss_fns.G_logistic,
'logistic_ns': loss_fns.G_logistic_ns,
'wgan': loss_fns.G_wgan
},
'D': {
'logistic': loss_fns.D_logistic,
'wgan': loss_fns.D_wgan
}
}
def get_loss_fn(net, loss):
if callable(loss):
return loss
net = net.upper()
assert net in ['G', 'D'], 'Unknown net type {}'.format(net)
loss = loss.lower()
for name in _LOSS_FNS[net].keys():
if loss == name:
return _LOSS_FNS[net][name]
    raise ValueError('Unknown {} loss {}'.format(net, loss))
_REG_FNS = {
'G': {
'pathreg': loss_fns.G_pathreg
},
'D': {
'r1': loss_fns.D_r1,
'r2': loss_fns.D_r2,
'gp': loss_fns.D_gp,
}
}
def get_reg_fn(net, reg, **kwargs):
if reg is None:
return None
if callable(reg):
        return functools.partial(reg, **kwargs)
net = net.upper()
assert net in ['G', 'D'], 'Unknown net type {}'.format(net)
reg = reg.lower()
gamma = None
for name in _REG_FNS[net].keys():
if reg.startswith(name):
gamma_chars = [c for c in reg.replace(name, '') if c.isdigit() or c == '.']
if gamma_chars:
kwargs.update(gamma=float(''.join(gamma_chars)))
return functools.partial(_REG_FNS[net][name], **kwargs)
    raise ValueError('Unknown regularizer {}'.format(reg))
```
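The trainer above exposes checkpoint loading, metric registration and tensorboard image logging. A minimal usage sketch follows; the import path, the dataset object and the checkpoint directory are assumptions, not taken from the original code.
```python
# Hypothetical usage of the Trainer API above. The import path, dataset object
# and checkpoint directory are placeholders, not part of the original code.
from stylegan2.train import Trainer  # assumed import path

my_dataset = ...  # placeholder: any indexable dataset of training images
trainer = Trainer.load_checkpoint('checkpoints/', dataset=my_dataset)
trainer.register_metric('pl_avg', lambda: float(trainer.pl_avg), interval=100)
trainer.add_tensorboard_image_logging('fakes', interval=1000, num_images=4)
# trainer.train(...)  # the training-loop signature is not shown in this excerpt
```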
#### File: johndpope/GenEdi/test.py
```python
import matplotlib
import matplotlib.pyplot as plt
from torchsummaryX import summary
import pylab
from stylegan2 import utils
import numpy as np
import torch
import stylegan2
import pickle
import os
# matplotlib.use('TkAgg')
if __name__ == "__main__":
# path to model code and weight
plt.ion()
path_model = './Gs.pth'
device = torch.device('cpu')
latens = torch.randn([3, 512])
G = stylegan2.models.load(path_model)
assert isinstance(G, stylegan2.models.Generator), 'Model type has to be ' + \
'stylegan2.models.Generator. Found {}.'.format(type(G))
G.to(device)
latent_size, label_size = G.latent_size, G.label_size
summary(G, torch.zeros(1, 13, 512))
# for parameters in G.parameters():
# print(parameters)
# for name, parameters in G.named_parameters():
# print(name, ':', parameters.size())
# a = G.G_mapping(latens)
##
# # Generate random latent variables
# latents_ = []
# rnd = np.random.RandomState(seed=6600)
# latents_.append(torch.from_numpy(rnd.randn(latent_size)))
# #latents = torch.from_numpy(np.random.randn(1, latent_size)).to(device=device, dtype=torch.float32)
# # Generate dummy labels
# latents = torch.stack(latents_, dim=0).to(device=device, dtype=torch.float32)
# labels = None
#
#
# def gen_image(latents):
# """
# tool funciton to generate image from latent variables
# :param latents: latent variables
# :return:
# """
# with torch.no_grad():
# images = G(latents, labels=labels)
# return np.clip((images[0].permute(1,2,0).numpy() + 1.0)/2.0, a_min=0.0, a_max=1.0)
#
#
# img_cur = gen_image(latents)
# # """ plot figure with GUI """
# h_fig = plt.figure(figsize=[30, 30])
# h_ax = plt.axes([0.0, 0.0, 0.5, 1.0])
# h_ax.axis('off')
# h_img = plt.imshow(img_cur)
# plt.show()
print()
``` |
{
"source": "johndpope/poor-trader-py",
"score": 3
} |
#### File: poor-trader-py/poor_trader/chart.py
```python
import traceback
import pandas as pd
import numpy as np
from matplotlib import pylab as plt
plt.style.use('ggplot')
def generate_equity_chart(df_equity_curve, fpath, title='Equity Curve'):
    dates = df_equity_curve.index.values
    df_equity_curve = df_equity_curve.reset_index()
    xticks = np.linspace(0, len(dates) - 1, 20 if len(dates) >= 20 else len(dates))
    xlabels = [pd.to_datetime(dates[int(index)]).strftime('%Y-%m-%d') for index in xticks]
fig = plt.figure(figsize=(30, 30))
ax = plt.subplot(311)
ax.set_title('Equity Curve : cash={}, equity={}'.format(df_equity_curve.Cash.values[-1], df_equity_curve.Equity.values[-1]), fontsize=18)
ax.bar(df_equity_curve.index, df_equity_curve.Equity, width=1, color='limegreen')
ax.bar(df_equity_curve.index, df_equity_curve.Cash, width=1, color='green')
ax.set_xticks(xticks)
ax.set_xticklabels(xlabels, fontsize=12)
plt.xticks(rotation=90)
ax2 = plt.subplot(312)
ax2.set_title('Drawdown : max drawdown={}'.format(df_equity_curve.Drawdown.min()), fontsize=18)
ax2.bar(df_equity_curve.index, df_equity_curve.Drawdown, width=1, color='red')
ax2.set_xticks(xticks)
ax2.set_xticklabels(xlabels, fontsize=12)
plt.xticks(rotation=90)
ax3 = plt.subplot(313)
ax3.set_title('Drawdown % : max drawdown %={}%'.format(df_equity_curve.DrawdownPercent.min()), fontsize=18)
ax3.bar(df_equity_curve.index, df_equity_curve.DrawdownPercent, width=1, color='red')
ax3.set_yticklabels(['{:3.2f}%'.format(y) for y in ax3.get_yticks()])
ax3.set_xticks(xticks)
ax3.set_xticklabels(xlabels, fontsize=12)
plt.xticks(rotation=90)
if fpath:
try:
plt.savefig(fpath)
except:
print('Error charting {}'.format(title))
            traceback.print_exc()
else:
plt.show()
plt.clf()
plt.close(fig)
```
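`generate_equity_chart` expects the equity-curve frame to carry `Cash`, `Equity`, `Drawdown` and `DrawdownPercent` columns, indexed by date. A small synthetic example (the numbers are made up):
```python
# Synthetic example for generate_equity_chart; the column names come from the
# function body above, the values are made up.
import numpy as np
import pandas as pd
from poor_trader.chart import generate_equity_chart

dates = pd.date_range('2020-01-01', periods=120, freq='D')
equity = 100000 + np.cumsum(np.random.randn(120) * 500)
peak = np.maximum.accumulate(equity)
df_equity_curve = pd.DataFrame({
    'Cash': np.full(120, 40000.0),
    'Equity': equity,
    'Drawdown': equity - peak,
    'DrawdownPercent': 100.0 * (equity - peak) / peak,
}, index=dates)

generate_equity_chart(df_equity_curve, fpath='equity_curve.png', title='Demo Equity Curve')
```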
#### File: poor_trader/sample/trading_systems.py
```python
import os
import pandas as pd
from poor_trader import config
from poor_trader import trading
from poor_trader import systems
class CombinedIndicators(trading.TradingSystem):
def __init__(self, portfolio, systems_method_list, name='CombinedIndicators'):
super(CombinedIndicators, self).__init__(name=name)
self.portfolio = portfolio
self.market = self.portfolio.market
self.systems_method_list = systems_method_list
self.fpath = config.TRADING_SYSTEMS_PATH / '{}.pkl'.format(self.name)
self.df_indicators = pd.DataFrame()
self.init_indicators()
def init_indicators(self):
if os.path.exists(self.fpath):
self.df_indicators = pd.read_pickle(self.fpath)
else:
symbols = self.market.symbols
df_group_quotes = self.market.historical_data
df = pd.DataFrame()
for fname, df_positions in self.systems_method_list:
df_positions.columns = ['{}_{}'.format(col, fname) for col in df_positions.columns]
df = df.join(df_positions, how='outer')
self.df_indicators = df.copy()
self.df_indicators.to_pickle(self.fpath)
def get_indicators(self, trading_period, symbol, direction):
df = self.df_indicators.filter(regex='^{}_'.format(symbol))
df.columns = [col.replace('{}_'.format(symbol), '') for col in df.columns]
positions = df.loc[:trading_period].dropna().shift(1).iloc[-1]
df = pd.DataFrame()
df['Position'] = positions
direction_str = 'LONG' if direction == trading.Direction.LONG else 'SHORT'
return df[df['Position'] == direction_str]
def get_indicator_name(self, trading_period, symbol, direction):
return '_'.join(self.get_indicators(trading_period, symbol, direction).index.values)
def get_close_indicator_name(self, trading_period, symbol, open_direction):
close_direction = trading.Direction.LONG if open_direction == trading.Direction.SHORT else trading.Direction.SHORT
return self.get_indicator_name(trading_period, symbol, close_direction)
def is_long(self, trading_period, symbol):
open_position = self.portfolio.get_open_position(symbol)
if open_position.empty:
return len(self.get_indicators(trading_period, symbol, trading.Direction.LONG).index.values) > 0
return False
def is_short(self, trading_period, symbol):
open_position = self.portfolio.get_open_position(symbol)
if open_position.empty:
return len(self.get_indicators(trading_period, symbol, trading.Direction.SHORT).index.values) > 0
return False
def is_close(self, trading_period, symbol, open_trades):
short_indicators = self.get_indicator_name(trading_period, symbol, trading.Direction.SHORT)
if len(open_trades.index.values) > 1:
print(open_trades)
raise NotImplementedError
for index in open_trades.index.values:
open_indicators = open_trades.loc[index]['Indicator'].split('_')
close_indicators = short_indicators.split('_')
remaining_indicators = [_ for _ in open_indicators if _ not in close_indicators]
return len(remaining_indicators) <= 0
class Turtle(CombinedIndicators):
def __init__(self, portfolio, name='Turtle'):
symbols = portfolio.market.symbols
df_group_quotes = portfolio.df_group_quotes
super(Turtle, self).__init__(portfolio,
[systems.run_atr_channel_breakout(symbols, df_group_quotes),
systems.run_dcsma(symbols, df_group_quotes),
systems.run_slsma(symbols, df_group_quotes)],
name=name)
```
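Each entry of `systems_method_list` is a `(fname, df_positions)` pair as returned by the `systems.run_*` functions: one column per symbol holding 'LONG', 'SHORT' or 'HOLD'. A toy illustration of how `CombinedIndicators.__init__` suffixes those columns (the symbols and system name are made up):
```python
# Illustration only: how CombinedIndicators renames position columns before joining.
import pandas as pd

fname = 'ATRChannel7|3|120'  # illustrative system name
df_positions = pd.DataFrame({'TEL': ['HOLD', 'LONG'], 'SMPH': ['SHORT', 'HOLD']})
df_positions.columns = ['{}_{}'.format(col, fname) for col in df_positions.columns]
print(df_positions.columns.tolist())
# ['TEL_ATRChannel7|3|120', 'SMPH_ATRChannel7|3|120']
```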
#### File: poor-trader-py/poor_trader/systems.py
```python
import os
import pandas as pd
import numpy as np
from path import Path
from poor_trader import indicators
from poor_trader.config import SYSTEMS_PATH
def _trim_quotes(symbol, df_group_quotes):
df_quotes = df_group_quotes.filter(regex='^{}_'.format(symbol))
df_quotes.columns = [_.replace(symbol + '_', '') for _ in df_quotes.columns]
df_quotes = df_quotes.loc[df_quotes['Date'].dropna().index]
return df_quotes
def run_atr_channel_breakout(symbols, df_group_quotes, prefix='ATRChannel', top=7, bottom=3, sma=120):
fname = '{}{}|{}|{}'.format(prefix, top, bottom, sma)
fpath = SYSTEMS_PATH / '{}.pkl'.format(fname)
if os.path.exists(fpath):
return fname, pd.read_pickle(fpath)
else:
df_positions = pd.DataFrame()
for symbol in symbols:
print('Running', symbol)
df_quotes = _trim_quotes(symbol, df_group_quotes)
df_atr_channel = indicators.atr_channel(df_quotes, top=top, bottom=bottom, sma=sma, symbol=symbol)
df = pd.DataFrame(index=df_quotes.index)
long_condition = np.logical_and(df_quotes.Close > df_atr_channel.top, df_quotes.Close.shift(1) < df_atr_channel.top.shift(1))
short_condition = np.logical_or(df_quotes.Close < df_atr_channel.bottom, df_quotes.Close < df_atr_channel.mid)
df[symbol] = np.where(long_condition, 'LONG', np.where(short_condition, 'SHORT', 'HOLD'))
df_positions = df_positions.join(df, how='outer')
df_positions.to_pickle(fpath)
return fname, df_positions
def run_atr_channel_breakout_sma(symbols, df_group_quotes, prefix='ATRChannelSMA', top=7, bottom=3, sma=120, fast=100, slow=150):
fname = '{}{}|{}|{}|{}|{}'.format(prefix, top, bottom, sma, fast, slow)
fpath = SYSTEMS_PATH / '{}.pkl'.format(fname)
if os.path.exists(fpath):
return fname, pd.read_pickle(fpath)
else:
df_positions = pd.DataFrame()
for symbol in symbols:
print('Running', symbol)
df_quotes = _trim_quotes(symbol, df_group_quotes)
df_atr_channel = indicators.atr_channel(df_quotes, top=top, bottom=bottom, sma=sma, symbol=symbol)
df_sma = indicators.SMA_cross(df_quotes, fast=fast, slow=slow, symbol=symbol)
df = pd.DataFrame(index=df_quotes.index)
long_condition = np.logical_and(np.logical_and(df_sma.FastSMA > df_sma.SlowSMA, df_quotes.Close > df_sma.FastSMA),
np.logical_and(df_quotes.Close > df_atr_channel.top, df_quotes.Close.shift(1) < df_atr_channel.top.shift(1)))
short_condition = np.logical_or(df_quotes.Close < df_atr_channel.bottom, df_quotes.Close < df_atr_channel.mid)
df[symbol] = np.where(long_condition, 'LONG', np.where(short_condition, 'SHORT', 'HOLD'))
df_positions = df_positions.join(df, how='outer')
df_positions.to_pickle(fpath)
return fname, df_positions
def run_dcsma(symbols, df_group_quotes, prefix='DonchianSMA', high=50, low=50, fast=100, slow=150):
fname = '{}{}|{}|{}|{}'.format(prefix, high, low, fast, slow)
fpath = SYSTEMS_PATH / '{}.pkl'.format(fname)
if os.path.exists(fpath):
return fname, pd.read_pickle(fpath)
else:
df_positions = pd.DataFrame()
for symbol in symbols:
print('Running', symbol)
df_quotes = _trim_quotes(symbol, df_group_quotes)
df_donchian = indicators.donchian_channel(df_quotes, high=high, low=low, symbol=symbol)
df_sma = indicators.SMA_cross(df_quotes, fast=fast, slow=slow, symbol=symbol)
df = pd.DataFrame(index=df_quotes.index)
long_condition = np.logical_and(np.logical_and(df_sma.FastSMA > df_sma.SlowSMA, df_quotes.Close > df_sma.FastSMA),
np.logical_and(df_donchian.high.shift(1) < df_donchian.high, df_donchian.low.shift(1) <= df_donchian.low))
short_condition = np.logical_and(df_donchian.low.shift(1) > df_donchian.low, df_donchian.high.shift(1) >= df_donchian.high)
df[symbol] = np.where(long_condition, 'LONG', np.where(short_condition, 'SHORT', 'HOLD'))
df_positions = df_positions.join(df, how='outer')
df_positions.to_pickle(fpath)
return fname, df_positions
def run_slsma(symbols, df_group_quotes, prefix='SLSMA', st_fast=5, st_slow=10, s_fast=40, s_slow=60, l_fast=100, l_slow=120):
fname = '{}{}|{}|{}|{}|{}|{}'.format(prefix, st_fast, st_slow, s_fast, s_slow, l_fast, l_slow)
fpath = SYSTEMS_PATH / '{}.pkl'.format(fname)
if os.path.exists(fpath):
return fname, pd.read_pickle(fpath)
else:
df_positions = pd.DataFrame()
for symbol in symbols:
print('Running', symbol)
df_quotes = _trim_quotes(symbol, df_group_quotes)
shortest_sma = indicators.SMA_cross(df_quotes, fast=st_fast, slow=st_slow, symbol=symbol)
short_sma = indicators.SMA_cross(df_quotes, fast=s_fast, slow=s_slow, symbol=symbol)
long_sma = indicators.SMA_cross(df_quotes, fast=l_fast, slow=l_slow, symbol=symbol)
df = pd.DataFrame(index=df_quotes.index)
long_condition = np.logical_and(np.logical_and(long_sma.FastSMA > long_sma.SlowSMA, short_sma.FastSMA > long_sma.FastSMA),
np.logical_or(long_sma.FastCrossoverSlow == 1,
np.logical_or(short_sma.FastCrossoverSlow == 1,
np.logical_and(short_sma.FastSMA > short_sma.SlowSMA,
shortest_sma.FastCrossoverSlow == 1))))
short_condition = short_sma.FastSMA < long_sma.FastSMA
df[symbol] = np.where(long_condition, 'LONG', np.where(short_condition, 'SHORT', 'HOLD'))
df_positions = df_positions.join(df, how='outer')
df_positions.to_pickle(fpath)
return fname, df_positions
```
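All of the `run_*` systems consume a wide `df_group_quotes` frame whose columns are prefixed with the symbol (for example `TEL_Date`, `TEL_Close`); `_trim_quotes` slices a single symbol back out. A small check of that layout (the prices are made up):
```python
# Demonstrates the column layout _trim_quotes expects; the prices are made up.
import pandas as pd
from poor_trader.systems import _trim_quotes

df_group_quotes = pd.DataFrame({
    'TEL_Date': pd.date_range('2020-01-01', periods=3),
    'TEL_Close': [1300.0, 1310.0, 1295.0],
    'SMPH_Date': pd.date_range('2020-01-01', periods=3),
    'SMPH_Close': [32.0, 32.5, 31.8],
})
df_tel = _trim_quotes('TEL', df_group_quotes)
print(df_tel.columns.tolist())  # ['Date', 'Close']
```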
#### File: poor-trader-py/poor_trader/utils.py
```python
import os
import re
import numpy as np
import pandas as pd
import traceback
def makedirs(path):
if not os.path.exists(path):
print('Creating directory', path)
os.makedirs(path)
def load_trades(fpath=None):
columns = ['StartDate', 'EndDate', 'Symbol', 'BuyPrice', 'SellPrice',
'Shares', 'BuyValue', 'SellValue', 'TotalRisk', 'PnL', 'RMultiple',
'LastRecordDate', 'LastPrice', 'LastValue', 'LastPnL', 'LastRMultiple', 'OpenIndicator']
if fpath is None or not os.path.exists(fpath):
return pd.DataFrame(columns=columns)
else:
try:
df = pd.read_csv(fpath, index_col=0)
df['StartDate'] = pd.to_datetime(df['StartDate'])
df['EndDate'] = pd.to_datetime(df['EndDate'])
if 'CloseIndicator' not in df.columns:
df['CloseIndicator'] = ''
return df
except:
            traceback.print_exc()
return pd.DataFrame(columns=columns)
def load_equity_table(fpath):
if os.path.exists(fpath):
df = pd.read_csv(fpath, index_col=0, parse_dates=True)
return df
def load_quotes(symbol):
    # NOTE: price_loader and the boardlot() helper are not imported in this module;
    # they are assumed to be provided elsewhere in the poor_trader package.
    df = price_loader.load_price(symbol)
    df['BoardLot'] = df.Close.map(lambda price: utils.boardlot(price))
df = df.drop_duplicates(['Date'], keep='first')
return df
def roundn(n, places=4):
try:
return float('%.{}f'.format(places) % n)
except:
return n
def _round(nseries, places=4):
try:
return pd.Series([roundn(n, places) for n in nseries], nseries.index)
except:
return nseries
def round_df(df, places=4):
return df.apply(lambda x : _round(x, places))
def rindex(mylist, myvalue):
return len(mylist) - mylist[::-1].index(myvalue) - 1
def historical_volatility(df_quotes):
logreturns = np.log(df_quotes.Close / df_quotes.Close.shift(1))
return np.round(np.sqrt(252 * logreturns.var()), 1)
def quotes_range(df_quotes):
if len(df_quotes.index.values) == 0:
return 'None'
start = df_quotes.index.values[0]
end = df_quotes.index.values[-1]
try:
start = pd.to_datetime(start)
end = pd.to_datetime(end)
dateformat = '%Y%m%d'
return '{}_to_{}'.format(start.strftime(dateformat), end.strftime(dateformat))
except:
return '{}_to_{}'.format(start, end)
``` |
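Quick checks of the small helpers above (the values are arbitrary):
```python
# Arbitrary values, just to show what the helpers return.
import pandas as pd
from poor_trader.utils import roundn, round_df, historical_volatility

print(roundn(3.1415926, places=2))                 # 3.14
df = pd.DataFrame({'Close': [100.0, 101.5, 99.8, 102.3]})
print(round_df(df / 3, places=3))                  # element-wise rounding
print(historical_volatility(df))                   # annualized volatility of log returns
```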
{
"source": "johndpope/PTI",
"score": 2
} |
#### File: johndpope/PTI/StyleCLIP_Latent_Optimization.py
```python
import torch
import clip
from stylegan2.model import Generator
from argparse import Namespace
import os
import math
import torchvision
from torch import optim
class CLIPLoss(torch.nn.Module):
def __init__(self):
super(CLIPLoss, self).__init__()
self.model, self.preprocess = clip.load("ViT-B/32", device="cuda")
self.upsample = torch.nn.Upsample(scale_factor=7)
self.avg_pool = torch.nn.AvgPool2d(kernel_size=32)
def forward(self, image, text):
image = self.avg_pool(self.upsample(image))
similarity = 1 - self.model(image, text)[0] / 100
return similarity
g_ema = Generator(1024, 512, 8)
g_ema.load_state_dict(torch.load('stylegan2-ffhq-config-f.pt')["g_ema"], strict=False)
g_ema.eval()
g_ema = g_ema.cuda()
args = Namespace()
args.description = 'A really sad face'
args.lr_rampup = 0.05
args.lr = 0.1
args.step = 150
args.l2_lambda = 0.005 # The weight for similarity to the original image.
args.save_intermediate_image_every = 1
args.results_dir = 'results'
# * Optimize a latent vector and get the result.
# The learning rate adjustment function.
def get_lr(t, initial_lr, rampdown=0.50, rampup=0.05):
lr_ramp = min(1, (1 - t) / rampdown)
lr_ramp = 0.5 - 0.5 * math.cos(lr_ramp * math.pi)
lr_ramp = lr_ramp * min(1, t / rampup)
return initial_lr * lr_ramp
text_inputs = torch.cat([clip.tokenize(args.description)]).cuda()
os.makedirs(args.results_dir, exist_ok=True)
# Initialize the latent vector to be updated.
# NOTE: latent_code_init is not defined in the original snippet; starting from the
# generator's mean latent broadcast to W+ is one common (assumed) choice.
with torch.no_grad():
    mean_latent = g_ema.mean_latent(4096)
latent_code_init = mean_latent.detach().unsqueeze(1).repeat(1, g_ema.n_latent, 1)
latent = latent_code_init.detach().clone()
latent.requires_grad = True
clip_loss = CLIPLoss()
optimizer = optim.Adam([latent], lr=args.lr)
for i in range(args.step):
# Adjust the learning rate.
t = i / args.step
lr = get_lr(t, args.lr)
optimizer.param_groups[0]["lr"] = lr
# Generate an image using the latent vector.
img_gen, _ = g_ema([latent], input_is_latent=True, randomize_noise=False)
# Calculate the loss value.
c_loss = clip_loss(img_gen, text_inputs)
l2_loss = ((latent_code_init - latent) ** 2).sum()
loss = c_loss + args.l2_lambda * l2_loss
# Get gradient and update the latent vector.
optimizer.zero_grad()
loss.backward()
optimizer.step()
# Log the current state.
print(f"lr: {lr}, loss: {loss.item():.4f}")
if args.save_intermediate_image_every > 0 and i % args.save_intermediate_image_every == 0:
with torch.no_grad():
img_gen, _ = g_ema([latent], input_is_latent=True, randomize_noise=False)
torchvision.utils.save_image(img_gen, f"results/{str(i).zfill(5)}.png", normalize=True, range=(-1, 1))
with torch.no_grad():
img_orig, _ = g_ema([latent_code_init], input_is_latent=True, randomize_noise=False)
```
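The `get_lr` schedule above warms the learning rate up over the first `rampup` fraction of steps and cosine-decays it to zero near the end. A standalone printout of that shape (the function is copied from the script so the snippet runs on its own):
```python
# Standalone check of the ramped learning-rate schedule used in the script above.
import math

def get_lr(t, initial_lr, rampdown=0.50, rampup=0.05):
    lr_ramp = min(1, (1 - t) / rampdown)
    lr_ramp = 0.5 - 0.5 * math.cos(lr_ramp * math.pi)
    lr_ramp = lr_ramp * min(1, t / rampup)
    return initial_lr * lr_ramp

for i in [0, 7, 75, 140, 149]:
    print(i, round(get_lr(i / 150, 0.1), 4))
```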
#### File: PTI/utils/align_data.py
```python
from configs import paths_config
import dlib
import glob
import os
from tqdm import tqdm
from utils.alignment import align_face
def pre_process_images(raw_images_path):
current_directory = os.getcwd()
IMAGE_SIZE = 1024
predictor = dlib.shape_predictor(paths_config.dlib)
os.chdir(raw_images_path)
images_names = glob.glob(f'*')
aligned_images = []
for image_name in tqdm(images_names):
try:
aligned_image = align_face(filepath=f'{raw_images_path}/{image_name}',
predictor=predictor, output_size=IMAGE_SIZE)
aligned_images.append(aligned_image)
except Exception as e:
print(e)
os.makedirs(paths_config.input_data_path, exist_ok=True)
for image, name in zip(aligned_images, images_names):
real_name = name.split('.')[0]
image.save(f'{paths_config.input_data_path}/{real_name}.jpeg')
os.chdir(current_directory)
if __name__ == "__main__":
pre_process_images('')
``` |
{
"source": "johndpope/python-imgcat",
"score": 3
} |
#### File: python-imgcat/imgcat/imgcat.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import base64
import sys
import os
import struct
import io
import subprocess
import contextlib
IS_PY_2 = (sys.version_info[0] <= 2)
IS_PY_3 = (not IS_PY_2)
if IS_PY_2:
FileNotFoundError = IOError # pylint: disable=redefined-builtin
from urllib import urlopen # type: ignore # pylint: disable=no-name-in-module
else: # PY3
from urllib.request import urlopen
def get_image_shape(buf):
'''
Extracts image shape as 2-tuple (width, height) from the content buffer.
Supports GIF, PNG and other image types (e.g. JPEG) if PIL/Pillow is installed.
Returns (None, None) if it can't be identified.
'''
def _unpack(fmt, buffer, mode='Image'):
try:
return struct.unpack(fmt, buffer)
except struct.error:
raise ValueError("Invalid {} file".format(mode))
# TODO: handle 'stream-like' data efficiently, not storing all the content into memory
L = len(buf)
if L >= 10 and buf[:6] in (b'GIF87a', b'GIF89a'):
return _unpack("<hh", buf[6:10], mode='GIF')
elif L >= 24 and buf.startswith(b'\211PNG\r\n\032\n') and buf[12:16] == b'IHDR':
return _unpack(">LL", buf[16:24], mode='PNG')
elif L >= 16 and buf.startswith(b'\211PNG\r\n\032\n'):
return _unpack(">LL", buf[8:16], mode='PNG')
else:
# everything else: get width/height from PIL
# TODO: it might be inefficient to write again the memory-loaded content to buffer...
b = io.BytesIO()
b.write(buf)
try:
from PIL import Image
im = Image.open(b)
return im.width, im.height
except (IOError, OSError) as ex:
# PIL.Image.open throws an error -- probably invalid byte input are given
sys.stderr.write("Warning: PIL cannot identify image; this may not be an image file" + "\n")
except ImportError:
# PIL not available
sys.stderr.write("Warning: cannot determine the image size; please install Pillow" + "\n")
sys.stderr.flush()
finally:
b.close()
return None, None
def _isinstance(obj, module, clsname):
"""A helper that works like isinstance(obj, module:clsname), but even when
the module hasn't been imported or the type is not importable."""
if module not in sys.modules:
return False
try:
clstype = getattr(sys.modules[module], clsname)
return isinstance(obj, clstype)
except AttributeError:
return False
def to_content_buf(data):
# TODO: handle 'stream-like' data efficiently, rather than storing into RAM
if isinstance(data, bytes):
return data
elif isinstance(data, io.BufferedReader) or \
(IS_PY_2 and isinstance(data, file)): # pylint: disable=undefined-variable
buf = data
return buf.read()
elif isinstance(data, io.TextIOWrapper):
return data.buffer.read()
elif _isinstance(data, 'numpy', 'ndarray'):
# numpy ndarray: convert to png
im = data
if len(im.shape) == 2:
mode = 'L' # 8-bit pixels, grayscale
im = im.astype(sys.modules['numpy'].uint8)
elif len(im.shape) == 3 and im.shape[2] in (3, 4):
mode = None # RGB/RGBA
if im.dtype.kind == 'f':
im = (im * 255).astype('uint8')
else:
raise ValueError("Expected a 3D ndarray (RGB/RGBA image) or 2D (grayscale image), "
"but given shape: {}".format(im.shape))
try:
from PIL import Image
except ImportError as e:
raise ImportError(e.msg +
"\nTo draw numpy arrays, we require Pillow. " +
"(pip install Pillow)") # TODO; reraise
with io.BytesIO() as buf:
# mode: https://pillow.readthedocs.io/en/4.2.x/handbook/concepts.html#concept-modes
Image.fromarray(im, mode=mode).save(buf, format='png')
return buf.getvalue()
elif _isinstance(data, 'torch', 'Tensor'):
# pytorch tensor: convert to png
im = data
try:
from torchvision import transforms
except ImportError as e:
raise ImportError(e.msg +
"\nTo draw torch tensor, we require torchvision. " +
"(pip install torchvision)")
with io.BytesIO() as buf:
transforms.ToPILImage()(im).save(buf, format='png')
return buf.getvalue()
elif _isinstance(data, 'tensorflow.python.framework.ops', 'EagerTensor'):
im = data
return to_content_buf(im.numpy())
elif _isinstance(data, 'PIL.Image', 'Image'):
# PIL/Pillow images
img = data
with io.BytesIO() as buf:
img.save(buf, format='png')
return buf.getvalue()
elif _isinstance(data, 'matplotlib.figure', 'Figure'):
# matplotlib figures
fig = data
if fig.canvas is None:
from matplotlib.backends.backend_agg import FigureCanvasAgg
FigureCanvasAgg(fig)
with io.BytesIO() as buf:
fig.savefig(buf)
return buf.getvalue()
else:
raise TypeError("Unsupported type : {}".format(type(data)))
def get_tty_size():
with open('/dev/tty') as tty:
rows, columns = subprocess.check_output(['stty', 'size'], stdin=tty).split()
return int(rows), int(columns)
def imgcat(data, filename=None,
width=None, height=None, preserve_aspect_ratio=True,
pixels_per_line=24,
fp=None):
'''
Print image on terminal (iTerm2).
Follows the file-transfer protocol of iTerm2 described at
https://www.iterm2.com/documentation-images.html.
Args:
data: the content of image in buffer interface, numpy array, etc.
width: the width for displaying image, in number of characters (columns)
height: the height for displaying image, in number of lines (rows)
fp: The buffer to write to, defaults sys.stdout
'''
if fp is None:
fp = sys.stdout if IS_PY_2 \
else sys.stdout.buffer # for stdout, use buffer interface (py3)
buf = to_content_buf(data)
if len(buf) == 0:
raise ValueError("Empty buffer")
if height is None:
im_width, im_height = get_image_shape(buf)
if im_height:
assert pixels_per_line > 0
height = (im_height + (pixels_per_line - 1)) // pixels_per_line
# automatically limit height to the current tty,
# otherwise the image will be just erased
try:
tty_height, _ = get_tty_size()
height = max(1, min(height, tty_height - 9))
except OSError:
# may not be a terminal
pass
else:
# image height unavailable, fallback?
height = 10
from . import iterm2
iterm2._write_image(buf, fp,
filename=filename, width=width, height=height,
preserve_aspect_ratio=preserve_aspect_ratio)
def main():
import argparse
try:
from imgcat import __version__
except ImportError:
__version__ = 'N/A'
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('input', nargs='*', type=str,
help='Path to the images.')
parser.add_argument('--height', default=None, type=int,
help='The number of rows (in terminal) for displaying images.')
parser.add_argument('--width', default=None, type=int,
help='The number of columns (in terminal) for displaying images.')
parser.add_argument('-v', '--version', action='version',
version='python-imgcat %s' % __version__)
args = parser.parse_args()
kwargs = dict()
if args.height: kwargs['height'] = args.height
if args.width: kwargs['width'] = args.width
# read from stdin?
if not sys.stdin.isatty():
if not args.input or list(args.input) == ['-']:
stdin = sys.stdin if IS_PY_2 else sys.stdin.buffer
imgcat(to_content_buf(stdin), **kwargs)
return 0
# imgcat from arguments
for fname in args.input:
# filename: open local file or download from web
try:
if fname.startswith('http://') or fname.startswith('https://'):
with contextlib.closing(urlopen(fname)) as fp:
buf = fp.read() # pylint: disable=no-member
else:
with io.open(fname, 'rb') as fp:
buf = fp.read()
except IOError as e:
sys.stderr.write(str(e))
sys.stderr.write('\n')
return (e.errno or 1)
imgcat(buf, filename=os.path.basename(fname), **kwargs)
if not args.input:
parser.print_help()
return 0
if __name__ == '__main__':
sys.exit(main())
``` |
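A quick interactive check of the `imgcat()` API above (requires an iTerm2-compatible terminal; the ndarray path needs Pillow):
```python
# Display a synthetic grayscale gradient in the terminal (iTerm2 required).
import numpy as np
from imgcat import imgcat

gradient = np.tile(np.arange(256, dtype=np.uint8), (64, 1))  # 2D uint8 -> 'L' mode PNG
imgcat(gradient, height=8)
```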
{
"source": "johndpope/Singing-Voice-Conversion-with-conditional-VAW-GAN",
"score": 2
} |
#### File: Singing-Voice-Conversion-with-conditional-VAW-GAN/codes/main-vawgan.py
```python
import json
import os
from importlib import import_module
import numpy as np
import tensorflow as tf
from analyzer import Tanhize, read
from util.wrapper import validate_log_dirs
args = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string(
'corpus_name', 'singing_vc', 'Corpus name')
tf.app.flags.DEFINE_string(
'logdir_root', None, 'root of log dir')
tf.app.flags.DEFINE_string(
'logdir', None, 'log dir')
tf.app.flags.DEFINE_string(
'restore_from', None, 'restore from dir (not from *.ckpt)')
tf.app.flags.DEFINE_string('gpu_cfg', None, 'GPU configuration')
tf.app.flags.DEFINE_integer('summary_freq', 1000, 'Update summary')
tf.app.flags.DEFINE_string(
'ckpt', None, 'specify the ckpt in restore_from (if there are multiple ckpts)') # TODO
tf.app.flags.DEFINE_string(
'architecture', 'architecture-vawgan-vcc2016.json', 'network architecture')
tf.app.flags.DEFINE_string('model_module', 'model.vawgan', 'Model module')
tf.app.flags.DEFINE_string('model', 'VAWGAN', 'Model: ConvVAE, VAWGAN')
tf.app.flags.DEFINE_string('trainer_module', 'trainer.vawgan', 'Trainer module')
tf.app.flags.DEFINE_string('trainer', 'VAWGANTrainer', 'Trainer: VAETrainer, VAWGANTrainer')
def main(unused_args=None):
''' NOTE: The input is rescaled to [-1, 1] '''
module = import_module(args.model_module, package=None)
MODEL = getattr(module, args.model)
module = import_module(args.trainer_module, package=None)
TRAINER = getattr(module, args.trainer)
dirs = validate_log_dirs(args)
tf.gfile.MakeDirs(dirs['logdir'])
with open(args.architecture) as f:
arch = json.load(f)
with open(os.path.join(dirs['logdir'], args.architecture), 'w') as f:
json.dump(arch, f, indent=4)
normalizer = Tanhize(
xmax=np.fromfile('./etc/{}_xmax.npf'.format(args.corpus_name)),
xmin=np.fromfile('./etc/{}_xmin.npf'.format(args.corpus_name)),
)
x_s, y_s, f0_s = read(
file_pattern=arch['training']['src_dir'],
batch_size=arch['training']['batch_size'],
capacity=2048,
min_after_dequeue=1024,
normalizer=normalizer,
data_format='NHWC',
)
x_t, y_t, f0_t = read(
file_pattern=arch['training']['trg_dir'],
batch_size=arch['training']['batch_size'],
capacity=2048,
min_after_dequeue=1024,
normalizer=normalizer,
data_format='NHWC',
)
machine = MODEL(arch, is_training=True)
# y_s_new = tf.stack([y_s,f0_s],axis=1)
# y_t_new = tf.stack([y_t,f0_t],axis=1)
loss = machine.loss(x_s, y_s, f0_s, x_t, y_t, f0_t)
trainer = TRAINER(loss, arch, args, dirs)
trainer.train(nIter=arch['training']['max_iter'], machine=machine)
if __name__ == '__main__':
main()
``` |
{
"source": "johndpope/StyleSegments",
"score": 2
} |
#### File: segmentation/keras_segmentation/predict.py
```python
import glob
import random
import json
import os
import six
import cv2
import numpy as np
from tqdm import tqdm
from time import time
from .train import find_latest_checkpoint
from .data_utils.data_loader import get_image_array, get_segmentation_array,\
DATA_LOADER_SEED, class_colors, get_pairs_from_paths
from .models.config import IMAGE_ORDERING
random.seed(DATA_LOADER_SEED)
def model_from_checkpoint_path(checkpoints_path):
from .models.all_models import model_from_name
assert (os.path.isfile(checkpoints_path+"_config.json")
), "Checkpoint not found."
model_config = json.loads(
open(checkpoints_path+"_config.json", "r").read())
latest_weights = find_latest_checkpoint(checkpoints_path)
assert (latest_weights is not None), "Checkpoint not found."
model = model_from_name[model_config['model_class']](
model_config['n_classes'], input_height=model_config['input_height'],
input_width=model_config['input_width'])
print("loaded weights ", latest_weights)
model.load_weights(latest_weights)
return model
def get_colored_segmentation_image(seg_arr, n_classes, colors=class_colors):
output_height = seg_arr.shape[0]
output_width = seg_arr.shape[1]
seg_img = np.zeros((output_height, output_width, 3))
for c in range(n_classes):
seg_arr_c = seg_arr[:, :] == c
seg_img[:, :, 0] += ((seg_arr_c)*(colors[c][0])).astype('uint8')
seg_img[:, :, 1] += ((seg_arr_c)*(colors[c][1])).astype('uint8')
seg_img[:, :, 2] += ((seg_arr_c)*(colors[c][2])).astype('uint8')
return seg_img
def get_legends(class_names, colors=class_colors):
n_classes = len(class_names)
legend = np.zeros(((len(class_names) * 25) + 25, 125, 3),
dtype="uint8") + 255
class_names_colors = enumerate(zip(class_names[:n_classes],
colors[:n_classes]))
for (i, (class_name, color)) in class_names_colors:
color = [int(c) for c in color]
cv2.putText(legend, class_name, (5, (i * 25) + 17),
cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 0, 0), 1)
cv2.rectangle(legend, (100, (i * 25)), (125, (i * 25) + 25),
tuple(color), -1)
return legend
def overlay_seg_image(inp_img, seg_img):
orininal_h = inp_img.shape[0]
orininal_w = inp_img.shape[1]
seg_img = cv2.resize(seg_img, (orininal_w, orininal_h), interpolation=cv2.INTER_NEAREST)
fused_img = (inp_img/2 + seg_img/2).astype('uint8')
return fused_img
def concat_lenends(seg_img, legend_img):
new_h = np.maximum(seg_img.shape[0], legend_img.shape[0])
new_w = seg_img.shape[1] + legend_img.shape[1]
out_img = np.zeros((new_h, new_w, 3)).astype('uint8') + legend_img[0, 0, 0]
out_img[:legend_img.shape[0], : legend_img.shape[1]] = np.copy(legend_img)
out_img[:seg_img.shape[0], legend_img.shape[1]:] = np.copy(seg_img)
return out_img
def visualize_segmentation(seg_arr, inp_img=None, n_classes=None,
colors=class_colors, class_names=None,
overlay_img=False, show_legends=False,
prediction_width=None, prediction_height=None):
if n_classes is None:
n_classes = np.max(seg_arr)
seg_img = get_colored_segmentation_image(seg_arr, n_classes, colors=colors)
if inp_img is not None:
original_h = inp_img.shape[0]
original_w = inp_img.shape[1]
seg_img = cv2.resize(seg_img, (original_w, original_h), interpolation=cv2.INTER_NEAREST)
if (prediction_height is not None) and (prediction_width is not None):
seg_img = cv2.resize(seg_img, (prediction_width, prediction_height), interpolation=cv2.INTER_NEAREST)
if inp_img is not None:
inp_img = cv2.resize(inp_img,
(prediction_width, prediction_height))
if overlay_img:
assert inp_img is not None
seg_img = overlay_seg_image(inp_img, seg_img)
if show_legends:
assert class_names is not None
legend_img = get_legends(class_names, colors=colors)
seg_img = concat_lenends(seg_img, legend_img)
return seg_img
def predict(model=None, inp=None, out_fname=None,
checkpoints_path=None, overlay_img=False,
class_names=None, show_legends=False, colors=class_colors,
prediction_width=None, prediction_height=None):
if model is None and (checkpoints_path is not None):
model = model_from_checkpoint_path(checkpoints_path)
assert (inp is not None)
assert ((type(inp) is np.ndarray) or isinstance(inp, six.string_types)),\
"Input should be the CV image or the input file name"
if isinstance(inp, six.string_types):
inp = cv2.imread(inp)
assert len(inp.shape) == 3, "Image should be h,w,3 "
output_width = model.output_width
output_height = model.output_height
input_width = model.input_width
input_height = model.input_height
n_classes = model.n_classes
x = get_image_array(inp, input_width, input_height,
ordering=IMAGE_ORDERING)
pr = model.predict(np.array([x]))[0]
pr = pr.reshape((output_height, output_width, n_classes)).argmax(axis=2)
seg_img = visualize_segmentation(pr, inp, n_classes=n_classes,
colors=colors, overlay_img=overlay_img,
show_legends=show_legends,
class_names=class_names,
prediction_width=prediction_width,
prediction_height=prediction_height)
if out_fname is not None:
cv2.imwrite(out_fname, seg_img)
return pr
def predict_multiple(model=None, inps=None, inp_dir=None, out_dir=None,
checkpoints_path=None, overlay_img=False,
class_names=None, show_legends=False, colors=class_colors,
prediction_width=None, prediction_height=None):
if model is None and (checkpoints_path is not None):
model = model_from_checkpoint_path(checkpoints_path)
if inps is None and (inp_dir is not None):
inps = glob.glob(os.path.join(inp_dir, "*.jpg")) + glob.glob(
os.path.join(inp_dir, "*.png")) + \
glob.glob(os.path.join(inp_dir, "*.jpeg"))
inps = sorted(inps)
assert type(inps) is list
all_prs = []
for i, inp in enumerate(tqdm(inps)):
if out_dir is None:
out_fname = None
else:
if isinstance(inp, six.string_types):
out_fname = os.path.join(out_dir, os.path.basename(inp))
else:
out_fname = os.path.join(out_dir, str(i) + ".jpg")
pr = predict(model, inp, out_fname,
overlay_img=overlay_img, class_names=class_names,
show_legends=show_legends, colors=colors,
prediction_width=prediction_width,
prediction_height=prediction_height)
all_prs.append(pr)
return all_prs
def set_video(inp, video_name):
cap = cv2.VideoCapture(inp)
fps = int(cap.get(cv2.CAP_PROP_FPS))
video_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
video_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
size = (video_width, video_height)
fourcc = cv2.VideoWriter_fourcc(*"XVID")
video = cv2.VideoWriter(video_name, fourcc, fps, size)
return cap, video, fps
def predict_video(model=None, inp=None, output=None,
checkpoints_path=None, display=False, overlay_img=True,
class_names=None, show_legends=False, colors=class_colors,
prediction_width=None, prediction_height=None):
if model is None and (checkpoints_path is not None):
model = model_from_checkpoint_path(checkpoints_path)
n_classes = model.n_classes
cap, video, fps = set_video(inp, output)
while(cap.isOpened()):
prev_time = time()
ret, frame = cap.read()
if frame is not None:
pr = predict(model=model, inp=frame)
fused_img = visualize_segmentation(
pr, frame, n_classes=n_classes,
colors=colors,
overlay_img=overlay_img,
show_legends=show_legends,
class_names=class_names,
prediction_width=prediction_width,
prediction_height=prediction_height
)
else:
break
print("FPS: {}".format(1/(time() - prev_time)))
if output is not None:
video.write(fused_img)
if display:
cv2.imshow('Frame masked', fused_img)
if cv2.waitKey(fps) & 0xFF == ord('q'):
break
cap.release()
if output is not None:
video.release()
cv2.destroyAllWindows()
def evaluate(model=None, inp_images=None, annotations=None,
inp_images_dir=None, annotations_dir=None, checkpoints_path=None):
if model is None:
assert (checkpoints_path is not None),\
"Please provide the model or the checkpoints_path"
model = model_from_checkpoint_path(checkpoints_path)
if inp_images is None:
assert (inp_images_dir is not None),\
"Please provide inp_images or inp_images_dir"
assert (annotations_dir is not None),\
"Please provide inp_images or inp_images_dir"
paths = get_pairs_from_paths(inp_images_dir, annotations_dir)
paths = list(zip(*paths))
inp_images = list(paths[0])
annotations = list(paths[1])
assert type(inp_images) is list
assert type(annotations) is list
tp = np.zeros(model.n_classes)
fp = np.zeros(model.n_classes)
fn = np.zeros(model.n_classes)
n_pixels = np.zeros(model.n_classes)
for inp, ann in tqdm(zip(inp_images, annotations)):
pr = predict(model, inp)
gt = get_segmentation_array(ann, model.n_classes,
model.output_width, model.output_height,
no_reshape=True)
gt = gt.argmax(-1)
pr = pr.flatten()
gt = gt.flatten()
for cl_i in range(model.n_classes):
tp[cl_i] += np.sum((pr == cl_i) * (gt == cl_i))
fp[cl_i] += np.sum((pr == cl_i) * ((gt != cl_i)))
fn[cl_i] += np.sum((pr != cl_i) * ((gt == cl_i)))
n_pixels[cl_i] += np.sum(gt == cl_i)
cl_wise_score = tp / (tp + fp + fn + 0.000000000001)
n_pixels_norm = n_pixels / np.sum(n_pixels)
frequency_weighted_IU = np.sum(cl_wise_score*n_pixels_norm)
mean_IU = np.mean(cl_wise_score)
return {
"frequency_weighted_IU": frequency_weighted_IU,
"mean_IU": mean_IU,
"class_wise_IU": cl_wise_score
}
```
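Typical use of the prediction helpers above, given a trained checkpoint (all paths are placeholders and the `segmentation` directory is assumed to be on the Python path):
```python
# Paths are placeholders; checkpoints_path is the prefix used during training.
from keras_segmentation.predict import predict, evaluate

pr = predict(
    checkpoints_path='checkpoints/vgg_unet_1',
    inp='dataset/images_test/0001.png',
    out_fname='out/0001_seg.png',
    overlay_img=True,
)
print(pr.shape)  # (output_height, output_width) array of class indices

scores = evaluate(
    checkpoints_path='checkpoints/vgg_unet_1',
    inp_images_dir='dataset/images_test/',
    annotations_dir='dataset/annotations_test/',
)
print(scores['mean_IU'])
```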
#### File: segmentation/keras_segmentation/train.py
```python
import json
from .data_utils.data_loader import image_segmentation_generator, \
verify_segmentation_dataset
import glob
import six
from keras.callbacks import Callback
def find_latest_checkpoint(checkpoints_path, fail_safe=True):
def get_epoch_number_from_path(path):
return path.replace(checkpoints_path, "").strip(".")
# Get all matching files
all_checkpoint_files = glob.glob(checkpoints_path + ".*")
    all_checkpoint_files = [ff.replace(".index", "") for ff in all_checkpoint_files]  # make it work for newer versions of keras
# Filter out entries where the epoc_number part is pure number
all_checkpoint_files = list(filter(lambda f: get_epoch_number_from_path(f)
.isdigit(), all_checkpoint_files))
if not len(all_checkpoint_files):
# The glob list is empty, don't have a checkpoints_path
if not fail_safe:
raise ValueError("Checkpoint path {0} invalid"
.format(checkpoints_path))
else:
return None
# Find the checkpoint file with the maximum epoch
latest_epoch_checkpoint = max(all_checkpoint_files,
key=lambda f:
int(get_epoch_number_from_path(f)))
return latest_epoch_checkpoint
def masked_categorical_crossentropy(gt, pr):
from keras.losses import categorical_crossentropy
mask = 1 - gt[:, :, 0]
return categorical_crossentropy(gt, pr) * mask
class CheckpointsCallback(Callback):
def __init__(self, checkpoints_path):
self.checkpoints_path = checkpoints_path
def on_epoch_end(self, epoch, logs=None):
if self.checkpoints_path is not None:
self.model.save_weights(self.checkpoints_path + "." + str(epoch))
print("saved ", self.checkpoints_path + "." + str(epoch))
def train(model,
train_images,
train_annotations,
input_height=None,
input_width=None,
n_classes=None,
verify_dataset=True,
checkpoints_path=None,
epochs=5,
batch_size=2,
validate=False,
val_images=None,
val_annotations=None,
val_batch_size=2,
auto_resume_checkpoint=False,
load_weights=None,
steps_per_epoch=512,
val_steps_per_epoch=512,
gen_use_multiprocessing=False,
ignore_zero_class=False,
optimizer_name='adam',
do_augment=False,
augmentation_name="aug_all"):
from .models.all_models import model_from_name
# check if user gives model name instead of the model object
if isinstance(model, six.string_types):
# create the model from the name
assert (n_classes is not None), "Please provide the n_classes"
if (input_height is not None) and (input_width is not None):
model = model_from_name[model](
n_classes, input_height=input_height, input_width=input_width)
else:
model = model_from_name[model](n_classes)
n_classes = model.n_classes
input_height = model.input_height
input_width = model.input_width
output_height = model.output_height
output_width = model.output_width
if validate:
assert val_images is not None
assert val_annotations is not None
if optimizer_name is not None:
if ignore_zero_class:
loss_k = masked_categorical_crossentropy
else:
loss_k = 'categorical_crossentropy'
model.compile(loss=loss_k,
optimizer=optimizer_name,
metrics=['accuracy'])
if checkpoints_path is not None:
with open(checkpoints_path+"_config.json", "w") as f:
json.dump({
"model_class": model.model_name,
"n_classes": n_classes,
"input_height": input_height,
"input_width": input_width,
"output_height": output_height,
"output_width": output_width
}, f)
if load_weights is not None and len(load_weights) > 0:
print("Loading weights from ", load_weights)
model.load_weights(load_weights)
if auto_resume_checkpoint and (checkpoints_path is not None):
latest_checkpoint = find_latest_checkpoint(checkpoints_path)
if latest_checkpoint is not None:
print("Loading the weights from latest checkpoint ",
latest_checkpoint)
model.load_weights(latest_checkpoint)
if verify_dataset:
print("Verifying training dataset")
verified = verify_segmentation_dataset(train_images,
train_annotations,
n_classes)
assert verified
if validate:
print("Verifying validation dataset")
verified = verify_segmentation_dataset(val_images,
val_annotations,
n_classes)
assert verified
train_gen = image_segmentation_generator(
train_images, train_annotations, batch_size, n_classes,
input_height, input_width, output_height, output_width,
do_augment=do_augment, augmentation_name=augmentation_name)
if validate:
val_gen = image_segmentation_generator(
val_images, val_annotations, val_batch_size,
n_classes, input_height, input_width, output_height, output_width)
callbacks = [
CheckpointsCallback(checkpoints_path)
]
if not validate:
model.fit_generator(train_gen, steps_per_epoch,
epochs=epochs, callbacks=callbacks)
else:
model.fit_generator(train_gen,
steps_per_epoch,
validation_data=val_gen,
validation_steps=val_steps_per_epoch,
epochs=epochs, callbacks=callbacks,
use_multiprocessing=gen_use_multiprocessing)
``` |
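A minimal training sketch for the `train()` helper above. The dataset paths are placeholders, and `'vgg_unet'` is assumed to be one of the names registered in `keras_segmentation.models.all_models.model_from_name`:
```python
# Dataset paths are placeholders; 'vgg_unet' is an assumed registered model name.
from keras_segmentation.train import train

train(
    'vgg_unet',
    n_classes=51,
    input_height=416,
    input_width=608,
    train_images='dataset/images_train/',
    train_annotations='dataset/annotations_train/',
    checkpoints_path='checkpoints/vgg_unet_1',
    epochs=5,
)
```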
{
"source": "johndpope/text2image_manipulation",
"score": 3
} |
#### File: mapper/dataset/latents_dataset.py
```python
from torch.utils.data import Dataset
class LatentsDataset(Dataset):
def __init__(self, latents, opts):
self.latents = latents
self.opts = opts
def __len__(self):
return self.latents.shape[0]
def __getitem__(self, index):
w_ori = self.latents[index]
t = "blonde hair"
return [w_ori, t]
```
#### File: text2image_manipulation/mapper/latent_mappers.py
```python
import torch
from torch import nn
from torch.nn import Module
from models.stylegan2.model import EqualLinear, PixelNorm
class Mapper(Module):
def __init__(self, opts):
super(Mapper, self).__init__()
self.opts = opts
layers = [PixelNorm()]
for i in range(4):
layers.append(
EqualLinear(
512, 512, lr_mul=0.01, activation='fused_lrelu' # vector size
)
)
self.mapping = nn.Sequential(*layers)
def forward(self, x):
x = self.mapping(x)
return x
class Mapper_cat(Module):
def __init__(self, opts):
super(Mapper_cat, self).__init__()
self.opts = opts
layers = [PixelNorm()]
for i in range(7):
layers.append(
EqualLinear(
1024, 1024, lr_mul=0.01, activation='fused_lrelu' # vector size
)
)
layers.append(
EqualLinear(
1024, 512, lr_mul=0.01, activation='fused_lrelu' # vector size
)
)
self.mapping = nn.Sequential(*layers)
def forward(self, x):
x = self.mapping(x)
return x
class Mapper_sum(Module):
def __init__(self, opts):
super(Mapper_sum, self).__init__()
self.opts = opts
layers = [PixelNorm()]
for i in range(8):
layers.append(
EqualLinear(
512, 512, lr_mul=0.01, activation='fused_lrelu' # vector size
)
)
self.mapping = nn.Sequential(*layers)
def forward(self, x):
x = self.mapping(x)
return x
class Mapper_multi(Module):
def __init__(self, opts):
super(Mapper_multi, self).__init__()
self.opts = opts
layers = [PixelNorm()]
layers.append(
EqualLinear(
512, 1024, lr_mul=0.01, activation='fused_lrelu' # vector size
)
)
for i in range(8):
layers.append(
EqualLinear(
1024, 1024, lr_mul=0.01, activation='fused_lrelu' # vector size
)
)
layers.append(
EqualLinear(
1024, 512, lr_mul=0.01, activation='fused_lrelu' # vector size
)
)
self.mapping = nn.Sequential(*layers)
def forward(self, x):
x = self.mapping(x)
return x
class SingleMapper(Module):
def __init__(self, opts):
super(SingleMapper, self).__init__()
self.opts = opts
self.mapping = Mapper(opts)
def forward(self, x):
out = self.mapping(x)
return out
class LevelsMapper(Module):
def __init__(self, opts):
super(LevelsMapper, self).__init__()
self.mapper_mode = opts.mapper_mode
self.device = "cuda" if torch.cuda.is_available() else "cpu"
self.opts = opts
        mapper_classes = {
            'Mapper': Mapper,
            'Mapper_cat': Mapper_cat,
            'Mapper_sum': Mapper_sum,
            'Mapper_multi': Mapper_multi,
        }
        mapper_cls = mapper_classes[opts.mapper_mode]
        if not opts.no_coarse_mapper:
            self.course_mapping = mapper_cls(opts)
        if not opts.no_medium_mapper:
            self.medium_mapping = mapper_cls(opts)
        if not opts.no_fine_mapper:
            self.fine_mapping = mapper_cls(opts)
def forward(self, x):
s1,s2,s3 = x.size()
x_coarse = x[:, :4, :]
x_medium = x[:, 4:8, :]
x_fine = x[:, 8:, :]
if not self.opts.no_coarse_mapper:
x_coarse = self.course_mapping(x_coarse)
else:
x_coarse = torch.zeros([s1,4,512]).to(self.device)
if not self.opts.no_medium_mapper:
x_medium = self.medium_mapping(x_medium)
else:
x_medium = torch.zeros([s1,4,512]).to(self.device)
if not self.opts.no_fine_mapper:
x_fine = self.fine_mapping(x_fine)
else:
x_fine = torch.zeros([s1,10,512]).to(self.device)
out = torch.cat([x_coarse, x_medium, x_fine], dim=1)
return out
``` |
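A shape check for `LevelsMapper` (the `opts` fields mirror the attributes read in `__init__`; the import path is assumed, and the fused StyleGAN2 ops used by `EqualLinear` must be available):
```python
# Shape check only; opts fields mirror what LevelsMapper.__init__ reads.
import torch
from argparse import Namespace
from mapper.latent_mappers import LevelsMapper  # assumed import path

opts = Namespace(mapper_mode='Mapper', no_coarse_mapper=False,
                 no_medium_mapper=False, no_fine_mapper=False)
mapper = LevelsMapper(opts)
w_plus = torch.randn(2, 18, 512)   # 4 coarse + 4 medium + 10 fine style vectors
delta = mapper(w_plus)
print(delta.shape)                 # torch.Size([2, 18, 512])
```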
{
"source": "johndpotts/movie-trailer-website-generator",
"score": 3
} |
#### File: johndpotts/movie-trailer-website-generator/media.py
```python
import webbrowser
import urllib
import json
import os
# The user needs to obtain API keys for The Movie Database and YouTube
# and save them as the TMDB_API and YOUTUBE_API environment variables.
YOUTUBE_API =(os.environ['YOUTUBE_API'])
TMDB_API = (os.environ['TMDB_API'])
class Movie():
def __init__(self, movie_title):
self.title = movie_title
# call up tmdb api to load poster and overview for each movie
tmdb_connection = urllib.urlopen("https://api.themoviedb.org/3/search/movie?api_key="+TMDB_API+"&query="+movie_title) # NOQA
tmdb_json_data = tmdb_connection.read()
tmdb_results = (json.loads(tmdb_json_data))
self.summary = tmdb_results["results"][0]["overview"]
self.poster_image_url = "https://image.tmdb.org/t/p/w640"+tmdb_results["results"][0]["poster_path"] # NOQA
# call up youtube API to search for a trailer for each movie
youtube_connection = urllib.urlopen("https://www.googleapis.com/youtube/v3/search?type=video&part=snippet&q="+movie_title+"official%20trailer&maxResults=5&key="+YOUTUBE_API) # NOQA
youtube_json_data = youtube_connection.read()
youtube_results = (json.loads(youtube_json_data))
self.trailer_youtube_url = "https://www.youtube.com/watch?v="+youtube_results["items"][0]["id"]["videoId"] # NOQA
def show_trailer(self):
webbrowser.open(self.trailer_youtube_url)
``` |
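Usage of the `Movie` class above (Python 2 only; the `TMDB_API` and `YOUTUBE_API` environment variables must be set, and the module name is assumed to be `media`):
```python
# Python 2 usage sketch; requires TMDB_API and YOUTUBE_API in the environment.
from media import Movie  # assumed module name

movie = Movie('Toy Story')
print(movie.summary)
print(movie.poster_image_url)
movie.show_trailer()  # opens the first YouTube search hit in a browser
```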
{
"source": "john-drago/fluoro",
"score": 2
} |
#### File: vox_fluoro/vox_fluoro_img_stnd_hyperas/vox_fluoro_img_stnd_hyperas.py
```python
import numpy as np
import h5py
import tensorflow as tf
import keras
import os
import json
import csv
from hyperopt import Trials, STATUS_OK, tpe
from hyperas import optim
from hyperas.distributions import choice, uniform
expr_no = '1'
save_dir = os.path.abspath(os.path.join(os.path.expanduser('~/fluoro/code/hyperparameter/vox_fluoro'), 'vox_fluoro_img_stnd_hyperas'))
os.makedirs(save_dir, exist_ok=True)
def data_comp(first_indx=None, last_indx=None, num_of_samples=5):
vox_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/voxels_pad.h5py'), 'r')
vox_init = vox_file['vox_dset']
image_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/images.h5py'), 'r')
image_init = image_file['image_dset']
label_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/labels.h5py'), 'r')
label_init = label_file['labels_dset']
cali_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/calibration.h5py'), 'r')
cali_init = cali_file['cali_len3_rot']
def split_train_test(shape, num_of_samples=5, ratio=0.2):
if num_of_samples is None:
shuffled_indices = np.random.choice(shape, size=shape, replace=False)
else:
shuffled_indices = np.random.choice(shape, size=num_of_samples, replace=False)
        test_set_size = int(len(shuffled_indices) * ratio)
test_indx = shuffled_indices[:test_set_size]
train_indx = shuffled_indices[test_set_size:]
return test_indx, train_indx
test_indxs, train_indxs = split_train_test(len(label_init), num_of_samples=5)
test_indxs = sorted(list(test_indxs))
train_indxs = sorted(list(train_indxs))
vox_mat_train = vox_init[:]
vox_mat_train = vox_mat_train[train_indxs]
vox_file.close()
image_mat_train = image_init[:]
image_mat_train = image_mat_train[train_indxs]
image_file.close()
cali_mat_train = cali_init[:]
cali_mat_train = cali_mat_train[train_indxs]
cali_file.close()
label_mat_train = label_init[:]
label_mat_train = label_mat_train[train_indxs]
label_file.close()
return vox_mat_train, image_mat_train, cali_mat_train, label_mat_train
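# Note: data_comp above reads each full HDF5 dataset into memory (the `[:]` slice)
# before selecting the training rows, so this assumes the padded voxel array fits in RAM.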
def fluoro_model(vox_mat_train, image_mat_train, cali_mat_train, label_mat_train):
channel_order = 'channels_last'
img_input_shape = (128, 128, 1)
vox_input_shape = (199, 164, 566, 1)
cali_input_shape = (6,)
def cust_mean_squared_error_var(y_true, y_pred):
base_dir = os.path.expanduser('~/fluoro/data/compilation')
stats_file = h5py.File(os.path.join(base_dir, 'labels_stats.h5py'), 'r')
var_dset = stats_file['var']
var_v = var_dset[:]
stats_file.close()
return tf.keras.backend.mean(tf.keras.backend.square(y_pred - y_true) / var_v)
v_conv_1_filters = {{choice([20, 30, 40])}}
v_conv_1_kernel = {{choice([5, 7, 11, 13, 21])}}
v_conv_1_strides = {{choice([1, 2])}}
v_conv_1_pad = 'same'
v_spatial_drop_rate_1 = {{uniform(0, 1)}}
v_pool_1_size = {{choice([2, 3])}}
v_pool_1_pad = 'same'
v_conv_2_filters = {{choice([40, 50, 60, 80])}}
v_conv_2_kernel = {{choice([5, 7, 11])}}
v_conv_2_strides = {{choice([1, 2])}}
v_conv_2_pad = 'same'
v_spatial_drop_rate_2 = {{uniform(0, 1)}}
v_pool_2_size = {{choice([2, 3])}}
v_pool_2_pad = 'same'
v_conv_3_filters = {{choice([40, 50, 60, 80])}}
v_conv_3_kernel = {{choice([3, 5, 7])}}
v_conv_3_strides = {{choice([1, 2])}}
v_conv_3_pad = 'same'
v_spatial_drop_rate_3 = {{uniform(0, 1)}}
v_pool_3_size = {{choice([2, 3])}}
v_pool_3_pad = 'same'
dense_1_v_units = {{choice([750, 1000, 1500])}}
dense_2_v_units = {{choice([500, 750, 1000])}}
dense_3_v_units = {{choice([250, 500, 750])}}
conv_1_filters = {{choice([20, 30, 40, 50, 60])}}
conv_1_kernel = {{choice([3, 5, 7])}}
conv_1_strides = {{choice([1, 2])}}
conv_1_pad = 'same'
spatial_drop_rate_1 = {{uniform(0, 1)}}
pool_1_size = {{choice([2, 3])}}
pool_1_pad = 'same'
conv_2_filters = {{choice([40, 50, 60, 80])}}
conv_2_kernel = {{choice([3, 5, 7])}}
conv_2_strides = {{choice([1, 2])}}
conv_2_pad = 'same'
spatial_drop_rate_2 = {{uniform(0, 1)}}
pool_2_size = {{choice([2, 3])}}
pool_2_pad = 'same'
conv_3_filters = {{choice([40, 50, 60, 80])}}
conv_3_kernel = {{choice([3, 5, 7])}}
conv_3_strides = {{choice([1, 2])}}
conv_3_pad = 'same'
pool_3_size = {{choice([2, 3])}}
pool_3_pad = 'same'
dense_1_f_units = {{choice([40, 60, 80])}}
dense_2_f_units = {{choice([40, 60, 80])}}
dense_3_f_units = {{choice([40, 60, 80])}}
dense_1_cali_units = {{choice([10, 20, 30])}}
dense_2_cali_units = {{choice([10, 20, 30])}}
dense_1_co_units = {{choice([60, 80, 100, 200])}}
drop_1_comb_rate = {{uniform(0, 1)}}
dense_2_co_units = {{choice([20, 40, 60])}}
dense_3_co_units = {{choice([20, 40, 60])}}
dense_4_co_units = {{choice([20, 40, 60])}}
main_output_units = 6
main_output_act = 'linear'
conv_regularizer = keras.regularizers.l1_l2(l1={{uniform(0, 1)}}, l2={{uniform(0, 1)}})
dense_regularizer_1 = keras.regularizers.l1_l2(l1={{uniform(0, 1)}}, l2={{uniform(0, 1)}})
dense_regularizer_2 = keras.regularizers.l1_l2(l1={{uniform(0, 1)}}, l2={{uniform(0, 1)}})
activation_fn = {{choice(['elu', 'relu'])}}
kern_init = {{choice(['glorot_uniform', 'glorot_normal'])}}
model_opt = {{choice(['adam', 'nadam', 'adagrad', 'rmsprop'])}}
model_epochs = {{choice([30, 40, 50])}}
model_batchsize = 3
model_loss = cust_mean_squared_error_var
model_metric = cust_mean_squared_error_var
input_vox = tf.keras.Input(shape=vox_input_shape, name='input_vox', dtype='float32')
input_fluoro_1 = tf.keras.Input(shape=img_input_shape, name='input_fluoro_1', dtype='float32')
input_fluoro_2 = tf.keras.Input(shape=img_input_shape, name='input_fluoro_2', dtype='float32')
input_cali = tf.keras.Input(shape=cali_input_shape, name='input_cali', dtype='float32')
v_conv_1 = tf.keras.layers.Conv3D(filters=v_conv_1_filters, kernel_size=v_conv_1_kernel, strides=v_conv_1_strides, padding=v_conv_1_pad, data_format=channel_order, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=conv_regularizer)(input_vox)
v_spat_1 = tf.keras.layers.SpatialDropout3D(rate=v_spatial_drop_rate_1)(v_conv_1)
v_pool_1 = tf.keras.layers.MaxPooling3D(pool_size=v_pool_1_size, padding=v_pool_1_pad, data_format=channel_order)(v_spat_1)
v_bn_2 = tf.keras.layers.BatchNormalization()(v_pool_1)
v_conv_2 = tf.keras.layers.Conv3D(filters=v_conv_2_filters, kernel_size=v_conv_2_kernel, strides=v_conv_2_strides, padding=v_conv_2_pad, data_format=channel_order, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=conv_regularizer)(v_bn_2)
v_spat_2 = tf.keras.layers.SpatialDropout3D(rate=v_spatial_drop_rate_2)(v_conv_2)
v_pool_2 = tf.keras.layers.MaxPooling3D(pool_size=v_pool_2_size, padding=v_pool_2_pad, data_format=channel_order)(v_spat_2)
v_conv_3 = tf.keras.layers.Conv3D(filters=v_conv_3_filters, kernel_size=v_conv_3_kernel, strides=v_conv_3_strides, padding=v_conv_3_pad, data_format=channel_order, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=conv_regularizer)(v_pool_2)
v_spat_3 = tf.keras.layers.SpatialDropout3D(rate=v_spatial_drop_rate_3)(v_conv_3)
v_pool_3 = tf.keras.layers.MaxPooling3D(pool_size=v_pool_3_size, padding=v_pool_3_pad, data_format=channel_order)(v_spat_3)
v_flatten_1 = tf.keras.layers.Flatten()(v_pool_3)
dense_1_v = tf.keras.layers.Dense(units=dense_1_v_units, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=dense_regularizer_1)(v_flatten_1)
dense_2_v = tf.keras.layers.Dense(units=dense_2_v_units, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=dense_regularizer_1)(dense_1_v)
dense_3_v = tf.keras.layers.Dense(units=dense_3_v_units, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=dense_regularizer_1)(dense_2_v)
per_image_stand_1 = tf.keras.layers.Lambda(lambda frame: tf.image.per_image_standardization(frame))(input_fluoro_1)
conv_1_1 = tf.keras.layers.Conv2D(filters=conv_1_filters, kernel_size=conv_1_kernel, strides=conv_1_strides, padding=conv_1_pad, data_format=channel_order, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=conv_regularizer)(per_image_stand_1)
spat_1_1 = tf.keras.layers.SpatialDropout2D(rate=spatial_drop_rate_1)(conv_1_1)
pool_1_1 = tf.keras.layers.MaxPooling2D(pool_size=pool_1_size, padding=pool_1_pad, data_format=channel_order)(spat_1_1)
conv_2_1 = tf.keras.layers.Conv2D(filters=conv_2_filters, kernel_size=conv_2_kernel, strides=conv_2_strides, padding=conv_2_pad, data_format=channel_order, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=conv_regularizer)(pool_1_1)
spat_2_1 = tf.keras.layers.SpatialDropout2D(rate=spatial_drop_rate_2)(conv_2_1)
pool_2_1 = tf.keras.layers.MaxPooling2D(pool_size=pool_2_size, padding=pool_2_pad, data_format=channel_order)(spat_2_1)
conv_3_1 = tf.keras.layers.Conv2D(filters=conv_3_filters, kernel_size=conv_3_kernel, strides=conv_3_strides, padding=conv_3_pad, data_format=channel_order, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=conv_regularizer)(pool_2_1)
pool_3_1 = tf.keras.layers.MaxPooling2D(pool_size=pool_3_size, padding=pool_3_pad, data_format=channel_order)(conv_3_1)
flatten_1_1 = tf.keras.layers.Flatten()(pool_3_1)
dense_1_f_1 = tf.keras.layers.Dense(units=dense_1_f_units, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=dense_regularizer_1)(flatten_1_1)
dense_2_f_1 = tf.keras.layers.Dense(units=dense_2_f_units, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=dense_regularizer_1)(dense_1_f_1)
dense_3_f_1 = tf.keras.layers.Dense(units=dense_3_f_units, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=dense_regularizer_1)(dense_2_f_1)
per_image_stand_2 = tf.keras.layers.Lambda(lambda frame: tf.image.per_image_standardization(frame))(input_fluoro_2)
conv_1_2 = tf.keras.layers.Conv2D(filters=conv_1_filters, kernel_size=conv_1_kernel, strides=conv_1_strides, padding=conv_1_pad, data_format=channel_order, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=conv_regularizer)(per_image_stand_2)
spat_1_2 = tf.keras.layers.SpatialDropout2D(rate=spatial_drop_rate_1)(conv_1_2)
pool_1_2 = tf.keras.layers.MaxPooling2D(pool_size=pool_1_size, padding=pool_1_pad, data_format=channel_order)(spat_1_2)
conv_2_2 = tf.keras.layers.Conv2D(filters=conv_2_filters, kernel_size=conv_2_kernel, strides=conv_2_strides, padding=conv_2_pad, data_format=channel_order, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=conv_regularizer)(pool_1_2)
spat_2_2 = tf.keras.layers.SpatialDropout2D(rate=spatial_drop_rate_2)(conv_2_2)
pool_2_2 = tf.keras.layers.MaxPooling2D(pool_size=pool_2_size, padding=pool_2_pad, data_format=channel_order)(spat_2_2)
conv_3_2 = tf.keras.layers.Conv2D(filters=conv_3_filters, kernel_size=conv_3_kernel, strides=conv_3_strides, padding=conv_3_pad, data_format=channel_order, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=conv_regularizer)(pool_2_2)
pool_3_2 = tf.keras.layers.MaxPooling2D(pool_size=pool_3_size, padding=pool_3_pad, data_format=channel_order)(conv_3_2)
flatten_1_2 = tf.keras.layers.Flatten()(pool_3_2)
dense_1_f_2 = tf.keras.layers.Dense(units=dense_1_f_units, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=dense_regularizer_1)(flatten_1_2)
dense_2_f_2 = tf.keras.layers.Dense(units=dense_2_f_units, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=dense_regularizer_1)(dense_1_f_2)
dense_3_f_2 = tf.keras.layers.Dense(units=dense_3_f_units, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=dense_regularizer_1)(dense_2_f_2)
dense_1_cali = tf.keras.layers.Dense(units=dense_1_cali_units, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=dense_regularizer_1)(input_cali)
dense_2_cali = tf.keras.layers.Dense(units=dense_2_cali_units, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=dense_regularizer_1)(dense_1_cali)
dense_0_comb = tf.keras.layers.concatenate([dense_3_v, dense_3_f_1, dense_3_f_2, dense_2_cali])
dense_1_comb = tf.keras.layers.Dense(units=dense_1_co_units, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=dense_regularizer_2)(dense_0_comb)
dense_drop_1 = tf.keras.layers.Dropout(rate=drop_1_comb_rate)(dense_1_comb)
dense_2_comb = tf.keras.layers.Dense(units=dense_2_co_units, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=dense_regularizer_2)(dense_drop_1)
dense_3_comb = tf.keras.layers.Dense(units=dense_3_co_units, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=dense_regularizer_2)(dense_2_comb)
dense_4_comb = tf.keras.layers.Dense(units=dense_4_co_units, activation=activation_fn, kernel_initializer=kern_init, activity_regularizer=dense_regularizer_2)(dense_3_comb)
main_output = tf.keras.layers.Dense(units=main_output_units, activation=main_output_act, kernel_initializer=kern_init, name='main_output')(dense_4_comb)
model = tf.keras.Model(inputs=[input_vox, input_fluoro_1, input_fluoro_2, input_cali], outputs=main_output)
model.compile(optimizer=model_opt, loss=model_loss, metrics=[model_metric])
result = model.fit(x={'input_vox': np.expand_dims(vox_mat_train, axis=-1), 'input_fluoro_1': np.expand_dims(image_mat_train[:, 0, :, :], axis=-1), 'input_fluoro_2': np.expand_dims(image_mat_train[:, 1, :, :], axis=-1), 'input_cali': cali_mat_train}, y=label_mat_train, validation_split=0.2, epochs=model_epochs, batch_size=model_batchsize, shuffle=True, verbose=False)
return {'loss': np.amin(result.history['loss']), 'status': STATUS_OK, 'model': model}
best_run, best_model = optim.minimize(model=fluoro_model, data=data_comp, algo=tpe.suggest, max_evals=3, trials=Trials())
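# Hyperas note: optim.minimize re-parses the source of fluoro_model and data_comp,
# expands the {{choice(...)}} / {{uniform(...)}} templates above into a hyperopt
# search space, runs max_evals trials, and returns the best parameter dict plus the
# corresponding trained model.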
json1 = json.dumps(best_run)
f = open(os.path.abspath(os.path.join(save_dir, 'best_run_hyperas.json')), 'w')
f.write(json1)
f.close()
w = csv.writer(open(os.path.abspath(os.path.join(save_dir, 'best_run_hyperas.csv')), 'w'))
for key, val in best_run.items():
    w.writerow([key, val])
best_model.save(os.path.abspath(os.path.join(save_dir, 'vox_fluoro_img_stnd_hyperas' + '_' + 'best_model_hyperas.h5')))
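
# Hedged reload sketch (assumption: the custom loss must be available at module
# scope under the same name when the saved model is loaded):
#
#   best_model = tf.keras.models.load_model(
#       os.path.join(save_dir, 'vox_fluoro_img_stnd_hyperas_best_model_hyperas.h5'),
#       custom_objects={'cust_mean_squared_error_var': cust_mean_squared_error_var})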
```
#### File: code/scratch/graphical_rotation_of_frame.py
```python
import numpy as np
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D  # noqa: F401 -- registers the '3d' projection
from matplotlib.patches import FancyArrowPatch
from mpl_toolkits.mplot3d import proj3d
class Arrow3D(FancyArrowPatch):
    def __init__(self, xs, ys, zs, *args, **kwargs):
        FancyArrowPatch.__init__(self, (0, 0), (0, 0), *args, **kwargs)
        self._verts3d = xs, ys, zs

    def draw(self, renderer):
        xs3d, ys3d, zs3d = self._verts3d
        xs, ys, zs = proj3d.proj_transform(xs3d, ys3d, zs3d, renderer.M)
        self.set_positions((xs[0], ys[0]), (xs[1], ys[1]))
        FancyArrowPatch.draw(self, renderer)
####################################################
# For reference only: this block just draws the base
# coordinate frame (unit x, y, z arrows at the origin)
# so you can see where the data is coming from.
# The plot call is at the bottom.
#####################################################
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(0, 0, 0, c='black', s=50)
x = Arrow3D([0, 1], [0, 0], [0, 0], color=(0.5, 0, 0))
ax.add_artist(x)
y = Arrow3D([0, 0], [0, 1], [0, 0], color=(0, 0.5, 0))
ax.add_artist(y)
z = Arrow3D([0, 0], [0, 0], [0, 1], color=(0, 0, 0.5))
ax.add_artist(z)
plt.show()
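# Hedged sketch of the "rotation of frame" the file name refers to (assumption:
# rotate the unit axes about z by an angle and draw the rotated frame the same way):
#
#   theta = np.deg2rad(30)
#   Rz = np.array([[np.cos(theta), -np.sin(theta), 0],
#                  [np.sin(theta),  np.cos(theta), 0],
#                  [0,              0,             1]])
#   for basis, color in zip(np.eye(3), [(0.5, 0, 0), (0, 0.5, 0), (0, 0, 0.5)]):
#       tip = Rz @ basis
#       ax.add_artist(Arrow3D([0, tip[0]], [0, tip[1]], [0, tip[2]], color=color))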
```
#### File: code/scratch/unsup_segmentation.py
```python
import numpy as np
import h5py
import tensorflow as tf
import keras
import os
import sys
from sklearn.model_selection import train_test_split
from sklearn.cluster import KMeans
expr_name = sys.argv[0][:-3]
expr_no = '2'
save_dir = os.path.abspath(os.path.join(os.path.expanduser('~/fluoro/code/scratch/unsup_seg'), expr_name))
os.makedirs(save_dir, exist_ok=True)
def data_comp(number_of_samples=None):
    # vox_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/voxels_pad.h5py'), 'r')
    # vox_init = vox_file['vox_dset']
    # vox_mat = vox_init[:number_of_samples]
    # vox_file.close()
    image_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/images.h5py'), 'r')
    image_init = image_file['image_dset']
    image_mat = image_init[:number_of_samples]
    image_file.close()
    # label_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/labels.h5py'), 'r')
    # label_init = label_file['labels_dset']
    # label_mat = label_init[:number_of_samples]
    # label_file.close()
    # cali_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/calibration.h5py'), 'r')
    # cali_init = cali_file['cali_len3_rot']
    # cali_mat = cali_init[:number_of_samples]
    # cali_file.close()
    image_train_cum, image_test = train_test_split(image_mat, shuffle=True, test_size=0.2, random_state=42)
    # print('Image mat size:',image_mat.shape)
    # print('Label mat size:',label_mat.shape)
    # print('Cali mat size:',cali_mat.shape)
    # print('Image cum size:',image_train_cum.shape)
    # print('Label cum size:',label_train_cum.shape)
    # print('Cali cum size:',cali_train_cum.shape)
    # print('Image test size:',image_test.shape)
    # print('Label test size:',label_test.shape)
    # print('Cali test size:',cali_test.shape)
    image_train_sub, image_val = train_test_split(image_train_cum, shuffle=True, test_size=0.2, random_state=42)
    print('Image sub size:', image_train_sub.shape)
    # print('Label sub size:', label_train_sub.shape)
    # print('Cali sub size:', cali_train_sub.shape)
    print('Image val size:', image_val.shape)
    # print('Label val size:', label_val.shape)
    # print('Cali val size:', cali_val.shape)
    # print(vox_mat.shape, image_mat.shape, cali_mat.shape)
    return image_train_sub, image_val
    # return image_train_cum, cali_train_cum, label_train_cum
# -----------------------------------------------------------------
class KMeansLayer(keras.layers.Layer):
    def __init__(self, clusters=8, n_init=5, trainable=False, **kwargs):
        super(KMeansLayer, self).__init__(**kwargs)
        self.clusters = clusters
        self.n_init = n_init

    def build(self, input_shape):
        # self.input_shape = input_shape
        # print(input_shape[0])
        self.output_s = (input_shape[0], input_shape[1], input_shape[2], 1)
        self.depth = input_shape[3]
        self.built = True

    def call(self, inputs):
        # Placeholder output kept from the scratch version; it is replaced by the py_func result below.
        output = tf.Variable(initial_value=tf.keras.backend.random_uniform(shape=(6, 128, 128, 1)), dtype=tf.float32, trainable=False, validate_shape=False)
        # output=tf.Variable(initial_value=tf.keras.backend.random_uniform(shape=tf.convert_to_tensor(inputs.get_shape()[0],inputs.get_shape()[1],inputs.get_shape()[2],1)),dtype=tf.float32)

        def KMeansFunc(input_tens, clusters=self.clusters, n_init=self.n_init):
            # Replace every pixel with the mean value of its k-means cluster, one frame at a time.
            base_mat = np.zeros((input_tens.shape[0], input_tens.shape[1], input_tens.shape[2]))
            for frame in range(input_tens.shape[0]):
                init_mat = np.zeros((input_tens.shape[1] * input_tens.shape[2]))
                print(init_mat.shape)
                reshape_mat = np.reshape(input_tens[frame], (input_tens.shape[1] * input_tens.shape[2], input_tens.shape[3]))
                print(reshape_mat.shape)
                kmeans_init = KMeans(n_clusters=clusters, n_init=n_init)
                class_pred = kmeans_init.fit_predict(reshape_mat)
                for clust in range(clusters):
                    # The second assignment (mean over all channels) overwrites the per-channel mean.
                    init_mat[class_pred == clust] = np.mean(reshape_mat[class_pred == clust], axis=1)
                    init_mat[class_pred == clust] = np.mean(reshape_mat[class_pred == clust], None)
                print(base_mat.shape)
                base_mat[frame] = np.reshape(init_mat, (input_tens.shape[1], input_tens.shape[2]))
            return np.expand_dims(base_mat, axis=-1).astype('float32')

        # tf.py_func wraps the NumPy/scikit-learn routine; its output carries no gradient.
        output = tf.py_func(KMeansFunc, [inputs], tf.float32)
        # output=tf.placeholder(shape=(inputs.get_shape()[0],inputs.get_shape()[1],inputs.get_shape()[2],1),dtype=tf.float32)
        return output

    def compute_output_shape(self, input_shape):
        return self.output_s
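# Hedged usage sketch for the layer above (assumption: it is meant to turn a feature
# map into a cluster-mean image to act as a self-supervision target; because
# tf.py_func has no gradient, layers upstream of it cannot be trained through this path):
#
#   seg_map = KMeansLayer(clusters=8, n_init=5)(feature_map)   # -> (batch, H, W, 1)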
# -----------------------------------------------------------------
channel_order = 'channels_last'
img_input_shape = (128, 128, 1)
# vox_input_shape = (198, 162, 564, 1)
# cali_input_shape = (6,)
def root_mean_squared_error(y_true, y_pred):
    return keras.backend.sqrt(keras.backend.mean(keras.backend.square(y_pred - y_true)))
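# Drop-in Keras loss/metric, e.g. model.compile(optimizer=..., loss=root_mean_squared_error);
# this scratch script ends up compiling with params['model_loss'] = 'mse' below instead.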
params = {
# 2D CONV
'conv_1_1_filters': 64,
'conv_1_1_kernel': 5,
'conv_1_1_strides': 1,
'conv_1_1_pad': 'same',
'spatial_drop_rate_1_1': 0.5,
'conv_1_2_filters': 64,
'conv_1_2_kernel': 3,
'conv_1_2_strides': 1,
'conv_1_2_pad': 'same',
'spatial_drop_rate_1_2': 0.5,
'pool_1_size': 2,
'pool_1_pad': 'same',
'conv_2_1_filters': 128,
'conv_2_1_kernel': 3,
'conv_2_1_strides': 1,
'conv_2_1_pad': 'same',
'spatial_drop_rate_2_1': 0.5,
'conv_2_2_filters': 128,
'conv_2_2_kernel': 3,
'conv_2_2_strides': 1,
'conv_2_2_pad': 'same',
'spatial_drop_rate_2_2': 0.5,
'pool_2_size': 2,
'pool_2_pad': 'same',
'conv_3_1_filters': 256,
'conv_3_1_kernel': 3,
'conv_3_1_strides': 1,
'conv_3_1_pad': 'same',
'spatial_drop_rate_3_1': 0.5,
'conv_3_2_filters': 256,
'conv_3_2_kernel': 3,
'conv_3_2_strides': 1,
'conv_3_2_pad': 'same',
'spatial_drop_rate_3_2': 0.5,
'pool_3_size': 2,
'pool_3_pad': 'same',
'conv_4_1_filters': 512,
'conv_4_1_kernel': 3,
'conv_4_1_strides': 1,
'conv_4_1_pad': 'same',
'spatial_drop_rate_4_1': 0.5,
'conv_4_2_filters': 512,
'conv_4_2_kernel': 3,
'conv_4_2_strides': 1,
'conv_4_2_pad': 'same',
'spatial_drop_rate_4_2': 0.5,
'pool_4_size': 2,
'pool_4_pad': 'same',
'conv_5_1_filters': 1024,
'conv_5_1_kernel': 3,
'conv_5_1_strides': 1,
'conv_5_1_pad': 'same',
'conv_5_2_filters': 1024,
'conv_5_2_kernel': 3,
'conv_5_2_strides': 1,
'conv_5_2_pad': 'same',
'up_conv_1_filters': 512,
'up_conv_1_kernel': 2,
'up_conv_1_strides': 1,
'up_conv_1_pad': 'same',
'up_1_size': 2,
'up_1_int': 'bilinear',
'conv_6_1_filters': 512,
'conv_6_1_kernel': 3,
'conv_6_1_strides': 1,
'conv_6_1_pad': 'same',
'conv_6_2_filters': 512,
'conv_6_2_kernel': 3,
'conv_6_2_strides': 1,
'conv_6_2_pad': 'same',
'up_conv_2_filters': 256,
'up_conv_2_kernel': 2,
'up_conv_2_strides': 1,
'up_conv_2_pad': 'same',
'up_2_size': 2,
'up_2_int': 'bilinear',
'conv_7_1_filters': 256,
'conv_7_1_kernel': 3,
'conv_7_1_strides': 1,
'conv_7_1_pad': 'same',
'conv_7_2_filters': 256,
'conv_7_2_kernel': 3,
'conv_7_2_strides': 1,
'conv_7_2_pad': 'same',
'up_conv_3_filters': 128,
'up_conv_3_kernel': 2,
'up_conv_3_strides': 1,
'up_conv_3_pad': 'same',
'up_3_size': 2,
'up_3_int': 'bilinear',
'conv_8_1_filters': 128,
'conv_8_1_kernel': 3,
'conv_8_1_strides': 1,
'conv_8_1_pad': 'same',
'conv_8_2_filters': 128,
'conv_8_2_kernel': 3,
'conv_8_2_strides': 1,
'conv_8_2_pad': 'same',
'up_conv_4_filters': 64,
'up_conv_4_kernel': 2,
'up_conv_4_strides': 1,
'up_conv_4_pad': 'same',
'up_4_size': 2,
'up_4_int': 'bilinear',
'conv_9_1_filters': 64,
'conv_9_1_kernel': 3,
'conv_9_1_strides': 1,
'conv_9_1_pad': 'same',
'conv_9_2_filters': 64,
'conv_9_2_kernel': 64,
'conv_9_2_strides': 1,
'conv_9_2_pad': 'same',
'conv_k_1_filters': 20,
'conv_k_1_kernel': 3,
'conv_k_1_strides': 1,
'conv_k_1_pad': 'same',
'conv_k_2_filters': 3,
'conv_k_2_kernel': 1,
'conv_k_2_strides': 1,
'conv_k_2_pad': 'same',
# General Housekeeping
'regularizer_l1': 0.1,
'regularizer_l2': 0.25,
'activation_fn': 'elu',
'kern_init': 'glorot_uniform',
'model_opt': keras.optimizers.RMSprop(),
'learning_rate': 0.001,
'model_epochs': 50,
'model_batchsize': 6,
'model_loss': 'mse',
'model_metric': 'mse'
}
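# The params above lay out a U-Net-style encoder/decoder: four conv/pool stages down
# to 1024 filters, then four bilinear-upsampling stages back to full resolution (with
# no skip connections), followed by the small conv_k_* head that feeds the k-means step.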
# -----------------------------------------------------------------
# vox_ph_shape = list(vox_input_shape)
# img_ph_shape = list(img_input_shape)
# cali_ph_shape = list(cali_input_shape)
# vox_ph_shape.insert(0, 2)
# img_ph_shape.insert(0, 2)
# cali_ph_shape.insert(0, 2)
# vox_ph = tf.placeholder('float32', shape=vox_ph_shape)
# fluoro_1_ph = tf.placeholder('float16', shape=img_ph_shape)
# fluoro_2_ph = tf.placeholder('float16', shape=img_ph_shape)
# cali_ph = tf.placeholder('float16', shape=cali_ph_shape)
# input_vox = keras.Input(shape=vox_input_shape, name='input_vox', tensor=vox_ph)
# input_fluoro_1 = keras.Input(shape=img_input_shape, name='input_fluoro_1', tensor=fluoro_1_ph)
# input_fluoro_2 = keras.Input(shape=img_input_shape, name='input_fluoro_2', tensor=fluoro_2_ph)
# input_cali = keras.Input(shape=cali_input_shape, name='input_cali', tensor=cali_ph)
# -----------------------------------------------------------------
# Input Layers
input_fluoro_1 = keras.Input(shape=img_input_shape, name='input_fluoro_1', dtype='float32')
# -----------------------------------------------------------------
# d
bn_1 = keras.layers.BatchNormalization(input_shape=img_input_shape)(input_fluoro_1)
conv_1_1 = keras.layers.Conv2D(filters=params['conv_1_1_filters'], kernel_size=params['conv_1_1_kernel'], strides=params['conv_1_1_strides'], padding=params['conv_1_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'])(bn_1)
spat_1_1 = keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_1_1'])(conv_1_1)
conv_1_2 = keras.layers.Conv2D(filters=params['conv_1_2_filters'], kernel_size=params['conv_1_2_kernel'], strides=params['conv_1_2_strides'], padding=params['conv_1_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'])(spat_1_1)
spat_1_2 = keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_1_2'])(conv_1_2)
pool_1 = keras.layers.MaxPooling2D(pool_size=params['pool_1_size'], padding=params['pool_1_pad'], data_format=channel_order)(spat_1_2)
conv_2_1 = keras.layers.SeparableConv2D(filters=params['conv_2_1_filters'], kernel_size=params['conv_2_1_kernel'], strides=params['conv_2_1_strides'], padding=params['conv_2_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'])(pool_1)
spat_2_1 = keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_2_1'])(conv_2_1)
conv_2_2 = keras.layers.SeparableConv2D(filters=params['conv_2_2_filters'], kernel_size=params['conv_2_2_kernel'], strides=params['conv_2_2_strides'], padding=params['conv_2_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'])(spat_2_1)
spat_2_2 = keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_2_2'])(conv_2_2)
pool_2 = keras.layers.MaxPooling2D(pool_size=params['pool_2_size'], padding=params['pool_2_pad'], data_format=channel_order)(spat_2_2)
conv_3_1 = keras.layers.SeparableConv2D(filters=params['conv_3_1_filters'], kernel_size=params['conv_3_1_kernel'], strides=params['conv_3_1_strides'], padding=params['conv_3_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'])(pool_2)
spat_3_1 = keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_3_1'])(conv_3_1)
conv_3_2 = keras.layers.SeparableConv2D(filters=params['conv_3_2_filters'], kernel_size=params['conv_3_2_kernel'], strides=params['conv_3_2_strides'], padding=params['conv_3_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'])(spat_3_1)
spat_3_2 = keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_3_2'])(conv_3_2)
pool_3 = keras.layers.MaxPooling2D(pool_size=params['pool_3_size'], padding=params['pool_3_pad'], data_format=channel_order)(spat_3_2)
conv_4_1 = keras.layers.SeparableConv2D(filters=params['conv_4_1_filters'], kernel_size=params['conv_4_1_kernel'], strides=params['conv_4_1_strides'], padding=params['conv_4_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'])(pool_3)
spat_4_1 = keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_4_1'])(conv_4_1)
conv_4_2 = keras.layers.SeparableConv2D(filters=params['conv_4_2_filters'], kernel_size=params['conv_4_2_kernel'], strides=params['conv_4_2_strides'], padding=params['conv_4_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'])(spat_4_1)
spat_4_2 = keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_4_2'])(conv_4_2)
pool_4 = keras.layers.MaxPooling2D(pool_size=params['pool_4_size'], padding=params['pool_4_pad'], data_format=channel_order)(spat_4_2)
conv_5_1 = keras.layers.SeparableConv2D(filters=params['conv_5_1_filters'], kernel_size=params['conv_5_1_kernel'], strides=params['conv_5_1_strides'], padding=params['conv_5_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'])(pool_4)
conv_5_2 = keras.layers.SeparableConv2D(filters=params['conv_5_2_filters'], kernel_size=params['conv_5_2_kernel'], strides=params['conv_5_2_strides'], padding=params['conv_5_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'])(conv_5_1)
up_conv_1 = keras.layers.SeparableConv2D(filters=params['up_conv_1_filters'], kernel_size=params['up_conv_1_kernel'], strides=params['up_conv_1_strides'], padding=params['up_conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'])(conv_5_2)
up_1 = keras.layers.UpSampling2D(size=(params['up_1_size'], params['up_1_size']), interpolation=params['up_1_int'])(up_conv_1)
conv_6_1 = keras.layers.SeparableConv2D(filters=params['conv_6_1_filters'], kernel_size=params['conv_6_1_kernel'], strides=params['conv_6_1_strides'], padding=params['conv_6_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'])(up_1)
conv_6_2 = keras.layers.SeparableConv2D(filters=params['conv_6_2_filters'], kernel_size=params['conv_6_2_kernel'], strides=params['conv_6_2_strides'], padding=params['conv_6_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'])(conv_6_1)
up_conv_2 = keras.layers.SeparableConv2D(filters=params['up_conv_2_filters'], kernel_size=params['up_conv_2_kernel'], strides=params['up_conv_2_strides'], padding=params['up_conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'])(conv_6_2)
up_2 = keras.layers.UpSampling2D(size=(params['up_2_size'], params['up_2_size']), interpolation=params['up_2_int'])(up_conv_2)
conv_7_1 = keras.layers.SeparableConv2D(filters=params['conv_7_1_filters'], kernel_size=params['conv_7_1_kernel'], strides=params['conv_7_1_strides'], padding=params['conv_7_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'])(up_2)
conv_7_2 = keras.layers.SeparableConv2D(filters=params['conv_7_2_filters'], kernel_size=params['conv_7_2_kernel'], strides=params['conv_7_2_strides'], padding=params['conv_7_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'])(conv_7_1)
up_conv_3 = keras.layers.SeparableConv2D(filters=params['up_conv_3_filters'], kernel_size=params['up_conv_3_kernel'], strides=params['up_conv_3_strides'], padding=params['up_conv_3_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'])(conv_7_2)
up_3 = keras.layers.UpSampling2D(size=(params['up_3_size'], params['up_3_size']), interpolation=params['up_3_int'])(up_conv_3)
conv_8_1 = keras.layers.SeparableConv2D(filters=params['conv_8_1_filters'], kernel_size=params['conv_8_1_kernel'], strides=params['conv_8_1_strides'], padding=params['conv_8_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'])(up_3)
conv_8_2 = keras.layers.SeparableConv2D(filters=params['conv_8_2_filters'], kernel_size=params['conv_8_2_kernel'], strides=params['conv_8_2_strides'], padding=params['conv_8_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'])(conv_8_1)
up_conv_4 = keras.layers.SeparableConv2D(filters=params['up_conv_4_filters'], kernel_size=params['up_conv_4_kernel'], strides=params['up_conv_4_strides'], padding=params['up_conv_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'])(conv_8_2)
up_4 = keras.layers.UpSampling2D(size=(params['up_4_size'], params['up_4_size']), interpolation=params['up_4_int'])(up_conv_4)
conv_9_1 = keras.layers.SeparableConv2D(filters=params['conv_9_1_filters'], kernel_size=params['conv_9_1_kernel'], strides=params['conv_9_1_strides'], padding=params['conv_9_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'])(up_4)
conv_9_2 = keras.layers.SeparableConv2D(filters=params['conv_9_2_filters'], kernel_size=params['conv_9_2_kernel'], strides=params['conv_9_2_strides'], padding=params['conv_9_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'])(conv_9_1)
conv_k_1 = keras.layers.SeparableConv2D(filters=params['conv_k_1_filters'], kernel_size=params['conv_k_1_kernel'], strides=params['conv_k_1_strides'], padding=params['conv_k_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'])(conv_9_2)
# kmeans_out = KMeansLayer(clusters=8,n_init=5)(conv_k_1)
# kmeans_out.trainable=False
conv_k_2 = keras.layers.SeparableConv2D(filters=params['conv_k_2_filters'], kernel_size=params['conv_k_2_kernel'], strides=params['conv_k_2_strides'], padding=params['conv_k_2_pad'], data_format=channel_order, activation='linear', kernel_initializer=params['kern_init'])(conv_k_1)
kmeans_out = KMeansLayer(clusters=8, n_init=5)(conv_k_2)  # cluster-mean image from the KMeansLayer defined above
# kmeans_out = keras.layers.SeparableConv2D(filters=1, kernel_size=1, strides=1, padding='same', data_format=channel_order, activation='linear', kernel_initializer=params['kern_init'], use_bias=False)(conv_k_2)
# def KMeansFunc(x):
# # batch_mat = np.zeros((x.shape[0],x.shape[1],x.shape[2]))
# def inner_fn(x, clusters=8,n_init=5):
# batch_mat = np.zeros((x.shape[0],x.shape[1],x.shape[2]))
# for frame in range(keras.backend.shape(x)[0]):
# input_mat = x[frame]
# init_mat = np.zeros((input_mat.shape[0]*input_mat.shape[1]))
# kmeans_init = KMeans(n_clusters=clusters,n_init=n_init)
# reshape_mat = np.reshape(input_mat,(input_mat.shape[0]*input_mat.shape[1],input_mat.shape[2]))
# class_pred = kmeans_init.fit_predict(reshape_mat)
# for clust in range(clusters):
# init_mat[class_pred==clust] = np.mean(reshape_mat[class_pred==clust],axis=1)
# init_mat[class_pred==clust] = np.mean(reshape_mat[class_pred==clust],None)
# batch_mat[frame] = np.reshape(init_mat,(input_mat.shape[0],input_mat.shape[1])).astype(np.float32)
# batch_mat = np.expand_dims(batch_mat,axis=-1)
# return batch_mat.astype(np.float32)
# return tf.py_func(inner_fn,[x],tf.float32)
# def KMeansFunc_outputshape(input_shape):
# return (input_shape[0],input_shape[1], input_shape[2],1)
# kmeans_out = keras.layers.Lambda(KMeansFunc)(conv_k_2)
# print(dir(kmeans_out))
# print(kmeans_out.graph)
# -----------------------------------------------------------------
# Main Output
# main_output = keras.layers.Dense(units=params['main_output_units'], activation=params['main_output_act'], kernel_initializer=params['kern_init'], name='main_output')(conv_k_1)
# -----------------------------------------------------------------
# Model Housekeeping
model = keras.Model(inputs=[input_fluoro_1], outputs=kmeans_out)
keras.utils.plot_model(model, os.path.abspath(os.path.join(save_dir, expr_name + '_' + expr_no + '.png')), show_shapes=True)
model.compile(optimizer=params['model_opt'], loss=params['model_loss'], metrics=[params['model_metric']])
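# Hedged training sketch (assumption: the intent is to reconstruct the k-means
# quantised image from the raw fluoro frame, so the same frames serve as input and
# target; the commented-out multi-input fit below appears to be copied from a
# different experiment):
#
#   image_train_sub, image_val = data_comp(200)
#   x_train = np.expand_dims(image_train_sub[:, 0, :, :], axis=-1)
#   model.fit(x={'input_fluoro_1': x_train}, y=x_train,
#             epochs=params['model_epochs'], batch_size=params['model_batchsize'])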
# image_train_sub, image_val = data_comp(200)
# result = model.fit(x={'input_vox': np.expand_dims(vox_train_sub, axis=-1), 'input_fluoro_1': np.expand_dims(image_train_sub[:, 0, :, :], axis=-1), 'input_fluoro_2': np.expand_dims(image_train_sub[:, 1, :, :], axis=-1), 'input_cali': cali_train_sub}, y=label_train_sub, validation_data=([np.expand_dims(vox_val, axis=-1), np.expand_dims(image_val[:, 0, :, :], axis=-1), np.expand_dims(image_val[:, 1, :, :], axis=-1), cali_val], label_val), epochs=params['model_epochs'], batch_size=params['model_batchsize'], shuffle=True, verbose=2)
# model.save(os.path.join(os.getcwd(), 'test_model_save.h5'))
# def KMeansFunc(input_tens,clusters=8,n_init=5):
# base_mat = np.zeros((1,128,128,1))
# global xaaa
# xaaa = 0
# def KMeans_base(input_tens,base_mat=base_mat):
# global xaaa
# xaaa +=1
# init_mat = np.zeros((input_tens.shape[0]*input_tens.shape[1]))
# print(init_mat.shape)
# reshape_mat = np.reshape(input_tens[frame],(input_tens.shape[0]*input_tens.shape[1],input_tens.shape[2]))
# print(reshape_mat.shape)
# kmeans_init = KMeans(n_clusters=clusters, n_init=n_init)
# class_pred = kmeans_init.fit_predict(reshape_mat)
# for clust in range(8):
# init_mat[class_pred==clust] = np.mean(reshape_mat[class_pred==clust],axis=1)
# init_mat[class_pred==clust] = np.mean(reshape_mat[class_pred==clust],None)
# print(base_mat.shape)
# base_mat[frame]=np.reshape(init_mat,(input_tens.shape[1],input_tens.shape[2]))
# return np.expand_dims(base_mat,axis=-1).astype('float32')
# class KMeansLayer(keras.layers.Layer):
# def __init__(self, clusters, n_init, trainable=False, **kwargs):
# super(KMeansLayer, self).__init__(**kwargs)
# self.clusters = clusters
# self.n_init = n_init
# def build(self, input_shape):
# # self.input_shape = input_shape
# input_shape = input_shape
# self.output_s = (input_shape[0],input_shape[1], input_shape[2],1)
# self.depth = input_shape[3]
# super(KMeansLayer, self).build(input_shape)
# def call(self, inputs, **kwargs):
# def KMeansFunc(input_tens):
# batch_mat = np.zeros((input_tens.shape[0],input_tens.shape[1],input_tens.shape[2]))
# for frame in range(input_tens.shape[0]):
# input_mat = input_tens[frame]
# init_mat = np.zeros((input_mat.shape[0]*input_mat.shape[1]))
# kmeans_init = KMeans(n_clusters=self.clusters,n_init=self.n_init)
# reshape_mat = np.reshape(input_mat,(input_mat.shape[0]*input_mat.shape[1],input_mat.shape[2]))
# class_pred = kmeans_init.fit_predict(reshape_mat)
# for clust in range(clusters):
# init_mat[class_pred==clust] = np.mean(reshape_mat[class_pred==clust],axis=1)
# init_mat[class_pred==clust] = np.mean(reshape_mat[class_pred==clust],None)
# batch_mat[frame] = np.reshape(init_mat,(input_mat.shape[0],input_mat.shape[1])).astype(np.float32)
# batch_mat = np.expand_dims(batch_mat,axis=-1)
# return tf.convert_to_tensor(batch_mat.astype(np.float32))
# return tf.py_func(KMeansFunc,[inputs],tf.float32)
# def compute_output_shape(self, input_shape):
# return self.output_s
```
#### File: history/vox_fluoro_no_bn_mae/vox_fluoro_no_bn_mae.py
```python
import numpy as np
import h5py
import tensorflow as tf
# import keras
import os
import sys
import pickle
# 2019-09-20
# We are continuing to use the architecture based on residual nets.
# In this file, we continue normalizing the calibration inputs between -1 and 1, but we fit the min-max scaling on the training data set only.
# We likewise normalize the label data set, again fitting the scaling on the training data set only, and we scale the labels to between -1 and 1.
# We have also removed all dropout from this model, to see whether it can overfit the data, and we remove all batch normalization as well.
# We also do per-image normalization between -1 and 1.
# In this file, we use 'mae' for the loss and Nadam for the optimizer.
expr_name = sys.argv[0][:-3]
expr_no = '1'
save_dir = os.path.abspath(os.path.join(os.path.expanduser('~/fluoro/code/jupyt/vox_fluoro'), expr_name))
os.makedirs(save_dir, exist_ok=True)
# -----------------------------------------------------------------
def cust_mean_squared_error_var(y_true, y_pred):
    # Mean squared error with each of the six label components scaled by its variance
    # over the training labels (loaded from the stats file below).
    base_dir = os.path.expanduser('~/fluoro/data/compilation')
    stats_file = h5py.File(os.path.join(base_dir, 'labels_stats.h5py'), 'r')
    # mean_dset = stats_file['mean']
    # std_dset = stats_file['std']
    var_dset = stats_file['var']
    # mean_v = mean_dset[:]
    # std_v = std_dset[:]
    var_v = var_dset[:]
    stats_file.close()
    return tf.keras.backend.mean(tf.keras.backend.square(y_pred - y_true) / var_v)
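# Note: params below sets 'model_loss': 'mae', so the variance-scaled loss above
# appears to be kept for reference/comparison rather than used in this run.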
# -----------------------------------------------------------------
params = {
# ---
# 3D CONV
# ---
# Entry Layers
'v_intra_act_fn': None,
'v_res_act_fn': 'elu',
'v_conv_0_filters': 30,
'v_conv_0_kernel': 11,
'v_conv_0_strides_0': 2,
'v_conv_0_strides_1': 2,
'v_conv_0_strides_2': 2,
'v_conv_0_pad': 'same',
'v_spatial_drop_rate_0': 0,
'v_conv_1_filters': 30,
'v_conv_1_kernel': 7,
'v_conv_1_strides_0': 2,
'v_conv_1_strides_1': 2,
'v_conv_1_strides_2': 3,
'v_conv_1_pad': 'same',
# ---
# Pool After Initial Layers
'v_pool_0_size': 2,
'v_pool_0_pad': 'same',
# ---
# Second Run of Entry Layers
'v_conv_2_filters': 30,
'v_conv_2_kernel': 5,
'v_conv_2_strides_0': 2,
'v_conv_2_strides_1': 2,
'v_conv_2_strides_2': 2,
'v_conv_2_pad': 'same',
# ---
# Run of Residual Layers
# 1
'v_conv_3_filters': 30,
'v_conv_3_kernel': 3,
'v_conv_3_strides_0': 1,
'v_conv_3_strides_1': 1,
'v_conv_3_strides_2': 1,
'v_conv_3_pad': 'same',
'v_spatial_drop_rate_2': 0,
'v_conv_4_filters': 30,
'v_conv_4_kernel': 3,
'v_conv_4_strides_0': 1,
'v_conv_4_strides_1': 1,
'v_conv_4_strides_2': 1,
'v_conv_4_pad': 'same',
# 2
'v_conv_5_filters': 30,
'v_conv_5_kernel': 3,
'v_conv_5_strides_0': 1,
'v_conv_5_strides_1': 1,
'v_conv_5_strides_2': 1,
'v_conv_5_pad': 'same',
'v_spatial_drop_rate_3': 0,
'v_conv_6_filters': 30,
'v_conv_6_kernel': 3,
'v_conv_6_strides_0': 1,
'v_conv_6_strides_1': 1,
'v_conv_6_strides_2': 1,
'v_conv_6_pad': 'same',
# 3
'v_conv_7_filters': 30,
'v_conv_7_kernel': 3,
'v_conv_7_strides_0': 1,
'v_conv_7_strides_1': 1,
'v_conv_7_strides_2': 1,
'v_conv_7_pad': 'same',
'v_spatial_drop_rate_4': 0,
'v_conv_8_filters': 30,
'v_conv_8_kernel': 3,
'v_conv_8_strides_0': 1,
'v_conv_8_strides_1': 1,
'v_conv_8_strides_2': 1,
'v_conv_8_pad': 'same',
# 4
'v_conv_9_filters': 40,
'v_conv_9_kernel': 3,
'v_conv_9_strides_0': 2,
'v_conv_9_strides_1': 2,
'v_conv_9_strides_2': 2,
'v_conv_9_pad': 'same',
'v_spatial_drop_rate_5': 0,
'v_conv_10_filters': 40,
'v_conv_10_kernel': 3,
'v_conv_10_strides_0': 1,
'v_conv_10_strides_1': 1,
'v_conv_10_strides_2': 1,
'v_conv_10_pad': 'same',
'v_conv_11_filters': 40,
'v_conv_11_kernel': 3,
'v_conv_11_strides_0': 2,
'v_conv_11_strides_1': 2,
'v_conv_11_strides_2': 2,
'v_conv_11_pad': 'same',
# 5
'v_conv_12_filters': 50,
'v_conv_12_kernel': 2,
'v_conv_12_strides_0': 2,
'v_conv_12_strides_1': 2,
'v_conv_12_strides_2': 2,
'v_conv_12_pad': 'same',
'v_spatial_drop_rate_6': 0,
'v_conv_13_filters': 50,
'v_conv_13_kernel': 2,
'v_conv_13_strides_0': 1,
'v_conv_13_strides_1': 1,
'v_conv_13_strides_2': 1,
'v_conv_13_pad': 'same',
'v_conv_14_filters': 50,
'v_conv_14_kernel': 1,
'v_conv_14_strides_0': 2,
'v_conv_14_strides_1': 2,
'v_conv_14_strides_2': 2,
'v_conv_14_pad': 'same',
# 6
'v_conv_15_filters': 50,
'v_conv_15_kernel': 2,
'v_conv_15_strides_0': 2,
'v_conv_15_strides_1': 2,
'v_conv_15_strides_2': 2,
'v_conv_15_pad': 'same',
'v_spatial_drop_rate_7': 0,
'v_conv_16_filters': 50,
'v_conv_16_kernel': 2,
'v_conv_16_strides_0': 1,
'v_conv_16_strides_1': 1,
'v_conv_16_strides_2': 1,
'v_conv_16_pad': 'same',
'v_conv_17_filters': 50,
'v_conv_17_kernel': 1,
'v_conv_17_strides_0': 2,
'v_conv_17_strides_1': 2,
'v_conv_17_strides_2': 2,
'v_conv_17_pad': 'same',
# ---
# Final Convs
'v_spatial_drop_rate_8': 0,
'v_conv_18_filters': 50,
'v_conv_18_kernel': 2,
'v_conv_18_strides_0': 1,
'v_conv_18_strides_1': 1,
'v_conv_18_strides_2': 1,
'v_conv_18_pad': 'valid',
'drop_1_v_rate': 0,
'dense_1_v_units': 75,
'drop_2_v_rate': 0,
'dense_2_v_units': 50,
# ---
# 2D CONV
# ---
'intra_act_fn': None,
'res_act_fn': 'elu',
# Entry Fluoro Layers
'conv_0_filters': 30,
'conv_0_kernel': 5,
'conv_0_strides': 2,
'conv_0_pad': 'same',
'spatial_drop_rate_0': 0,
'conv_1_filters': 30,
'conv_1_kernel': 5,
'conv_1_strides': 2,
'conv_1_pad': 'same',
# ---
# Pool After Initial Layers
'pool_0_size': 2,
'pool_0_pad': 'same',
# ---
# Run Of Residual Layers
# 1
'conv_2_filters': 30,
'conv_2_kernel': 3,
'conv_2_strides': 1,
'conv_2_pad': 'same',
'spatial_drop_rate_1': 0,
'conv_3_filters': 30,
'conv_3_kernel': 3,
'conv_3_strides': 1,
'conv_3_pad': 'same',
# 2
'conv_4_filters': 30,
'conv_4_kernel': 3,
'conv_4_strides': 1,
'conv_4_pad': 'same',
'spatial_drop_rate_2': 0,
'conv_5_filters': 30,
'conv_5_kernel': 3,
'conv_5_strides': 1,
'conv_5_pad': 'same',
# 3
'conv_6_filters': 30,
'conv_6_kernel': 3,
'conv_6_strides': 1,
'conv_6_pad': 'same',
'spatial_drop_rate_3': 0,
'conv_7_filters': 30,
'conv_7_kernel': 3,
'conv_7_strides': 1,
'conv_7_pad': 'same',
# 4
'conv_8_filters': 30,
'conv_8_kernel': 3,
'conv_8_strides': 1,
'conv_8_pad': 'same',
'spatial_drop_rate_4': 0,
'conv_9_filters': 30,
'conv_9_kernel': 3,
'conv_9_strides': 1,
'conv_9_pad': 'same',
# 5
'conv_10_filters': 30,
'conv_10_kernel': 3,
'conv_10_strides': 1,
'conv_10_pad': 'same',
'spatial_drop_rate_5': 0,
'conv_11_filters': 30,
'conv_11_kernel': 3,
'conv_11_strides': 1,
'conv_11_pad': 'same',
# 6
'conv_12_filters': 30,
'conv_12_kernel': 3,
'conv_12_strides': 1,
'conv_12_pad': 'same',
'spatial_drop_rate_6': 0,
'conv_13_filters': 30,
'conv_13_kernel': 3,
'conv_13_strides': 1,
'conv_13_pad': 'same',
# ---
# COMB FLUOROS
# ---
# ---
# RES NET AFTER COMB FLUORO
# ---
'c_intra_act_fn': None,
'c_res_act_fn': 'elu',
# 0
'comb_0_filters': 60,
'comb_0_kernel': 3,
'comb_0_strides': 1,
'comb_0_pad': 'same',
'comb_spatial_0': 0,
'comb_1_filters': 60,
'comb_1_kernel': 3,
'comb_1_strides': 1,
'comb_1_pad': 'same',
# 1
'comb_2_filters': 60,
'comb_2_kernel': 3,
'comb_2_strides': 1,
'comb_2_pad': 'same',
'comb_spatial_1': 0,
'comb_3_filters': 60,
'comb_3_kernel': 3,
'comb_3_strides': 1,
'comb_3_pad': 'same',
# 2
'comb_4_filters': 60,
'comb_4_kernel': 3,
'comb_4_strides': 1,
'comb_4_pad': 'same',
'comb_spatial_2': 0,
'comb_5_filters': 60,
'comb_5_kernel': 3,
'comb_5_strides': 1,
'comb_5_pad': 'same',
# 3
'comb_6_filters': 60,
'comb_6_kernel': 3,
'comb_6_strides': 1,
'comb_6_pad': 'same',
'comb_spatial_3': 0,
'comb_7_filters': 60,
'comb_7_kernel': 3,
'comb_7_strides': 1,
'comb_7_pad': 'same',
# 4
'comb_8_filters': 60,
'comb_8_kernel': 3,
'comb_8_strides': 1,
'comb_8_pad': 'same',
'comb_spatial_4': 0,
'comb_9_filters': 60,
'comb_9_kernel': 3,
'comb_9_strides': 1,
'comb_9_pad': 'same',
# 5
'comb_10_filters': 60,
'comb_10_kernel': 2,
'comb_10_strides': 2,
'comb_10_pad': 'same',
'comb_spatial_5': 0,
'comb_11_filters': 60,
'comb_11_kernel': 2,
'comb_11_strides': 1,
'comb_11_pad': 'same',
'comb_12_filters': 60,
'comb_12_kernel': 1,
'comb_12_strides': 2,
'comb_12_pad': 'same',
# 6
'comb_13_filters': 60,
'comb_13_kernel': 2,
'comb_13_strides': 2,
'comb_13_pad': 'same',
'comb_spatial_6': 0,
'comb_14_filters': 60,
'comb_14_kernel': 2,
'comb_14_strides': 1,
'comb_14_pad': 'same',
'comb_15_filters': 60,
'comb_15_kernel': 1,
'comb_15_strides': 2,
'comb_15_pad': 'same',
# 7
'comb_16_filters': 60,
'comb_16_kernel': 2,
'comb_16_strides': 2,
'comb_16_pad': 'same',
'comb_spatial_7': 0,
'comb_17_filters': 60,
'comb_17_kernel': 2,
'comb_17_strides': 1,
'comb_17_pad': 'same',
'comb_18_filters': 60,
'comb_18_kernel': 1,
'comb_18_strides': 2,
'comb_18_pad': 'same',
# ---
# Final Convs After Fluoro
'comb_19_filters': 60,
'comb_19_kernel': 2,
'comb_19_strides': 1,
'comb_19_pad': 'valid',
# ---
# Dense After Fluoro Convs
'dense_comb_0_units': 50,
'drop_1_comb': 0,
'dense_comb_1_units': 50,
# ---
# Activation Function for Fluoro Vox Comb
'flu_vox_act_fn': 'elu',
# ---
# Combine Fluoro and Vox
'vox_flu_units_0': 60,
'vox_flu_drop_1': 0,
'vox_flu_units_1': 50,
'vox_flu_drop_2': 0,
'vox_flu_units_2': 30,
'vox_flu_drop_3': 0,
'vox_flu_units_3': 15,
'vox_flu_units_4': 10,
# ---
# Cali Units
'cali_0_units': 20,
'drop_1_cali': 0,
'cali_1_units': 20,
'drop_2_cali': 0,
'cali_2_units': 20,
'cali_3_units': 10,
# ---
# Activation Function for Top Level Comb
'top_level_act_fn': 'elu',
'top_level_intra': None,
# ---
# Top Level Dense
'top_drop_0': 0,
'top_dense_0': 10,
'top_dense_1': 10,
'top_dense_2': 10,
'top_drop_1': 0,
'top_dense_3': 10,
'top_dense_4': 10,
'top_drop_2': 0,
'top_dense_5': 10,
'top_dense_6': 10,
'top_drop_3': 0,
'top_dense_7': 10,
'top_dense_8': 10,
'top_drop_4': 0,
'top_dense_9': 10,
'top_dense_10': 10,
'top_drop_5': 0,
'top_dense_11': 10,
'top_dense_12': 6,
# Main Output
'main_output_units': 6,
'main_output_act': 'linear',
# General Housekeeping
'v_conv_regularizer': None,
'conv_regularizer': None,
'dense_regularizer_1': None,
'dense_regularizer_2': None,
# 'v_conv_regularizer': tf.keras.regularizers.l1(1e-7),
# 'conv_regularizer': tf.keras.regularizers.l1(1e-7),
# 'dense_regularizer_1': tf.keras.regularizers.l1(1e-7),
# 'dense_regularizer_2': tf.keras.regularizers.l1(1e-7),
'activation_fn': 'elu',
'kern_init': 'he_uniform',
'model_opt': tf.keras.optimizers.Nadam,
'learning_rate': 0.001,
'model_epochs': 30,
'model_batchsize': 6,
'model_loss': 'mae',
'model_metric': 'mae'
}
# -----------------------------------------------------------------
channel_order = 'channels_last'
img_input_shape = (128, 128, 1)
vox_input_shape = (197, 162, 564, 1)
cali_input_shape = (6,)
# Input Layers
input_vox = tf.keras.Input(shape=vox_input_shape, name='input_vox', dtype='float32')
input_fluoro_1 = tf.keras.Input(shape=img_input_shape, name='input_fluoro_1', dtype='float32')
input_fluoro_2 = tf.keras.Input(shape=img_input_shape, name='input_fluoro_2', dtype='float32')
input_cali = tf.keras.Input(shape=cali_input_shape, name='input_cali', dtype='float32')
# -----------------------------------------------------------------
# VOXEL CONVS
# -----------------------------------------------------------------
# ---
# Entry Layers
v_conv_0 = tf.keras.layers.Conv3D(filters=params['v_conv_0_filters'], kernel_size=params['v_conv_0_kernel'], strides=(params['v_conv_0_strides_0'], params['v_conv_0_strides_1'], params['v_conv_0_strides_2']), padding=params['v_conv_0_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(input_vox)
# bn_0 = tf.keras.layers.BatchNormalization()(v_conv_0)
v_spat_0 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_0'])(v_conv_0)
v_conv_1 = tf.keras.layers.Conv3D(filters=params['v_conv_1_filters'], kernel_size=params['v_conv_1_kernel'], strides=(params['v_conv_1_strides_0'], params['v_conv_1_strides_1'], params['v_conv_1_strides_2']), padding=params['v_conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_spat_0)
# ---
# Pool After Initial Layers
v_pool_0 = tf.keras.layers.MaxPooling3D(pool_size=params['v_pool_0_size'], padding=params['v_pool_0_pad'], data_format=channel_order)(v_conv_1)
# ---
# Second Run of Entry Layers
# bn_1 = tf.keras.layers.BatchNormalization()(v_pool_0)
v_conv_2 = tf.keras.layers.Conv3D(filters=params['v_conv_2_filters'], kernel_size=params['v_conv_2_kernel'], strides=(params['v_conv_2_strides_0'], params['v_conv_2_strides_1'], params['v_conv_2_strides_2']), padding=params['v_conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_pool_0)
# ---
# Run of Residual Layers
# bn_2 = tf.keras.layers.BatchNormalization()(v_conv_2)
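# Each numbered block below is an identity-style residual unit (a sketch of the
# computation, not extra layers):
#     y = Conv3D(x) -> SpatialDropout3D -> Conv3D
#     out = Activation('elu')(Add()([y, shortcut]))
# Blocks 1-3 add the block input itself as the shortcut; blocks 4-6 downsample, so
# the shortcut passes through its own strided Conv3D before the Add.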
# 1
v_conv_3 = tf.keras.layers.Conv3D(filters=params['v_conv_3_filters'], kernel_size=params['v_conv_3_kernel'], strides=(params['v_conv_3_strides_0'], params['v_conv_3_strides_1'], params['v_conv_3_strides_2']), padding=params['v_conv_3_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_conv_2)
# bn_3 = tf.keras.layers.BatchNormalization()(v_conv_3)
v_spat_2 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_2'])(v_conv_3)
v_conv_4 = tf.keras.layers.Conv3D(filters=params['v_conv_4_filters'], kernel_size=params['v_conv_4_kernel'], strides=(params['v_conv_4_strides_0'], params['v_conv_4_strides_1'], params['v_conv_4_strides_2']), padding=params['v_conv_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_2)
# bn_4 = tf.keras.layers.BatchNormalization()(v_conv_4)
v_add_0 = tf.keras.layers.Add()([v_conv_4, v_conv_2])
v_act_0 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_0)
# 2
v_conv_5 = tf.keras.layers.Conv3D(filters=params['v_conv_5_filters'], kernel_size=params['v_conv_5_kernel'], strides=(params['v_conv_5_strides_0'], params['v_conv_5_strides_1'], params['v_conv_5_strides_2']), padding=params['v_conv_5_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_0)
# bn_5 = tf.keras.layers.BatchNormalization()(v_conv_5)
v_spat_3 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_3'])(v_conv_5)
v_conv_6 = tf.keras.layers.Conv3D(filters=params['v_conv_6_filters'], kernel_size=params['v_conv_6_kernel'], strides=(params['v_conv_6_strides_0'], params['v_conv_6_strides_1'], params['v_conv_6_strides_2']), padding=params['v_conv_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_3)
# bn_6 = tf.keras.layers.BatchNormalization()(v_conv_6)
v_add_1 = tf.keras.layers.Add()([v_conv_6, v_act_0])
v_act_1 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_1)
# 3
v_conv_7 = tf.keras.layers.Conv3D(filters=params['v_conv_7_filters'], kernel_size=params['v_conv_7_kernel'], strides=(params['v_conv_7_strides_0'], params['v_conv_7_strides_1'], params['v_conv_7_strides_2']), padding=params['v_conv_7_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_1)
# bn_7 = tf.keras.layers.BatchNormalization()(v_conv_7)
v_spat_4 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_4'])(v_conv_7)
v_conv_8 = tf.keras.layers.Conv3D(filters=params['v_conv_8_filters'], kernel_size=params['v_conv_8_kernel'], strides=(params['v_conv_8_strides_0'], params['v_conv_8_strides_1'], params['v_conv_8_strides_2']), padding=params['v_conv_8_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_4)
# bn_8 = tf.keras.layers.BatchNormalization()(v_conv_8)
v_add_2 = tf.keras.layers.Add()([v_conv_8, v_act_1])
v_act_2 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_2)
# 4
v_conv_9 = tf.keras.layers.Conv3D(filters=params['v_conv_9_filters'], kernel_size=params['v_conv_9_kernel'], strides=(params['v_conv_9_strides_0'], params['v_conv_9_strides_1'], params['v_conv_9_strides_2']), padding=params['v_conv_9_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_2)
# bn_9 = tf.keras.layers.BatchNormalization()(v_conv_9)
v_spat_5 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_5'])(v_conv_9)
v_conv_10 = tf.keras.layers.Conv3D(filters=params['v_conv_10_filters'], kernel_size=params['v_conv_10_kernel'], strides=(params['v_conv_10_strides_0'], params['v_conv_10_strides_1'], params['v_conv_10_strides_2']), padding=params['v_conv_10_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_5)
# bn_10 = tf.keras.layers.BatchNormalization()(v_conv_10)
v_conv_11 = tf.keras.layers.Conv3D(filters=params['v_conv_11_filters'], kernel_size=params['v_conv_11_kernel'], strides=(params['v_conv_11_strides_0'], params['v_conv_11_strides_1'], params['v_conv_11_strides_2']), padding=params['v_conv_11_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_act_2)
# bn_11 = tf.keras.layers.BatchNormalization()(v_conv_11)
v_add_3 = tf.keras.layers.Add()([v_conv_10, v_conv_11])
v_act_3 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_3)
# 5
v_conv_12 = tf.keras.layers.Conv3D(filters=params['v_conv_12_filters'], kernel_size=params['v_conv_12_kernel'], strides=(params['v_conv_12_strides_0'], params['v_conv_12_strides_1'], params['v_conv_12_strides_2']), padding=params['v_conv_12_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_3)
# bn_12 = tf.keras.layers.BatchNormalization()(v_conv_12)
v_spat_6 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_6'])(v_conv_12)
v_conv_13 = tf.keras.layers.Conv3D(filters=params['v_conv_13_filters'], kernel_size=params['v_conv_13_kernel'], strides=(params['v_conv_13_strides_0'], params['v_conv_13_strides_1'], params['v_conv_13_strides_2']), padding=params['v_conv_13_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_6)
# bn_13 = tf.keras.layers.BatchNormalization()(v_conv_13)
v_conv_14 = tf.keras.layers.Conv3D(filters=params['v_conv_14_filters'], kernel_size=params['v_conv_14_kernel'], strides=(params['v_conv_14_strides_0'], params['v_conv_14_strides_1'], params['v_conv_14_strides_2']), padding=params['v_conv_14_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_act_3)
# bn_14 = tf.keras.layers.BatchNormalization()(v_conv_14)
v_add_4 = tf.keras.layers.Add()([v_conv_13, v_conv_14])
v_act_4 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_4)
# 6
v_conv_15 = tf.keras.layers.Conv3D(filters=params['v_conv_15_filters'], kernel_size=params['v_conv_15_kernel'], strides=(params['v_conv_15_strides_0'], params['v_conv_15_strides_1'], params['v_conv_15_strides_2']), padding=params['v_conv_15_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_4)
# bn_15 = tf.keras.layers.BatchNormalization()(v_conv_15)
v_spat_7 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_7'])(v_conv_15)
v_conv_16 = tf.keras.layers.Conv3D(filters=params['v_conv_16_filters'], kernel_size=params['v_conv_16_kernel'], strides=(params['v_conv_16_strides_0'], params['v_conv_16_strides_1'], params['v_conv_16_strides_2']), padding=params['v_conv_16_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_7)
# bn_16 = tf.keras.layers.BatchNormalization()(v_conv_16)
v_conv_17 = tf.keras.layers.Conv3D(filters=params['v_conv_17_filters'], kernel_size=params['v_conv_17_kernel'], strides=(params['v_conv_17_strides_0'], params['v_conv_17_strides_1'], params['v_conv_17_strides_2']), padding=params['v_conv_17_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_act_4)
# bn_17 = tf.keras.layers.BatchNormalization()(v_conv_17)
v_add_5 = tf.keras.layers.Add()([v_conv_16, v_conv_17])
v_act_5 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_5)
# ---
# Final Conv Layers
# bn_18 = tf.keras.layers.BatchNormalization()(v_act_5)
v_spat_8 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_8'])(v_act_5)
v_conv_18 = tf.keras.layers.Conv3D(filters=params['v_conv_18_filters'], kernel_size=params['v_conv_18_kernel'], strides=(params['v_conv_18_strides_0'], params['v_conv_18_strides_1'], params['v_conv_18_strides_2']), padding=params['v_conv_18_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_spat_8)
# ---
# Dense Layers
v_flatten_0 = tf.keras.layers.Flatten()(v_conv_18)
# bn_19 = tf.keras.layers.BatchNormalization()(v_flatten_0)
dense_1_v_drop = tf.keras.layers.Dropout(params['drop_1_v_rate'])(v_flatten_0)
dense_1_v = tf.keras.layers.Dense(units=params['dense_1_v_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_1_v_drop)
# bn_20 = tf.keras.layers.BatchNormalization()(dense_1_v)
dense_2_v_drop = tf.keras.layers.Dropout(params['drop_2_v_rate'])(dense_1_v)
dense_2_v = tf.keras.layers.Dense(units=params['dense_2_v_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_2_v_drop)
# bn_21_v = tf.keras.layers.BatchNormalization()(dense_2_v)
# -----------------------------------------------------------------
# FLUORO ANALYSIS 1
# -----------------------------------------------------------------
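# The two fluoro inputs go through structurally identical 2D residual towers.
# Both towers read the same 'conv_*' hyperparameters from params, but each
# tower instantiates its own layers, so no weights are shared between them.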
# ---
# Entry Fluoro Layers
# per_image_stand_1 = tf.keras.layers.Lambda(lambda frame: tf.image.per_image_standardization(frame))(input_fluoro_1)
# bn_0 = tf.keras.layers.BatchNormalization()(per_image_stand_1)
conv_0_1 = tf.keras.layers.Conv2D(filters=params['conv_0_filters'], kernel_size=params['conv_0_kernel'], strides=params['conv_0_strides'], padding=params['conv_0_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(input_fluoro_1)
# bn_1 = tf.keras.layers.BatchNormalization()(conv_0_1)
spat_0_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_0'])(conv_0_1)
conv_1_1 = tf.keras.layers.Conv2D(filters=params['conv_1_filters'], kernel_size=params['conv_1_kernel'], strides=params['conv_1_strides'], padding=params['conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_0_1)
# ---
# Pool After Initial Layers
pool_0_1 = tf.keras.layers.AveragePooling2D(pool_size=params['pool_0_size'], padding=params['pool_0_pad'])(conv_1_1)
# ---
# Run of Residual Layers
# bn_2 = tf.keras.layers.BatchNormalization()(pool_0_1)
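# Each numbered block below is Conv2D -> SpatialDropout2D -> Conv2D, followed by
# an identity skip: the block output is Add()-ed with the block input and passed
# through the residual activation. The commented-out BatchNormalization lines
# are disabled in this variant of the script.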
# 1
conv_2_1 = tf.keras.layers.Conv2D(filters=params['conv_2_filters'], kernel_size=params['conv_2_kernel'], strides=params['conv_2_strides'], padding=params['conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(pool_0_1)
# bn_3 = tf.keras.layers.BatchNormalization()(conv_2_1)
spat_1_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_1'])(conv_2_1)
conv_3_1 = tf.keras.layers.Conv2D(filters=params['conv_3_filters'], kernel_size=params['conv_3_kernel'], strides=params['conv_3_strides'], padding=params['conv_3_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_1_1)
# bn_4 = tf.keras.layers.BatchNormalization()(conv_3_1)
add_0 = tf.keras.layers.Add()([conv_3_1, pool_0_1])
act_0 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_0)
# 2
conv_4_1 = tf.keras.layers.Conv2D(filters=params['conv_4_filters'], kernel_size=params['conv_4_kernel'], strides=params['conv_4_strides'], padding=params['conv_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_0)
# bn_5 = tf.keras.layers.BatchNormalization()(conv_4_1)
spat_2_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_1'])(conv_4_1)
conv_5_1 = tf.keras.layers.Conv2D(filters=params['conv_3_filters'], kernel_size=params['conv_3_kernel'], strides=params['conv_3_strides'], padding=params['conv_3_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_2_1)
# bn_6 = tf.keras.layers.BatchNormalization()(conv_5_1)
add_1 = tf.keras.layers.Add()([act_0, conv_5_1])
act_1 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_1)
# 3
conv_6_1 = tf.keras.layers.Conv2D(filters=params['conv_6_filters'], kernel_size=params['conv_6_kernel'], strides=params['conv_6_strides'], padding=params['conv_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_1)
# bn_7 = tf.keras.layers.BatchNormalization()(conv_6_1)
spat_3_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_3'])(conv_6_1)
conv_7_1 = tf.keras.layers.Conv2D(filters=params['conv_7_filters'], kernel_size=params['conv_7_kernel'], strides=params['conv_7_strides'], padding=params['conv_7_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_3_1)
# bn_8 = tf.keras.layers.BatchNormalization()(conv_7_1)
add_2 = tf.keras.layers.Add()([act_1, conv_7_1])
act_2 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_2)
# 4
conv_8_1 = tf.keras.layers.Conv2D(filters=params['conv_8_filters'], kernel_size=params['conv_8_kernel'], strides=params['conv_8_strides'], padding=params['conv_8_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_2)
# bn_9 = tf.keras.layers.BatchNormalization()(conv_8_1)
spat_4_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_4'])(conv_8_1)
conv_9_1 = tf.keras.layers.Conv2D(filters=params['conv_9_filters'], kernel_size=params['conv_9_kernel'], strides=params['conv_9_strides'], padding=params['conv_9_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_4_1)
# bn_10 = tf.keras.layers.BatchNormalization()(conv_9_1)
add_3 = tf.keras.layers.Add()([act_2, conv_9_1])
act_3 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_3)
# 5
conv_10_1 = tf.keras.layers.Conv2D(filters=params['conv_10_filters'], kernel_size=params['conv_10_kernel'], strides=params['conv_10_strides'], padding=params['conv_10_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_3)
# bn_11 = tf.keras.layers.BatchNormalization()(conv_10_1)
spat_5_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_5'])(conv_10_1)
conv_11_1 = tf.keras.layers.Conv2D(filters=params['conv_11_filters'], kernel_size=params['conv_11_kernel'], strides=params['conv_11_strides'], padding=params['conv_11_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_5_1)
# bn_12 = tf.keras.layers.BatchNormalization()(conv_11_1)
add_4 = tf.keras.layers.Add()([act_3, conv_11_1])
act_4 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_4)
# 6
conv_12_1 = tf.keras.layers.Conv2D(filters=params['conv_12_filters'], kernel_size=params['conv_12_kernel'], strides=params['conv_12_strides'], padding=params['conv_12_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_4)
# bn_13 = tf.keras.layers.BatchNormalization()(conv_12_1)
spat_6_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_6'])(conv_12_1)
conv_13_1 = tf.keras.layers.Conv2D(filters=params['conv_13_filters'], kernel_size=params['conv_13_kernel'], strides=params['conv_13_strides'], padding=params['conv_13_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_6_1)
# bn_14 = tf.keras.layers.BatchNormalization()(conv_13_1)
add_5 = tf.keras.layers.Add()([act_4, conv_13_1])
act_5_1 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_5)
# -----------------------------------------------------------------
# FLUORO ANALYSIS 2
# -----------------------------------------------------------------
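# Note: this second tower re-binds the variable names used in FLUORO ANALYSIS 1
# (conv_0_1, spat_0_1, act_0, ...). The first tower's output is already captured
# in act_5_1, so the shadowing does not affect the graph, but only act_5_2 from
# this block should be referenced downstream.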
# ---
# Entry Fluoro Layers
# per_image_stand_2 = tf.keras.layers.Lambda(lambda frame: tf.image.per_image_standardization(frame))(input_fluoro_2)
# bn_0 = tf.keras.layers.BatchNormalization()(per_image_stand_2)
conv_0_1 = tf.keras.layers.Conv2D(filters=params['conv_0_filters'], kernel_size=params['conv_0_kernel'], strides=params['conv_0_strides'], padding=params['conv_0_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(input_fluoro_2)
# bn_1 = tf.keras.layers.BatchNormalization()(conv_0_1)
spat_0_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_0'])(conv_0_1)
conv_1_1 = tf.keras.layers.Conv2D(filters=params['conv_1_filters'], kernel_size=params['conv_1_kernel'], strides=params['conv_1_strides'], padding=params['conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_0_1)
# ---
# Pool After Initial Layers
pool_0_1 = tf.keras.layers.AveragePooling2D(pool_size=params['pool_0_size'], padding=params['pool_0_pad'])(conv_1_1)
# ---
# Run of Residual Layers
# bn_2 = tf.keras.layers.BatchNormalization()(pool_0_1)
# 1
conv_2_1 = tf.keras.layers.Conv2D(filters=params['conv_2_filters'], kernel_size=params['conv_2_kernel'], strides=params['conv_2_strides'], padding=params['conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(pool_0_1)
# bn_3 = tf.keras.layers.BatchNormalization()(conv_2_1)
spat_1_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_1'])(conv_2_1)
conv_3_1 = tf.keras.layers.Conv2D(filters=params['conv_3_filters'], kernel_size=params['conv_3_kernel'], strides=params['conv_3_strides'], padding=params['conv_3_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_1_1)
# bn_4 = tf.keras.layers.BatchNormalization()(conv_3_1)
add_0 = tf.keras.layers.Add()([conv_3_1, pool_0_1])
act_0 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_0)
# 2
conv_4_1 = tf.keras.layers.Conv2D(filters=params['conv_4_filters'], kernel_size=params['conv_4_kernel'], strides=params['conv_4_strides'], padding=params['conv_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_0)
# bn_5 = tf.keras.layers.BatchNormalization()(conv_4_1)
spat_2_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_1'])(conv_4_1)
conv_5_1 = tf.keras.layers.Conv2D(filters=params['conv_3_filters'], kernel_size=params['conv_3_kernel'], strides=params['conv_3_strides'], padding=params['conv_3_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_2_1)
# bn_6 = tf.keras.layers.BatchNormalization()(conv_5_1)
add_1 = tf.keras.layers.Add()([act_0, conv_5_1])
act_1 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_1)
# 3
conv_6_1 = tf.keras.layers.Conv2D(filters=params['conv_6_filters'], kernel_size=params['conv_6_kernel'], strides=params['conv_6_strides'], padding=params['conv_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_1)
# bn_7 = tf.keras.layers.BatchNormalization()(conv_6_1)
spat_3_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_3'])(conv_6_1)
conv_7_1 = tf.keras.layers.Conv2D(filters=params['conv_7_filters'], kernel_size=params['conv_7_kernel'], strides=params['conv_7_strides'], padding=params['conv_7_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_3_1)
# bn_8 = tf.keras.layers.BatchNormalization()(conv_7_1)
add_2 = tf.keras.layers.Add()([act_1, conv_7_1])
act_2 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_2)
# 4
conv_8_1 = tf.keras.layers.Conv2D(filters=params['conv_8_filters'], kernel_size=params['conv_8_kernel'], strides=params['conv_8_strides'], padding=params['conv_8_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_2)
# bn_9 = tf.keras.layers.BatchNormalization()(conv_8_1)
spat_4_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_4'])(conv_8_1)
conv_9_1 = tf.keras.layers.Conv2D(filters=params['conv_9_filters'], kernel_size=params['conv_9_kernel'], strides=params['conv_9_strides'], padding=params['conv_9_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_4_1)
# bn_10 = tf.keras.layers.BatchNormalization()(conv_9_1)
add_3 = tf.keras.layers.Add()([act_2, conv_9_1])
act_3 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_3)
# 5
conv_10_1 = tf.keras.layers.Conv2D(filters=params['conv_10_filters'], kernel_size=params['conv_10_kernel'], strides=params['conv_10_strides'], padding=params['conv_10_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_3)
# bn_11 = tf.keras.layers.BatchNormalization()(conv_10_1)
spat_5_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_5'])(conv_10_1)
conv_11_1 = tf.keras.layers.Conv2D(filters=params['conv_11_filters'], kernel_size=params['conv_11_kernel'], strides=params['conv_11_strides'], padding=params['conv_11_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_5_1)
# bn_12 = tf.keras.layers.BatchNormalization()(conv_11_1)
add_4 = tf.keras.layers.Add()([act_3, conv_11_1])
act_4 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_4)
# 6
conv_12_1 = tf.keras.layers.Conv2D(filters=params['conv_12_filters'], kernel_size=params['conv_12_kernel'], strides=params['conv_12_strides'], padding=params['conv_12_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_4)
# bn_13 = tf.keras.layers.BatchNormalization()(conv_12_1)
spat_6_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_6'])(conv_12_1)
conv_13_1 = tf.keras.layers.Conv2D(filters=params['conv_13_filters'], kernel_size=params['conv_13_kernel'], strides=params['conv_13_strides'], padding=params['conv_13_pad'], data_format=channel_order, activation=params['intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_6_1)
# bn_14 = tf.keras.layers.BatchNormalization()(conv_13_1)
add_5 = tf.keras.layers.Add()([act_4, conv_13_1])
act_5_2 = tf.keras.layers.Activation(activation=params['res_act_fn'])(add_5)
# -----------------------------------------------------------------
# COMBINE FLUOROS
# -----------------------------------------------------------------
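# The outputs of the two fluoro towers are concatenated along the default
# axis=-1, which is the channel dimension when channel_order is channels_last.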
comb_fluoro_0 = tf.keras.layers.concatenate([act_5_1, act_5_2])
# -----------------------------------------------------------------
# RES NETS AFTER COMBINED FLUORO
# -----------------------------------------------------------------
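# Residual blocks on the fused feature map. Blocks 0-4 use identity skips
# (Add with the previous activation or, for block 0, with comb_fluoro_0);
# blocks 5-7 instead run a parallel Conv2D (comb_12, comb_15, comb_18) on the
# previous activation and Add the two conv paths, so those blocks are free to
# change the filter count or spatial size.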
# 0
comb_0 = tf.keras.layers.Conv2D(filters=params['comb_0_filters'], kernel_size=params['comb_0_kernel'], strides=params['comb_0_strides'], padding=params['comb_0_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(comb_fluoro_0)
# bn_0 = tf.keras.layers.BatchNormalization()(comb_0)
spat_0 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_0'])(comb_0)
comb_1 = tf.keras.layers.Conv2D(filters=params['comb_1_filters'], kernel_size=params['comb_1_kernel'], strides=params['comb_1_strides'], padding=params['comb_1_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_0)
# bn_1 = tf.keras.layers.BatchNormalization()(comb_1)
add_0 = tf.keras.layers.Add()([comb_fluoro_0, comb_1])
act_0 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_0)
# 1
comb_2 = tf.keras.layers.Conv2D(filters=params['comb_2_filters'], kernel_size=params['comb_2_kernel'], strides=params['comb_2_strides'], padding=params['comb_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_0)
# bn_2 = tf.keras.layers.BatchNormalization()(comb_2)
spat_1 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_1'])(comb_2)
comb_3 = tf.keras.layers.Conv2D(filters=params['comb_3_filters'], kernel_size=params['comb_3_kernel'], strides=params['comb_3_strides'], padding=params['comb_3_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_1)
# bn_3 = tf.keras.layers.BatchNormalization()(comb_3)
add_1 = tf.keras.layers.Add()([act_0, comb_3])
act_1 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_1)
# 2
comb_4 = tf.keras.layers.Conv2D(filters=params['comb_4_filters'], kernel_size=params['comb_4_kernel'], strides=params['comb_4_strides'], padding=params['comb_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_1)
# bn_4 = tf.keras.layers.BatchNormalization()(comb_4)
spat_2 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_2'])(comb_4)
comb_5 = tf.keras.layers.Conv2D(filters=params['comb_5_filters'], kernel_size=params['comb_5_kernel'], strides=params['comb_5_strides'], padding=params['comb_5_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_2)
# bn_5 = tf.keras.layers.BatchNormalization()(comb_5)
add_2 = tf.keras.layers.Add()([act_1, comb_5])
act_2 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_2)
# 3
comb_6 = tf.keras.layers.Conv2D(filters=params['comb_6_filters'], kernel_size=params['comb_6_kernel'], strides=params['comb_6_strides'], padding=params['comb_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_2)
# bn_6 = tf.keras.layers.BatchNormalization()(comb_6)
spat_3 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_3'])(comb_6)
comb_7 = tf.keras.layers.Conv2D(filters=params['comb_7_filters'], kernel_size=params['comb_7_kernel'], strides=params['comb_7_strides'], padding=params['comb_7_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_3)
# bn_7 = tf.keras.layers.BatchNormalization()(comb_7)
add_3 = tf.keras.layers.Add()([act_2, comb_7])
act_3 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_3)
# 4
comb_8 = tf.keras.layers.Conv2D(filters=params['comb_8_filters'], kernel_size=params['comb_8_kernel'], strides=params['comb_8_strides'], padding=params['comb_8_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_3)
# bn_8 = tf.keras.layers.BatchNormalization()(comb_8)
spat_4 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_4'])(comb_8)
comb_9 = tf.keras.layers.Conv2D(filters=params['comb_9_filters'], kernel_size=params['comb_9_kernel'], strides=params['comb_9_strides'], padding=params['comb_9_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_4)
# bn_9 = tf.keras.layers.BatchNormalization()(comb_9)
add_4 = tf.keras.layers.Add()([act_3, comb_9])
act_4 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_4)
# 5
comb_10 = tf.keras.layers.Conv2D(filters=params['comb_10_filters'], kernel_size=params['comb_10_kernel'], strides=params['comb_10_strides'], padding=params['comb_10_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_4)
# bn_10 = tf.keras.layers.BatchNormalization()(comb_10)
spat_5 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_5'])(comb_10)
comb_11 = tf.keras.layers.Conv2D(filters=params['comb_11_filters'], kernel_size=params['comb_11_kernel'], strides=params['comb_11_strides'], padding=params['comb_11_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_5)
# bn_11 = tf.keras.layers.BatchNormalization()(comb_11)
comb_12 = tf.keras.layers.Conv2D(filters=params['comb_12_filters'], kernel_size=params['comb_12_kernel'], strides=params['comb_12_strides'], padding=params['comb_12_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_4)
# bn_12 = tf.keras.layers.BatchNormalization()(comb_12)
add_5 = tf.keras.layers.Add()([comb_11, comb_12])
act_5 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_5)
# 6
comb_13 = tf.keras.layers.Conv2D(filters=params['comb_13_filters'], kernel_size=params['comb_13_kernel'], strides=params['comb_13_strides'], padding=params['comb_13_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_5)
# bn_13 = tf.keras.layers.BatchNormalization()(comb_13)
spat_6 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_6'])(comb_13)
comb_14 = tf.keras.layers.Conv2D(filters=params['comb_14_filters'], kernel_size=params['comb_14_kernel'], strides=params['comb_14_strides'], padding=params['comb_14_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_6)
# bn_14 = tf.keras.layers.BatchNormalization()(comb_14)
comb_15 = tf.keras.layers.Conv2D(filters=params['comb_15_filters'], kernel_size=params['comb_15_kernel'], strides=params['comb_15_strides'], padding=params['comb_15_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_5)
# bn_15 = tf.keras.layers.BatchNormalization()(comb_15)
add_6 = tf.keras.layers.Add()([comb_14, comb_15])
act_6 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_6)
# 7
comb_16 = tf.keras.layers.Conv2D(filters=params['comb_16_filters'], kernel_size=params['comb_16_kernel'], strides=params['comb_16_strides'], padding=params['comb_16_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_6)
# bn_16 = tf.keras.layers.BatchNormalization()(comb_16)
spat_7 = tf.keras.layers.SpatialDropout2D(rate=params['comb_spatial_7'])(comb_16)
comb_17 = tf.keras.layers.Conv2D(filters=params['comb_17_filters'], kernel_size=params['comb_17_kernel'], strides=params['comb_17_strides'], padding=params['comb_17_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_7)
# bn_17 = tf.keras.layers.BatchNormalization()(comb_17)
comb_18 = tf.keras.layers.Conv2D(filters=params['comb_18_filters'], kernel_size=params['comb_18_kernel'], strides=params['comb_18_strides'], padding=params['comb_18_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_6)
# bn_18 = tf.keras.layers.BatchNormalization()(comb_18)
add_7 = tf.keras.layers.Add()([comb_17, comb_18])
act_7 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_7)
# ---
# Conv After End of Res Net
comb_19 = tf.keras.layers.Conv2D(filters=params['comb_19_filters'], kernel_size=params['comb_19_kernel'], strides=params['comb_19_strides'], padding=params['comb_19_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_7)
# ---
# Dense At End of Convs
comb_flatten_1 = tf.keras.layers.Flatten()(comb_19)
# bn_19 = tf.keras.layers.BatchNormalization()(comb_flatten_1)
dense_0_comb = tf.keras.layers.Dense(units=params['dense_comb_0_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(comb_flatten_1)
# bn_20 = tf.keras.layers.BatchNormalization()(dense_0_comb)
dense_1_comb_drop = tf.keras.layers.Dropout(params['drop_1_comb'])(dense_0_comb)
dense_1_comb = tf.keras.layers.Dense(units=params['dense_comb_1_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(dense_1_comb_drop)
# bn_21_f = tf.keras.layers.BatchNormalization()(dense_1_comb)
# -----------------------------------------------------------------
# COMBINE FLUORO NETS AND VOXEL NETS
# -----------------------------------------------------------------
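# The fluoro and voxel feature vectors are merged element-wise, so
# params['dense_comb_1_units'] must equal params['dense_2_v_units'] for this
# Add() to be shape-compatible.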
fluoro_vox_comb = tf.keras.layers.Add()([dense_1_comb, dense_2_v])
fluoro_vox_act = tf.keras.layers.Activation(activation=params['flu_vox_act_fn'])(fluoro_vox_comb)
# -----------------------------------------------------------------
# DENSE AFTER FLUORO AND VOXEL
# -----------------------------------------------------------------
# bn_0 = tf.keras.layers.BatchNormalization()(fluoro_vox_act)
vox_flu_0 = tf.keras.layers.Dense(units=params['vox_flu_units_0'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(fluoro_vox_act)
# bn_1 = tf.keras.layers.BatchNormalization()(vox_flu_0)
vox_flu_drop_1 = tf.keras.layers.Dropout(params['vox_flu_drop_1'])(vox_flu_0)
vox_flu_1 = tf.keras.layers.Dense(units=params['vox_flu_units_1'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(vox_flu_drop_1)
# bn_2 = tf.keras.layers.BatchNormalization()(vox_flu_1)
vox_flu_drop_2 = tf.keras.layers.Dropout(params['vox_flu_drop_2'])(vox_flu_1)
vox_flu_2 = tf.keras.layers.Dense(units=params['vox_flu_units_2'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(vox_flu_drop_2)
# bn_3 = tf.keras.layers.BatchNormalization()(vox_flu_2)
vox_flu_drop_3 = tf.keras.layers.Dropout(params['vox_flu_drop_3'])(vox_flu_2)
vox_flu_3 = tf.keras.layers.Dense(units=params['vox_flu_units_3'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(vox_flu_drop_3)
# bn_4 = tf.keras.layers.BatchNormalization()(vox_flu_3)
vox_flu_4 = tf.keras.layers.Dense(units=params['vox_flu_units_4'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(vox_flu_3)
# bn_5_comb = tf.keras.layers.BatchNormalization()(vox_flu_4)
# -----------------------------------------------------------------
# CALIBRATION DENSE
# -----------------------------------------------------------------
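# The calibration vector passes through a small Dense/Dropout stack. Its final
# embedding (cali_3) is reused twice below: added to the voxel/fluoro pathway
# and fed as the skip tensor into the first top-level residual dense block.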
# bn_0 = tf.keras.layers.BatchNormalization()(input_cali)
cali_0 = tf.keras.layers.Dense(units=params['cali_0_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(input_cali)
# bn_1 = tf.keras.layers.BatchNormalization()(cali_0)
drop_1_cali = tf.keras.layers.Dropout(params['drop_1_cali'])(cali_0)
cali_1 = tf.keras.layers.Dense(units=params['cali_1_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(drop_1_cali)
# bn_2 = tf.keras.layers.BatchNormalization()(cali_1)
drop_2_cali = tf.keras.layers.Dropout(params['drop_2_cali'])(cali_1)
cali_2 = tf.keras.layers.Dense(units=params['cali_2_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(drop_2_cali)
# bn_3 = tf.keras.layers.BatchNormalization()(cali_2)
cali_3 = tf.keras.layers.Dense(units=params['cali_3_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(cali_2)
# bn_4_c = tf.keras.layers.BatchNormalization()(cali_3)
# -----------------------------------------------------------------
# COMBINE CALI AND VOX/FLUORO
# -----------------------------------------------------------------
top_level_comb = tf.keras.layers.Add()([cali_3, vox_flu_4])
top_level_act = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(top_level_comb)
# -----------------------------------------------------------------
# TOP LEVEL DENSE TO OUTPUT
# -----------------------------------------------------------------
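# Run of residual dense blocks: Dense -> Dropout -> Dense, then Add with the
# previous activation (or with cali_3 for the first block) and a residual
# activation. Each Add requires the second Dense of the block to have the same
# width as its skip tensor.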
top_dense_0 = tf.keras.layers.Dense(units=params['top_dense_0'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(top_level_act)
# bn_0 = tf.keras.layers.BatchNormalization()(top_dense_0)
top_drop_0 = tf.keras.layers.Dropout(params['top_drop_0'])(top_dense_0)
top_dense_1 = tf.keras.layers.Dense(units=params['top_dense_1'], activation=params['top_level_intra'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(top_drop_0)
# bn_1 = tf.keras.layers.BatchNormalization()(top_dense_1)
add_0 = tf.keras.layers.Add()([top_dense_1, cali_3])
act_0 = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(add_0)
top_dense_2 = tf.keras.layers.Dense(units=params['top_dense_2'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(act_0)
# bn_2 = tf.keras.layers.BatchNormalization()(top_dense_2)
top_drop_1 = tf.keras.layers.Dropout(params['top_drop_1'])(top_dense_2)
top_dense_3 = tf.keras.layers.Dense(units=params['top_dense_3'], activation=params['top_level_intra'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(top_drop_1)
# bn_3 = tf.keras.layers.BatchNormalization()(top_dense_3)
add_1 = tf.keras.layers.Add()([top_dense_3, act_0])
act_1 = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(add_1)
top_dense_4 = tf.keras.layers.Dense(units=params['top_dense_4'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(act_1)
# bn_4 = tf.keras.layers.BatchNormalization()(top_dense_4)
top_drop_2 = tf.keras.layers.Dropout(params['top_drop_2'])(top_dense_4)
top_dense_5 = tf.keras.layers.Dense(units=params['top_dense_5'], activation=params['top_level_intra'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(top_drop_2)
# bn_5 = tf.keras.layers.BatchNormalization()(top_dense_5)
add_2 = tf.keras.layers.Add()([top_dense_5, act_1])
act_2 = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(add_2)
top_dense_6 = tf.keras.layers.Dense(units=params['top_dense_6'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(act_2)
# bn_6 = tf.keras.layers.BatchNormalization()(top_dense_6)
top_drop_3 = tf.keras.layers.Dropout(params['top_drop_3'])(top_dense_6)
top_dense_7 = tf.keras.layers.Dense(units=params['top_dense_7'], activation=params['top_level_intra'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(top_drop_3)
# bn_7 = tf.keras.layers.BatchNormalization()(top_dense_7)
add_3 = tf.keras.layers.Add()([top_dense_7, act_2])
act_3 = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(add_3)
top_dense_8 = tf.keras.layers.Dense(units=params['top_dense_8'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(act_3)
# bn_8 = tf.keras.layers.BatchNormalization()(top_dense_8)
top_drop_4 = tf.keras.layers.Dropout(params['top_drop_4'])(top_dense_8)
top_dense_9 = tf.keras.layers.Dense(units=params['top_dense_9'], activation=params['top_level_intra'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(top_drop_4)
# bn_9 = tf.keras.layers.BatchNormalization()(top_dense_9)
add_4 = tf.keras.layers.Add()([top_dense_9, act_3])
act_4 = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(add_4)
top_dense_10 = tf.keras.layers.Dense(units=params['top_dense_10'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(act_4)
# bn_10 = tf.keras.layers.BatchNormalization()(top_dense_10)
top_dense_11 = tf.keras.layers.Dense(units=params['top_dense_11'], activation=params['top_level_intra'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_2'])(top_dense_10)
# bn_11 = tf.keras.layers.BatchNormalization()(top_dense_11)
add_5 = tf.keras.layers.Add()([top_dense_11, act_4])
act_5 = tf.keras.layers.Activation(activation=params['top_level_act_fn'])(add_5)
top_dense_12 = tf.keras.layers.Dense(units=params['top_dense_4'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=None)(act_5)
# -----------------------------------------------------------------
# Main Output
main_output = tf.keras.layers.Dense(units=params['main_output_units'], activation=params['main_output_act'], kernel_initializer=params['kern_init'], name='main_output')(top_dense_12)
# -----------------------------------------------------------------
# Model Housekeeping
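# Note: plot_model writes a PNG of the graph and requires pydot and graphviz to
# be installed; if they are missing, this call is the likely point of failure.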
model = tf.keras.Model(inputs=[input_vox, input_fluoro_1, input_fluoro_2, input_cali], outputs=main_output)
model.compile(optimizer=params['model_opt'](lr=params['learning_rate']), loss=params['model_loss'], metrics=[params['model_metric']])
tf.keras.utils.plot_model(model, os.path.abspath(os.path.join(save_dir, expr_name + '_' + expr_no + '.png')), show_shapes=True)
model.summary()
# -----------------------------------------------------------------
def min_max_norm(data_set, feature_range=(-1, 1), axis=0, data_min=None, data_max=None):
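    """Min-max scale data_set into feature_range along `axis` (default 0, per column).

    If data_min / data_max are supplied (e.g. statistics from the training
    split), the data are first clipped to that range so the scaled output of
    unseen samples stays inside feature_range.
    """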
if data_min is None:
data_min = np.min(data_set, axis=axis)
else:
data_set = np.where(data_set < data_min, data_min, data_set)
if data_max is None:
data_max = np.max(data_set, axis=axis)
else:
data_set = np.where(data_set > data_max, data_max, data_set)
data_in_std_range = (data_set - data_min) / (data_max - data_min)
data_scaled = data_in_std_range * (feature_range[1] - feature_range[0]) + feature_range[0]
return data_scaled
# -----------------------------------------------------------------
def split_train_test(shape, num_of_samples=None, ratio=0.2):
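    """Shuffle sample indices and split them into test and train index arrays.

    `shape` is the total number of samples; `ratio` is the fraction held out
    for the test split. If `num_of_samples` is given, only that many indices
    are drawn before splitting. Returns (test_indx, train_indx).
    """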
if num_of_samples is None:
shuffled_indices = np.random.choice(shape, size=shape, replace=False)
else:
shuffled_indices = np.random.choice(shape, size=num_of_samples, replace=False)
    test_set_size = int(len(shuffled_indices) * ratio)
test_indx = shuffled_indices[:test_set_size]
train_indx = shuffled_indices[test_set_size:]
return test_indx, train_indx
# -----------------------------------------------------------------
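# Load the compiled datasets from HDF5: voxel volumes, the two fluoro images
# (stored per-image min-max normalized), calibration vectors, and the pose
# labels. The paths below assume the author's ~/fluoro/data/compilation layout.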
vox_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/voxels_mark_origin_comp.h5py'), 'r')
vox_init = vox_file['vox_dset']
image_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/images_norm_std.h5py'), 'r')
image_grp_1 = image_file['image_1']
image_grp_2 = image_file['image_2']
image_init_1 = image_grp_1['min_max_dset_per_image']
image_init_2 = image_grp_2['min_max_dset_per_image']
cali_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/calibration.h5py'), 'r')
cali_init = cali_file['cali_len3_rot']
label_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/labels.h5py'), 'r')
label_init = label_file['labels_dset']
num_of_samples = None
test_indxs, train_sup_indxs = split_train_test(len(label_init), num_of_samples=num_of_samples)
val_indxs, train_indxs = split_train_test(len(train_sup_indxs))
val_indxs = train_sup_indxs[val_indxs]
train_indxs = train_sup_indxs[train_indxs]
test_indxs = sorted(list(test_indxs))
val_indxs = sorted(list(val_indxs))
train_indxs = sorted(list(train_indxs))
hist_file = open(os.path.join(save_dir, expr_name + '_hist_objects_' + expr_no + '.pkl'), 'wb')
var_dict = {}
var_dict['test_indxs'] = test_indxs
var_dict['val_indxs'] = val_indxs
var_dict['train_indxs'] = train_indxs
vox_mat_train = vox_init[:]
vox_mat_val = vox_mat_train[val_indxs]
vox_mat_train = vox_mat_train[train_indxs]
vox_file.close()
image_mat_train_1 = image_init_1[:]
image_mat_val_1 = image_mat_train_1[val_indxs]
image_mat_train_1 = image_mat_train_1[train_indxs]
image_mat_train_2 = image_init_2[:]
image_mat_val_2 = image_mat_train_2[val_indxs]
image_mat_train_2 = image_mat_train_2[train_indxs]
image_file.close()
cali_mat_train = cali_init[:]
cali_mat_val = cali_mat_train[val_indxs]
cali_mat_train = cali_mat_train[train_indxs]
cali_train_min = np.min(cali_mat_train, axis=0)
cali_train_max = np.max(cali_mat_train, axis=0)
cali_train_std = np.std(cali_mat_train, axis=0)
cali_train_avg = np.mean(cali_mat_train, axis=0)
var_dict['cali_train_avg'] = cali_train_avg
var_dict['cali_train_std'] = cali_train_std
var_dict['cali_train_min'] = cali_train_min
var_dict['cali_train_max'] = cali_train_max
cali_train_min_max = min_max_norm(cali_mat_train)
cali_val_min_max = min_max_norm(cali_mat_val, data_min=cali_train_min, data_max=cali_train_max)
cali_file.close()
label_mat_train = label_init[:]
label_mat_val = label_mat_train[val_indxs]
label_mat_train = label_mat_train[train_indxs]
label_train_avg = np.mean(label_mat_train, axis=0)
label_train_std = np.std(label_mat_train, axis=0)
label_train_min = np.min(label_mat_train, axis=0)
label_train_max = np.max(label_mat_train, axis=0)
label_train_min_max = min_max_norm(label_mat_train, feature_range=(-1, 1))
label_val_min_max = min_max_norm(label_mat_val, feature_range=(-1, 1), data_min=label_train_min, data_max=label_train_max)
var_dict['label_train_avg'] = label_train_avg
var_dict['label_train_std'] = label_train_std
var_dict['label_train_min'] = label_train_min
var_dict['label_train_max'] = label_train_max
label_file.close()
# -----------------------------------------------------------------
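# Train with inputs passed as a dict keyed by the Input layer names defined
# above. Calibration inputs and labels are min-max scaled with statistics from
# the training split; the validation data reuse those same statistics.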
print('\n\ncompletely loaded...\n\n')
result = model.fit(x={'input_vox': np.expand_dims(vox_mat_train, axis=-1), 'input_fluoro_1': image_mat_train_1, 'input_fluoro_2': image_mat_train_2, 'input_cali': cali_train_min_max}, y=label_train_min_max, validation_data=([np.expand_dims(vox_mat_val, axis=-1), image_mat_val_1, image_mat_val_2, cali_val_min_max], label_val_min_max), epochs=params['model_epochs'], batch_size=params['model_batchsize'], shuffle=True, verbose=2)
model.save(os.path.abspath(os.path.join(save_dir, expr_name + '_' + expr_no + '.h5')))
var_dict['result'] = result.history
pickle.dump(var_dict, hist_file)
hist_file.close()
```
#### File: history/vox_fluoro_res_test/vox_fluoro_res_test.py
```python
import numpy as np
import h5py
import tensorflow as tf
# import keras
import os
import sys
import pickle
# We are going to try to do some residual networks
expr_name = sys.argv[0][:-3]
expr_no = '1'
save_dir = os.path.abspath(os.path.join(os.path.expanduser('~/fluoro/code/jupyt/vox_fluoro'), expr_name))
print(save_dir)
os.makedirs(save_dir, exist_ok=True)
# -----------------------------------------------------------------
def cust_mean_squared_error_var(y_true, y_pred):
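    """Mean squared error with each component scaled by the training-label variance.

    The variance vector is loaded from labels_stats.h5py; under graph execution
    this file read happens once, when the loss is built, and var_v is baked into
    the graph as a constant.
    """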
base_dir = os.path.expanduser('~/fluoro/data/compilation')
stats_file = h5py.File(os.path.join(base_dir, 'labels_stats.h5py'), 'r')
# mean_dset = stats_file['mean']
# std_dset = stats_file['std']
var_dset = stats_file['var']
# mean_v = mean_dset[:]
# std_v = std_dset[:]
var_v = var_dset[:]
stats_file.close()
return tf.keras.backend.mean(tf.keras.backend.square(y_pred - y_true) / var_v)
# -----------------------------------------------------------------
params = {
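    # Naming convention: keys prefixed with 'v_' configure the 3D voxel tower,
    # the unprefixed conv/pool/spatial keys configure the 2D fluoro towers, and
    # the remaining dense/output keys configure the fully connected layers that
    # follow.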
# ---
# 3D CONV
# ---
# Entry Layers
'v_conv_0_filters': 30,
'v_conv_0_kernel': 9,
'v_conv_0_strides_0': 2,
'v_conv_0_strides_1': 2,
'v_conv_0_strides_2': 2,
'v_conv_0_pad': 'same',
'v_spatial_drop_rate_0': 0.3,
'v_conv_1_filters': 30,
'v_conv_1_kernel': 5,
'v_conv_1_strides_0': 2,
'v_conv_1_strides_1': 2,
'v_conv_1_strides_2': 3,
'v_conv_1_pad': 'same',
# ---
# Pool After Initial Layers
'v_pool_0_size': 2,
'v_pool_0_pad': 'same',
# ---
# Second Run of Entry Layers
'v_conv_2_filters': 30,
'v_conv_2_kernel': 5,
'v_conv_2_strides_0': 2,
'v_conv_2_strides_1': 2,
'v_conv_2_strides_2': 2,
'v_conv_2_pad': 'same',
# ---
# Run of Residual Layers
# 1
'v_conv_3_filters': 30,
'v_conv_3_kernel': 3,
'v_conv_3_strides_0': 1,
'v_conv_3_strides_1': 1,
'v_conv_3_strides_2': 1,
'v_conv_3_pad': 'same',
'v_spatial_drop_rate_2': 0.3,
'v_conv_4_filters': 30,
'v_conv_4_kernel': 3,
'v_conv_4_strides_0': 1,
'v_conv_4_strides_1': 1,
'v_conv_4_strides_2': 1,
'v_conv_4_pad': 'same',
# 2
'v_conv_5_filters': 30,
'v_conv_5_kernel': 3,
'v_conv_5_strides_0': 1,
'v_conv_5_strides_1': 1,
'v_conv_5_strides_2': 1,
'v_conv_5_pad': 'same',
'v_spatial_drop_rate_3': 0.3,
'v_conv_6_filters': 30,
'v_conv_6_kernel': 3,
'v_conv_6_strides_0': 1,
'v_conv_6_strides_1': 1,
'v_conv_6_strides_2': 1,
'v_conv_6_pad': 'same',
# 3
'v_conv_7_filters': 30,
'v_conv_7_kernel': 3,
'v_conv_7_strides_0': 1,
'v_conv_7_strides_1': 1,
'v_conv_7_strides_2': 1,
'v_conv_7_pad': 'same',
'v_spatial_drop_rate_4': 0.3,
'v_conv_8_filters': 30,
'v_conv_8_kernel': 3,
'v_conv_8_strides_0': 1,
'v_conv_8_strides_1': 1,
'v_conv_8_strides_2': 1,
'v_conv_8_pad': 'same',
# 4
'v_conv_9_filters': 40,
'v_conv_9_kernel': 3,
'v_conv_9_strides_0': 2,
'v_conv_9_strides_1': 2,
'v_conv_9_strides_2': 2,
'v_conv_9_pad': 'same',
'v_spatial_drop_rate_5': 0.3,
'v_conv_10_filters': 40,
'v_conv_10_kernel': 3,
'v_conv_10_strides_0': 1,
'v_conv_10_strides_1': 1,
'v_conv_10_strides_2': 1,
'v_conv_10_pad': 'same',
'v_conv_11_filters': 40,
'v_conv_11_kernel': 3,
'v_conv_11_strides_0': 2,
'v_conv_11_strides_1': 2,
'v_conv_11_strides_2': 2,
'v_conv_11_pad': 'same',
# 5
'v_conv_12_filters': 50,
'v_conv_12_kernel': 2,
'v_conv_12_strides_0': 2,
'v_conv_12_strides_1': 2,
'v_conv_12_strides_2': 2,
'v_conv_12_pad': 'same',
'v_spatial_drop_rate_6': 0.3,
'v_conv_13_filters': 50,
'v_conv_13_kernel': 2,
'v_conv_13_strides_0': 1,
'v_conv_13_strides_1': 1,
'v_conv_13_strides_2': 1,
'v_conv_13_pad': 'same',
'v_conv_14_filters': 50,
'v_conv_14_kernel': 1,
'v_conv_14_strides_0': 2,
'v_conv_14_strides_1': 2,
'v_conv_14_strides_2': 2,
'v_conv_14_pad': 'same',
# 6
'v_conv_15_filters': 50,
'v_conv_15_kernel': 2,
'v_conv_15_strides_0': 2,
'v_conv_15_strides_1': 2,
'v_conv_15_strides_2': 2,
'v_conv_15_pad': 'same',
'v_spatial_drop_rate_7': 0.3,
'v_conv_16_filters': 50,
'v_conv_16_kernel': 2,
'v_conv_16_strides_0': 1,
'v_conv_16_strides_1': 1,
'v_conv_16_strides_2': 1,
'v_conv_16_pad': 'same',
'v_conv_17_filters': 50,
'v_conv_17_kernel': 1,
'v_conv_17_strides_0': 2,
'v_conv_17_strides_1': 2,
'v_conv_17_strides_2': 2,
'v_conv_17_pad': 'same',
# ---
# Final Convs
'v_spatial_drop_rate_8': 0.5,
'v_conv_18_filters': 50,
'v_conv_18_kernel': 2,
'v_conv_18_strides_0': 1,
'v_conv_18_strides_1': 1,
'v_conv_18_strides_2': 1,
'v_conv_18_pad': 'valid',
'dense_1_v_units': 75,
'dense_2_v_units': 50,
# ---
# 2D CONV
# ---
# Entry Fluoro Layers
'conv_0_filters': 30,
'conv_0_kernel': 5,
'conv_0_strides': 2,
'conv_0_pad': 'same',
'spatial_drop_rate_0': 0.3,
'conv_1_filters': 30,
'conv_1_kernel': 5,
'conv_1_strides': 2,
'conv_1_pad': 'same',
# ---
# Pool After Initial Layers
'pool_0_size': 2,
'pool_0_pad': 'same',
# ---
# Run Of Residual Layers
# 1
'conv_2_filters': 30,
'conv_2_kernel': 3,
'conv_2_strides': 1,
'conv_2_pad': 'same',
'spatial_drop_rate_1': 0.3,
'conv_3_filters': 30,
'conv_3_kernel': 3,
'conv_3_strides': 1,
'conv_3_pad': 'same',
# 2
'conv_4_filters': 30,
'conv_4_kernel': 3,
'conv_4_strides': 1,
'conv_4_pad': 'same',
'spatial_drop_rate_2': 0.3,
'conv_5_filters': 30,
'conv_5_kernel': 3,
'conv_5_strides': 1,
'conv_5_pad': 'same',
# 3
'conv_6_filters': 30,
'conv_6_kernel': 3,
'conv_6_strides': 1,
'conv_6_pad': 'same',
'spatial_drop_rate_3': 0.3,
'conv_7_filters': 30,
'conv_7_kernel': 3,
'conv_7_strides': 1,
'conv_7_pad': 'same',
# 4
'conv_8_filters': 30,
'conv_8_kernel': 3,
'conv_8_strides': 1,
'conv_8_pad': 'same',
'spatial_drop_rate_4': 0.3,
'conv_9_filters': 30,
'conv_9_kernel': 3,
'conv_9_strides': 1,
'conv_9_pad': 'same',
# 5
'conv_10_filters': 40,
'conv_10_kernel': 3,
'conv_10_strides': 2,
'conv_10_pad': 'same',
'spatial_drop_rate_5': 0.3,
'conv_11_filters': 40,
'conv_11_kernel': 3,
'conv_11_strides': 1,
'conv_11_pad': 'same',
'conv_12_filters': 40,
'conv_12_kernel': 1,
'conv_12_strides': 2,
'conv_12_pad': 'same',
# 6
'conv_13_filters': 40,
'conv_13_kernel': 3,
'conv_13_strides': 2,
'conv_13_pad': 'same',
'spatial_drop_rate_6': 0.3,
'conv_14_filters': 40,
'conv_14_kernel': 3,
'conv_14_strides': 1,
'conv_14_pad': 'same',
'conv_15_filters': 40,
'conv_15_kernel': 1,
'conv_15_strides': 2,
'conv_15_pad': 'same',
# 7
'conv_16_filters': 40,
'conv_16_kernel': 3,
'conv_16_strides': 2,
'conv_16_pad': 'same',
'spatial_drop_rate_7': 0.3,
'conv_17_filters': 40,
'conv_17_kernel': 3,
'conv_17_strides': 1,
'conv_17_pad': 'same',
'conv_18_filters': 40,
'conv_18_kernel': 1,
'conv_18_strides': 2,
'conv_18_pad': 'same',
# ---
# Final Conv Layers
'spatial_drop_rate_8': 0.3,
'conv_19_filters': 50,
'conv_19_kernel': 2,
'conv_19_strides': 1,
'conv_19_pad': 'valid',
# ---
# Dense Layers
'dense_0_f_units': 50,
'dense_1_f_units': 50,
'dense_comb_1_units': 50,
'dense_comb_2_units': 50,
# Calibration Dense Layers
'dense_1_cali_units': 20,
'dense_2_cali_units': 6,
'dense_comb_v_1_units': 20,
'dense_comb_v_2_units': 6,
# Top Level Dense Units
'dense_1_co_units': 250,
'drop_1_comb_rate': 0.2,
'dense_2_co_units': 150,
'dense_3_co_units': 100,
'drop_2_comb_rate': 0.2,
'dense_4_co_units': 20,
# Main Output
'main_output_units': 6,
'main_output_act': 'linear',
# General Housekeeping
'v_conv_regularizer': None,
'conv_regularizer': None,
'dense_regularizer_1': None,
'dense_regularizer_2': None,
'activation_fn': 'elu',
'v_intra_act_fn': None,
'v_res_act_fn': 'elu',
'c_intra_act_fn': None,
'c_res_act_fn': 'elu',
'res_act_fn': 'elu',
'kern_init': 'glorot_uniform',
'model_opt': tf.keras.optimizers.Adam,
'learning_rate': 0.001,
'model_epochs': 50,
'model_batchsize': 5,
'model_loss': cust_mean_squared_error_var,
'model_metric': cust_mean_squared_error_var
}
# -----------------------------------------------------------------
channel_order = 'channels_last'
img_input_shape = (128, 128, 1)
vox_input_shape = (199, 164, 566, 1)
cali_input_shape = (6,)
# Input Layers
input_vox = tf.keras.Input(shape=vox_input_shape, name='input_vox', dtype='float32')
input_fluoro_1 = tf.keras.Input(shape=img_input_shape, name='input_fluoro_1', dtype='float32')
input_fluoro_2 = tf.keras.Input(shape=img_input_shape, name='input_fluoro_2', dtype='float32')
input_cali = tf.keras.Input(shape=cali_input_shape, name='input_cali', dtype='float32')
# -----------------------------------------------------------------
# ---
# Entry Layers
v_conv_0 = tf.keras.layers.Conv3D(filters=params['v_conv_0_filters'], kernel_size=params['v_conv_0_kernel'], strides=(params['v_conv_0_strides_0'], params['v_conv_0_strides_1'], params['v_conv_0_strides_2']), padding=params['v_conv_0_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(input_vox)
bn_0 = tf.keras.layers.BatchNormalization()(v_conv_0)
v_spat_0 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_0'])(bn_0)
v_conv_1 = tf.keras.layers.Conv3D(filters=params['v_conv_1_filters'], kernel_size=params['v_conv_1_kernel'], strides=(params['v_conv_1_strides_0'], params['v_conv_1_strides_1'], params['v_conv_1_strides_2']), padding=params['v_conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_spat_0)
# ---
# Pool After Initial Layers
v_pool_0 = tf.keras.layers.MaxPooling3D(pool_size=params['v_pool_0_size'], padding=params['v_pool_0_pad'], data_format=channel_order)(v_conv_1)
# ---
# Second Run of Entry Layers
bn_1 = tf.keras.layers.BatchNormalization()(v_pool_0)
v_conv_2 = tf.keras.layers.Conv3D(filters=params['v_conv_2_filters'], kernel_size=params['v_conv_2_kernel'], strides=(params['v_conv_2_strides_0'], params['v_conv_2_strides_1'], params['v_conv_2_strides_2']), padding=params['v_conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(bn_1)
# ---
# Run of Residual Layers
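# Residual blocks on the voxel features. Blocks 1-3 keep the spatial shape and
# use identity skips (Add with the block input); blocks 4-6 downsample with
# strided Conv3D layers and add a parallel strided Conv3D on the skip path so
# the shapes still match. BatchNormalization is enabled after every convolution
# in this variant.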
bn_2 = tf.keras.layers.BatchNormalization()(v_conv_2)
# 1
v_conv_3 = tf.keras.layers.Conv3D(filters=params['v_conv_3_filters'], kernel_size=params['v_conv_3_kernel'], strides=(params['v_conv_3_strides_0'], params['v_conv_3_strides_1'], params['v_conv_3_strides_2']), padding=params['v_conv_3_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(bn_2)
bn_3 = tf.keras.layers.BatchNormalization()(v_conv_3)
v_spat_2 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_2'])(bn_3)
v_conv_4 = tf.keras.layers.Conv3D(filters=params['v_conv_4_filters'], kernel_size=params['v_conv_4_kernel'], strides=(params['v_conv_4_strides_0'], params['v_conv_4_strides_1'], params['v_conv_4_strides_2']), padding=params['v_conv_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_2)
bn_4 = tf.keras.layers.BatchNormalization()(v_conv_4)
v_add_0 = tf.keras.layers.Add()([bn_4, bn_2])
v_act_0 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_0)
# 2
v_conv_5 = tf.keras.layers.Conv3D(filters=params['v_conv_5_filters'], kernel_size=params['v_conv_5_kernel'], strides=(params['v_conv_5_strides_0'], params['v_conv_5_strides_1'], params['v_conv_5_strides_2']), padding=params['v_conv_5_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_0)
bn_5 = tf.keras.layers.BatchNormalization()(v_conv_5)
v_spat_3 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_3'])(bn_5)
v_conv_6 = tf.keras.layers.Conv3D(filters=params['v_conv_6_filters'], kernel_size=params['v_conv_6_kernel'], strides=(params['v_conv_6_strides_0'], params['v_conv_6_strides_1'], params['v_conv_6_strides_2']), padding=params['v_conv_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_3)
bn_6 = tf.keras.layers.BatchNormalization()(v_conv_6)
v_add_1 = tf.keras.layers.Add()([bn_6, v_act_0])
v_act_1 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_1)
# 3
v_conv_7 = tf.keras.layers.Conv3D(filters=params['v_conv_7_filters'], kernel_size=params['v_conv_7_kernel'], strides=(params['v_conv_7_strides_0'], params['v_conv_7_strides_1'], params['v_conv_7_strides_2']), padding=params['v_conv_7_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_1)
bn_7 = tf.keras.layers.BatchNormalization()(v_conv_7)
v_spat_4 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_4'])(bn_7)
v_conv_8 = tf.keras.layers.Conv3D(filters=params['v_conv_8_filters'], kernel_size=params['v_conv_8_kernel'], strides=(params['v_conv_8_strides_0'], params['v_conv_8_strides_1'], params['v_conv_8_strides_2']), padding=params['v_conv_8_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_4)
bn_8 = tf.keras.layers.BatchNormalization()(v_conv_8)
v_add_2 = tf.keras.layers.Add()([bn_8, v_act_1])
v_act_2 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_2)
# 4
v_conv_9 = tf.keras.layers.Conv3D(filters=params['v_conv_9_filters'], kernel_size=params['v_conv_9_kernel'], strides=(params['v_conv_9_strides_0'], params['v_conv_9_strides_1'], params['v_conv_9_strides_2']), padding=params['v_conv_9_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_2)
bn_9 = tf.keras.layers.BatchNormalization()(v_conv_9)
v_spat_5 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_5'])(bn_9)
v_conv_10 = tf.keras.layers.Conv3D(filters=params['v_conv_10_filters'], kernel_size=params['v_conv_10_kernel'], strides=(params['v_conv_10_strides_0'], params['v_conv_10_strides_1'], params['v_conv_10_strides_2']), padding=params['v_conv_10_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_5)
bn_10 = tf.keras.layers.BatchNormalization()(v_conv_10)
v_conv_11 = tf.keras.layers.Conv3D(filters=params['v_conv_11_filters'], kernel_size=params['v_conv_11_kernel'], strides=(params['v_conv_11_strides_0'], params['v_conv_11_strides_1'], params['v_conv_11_strides_2']), padding=params['v_conv_11_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_act_2)
bn_11 = tf.keras.layers.BatchNormalization()(v_conv_11)
v_add_3 = tf.keras.layers.Add()([bn_10, bn_11])
v_act_3 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_3)
# 5
v_conv_12 = tf.keras.layers.Conv3D(filters=params['v_conv_12_filters'], kernel_size=params['v_conv_12_kernel'], strides=(params['v_conv_12_strides_0'], params['v_conv_12_strides_1'], params['v_conv_12_strides_2']), padding=params['v_conv_12_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_3)
bn_12 = tf.keras.layers.BatchNormalization()(v_conv_12)
v_spat_6 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_6'])(bn_12)
v_conv_13 = tf.keras.layers.Conv3D(filters=params['v_conv_13_filters'], kernel_size=params['v_conv_13_kernel'], strides=(params['v_conv_13_strides_0'], params['v_conv_13_strides_1'], params['v_conv_13_strides_2']), padding=params['v_conv_13_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_6)
bn_13 = tf.keras.layers.BatchNormalization()(v_conv_13)
v_conv_14 = tf.keras.layers.Conv3D(filters=params['v_conv_14_filters'], kernel_size=params['v_conv_14_kernel'], strides=(params['v_conv_14_strides_0'], params['v_conv_14_strides_1'], params['v_conv_14_strides_2']), padding=params['v_conv_14_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_act_3)
bn_14 = tf.keras.layers.BatchNormalization()(v_conv_14)
v_add_4 = tf.keras.layers.Add()([bn_13, bn_14])
v_act_4 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_4)
# 6
v_conv_15 = tf.keras.layers.Conv3D(filters=params['v_conv_12_filters'], kernel_size=params['v_conv_12_kernel'], strides=(params['v_conv_12_strides_0'], params['v_conv_12_strides_1'], params['v_conv_12_strides_2']), padding=params['v_conv_12_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_4)
bn_15 = tf.keras.layers.BatchNormalization()(v_conv_15)
v_spat_7 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_6'])(bn_15)
v_conv_16 = tf.keras.layers.Conv3D(filters=params['v_conv_13_filters'], kernel_size=params['v_conv_13_kernel'], strides=(params['v_conv_13_strides_0'], params['v_conv_13_strides_1'], params['v_conv_13_strides_2']), padding=params['v_conv_13_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_7)
bn_16 = tf.keras.layers.BatchNormalization()(v_conv_16)
v_conv_17 = tf.keras.layers.Conv3D(filters=params['v_conv_14_filters'], kernel_size=params['v_conv_14_kernel'], strides=(params['v_conv_14_strides_0'], params['v_conv_14_strides_1'], params['v_conv_14_strides_2']), padding=params['v_conv_14_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_act_4)
bn_17 = tf.keras.layers.BatchNormalization()(v_conv_17)
v_add_5 = tf.keras.layers.Add()([bn_16, bn_17])
v_act_5 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_5)
# ---
# Final Conv Layers
bn_18 = tf.keras.layers.BatchNormalization()(v_act_5)
v_spat_8 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_8'])(bn_18)
v_conv_18 = tf.keras.layers.Conv3D(filters=params['v_conv_18_filters'], kernel_size=params['v_conv_18_kernel'], strides=(params['v_conv_18_strides_0'], params['v_conv_18_strides_1'], params['v_conv_18_strides_2']), padding=params['v_conv_18_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_spat_8)
# ---
# Dense Layers
v_flatten_0 = tf.keras.layers.Flatten()(v_conv_18)
bn_15 = tf.keras.layers.BatchNormalization()(v_flatten_0)
dense_1_v = tf.keras.layers.Dense(units=params['dense_1_v_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_15)
bn_16 = tf.keras.layers.BatchNormalization()(dense_1_v)
dense_2_v = tf.keras.layers.Dense(units=params['dense_2_v_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_16)
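# dense_2_v is the final embedding of the voxel tower; it is fused with the combined fluoro features further below
# (see dense_comb_v_0).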
# -----------------------------------------------------------------
# ---
# Entry Fluoro Layers
per_image_stand_1 = tf.keras.layers.Lambda(lambda frame: tf.image.per_image_standardization(frame))(input_fluoro_1)
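# tf.image.per_image_standardization rescales each fluoro frame to zero mean and unit variance; wrapping it in a
# Lambda layer lets it run as part of the Keras graph.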
bn_0 = tf.keras.layers.BatchNormalization()(per_image_stand_1)
conv_0_1 = tf.keras.layers.Conv2D(filters=params['conv_0_filters'], kernel_size=params['conv_0_kernel'], strides=params['conv_0_strides'], padding=params['conv_0_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(bn_0)
bn_1 = tf.keras.layers.BatchNormalization()(conv_0_1)
spat_0_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_0'])(bn_1)
conv_1_1 = tf.keras.layers.Conv2D(filters=params['conv_1_filters'], kernel_size=params['conv_1_kernel'], strides=params['conv_1_strides'], padding=params['conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_0_1)
# ---
# Pool After Initial Layers
pool_0_1 = tf.keras.layers.AveragePooling2D(pool_size=params['pool_0_size'], padding=params['pool_0_pad'])(conv_1_1)
# ---
# Run of Residual Layers
bn_2 = tf.keras.layers.BatchNormalization()(pool_0_1)
# 1
conv_2_1 = tf.keras.layers.Conv2D(filters=params['conv_2_filters'], kernel_size=params['conv_2_kernel'], strides=params['conv_2_strides'], padding=params['conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(bn_2)
bn_3 = tf.keras.layers.BatchNormalization()(conv_2_1)
spat_1_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_1'])(bn_3)
conv_3_1 = tf.keras.layers.Conv2D(filters=params['conv_3_filters'], kernel_size=params['conv_3_kernel'], strides=params['conv_3_strides'], padding=params['conv_3_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_1_1)
bn_4 = tf.keras.layers.BatchNormalization()(conv_3_1)
add_0 = tf.keras.layers.Add()([bn_4, bn_2])
act_0 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_0)
# 2
conv_4_1 = tf.keras.layers.Conv2D(filters=params['conv_4_filters'], kernel_size=params['conv_4_kernel'], strides=params['conv_4_strides'], padding=params['conv_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_0)
bn_5 = tf.keras.layers.BatchNormalization()(conv_4_1)
spat_2_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_1'])(bn_5)
conv_5_1 = tf.keras.layers.Conv2D(filters=params['conv_3_filters'], kernel_size=params['conv_3_kernel'], strides=params['conv_3_strides'], padding=params['conv_3_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_2_1)
bn_6 = tf.keras.layers.BatchNormalization()(conv_5_1)
add_1 = tf.keras.layers.Add()([act_0, bn_6])
act_1 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_1)
# 3
conv_6_1 = tf.keras.layers.Conv2D(filters=params['conv_6_filters'], kernel_size=params['conv_6_kernel'], strides=params['conv_6_strides'], padding=params['conv_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_1)
bn_7 = tf.keras.layers.BatchNormalization()(conv_6_1)
spat_3_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_3'])(bn_7)
conv_7_1 = tf.keras.layers.Conv2D(filters=params['conv_7_filters'], kernel_size=params['conv_7_kernel'], strides=params['conv_7_strides'], padding=params['conv_7_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_3_1)
bn_8 = tf.keras.layers.BatchNormalization()(conv_7_1)
add_2 = tf.keras.layers.Add()([act_1, bn_8])
act_2 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_2)
# 4
conv_8_1 = tf.keras.layers.Conv2D(filters=params['conv_8_filters'], kernel_size=params['conv_8_kernel'], strides=params['conv_8_strides'], padding=params['conv_8_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_2)
bn_9 = tf.keras.layers.BatchNormalization()(conv_8_1)
spat_4_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_4'])(bn_9)
conv_9_1 = tf.keras.layers.Conv2D(filters=params['conv_9_filters'], kernel_size=params['conv_9_kernel'], strides=params['conv_9_strides'], padding=params['conv_9_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_4_1)
bn_10 = tf.keras.layers.BatchNormalization()(conv_9_1)
add_3 = tf.keras.layers.Add()([act_2, bn_10])
act_3 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_3)
# 5
conv_10_1 = tf.keras.layers.Conv2D(filters=params['conv_10_filters'], kernel_size=params['conv_10_kernel'], strides=params['conv_10_strides'], padding=params['conv_10_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_3)
bn_11 = tf.keras.layers.BatchNormalization()(conv_10_1)
spat_5_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_5'])(bn_11)
conv_11_1 = tf.keras.layers.Conv2D(filters=params['conv_11_filters'], kernel_size=params['conv_11_kernel'], strides=params['conv_11_strides'], padding=params['conv_11_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_5_1)
bn_12 = tf.keras.layers.BatchNormalization()(conv_11_1)
conv_12_1 = tf.keras.layers.Conv2D(filters=params['conv_12_filters'], kernel_size=params['conv_12_kernel'], strides=params['conv_12_strides'], padding=params['conv_12_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_3)
bn_13 = tf.keras.layers.BatchNormalization()(conv_12_1)
add_4 = tf.keras.layers.Add()([bn_12, bn_13])
act_4 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_4)
# 6
conv_13_1 = tf.keras.layers.Conv2D(filters=params['conv_13_filters'], kernel_size=params['conv_13_kernel'], strides=params['conv_13_strides'], padding=params['conv_13_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_4)
bn_14 = tf.keras.layers.BatchNormalization()(conv_13_1)
spat_6_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_6'])(bn_14)
conv_14_1 = tf.keras.layers.Conv2D(filters=params['conv_14_filters'], kernel_size=params['conv_14_kernel'], strides=params['conv_14_strides'], padding=params['conv_14_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_6_1)
bn_15 = tf.keras.layers.BatchNormalization()(conv_14_1)
conv_15_1 = tf.keras.layers.Conv2D(filters=params['conv_15_filters'], kernel_size=params['conv_15_kernel'], strides=params['conv_15_strides'], padding=params['conv_15_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_4)
bn_16 = tf.keras.layers.BatchNormalization()(conv_15_1)
add_5 = tf.keras.layers.Add()([bn_15, bn_16])
act_5 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_5)
# 7
conv_16_1 = tf.keras.layers.Conv2D(filters=params['conv_16_filters'], kernel_size=params['conv_15_kernel'], strides=params['conv_16_strides'], padding=params['conv_16_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_5)
bn_17 = tf.keras.layers.BatchNormalization()(conv_16_1)
spat_7_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_7'])(bn_17)
conv_17_1 = tf.keras.layers.Conv2D(filters=params['conv_17_filters'], kernel_size=params['conv_17_kernel'], strides=params['conv_17_strides'], padding=params['conv_17_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_7_1)
bn_18 = tf.keras.layers.BatchNormalization()(conv_17_1)
conv_18_1 = tf.keras.layers.Conv2D(filters=params['conv_18_filters'], kernel_size=params['conv_18_kernel'], strides=params['conv_18_strides'], padding=params['conv_18_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_5)
bn_19 = tf.keras.layers.BatchNormalization()(conv_18_1)
add_6 = tf.keras.layers.Add()([bn_18, bn_19])
act_6 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_6)
# ---
# Final Conv Layers
bn_20 = tf.keras.layers.BatchNormalization()(act_6)
spat_8_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_8'])(bn_20)
conv_19_1 = tf.keras.layers.Conv2D(filters=params['conv_19_filters'], kernel_size=params['conv_19_kernel'], strides=params['conv_19_strides'], padding=params['conv_19_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_8_1)
# ---
# Dense Layers
flatten_0 = tf.keras.layers.Flatten()(conv_19_1)
bn_21 = tf.keras.layers.BatchNormalization()(flatten_0)
dense_0_f_1 = tf.keras.layers.Dense(units=params['dense_0_f_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_21)
bn_22 = tf.keras.layers.BatchNormalization()(dense_0_f_1)
dense_1_f_1 = tf.keras.layers.Dense(units=params['dense_1_f_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_22)
# -----------------------------------------------------------------
# ---
# Entry Fluoro Layers
per_image_stand_2 = tf.keras.layers.Lambda(lambda frame: tf.image.per_image_standardization(frame))(input_fluoro_2)
bn_0 = tf.keras.layers.BatchNormalization()(per_image_stand_2)
conv_0_2 = tf.keras.layers.Conv2D(filters=params['conv_0_filters'], kernel_size=params['conv_0_kernel'], strides=params['conv_0_strides'], padding=params['conv_0_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(bn_0)
bn_1 = tf.keras.layers.BatchNormalization()(conv_0_2)
spat_0_2 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_0'])(bn_1)
conv_1_2 = tf.keras.layers.Conv2D(filters=params['conv_1_filters'], kernel_size=params['conv_1_kernel'], strides=params['conv_1_strides'], padding=params['conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_0_2)
# ---
# Pool After Initial Layers
pool_0_2 = tf.keras.layers.AveragePooling2D(pool_size=params['pool_0_size'], padding=params['pool_0_pad'])(conv_1_2)
# ---
# Run of Residual Layers
bn_2 = tf.keras.layers.BatchNormalization()(pool_0_2)
# 1
conv_2_2 = tf.keras.layers.Conv2D(filters=params['conv_2_filters'], kernel_size=params['conv_2_kernel'], strides=params['conv_2_strides'], padding=params['conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(bn_2)
bn_3 = tf.keras.layers.BatchNormalization()(conv_2_2)
spat_1_2 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_1'])(bn_3)
conv_3_2 = tf.keras.layers.Conv2D(filters=params['conv_3_filters'], kernel_size=params['conv_3_kernel'], strides=params['conv_3_strides'], padding=params['conv_3_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_1_2)
bn_4 = tf.keras.layers.BatchNormalization()(conv_3_2)
add_0 = tf.keras.layers.Add()([bn_4, bn_2])
act_0 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_0)
# 2
conv_4_2 = tf.keras.layers.Conv2D(filters=params['conv_4_filters'], kernel_size=params['conv_4_kernel'], strides=params['conv_4_strides'], padding=params['conv_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_0)
bn_5 = tf.keras.layers.BatchNormalization()(conv_4_2)
spat_2_2 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_2'])(bn_5)
conv_5_2 = tf.keras.layers.Conv2D(filters=params['conv_3_filters'], kernel_size=params['conv_3_kernel'], strides=params['conv_3_strides'], padding=params['conv_3_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_2_2)
bn_6 = tf.keras.layers.BatchNormalization()(conv_5_2)
add_1 = tf.keras.layers.Add()([act_0, bn_6])
act_1 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_1)
# 3
conv_6_2 = tf.keras.layers.Conv2D(filters=params['conv_6_filters'], kernel_size=params['conv_6_kernel'], strides=params['conv_6_strides'], padding=params['conv_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_1)
bn_7 = tf.keras.layers.BatchNormalization()(conv_6_2)
spat_3_2 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_3'])(bn_7)
conv_7_2 = tf.keras.layers.Conv2D(filters=params['conv_7_filters'], kernel_size=params['conv_7_kernel'], strides=params['conv_7_strides'], padding=params['conv_7_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_3_2)
bn_8 = tf.keras.layers.BatchNormalization()(conv_7_2)
add_2 = tf.keras.layers.Add()([act_1, bn_8])
act_2 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_2)
# 4
conv_8_2 = tf.keras.layers.Conv2D(filters=params['conv_8_filters'], kernel_size=params['conv_8_kernel'], strides=params['conv_8_strides'], padding=params['conv_8_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_2)
bn_9 = tf.keras.layers.BatchNormalization()(conv_8_2)
spat_4_2 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_4'])(bn_9)
conv_9_2 = tf.keras.layers.Conv2D(filters=params['conv_9_filters'], kernel_size=params['conv_9_kernel'], strides=params['conv_9_strides'], padding=params['conv_9_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_4_2)
bn_10 = tf.keras.layers.BatchNormalization()(conv_9_2)
add_3 = tf.keras.layers.Add()([act_2, bn_10])
act_3 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_3)
# 5
conv_10_2 = tf.keras.layers.Conv2D(filters=params['conv_10_filters'], kernel_size=params['conv_10_kernel'], strides=params['conv_10_strides'], padding=params['conv_10_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_3)
bn_11 = tf.keras.layers.BatchNormalization()(conv_10_2)
spat_5_2 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_5'])(bn_11)
conv_11_2 = tf.keras.layers.Conv2D(filters=params['conv_11_filters'], kernel_size=params['conv_11_kernel'], strides=params['conv_11_strides'], padding=params['conv_11_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_5_2)
bn_12 = tf.keras.layers.BatchNormalization()(conv_11_2)
conv_12_2 = tf.keras.layers.Conv2D(filters=params['conv_12_filters'], kernel_size=params['conv_12_kernel'], strides=params['conv_12_strides'], padding=params['conv_12_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_3)
bn_13 = tf.keras.layers.BatchNormalization()(conv_12_2)
add_4 = tf.keras.layers.Add()([bn_12, bn_13])
act_4 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_4)
# 6
conv_13_2 = tf.keras.layers.Conv2D(filters=params['conv_13_filters'], kernel_size=params['conv_13_kernel'], strides=params['conv_13_strides'], padding=params['conv_13_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_4)
bn_14 = tf.keras.layers.BatchNormalization()(conv_13_2)
spat_6_2 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_6'])(bn_14)
conv_14_2 = tf.keras.layers.Conv2D(filters=params['conv_14_filters'], kernel_size=params['conv_14_kernel'], strides=params['conv_14_strides'], padding=params['conv_14_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_6_2)
bn_15 = tf.keras.layers.BatchNormalization()(conv_14_2)
conv_15_2 = tf.keras.layers.Conv2D(filters=params['conv_15_filters'], kernel_size=params['conv_15_kernel'], strides=params['conv_15_strides'], padding=params['conv_15_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_4)
bn_16 = tf.keras.layers.BatchNormalization()(conv_15_2)
add_5 = tf.keras.layers.Add()([bn_15, bn_16])
act_5 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_5)
# 7
conv_16_2 = tf.keras.layers.Conv2D(filters=params['conv_16_filters'], kernel_size=params['conv_15_kernel'], strides=params['conv_16_strides'], padding=params['conv_16_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_5)
bn_17 = tf.keras.layers.BatchNormalization()(conv_16_2)
spat_7_2 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_7'])(bn_17)
conv_17_2 = tf.keras.layers.Conv2D(filters=params['conv_17_filters'], kernel_size=params['conv_17_kernel'], strides=params['conv_17_strides'], padding=params['conv_17_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_7_2)
bn_18 = tf.keras.layers.BatchNormalization()(conv_17_2)
conv_18_2 = tf.keras.layers.Conv2D(filters=params['conv_18_filters'], kernel_size=params['conv_18_kernel'], strides=params['conv_18_strides'], padding=params['conv_18_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_5)
bn_19 = tf.keras.layers.BatchNormalization()(conv_18_2)
add_6 = tf.keras.layers.Add()([bn_18, bn_19])
act_6 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_6)
# ---
# Final Conv Layers
bn_20 = tf.keras.layers.BatchNormalization()(act_6)
spat_8_2 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_8'])(bn_20)
conv_19_2 = tf.keras.layers.Conv2D(filters=params['conv_19_filters'], kernel_size=params['conv_19_kernel'], strides=params['conv_19_strides'], padding=params['conv_19_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_8_2)
# ---
# Dense Layers
flatten_0 = tf.keras.layers.Flatten()(conv_19_2)
bn_21 = tf.keras.layers.BatchNormalization()(flatten_0)
dense_0_f_2 = tf.keras.layers.Dense(units=params['dense_0_f_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_21)
bn_22 = tf.keras.layers.BatchNormalization()(dense_0_f_2)
dense_1_f_2 = tf.keras.layers.Dense(units=params['dense_1_f_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_22)
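# The second fluoro tower repeats the same layer layout for input_fluoro_2; because every layer object is
# re-instantiated, the two towers do not share weights.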
# -----------------------------------------------------------------
bn_0 = tf.keras.layers.BatchNormalization()(input_cali)
dense_1_cali = tf.keras.layers.Dense(units=params['dense_1_cali_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_0)
bn_1 = tf.keras.layers.BatchNormalization()(dense_1_cali)
dense_2_cali = tf.keras.layers.Dense(units=params['dense_2_cali_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_1)
bn_2 = tf.keras.layers.BatchNormalization()(dense_2_cali)
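# Small dense branch for the calibration input; its normalised output (bn_2) feeds into the Add() calls below.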
# -----------------------------------------------------------------
# ---
# Combine the fluoro inputs together
dense_comb_f_0 = tf.keras.layers.Add()([dense_1_f_1, dense_1_f_2])
dense_comb_act_0 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(dense_comb_f_0)
bn_0 = tf.keras.layers.BatchNormalization()(dense_comb_act_0)
dense_comb_f_1 = tf.keras.layers.Dense(units=params['dense_comb_1_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_0)
bn_1 = tf.keras.layers.BatchNormalization()(dense_comb_f_1)
dense_comb_f_2 = tf.keras.layers.Dense(units=params['dense_comb_2_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_1)
# -----------------------------------------------------------------
# ---
# Combine the fluoro with the vox
dense_comb_v_0 = tf.keras.layers.Add()([dense_comb_f_2, dense_2_v])
dense_comb_v_act_0 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(dense_comb_v_0)
bn_0 = tf.keras.layers.BatchNormalization()(dense_comb_v_act_0)
dense_comb_v_1 = tf.keras.layers.Dense(units=params['dense_comb_v_1_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_0)
bn_1 = tf.keras.layers.BatchNormalization()(dense_comb_v_1)
dense_comb_v_2 = tf.keras.layers.Dense(units=params['dense_comb_v_2_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_1)
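# The two fluoro embeddings were fused by element-wise addition above; here that result is fused with the voxel
# embedding (dense_2_v) in the same way and refined by two more dense layers.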
# -----------------------------------------------------------------
top_comb = tf.keras.layers.Add()([dense_comb_v_2, bn_2])
top_comb_act = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(top_comb)
top_dense_1 = tf.keras.layers.Dense(units=6, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(top_comb_act)
bn_0 = tf.keras.layers.BatchNormalization()(top_dense_1)
top_dense_2 = tf.keras.layers.Dense(units=6, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(top_dense_1)
add_0 = tf.keras.layers.Add()([top_dense_2, bn_2])
act_0 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(add_0)
top_dense_1 = tf.keras.layers.Dense(units=6, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(act_0)
bn_0 = tf.keras.layers.BatchNormalization()(top_dense_1)
top_dense_2 = tf.keras.layers.Dense(units=6, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(top_dense_1)
add_0 = tf.keras.layers.Add()([top_dense_2, act_0])
act_0 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(add_0)
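# Two 6-unit dense blocks with additive skip connections finish the head; note both BatchNormalization outputs
# (bn_0) go unused because top_dense_2 is fed from top_dense_1 directly.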
# -----------------------------------------------------------------
# -----------------------------------------------------------------
# Main Output
main_output = tf.keras.layers.Dense(units=params['main_output_units'], activation=params['main_output_act'], kernel_initializer=params['kern_init'], name='main_output')(act_0)
# -----------------------------------------------------------------
# Model Housekeeping
model = tf.keras.Model(inputs=[input_vox, input_fluoro_1, input_fluoro_2, input_cali], outputs=main_output)
model.compile(optimizer=params['model_opt'](lr=params['learning_rate']), loss=params['model_loss'], metrics=[params['model_metric']])
tf.keras.utils.plot_model(model, os.path.abspath(os.path.join(save_dir, expr_name + '_' + expr_no + '.png')), show_shapes=True)
model.summary()
# -----------------------------------------------------------------
vox_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/voxels_pad.h5py'), 'r')
vox_init = vox_file['vox_dset']
image_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/images.h5py'), 'r')
image_init = image_file['image_dset']
label_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/labels.h5py'), 'r')
label_init = label_file['labels_dset']
cali_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/calibration.h5py'), 'r')
cali_init = cali_file['cali_len3_rot']
def split_train_test(shape, num_of_samples=None, ratio=0.2):
if num_of_samples is None:
shuffled_indices = np.random.choice(shape, size=shape, replace=False)
else:
shuffled_indices = np.random.choice(shape, size=num_of_samples, replace=False)
test_set_size = int(len(shuffled_indices) * ratio)  # use the ratio argument rather than a hard-coded 0.2
test_indx = shuffled_indices[:test_set_size]
train_indx = shuffled_indices[test_set_size:]
return test_indx, train_indx
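# split_train_test shuffles the available indices and returns (test_indices, train_indices); the same helper is
# reused below to carve a validation set out of the training indices.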
num_of_samples = None
test_indxs, train_sup_indxs = split_train_test(len(label_init), num_of_samples=num_of_samples)
val_indxs, train_indxs = split_train_test(len(train_sup_indxs))
val_indxs = train_sup_indxs[val_indxs]
train_indxs = train_sup_indxs[train_indxs]
test_indxs = sorted(list(test_indxs))
val_indxs = sorted(list(val_indxs))
train_indxs = sorted(list(train_indxs))
hist_file = open(os.path.join(save_dir, expr_name + '_hist_objects_' + expr_no + '.pkl'), 'wb')
var_dict = {}
var_dict['test_indxs'] = test_indxs
var_dict['val_indxs'] = val_indxs
var_dict['train_indxs'] = train_indxs
vox_mat_train = vox_init[:]
vox_mat_val = vox_mat_train[val_indxs]
vox_mat_train = vox_mat_train[train_indxs]
vox_file.close()
image_mat_train = image_init[:]
image_mat_val = image_mat_train[val_indxs]
image_mat_train = image_mat_train[train_indxs]
image_file.close()
cali_mat_train = cali_init[:]
cali_mat_val = cali_mat_train[val_indxs]
cali_mat_train = cali_mat_train[train_indxs]
cali_file.close()
label_mat_train = label_init[:]
label_mat_val = label_mat_train[val_indxs]
label_mat_train = label_mat_train[train_indxs]
label_file.close()
# -----------------------------------------------------------------
print('\n\ncompletely loaded...\n\n')
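# np.expand_dims(..., axis=-1) appends a channel axis so the 2D/3D conv stacks receive channels-last tensors
# (this assumes channel_order was configured as 'channels_last' earlier in the script).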
result = model.fit(x={'input_vox': np.expand_dims(vox_mat_train, axis=-1), 'input_fluoro_1': np.expand_dims(image_mat_train[:, 0, :, :], axis=-1), 'input_fluoro_2': np.expand_dims(image_mat_train[:, 1, :, :], axis=-1), 'input_cali': cali_mat_train}, y=label_mat_train, validation_data=([np.expand_dims(vox_mat_val, axis=-1), np.expand_dims(image_mat_val[:, 0, :, :], axis=-1), np.expand_dims(image_mat_val[:, 1, :, :], axis=-1), cali_mat_val], label_mat_val), epochs=params['model_epochs'], batch_size=params['model_batchsize'], shuffle=True, verbose=2)
model.save(os.path.abspath(os.path.join(save_dir, expr_name + '_' + expr_no + '.h5')))
var_dict['result'] = result.history
pickle.dump(var_dict, hist_file)
hist_file.close()
``` |
{
"source": "JohnDreamer/HCCM",
"score": 2
} |
#### File: HCCM/misc/loss_wrapper_hc.py
```python
import torch
import misc.utils as utils
from misc.rewards import init_scorer, get_self_critical_reward
import json
# def merge_sent(seq):
# tmp = seq.new_zeros((seq.shape[0] / 2, seq.shape[1]))
# for i in range(seq.shape[0] / 2):
# index_start = 0
# for j in range(seq.shape[1]):
# if seq[i * 2, j] == 0:
# break
# else:
# index_start += 1
# for j in range(index_start):
# tmp[i, j] = seq[i * 2, index_start - j - 1]
# # index_end = 0
# for j in range(seq.shape[1]):
# if seq[i * 2 + 1, j] == 0:
# break
# else:
# if index_start != 0:
# tmp[i, j] = seq[i * 2 + 1, j]
# else:
# if j + index_start - 1 == seq.shape[1]:
# break
# tmp[i, j + index_start - 1] = seq[i * 2 + 1, j]
# return tmp
class LossWrapper(torch.nn.Module):
def __init__(self, model, opt):
super(LossWrapper, self).__init__()
self.opt = opt
self.model = model
if opt.label_smoothing > 0:
self.crit = utils.LabelSmoothing(smoothing=opt.label_smoothing)
else:
self.crit = utils.LanguageModelCriterion()
if opt.enable_no_interaction == 1:
counts = json.load(open('data/part_hoi_class_count.json', 'r'))
hoi_count = counts['hoi_count']
part_state_count = counts['part_state_count']
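# Positive-class weights for the BCE losses are set inversely proportional to each class's count, then rescaled so
# the most frequent class ends up with a weight of 1/50.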
hoi_pos_weight = torch.Tensor(hoi_count)
part_state_pos_weight = torch.Tensor(part_state_count)
hoi_pos_weight = 1.0 / (hoi_pos_weight / torch.min(hoi_pos_weight))
part_state_pos_weight = 1.0 / (part_state_pos_weight / torch.min(part_state_pos_weight))
hoi_pos_weight = hoi_pos_weight / torch.min(hoi_pos_weight) / 50.0
part_state_pos_weight = part_state_pos_weight / torch.min(part_state_pos_weight) / 50.0
hoi_pos_weight = hoi_pos_weight.unsqueeze(0)
part_state_pos_weight = part_state_pos_weight.unsqueeze(0)
self.hoi_crit = utils.ClassificationCriterionBCE(hoi_pos_weight)
self.part_state_crit = utils.ClassificationCriterionBCE(part_state_pos_weight)
else:
self.hoi_crit = utils.ClassificationCriterionBCE()
self.part_state_crit = utils.ClassificationCriterionBCE()
self.obj_crit = utils.ClassificationCriterion()
self.rl_crit = utils.RewardCriterion()
def forward(self, fc_feats, att_feats, labels, masks, att_masks, gts, gt_indices, sc_flag, body_feats, part_feats, body_masks, part_masks):
out = {}
if not sc_flag:
loss = self.crit(self.model(fc_feats, att_feats, labels, body_feats, part_feats, body_masks, part_masks, att_masks), labels[:,1:], masks[:,1:])
else:
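# Self-critical sequence training: the greedy decode acts as the reward baseline and the sampled decode is the
# sequence actually reinforced, so the policy gradient uses the sampled reward relative to the greedy one.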
self.model.eval()
with torch.no_grad():
greedy_res, _ = self.model(fc_feats, att_feats, body_feats, part_feats, body_masks, part_masks, att_masks, mode='sample')
self.model.train()
gen_result, sample_logprobs = self.model(fc_feats, att_feats, body_feats, part_feats, body_masks, part_masks, att_masks, opt={'sample_method':'sample'}, mode='sample')
gts = [gts[_] for _ in gt_indices.tolist()]
# greedy_res_merge = merge_sent(greedy_res)
# gen_result_merge = merge_sent(gen_result)
# reward = get_self_critical_reward(greedy_res_merge, gts, gen_result_merge, self.opt)
reward = get_self_critical_reward(greedy_res, gts, gen_result, self.opt)
reward = torch.from_numpy(reward).float().to(gen_result.device)
# reward = reward.unsqueeze(1).expand(-1, 2)
loss = self.rl_crit(sample_logprobs, gen_result.data, reward)
out['reward'] = reward[:,0].mean()
out['loss'] = loss
return out
``` |
{
"source": "JohnDTill/Forscape",
"score": 3
} |
#### File: Forscape/meta/colours.py
```python
from utils import cpp, table_reader
def to_id(s):
return ''.join(filter(str.isalnum, s)).upper()
def main():
entries = table_reader.csv_to_list_of_tuples(
csv_filepath="colours.csv",
)
headers = table_reader.csv_headers(
csv_filepath="colours.csv",
)
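# Assumed CSV layout: the first column names the colour role and each remaining column is one theme preset; the
# preset columns become the PRESET_* constants and the rows become the COLOUR_* constants below.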
header_writer = cpp.HeaderWriter(
name="themes",
inner_namespace="Typeset",
includes=["string_view", "QColor"],
extra_guards=["HOPE_TYPESET_HEADLESS"],
)
for i in range(1, len(headers)):
header = to_id(headers[i])
header_writer.write(f"constexpr size_t PRESET_{header} = {i-1};\n")
header_writer.write(f"constexpr size_t NUM_COLOUR_PRESETS = {len(headers)-1};\n\n")
for i in range(0, len(entries)):
role = to_id(entries[i].role)
header_writer.write(f"constexpr size_t COLOUR_{role} = {i};\n")
header_writer.write(f"constexpr size_t NUM_COLOUR_ROLES = {len(entries)};\n\n")
header_writer.write(
"void setColour(size_t role, const QColor& colour) noexcept;\n"
"const QColor& getColour(size_t role) noexcept;\n"
"void setPreset(size_t preset) noexcept;\n"
"std::string_view getColourName(size_t role) noexcept;\n"
"std::string_view getPresetName(size_t preset) noexcept;\n\n"
)
header_writer.finalize()
with open("../src/generated/typeset_themes.cpp", "w", encoding="utf-8") as codegen_file:
codegen_file.write("#ifndef HOPE_TYPESET_HEADLESS\n\n")
codegen_file.write("#include \"typeset_themes.h\"\n\n")
codegen_file.write("#include <array>\n\n")
codegen_file.write("namespace Hope {\n\n")
codegen_file.write("namespace Typeset {\n\n")
codegen_file.write("static std::array<QColor, NUM_COLOUR_ROLES> colours;\n\n")
codegen_file.write("static constexpr std::array<std::string_view, NUM_COLOUR_ROLES> colour_names {\n")
for e in entries:
codegen_file.write(f" \"{e.role}\",\n")
codegen_file.write("};\n\n")
codegen_file.write("static constexpr std::array<std::string_view, NUM_COLOUR_PRESETS> preset_names {\n")
for i in range(1, len(headers)):
codegen_file.write(f" \"{headers[i]}\",\n")
codegen_file.write("};\n\n")
codegen_file.write("static const std::array<std::array<QColor, NUM_COLOUR_ROLES>, NUM_COLOUR_PRESETS> presets {\n")
for i in range(1, len(headers)):
codegen_file.write(" std::array<QColor, NUM_COLOUR_ROLES>({\n")
for e in entries:
colour = f"QColor({e[i].replace('|', ', ')})"
codegen_file.write(f" {colour},\n")
codegen_file.write(" }),\n")
codegen_file.write("};\n\n")
codegen_file.write(
"void setColour(size_t role, const QColor& colour) noexcept {\n"
" assert(role < NUM_COLOUR_ROLES);\n"
" colours[role] = colour;\n"
"}\n\n"
"const QColor& getColour(size_t role) noexcept {\n"
" assert(role < NUM_COLOUR_ROLES);\n"
" return colours[role];\n"
"}\n\n"
"void setPreset(size_t preset) noexcept {\n"
" assert(preset < NUM_COLOUR_PRESETS);\n"
" colours = presets[preset];\n"
"}\n\n"
"std::string_view getColourName(size_t role) noexcept {\n"
" assert(role < NUM_COLOUR_ROLES);\n"
" return colour_names[role];\n"
"}\n\n"
"std::string_view getPresetName(size_t preset) noexcept {\n"
" assert(preset < NUM_COLOUR_PRESETS);\n"
" return preset_names[preset];\n"
"}\n\n"
)
codegen_file.write("\n}\n\n}\n\n#endif\n")
if __name__ == "__main__":
main()
```
#### File: Forscape/meta/construct_codes.py
```python
import os
from utils import cpp, table_reader
import glob
def main():
constructs = table_reader.csv_to_list_of_tuples(
csv_filepath="construct_codes.csv",
tuple_name="Construct",
)
header_writer = cpp.HeaderWriter(
name="construct_codes",
)
header_writer.write("constexpr char OPEN = 2;\n"
"constexpr char CLOSE = 3;\n\n")
curr = 1
for con in constructs:
header_writer.write(f"constexpr char {con.name.upper()} = {curr};\n")
curr += 1
header_writer.write("\n")
header_writer.write("#define HOPE_SERIAL_NULLARY_CASES")
for name in [entry.name for entry in constructs if entry.arity == "0"]:
header_writer.write(f" \\\n case {name.upper()}:")
header_writer.write("\n")
header_writer.write("#define HOPE_SERIAL_UNARY_CASES")
for name in [entry.name for entry in constructs if entry.arity == "1"]:
header_writer.write(f" \\\n case {name.upper()}:")
header_writer.write("\n")
header_writer.write("#define HOPE_SERIAL_BINARY_CASES")
for name in [entry.name for entry in constructs if entry.arity == "2"]:
header_writer.write(f" \\\n case {name.upper()}:")
header_writer.write("\n")
header_writer.write("#define HOPE_SERIAL_MATRIX_CASES")
for name in [entry.name for entry in constructs if entry.arity == "nxm"]:
header_writer.write(f" \\\n case {name.upper()}:")
header_writer.write("\n")
header_writer.write("\n")
header_writer.write("#define HOPE_TYPESET_PARSER_CASES")
for entry in constructs:
name = entry.name
header_writer.write(f" \\\n case {name.upper()}: TypesetSetup")
if entry.arity == "0":
header_writer.write(f"Nullary({name});")
elif entry.arity == "nxm":
header_writer.write(f"Matrix({name});")
elif entry.arity == "2xn":
header_writer.write(f"Construct({name}, static_cast<uint8_t>(src[index++]));")
else:
header_writer.write(f"Construct({name},);")
header_writer.write("\n")
header_writer.finalize()
header_writer = cpp.HeaderWriter(
name="all_constructs",
inner_namespace="Typeset",
includes=[f"typeset_{entry.name.lower()}.h" for entry in constructs],
)
header_writer.finalize()
for entry in constructs:
if entry.implemented == "y":
continue
header_writer = cpp.HeaderWriter(
name="typeset_{entry.name.lower()}",
includes=["typeset_construct.h", "typeset_subphrase.h"],
)
header_writer.write(f"class {entry.name} final : public Construct {{ \n")
header_writer.write("public:\n")
if entry.arity == "1":
header_writer.write(f" {entry.name}(){{\n"
" setupUnaryArg();\n"
" }\n\n")
elif entry.arity == "2":
header_writer.write(f" {entry.name}(){{\n"
" setupBinaryArgs();\n"
" }\n\n")
elif entry.arity == "2xn":
header_writer.write(f" {entry.name}(uint8_t n){{\n"
" setupNAargs(2*n);\n"
" }\n\n")
header_writer.write(" virtual void writeArgs(std::string& out, size_t& curr) "
"const noexcept override {\n"
" out[curr++] = static_cast<uint8_t>(numArgs()/2);\n"
" }\n\n")
header_writer.write(" virtual size_t dims() const noexcept override { "
"return 1; }\n")
elif entry.arity == "nxm":
header_writer.write(" uint16_t rows;\n"
" uint16_t cols;\n\n")
header_writer.write(f" {entry.name}(uint16_t rows, uint16_t cols)\n"
" : rows(rows), cols(cols) {\n"
" setupNAargs(rows*cols);\n"
" }\n\n")
header_writer.write(" virtual void writeArgs(std::string& out, size_t& curr) "
"const noexcept override {\n"
" out[curr++] = static_cast<uint8_t>(rows);\n"
" out[curr++] = static_cast<uint8_t>(cols);\n"
" }\n\n")
header_writer.write(" virtual size_t dims() const noexcept override { "
"return 2; }\n")
header_writer.write(" virtual char constructCode() const noexcept override { return ")
header_writer.write(entry.name.upper())
header_writer.write("; }\n")
if entry.script_child == "y":
header_writer.write(" virtual bool increasesScriptDepth() const noexcept override "
"{ return true; }\n")
if entry.parent == "BigSymbol0":
header_writer.write(
"\n virtual void updateSizeSpecific() noexcept override {\n"
f" width = getWidth(SEM_DEFAULT, parent->script_level, \"{entry.label}\");\n"
" above_center = getAboveCenter(SEM_DEFAULT, parent->script_level);\n"
" under_center = getUnderCenter(SEM_DEFAULT, parent->script_level);\n"
" }\n"
"\n"
" virtual void paintSpecific(Painter& painter) const override {\n"
f" painter.drawSymbol(x, y, \"{entry.label}\");\n"
" }\n"
)
elif entry.parent == "BigSymbol1":
header_writer.write(
" double symbol_width;\n"
"\n"
" virtual void updateSizeSpecific() noexcept override {\n"
f" symbol_width = getWidth(SEM_DEFAULT, parent->script_level, \"{entry.label}\");\n"
" width = std::max(symbol_width, child()->width);\n"
" above_center = getAboveCenter(SEM_DEFAULT, parent->script_level);\n"
" under_center = getUnderCenter(SEM_DEFAULT, parent->script_level) + child()->height();\n"
" }\n"
"\n"
" virtual void updateChildPositions() override {\n"
" child()->x = x + (width - child()->width)/2;\n"
" child()->y = y + height() - child()->height();\n"
" }\n"
"\n"
" virtual void paintSpecific(Painter& painter) const override {\n"
" double symbol_x = x + (width - symbol_width) / 2;\n"
f" painter.drawSymbol(symbol_x, y, \"{entry.label}\");\n"
" }\n"
)
elif entry.parent == "BigSymbol2":
header_writer.write(
" double symbol_width;\n"
"\n"
"virtual void updateSizeSpecific() noexcept override {\n"
f" symbol_width = 1*getWidth(SEM_DEFAULT, parent->script_level, \"{entry.label}\");\n"
" width = std::max(symbol_width, std::max(first()->width, second()->width));\n"
" above_center = getAboveCenter(SEM_DEFAULT, parent->script_level) + first()->height();\n"
" under_center = 1*getUnderCenter(SEM_DEFAULT, parent->script_level) + second()->height();\n"
" }\n"
"\n"
" virtual void updateChildPositions() override {\n"
" first()->x = x + (width - first()->width)/2;\n"
" first()->y = y;\n"
" second()->x = x + (width - second()->width)/2;\n"
" second()->y = y + height() - second()->height();\n"
" }\n"
"\n"
" virtual void paintSpecific(Painter& painter) const override {\n"
" double symbol_x = x + (width - symbol_width) / 2;\n"
f" painter.drawSymbol(symbol_x, y + second()->height(), \"{entry.label}\");\n"
" }\n"
)
header_writer.write("};\n\n")
header_writer.finalize()
old_constructs = table_reader.csv_to_list_of_tuples(
csv_filepath="cache/construct_codes.csv",
tuple_name="OldConstruct",
)
changes = {}
for i in range(0, len(old_constructs)):
oc = old_constructs[i]
for j in range(0, len(constructs)):
c = constructs[j]
if oc.name == c.name:
if i != j:
changes[chr(i+1)] = chr(j+1)
break
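# If construct codes shifted relative to the cached table, build a char-to-char remapping so existing serialized
# test files can be rewritten in place below.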
if len(changes) > 0:
dirs = ["../test"] #, "../example"
for dir in dirs:
files = glob.iglob(dir + '**/**', recursive=True)
files = [f for f in files if os.path.isfile(f)]
for f in files:
with open(f, 'r', encoding="utf-8") as file:
filedata = file.read()
#Since '' is a construct code, search and replace is complicated
newstr = ""
for i in range(0, len(filedata)):
newstr += filedata[i]
if filedata[i] == '':
i += 1
assert i < len(filedata)
if filedata[i] in changes.keys():
newstr += changes.get(filedata[i])
else:
newstr += filedata[i]
filedata = newstr
with open(f, 'w', encoding="utf-8") as file:
file.write(filedata)
if __name__ == "__main__":
main()
```
#### File: Forscape/meta/errors.py
```python
from utils import cpp, table_reader
def main():
errors = table_reader.csv_to_list_of_tuples(
csv_filepath="errors.csv",
tuple_name="Error",
)
errors = sorted(errors, key=lambda x: x.quote == "y")
first_quote = 0
for i in range(0, len(errors)):
if errors[i].quote == "y":
first_quote = i
break
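# Errors that quote user input are sorted to the end so shouldQuote() can be generated as a single >= comparison
# against the first quoting error code.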
header_writer = cpp.HeaderWriter(
name="error_types",
inner_namespace="Code",
includes=("cassert", "string"),
)
header_writer.write("enum ErrorCode{\n")
for e in errors:
header_writer.write(f" {e.name.upper()},\n")
header_writer.write("};\n\n")
header_writer.write("inline std::string getMessage(ErrorCode code){\n"
" switch(code){\n")
for e in errors:
header_writer.write(f" case {e.name.upper()}: return \"{e.msg}")
if e.quote == "y":
header_writer.write(": ")
header_writer.write("\";\n")
header_writer.write(" default: assert(false); return \"\";\n")
header_writer.write(" }\n}\n\n")
header_writer.write("inline bool shouldQuote(ErrorCode code){\n"
f" return code >= {errors[first_quote].name};\n"
"}\n")
header_writer.finalize()
if __name__ == "__main__":
main()
``` |
{
"source": "johnduarte/pytest-rpc",
"score": 2
} |
#### File: pytest-rpc/tests/test_xsd.py
```python
from __future__ import absolute_import
from lxml import etree
from pytest_rpc import MK8S_ENV_VARS, ASC_ENV_VARS, get_xsd
from tests.conftest import run_and_parse
from tests.conftest import run_and_parse_with_config
# ======================================================================================================================
# Globals
# ======================================================================================================================
TEST_ENV_VARS = list(ASC_ENV_VARS) # Shallow copy.
MK8S_TEST_ENV_VARS = list(MK8S_ENV_VARS) # Shallow copy.
# ======================================================================================================================
# Tests
# ======================================================================================================================
def test_happy_path_asc(testdir, properly_decorated_test_function):
"""Verify that 'get_xsd' returns an XSD stream that can be used to validate JUnitXML."""
# Setup
testdir.makepyfile(properly_decorated_test_function.format(test_name='test_happy_path',
test_id='123e4567-e89b-12d3-a456-426655440000',
jira_id='ASC-123'))
xml_doc = run_and_parse(testdir).xml_doc
xmlschema = etree.XMLSchema(etree.parse(get_xsd()))
# Test
xmlschema.assertValid(xml_doc)
def test_happy_path_mk8s(testdir, properly_decorated_test_function):
"""Verify that 'get_xsd' returns an XSD stream that can be used to validate JUnitXML when configured with mk8s."""
# Setup
testdir.makepyfile(properly_decorated_test_function.format(test_name='test_happy_path',
test_id='123e4567-e89b-12d3-a456-426655440000',
jira_id='ASC-123'))
config = \
"""
[pytest]
ci-environment=mk8s
""" # noqa
xml_doc = run_and_parse_with_config(testdir, config).xml_doc
xmlschema = etree.XMLSchema(etree.parse(get_xsd('mk8s')))
# Test
xmlschema.assertValid(xml_doc)
def test_multiple_jira_references(testdir):
"""Verify that 'get_xsd' returns an XSD stream when a testcase is decorated Jira mark with multiple
arguments.
"""
# Setup
testdir.makepyfile("""
import pytest
@pytest.mark.jira('ASC-123', 'ASC-124')
@pytest.mark.test_id('123e4567-e89b-12d3-a456-426655440000')
def test_xsd():
pass
""")
xml_doc = run_and_parse(testdir).xml_doc
xmlschema = etree.XMLSchema(etree.parse(get_xsd()))
# Test
xmlschema.assertValid(xml_doc)
def test_missing_global_property(testdir, properly_decorated_test_function, mocker):
"""Verify that XSD will enforce the presence of all required global test suite properties."""
# Mock
# Missing 'BUILD_URL'
mock_env_vars = [x for x in TEST_ENV_VARS if x != 'BUILD_URL']
mocker.patch('pytest_rpc.ASC_ENV_VARS', mock_env_vars)
# Setup
testdir.makepyfile(properly_decorated_test_function.format(test_name='test_missing_global',
test_id='123e4567-e89b-12d3-a456-426655440000',
jira_id='ASC-123'))
xml_doc = run_and_parse(testdir).xml_doc
xmlschema = etree.XMLSchema(etree.parse(get_xsd()))
# Test
assert xmlschema.validate(xml_doc) is False
def test_extra_global_property(testdir, properly_decorated_test_function, mocker):
"""Verify that XSD will enforce the strict presence of only required global test suite properties."""
# Mock
# Extra 'BUILD_URL'
mock_env_vars = TEST_ENV_VARS + ['BUILD_URL']
mocker.patch('pytest_rpc.ASC_ENV_VARS', mock_env_vars)
# Setup
testdir.makepyfile(properly_decorated_test_function.format(test_name='test_extra_global',
test_id='123e4567-e89b-12d3-a456-426655440000',
jira_id='ASC-123'))
xml_doc = run_and_parse(testdir).xml_doc
xmlschema = etree.XMLSchema(etree.parse(get_xsd()))
# Test
assert xmlschema.validate(xml_doc) is False
def test_typo_global_property(testdir, properly_decorated_test_function, mocker):
"""Verify that XSD will enforce the only certain property names are allowed for the test suite."""
# Mock
# Typo for RPC_RELEASE
mock_env_vars = [x for x in TEST_ENV_VARS if x != 'RPC_RELEASE'] + ['RCP_RELEASE']
mocker.patch('pytest_rpc.ASC_ENV_VARS', mock_env_vars)
# Setup
testdir.makepyfile(properly_decorated_test_function.format(test_name='test_typo_global',
test_id='123e4567-e89b-12d3-a456-426655440000',
jira_id='ASC-123'))
xml_doc = run_and_parse(testdir).xml_doc
xmlschema = etree.XMLSchema(etree.parse(get_xsd()))
# Test
assert xmlschema.validate(xml_doc) is False
def test_missing_required_marks(testdir, undecorated_test_function):
"""Verify that XSD will enforce the presence of 'test_id' and 'jira_id' properties for test cases."""
# Setup
testdir.makepyfile(undecorated_test_function.format(test_name='test_typo_global'))
xml_doc = run_and_parse(testdir).xml_doc
xmlschema = etree.XMLSchema(etree.parse(get_xsd()))
# Test
assert xmlschema.validate(xml_doc) is False
def test_missing_uuid_mark(testdir, single_decorated_test_function):
"""Verify that XSD will enforce the presence of 'test_id' property for test cases."""
# Setup
testdir.makepyfile(single_decorated_test_function.format(test_name='test_missing_uuid',
mark_type='jira',
mark_arg='ASC-123'))
xml_doc = run_and_parse(testdir).xml_doc
xmlschema = etree.XMLSchema(etree.parse(get_xsd()))
# Test
assert xmlschema.validate(xml_doc) is False
def test_missing_jira_mark(testdir, single_decorated_test_function):
"""Verify that XSD will enforce the presence of 'jira' property for test cases."""
# Setup
testdir.makepyfile(single_decorated_test_function.format(test_name='test_missing_jira',
mark_type='test_id',
mark_arg='123e4567-e89b-12d3-a456-426655440000'))
xml_doc = run_and_parse(testdir).xml_doc
xmlschema = etree.XMLSchema(etree.parse(get_xsd()))
# Test
assert xmlschema.validate(xml_doc) is False
def test_extra_testcase_property(testdir, properly_decorated_test_function):
"""Verify that XSD will enforce the strict presence of only required test case properties."""
# Setup
testdir.makepyfile(properly_decorated_test_function.format(test_name='test_extra_mark',
test_id='123e4567-e89b-12d3-a456-426655440000',
jira_id='ASC-123'))
xml_doc = run_and_parse(testdir).xml_doc
# Add another property element for the testcase.
xml_doc.find('./testcase/properties').append(etree.Element('property',
attrib={'name': 'extra', 'value': 'fail'}))
xmlschema = etree.XMLSchema(etree.parse(get_xsd()))
# Test
assert xmlschema.validate(xml_doc) is False
def test_typo_property(testdir, properly_decorated_test_function):
"""Verify that XSD will enforce the only certain property names are allowed for the testcase."""
# Setup
testdir.makepyfile(properly_decorated_test_function.format(test_name='test_typo_mark',
test_id='123e4567-e89b-12d3-a456-426655440000',
jira_id='ASC-123'))
xml_doc = run_and_parse(testdir).xml_doc
# Add another property element for the testcase.
xml_doc.find('./testcase/properties/property').attrib['name'] = 'wrong_test_id'
xmlschema = etree.XMLSchema(etree.parse(get_xsd()))
# Test
assert xmlschema.validate(xml_doc) is False
``` |
{
"source": "johndunne2019/pands-problem-set",
"score": 5
} |
#### File: johndunne2019/pands-problem-set/solution_7.py
```python
print("This program will calculate an approximate square root of any positive floating point number")
# I have printed this line to the screen to give the user some background to the program
def sq_root(): # I created a function called sq_root
import math # I have imported the math module to calculate the square root of the number entered by the user
x = float(input("Please enter a positive floating point number:"))
# I have asked the user to enter a positive floating point number and assigned it the variable x
y = x # I have added a second variable y as I want to have 2 variables printed to the screen in my final output
y = math.sqrt(x) # The new value of y will be changed to the square root of x calculated by the math.sqrt module here
y = (round(y, 1)) # The result of the math.sqrt calculation above will be rounded to one decimal place using the round function
print(f"The square root of {x} is approx {y}") # The output is printed to the screen showing the variables x and y
sq_root() # I called the function
# I wrote this program myself using some additional research and reading as outlined below:
# I read about the math.sqrt module here: https://docs.python.org/3/library/math.html
# I read about the round function here: https://www.programiz.com/python-programming/methods/built-in/round
# I used curly brackets to print the variables as I learnt in week 7 lecture "fstrings"
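# Example run (hypothetical input): entering 2.0 prints "The square root of 2.0 is approx 1.4"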
``` |
{
"source": "john-dupuy/black-box-tester",
"score": 2
} |
#### File: black-box-tester/src/tracker.py
```python
import json
import logging
import os
import threading
log = logging.getLogger("blackbox.tracker")
TRACKER_FILE = os.path.join("/black-box-runner-storage", "tracker.json")
_lock = threading.RLock()
"""
Example tracker file:
{
"num_runners": 3,
"plugins": ["plugin1", "plugin2", "plugin3", "plugin4", "plugin5"]
"runners": [
{"name": "runner1", "plugins": ["plugin1", "plugin2"], "last_plugin": "plugin2"},
{"name": "runner2", "plugins": ["plugin3", "plugin4"], "last_plugin": "plugin3"},
{"name": "runner3", "plugins": ["plugin5"], "last_plugin": None}
]
}
"""
def locked(orig_func):
def _func(*args, **kwargs):
with _lock:
return orig_func(*args, **kwargs)
return _func
@locked
def read_tracker_data():
if not os.path.exists(TRACKER_FILE):
log.info("tracker file does not exist")
return {}
try:
with open(TRACKER_FILE) as fp:
data = fp.read()
log.debug("read tracker data: %s", data)
if not data.strip():
return {}
return json.loads(data)
except (OSError, json.decoder.JSONDecodeError):
log.exception("error reading tracker file")
return {}
@locked
def write_tracker_data(data):
try:
with open(TRACKER_FILE, "w") as fp:
json.dump(data, fp)
except OSError:
log.exception("error writing to tracker file")
@locked
def set_last_executed_plugin(runner_name, plugin_name):
data = read_tracker_data()
get_runner(runner_name, data)["last_plugin"] = plugin_name
write_tracker_data(data)
def get_runner(runner_name, data=None):
if data:
runners = data.get("runners", [])
else:
runners = read_tracker_data().get("runners", [])
for r in runners:
if r["name"] == runner_name:
return r
return None
def init_tracker(plugins, runners):
data = read_tracker_data()
current_plugin_names = [p[0] for p in plugins]
reset = False
if not data:
log.info("no tracker data found, re-initializing tracker")
reset = True
elif data.get("num_runners") != len(runners) or data.get("plugins") != current_plugin_names:
log.info("num runners or plugins have changed, re-initializing tracker")
reset = True
else:
for runner in runners:
    stored_runner = get_runner(runner.name, data)
    if not stored_runner:
        log.info("runner %s not in tracker data, re-initializing tracker", runner.name)
        reset = True
    elif stored_runner.get("plugins") != [p.name for p in runner.plugins]:
        log.info(
            "runner %s assigned plugins changed, re-initializing tracker data", runner.name
        )
        reset = True
if reset:
fresh_data = {
"num_runners": len(runners),
"plugins": current_plugin_names,
"runners": [r.to_dict() for r in runners],
}
write_tracker_data(fresh_data)
else:
log.info("no change in plugins/runners -- using previously stored tracker data")
log.info("tracker data:\n%s", json.dumps(read_tracker_data(), sort_keys=True, indent=4))
def get_last_executed_plugin(runner_name):
return get_runner(runner_name)["last_plugin"]
``` |
{
"source": "john-dupuy/bugzilla-data",
"score": 3
} |
#### File: bugzilla-data/scripts/make_plot.py
```python
import argparse
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.offsetbox import AnchoredText
from bugzilla_data import BugzillaData as VanillaBugzillaData
# requires PyQt5
matplotlib.use("Qt5Agg")
class BugzillaData(VanillaBugzillaData):
""" Inherit from base BugzillaData class to include plotting """
def generate_plot(self, save=False):
xvals, sorted_counts = self.get_plot_data()
# create the figure
fig, ax = plt.subplots()
ax.bar(xvals, [s[1] for s in sorted_counts], align="center")
plt.xticks(xvals, [s[0] for s in sorted_counts], rotation="vertical")
plt.ylabel("BZ Count")
plt.title(self.title)
if self.product:
ax.add_artist(AnchoredText(self.product, loc=1))
plt.tight_layout()
if save:
plt.savefig("{}.png".format(self.plot_style))
plt.show()
def get_args():
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
"-q", "--query", type=str, default="conf/query.yaml", help="Path to query yaml file"
)
parser.add_argument(
"-p",
"--plot",
type=str,
default="component",
help=(
"Plot bar chart for BZs found via <query> sorted according to one of: "
"[component, qa_contact, assigned_to, creator]"
),
)
parser.add_argument("-u", "--url", type=str, default="bugzilla.redhat.com", help="Bugzilla URL")
parser.add_argument("--save", action="store_true", default=False, help="Save the plot")
parser.add_argument(
"--output",
action="store_true",
default=False,
help="Output bugzilla data from query to stdout",
)
parser.add_argument(
"--noplot", action="store_true", default=False, help="Do not generate any plot"
)
parser.add_argument(
"--report", action="store_true", default=False, help="Generate a bz yaml file report"
)
parser.add_argument(
"--login",
action="store_true",
default=False,
help="Login to Bugzilla before making query. Required to use e.g. savedsearch and to get "
"some hidden fields.",
)
parser.add_argument(
"--credential_file",
type=str,
default="conf/credentials.yaml",
help="Path to credential yaml file",
)
return parser.parse_args()
def main(args=None):
args = args if args else get_args()
args.output = True if args.noplot else args.output
# instantiate object
bz_data = BugzillaData(
args.query, args.url, args.plot, login=args.login, credential_file=args.credential_file
)
# print out info if necessary
if args.output:
print(bz_data.generate_output())
if args.report:
bz_data.generate_report()
# generate the plot
if not args.noplot:
bz_data.generate_plot(save=args.save)
if __name__ == "__main__":
main()
``` |
{
"source": "john-dupuy/ibutsu-server",
"score": 2
} |
#### File: ibutsu_server/controllers/widget_config_controller.py
```python
import connexion
from bson import ObjectId
from ibutsu_server.constants import WIDGET_TYPES
from ibutsu_server.filters import generate_filter_object
from ibutsu_server.mongo import mongo
from ibutsu_server.util import merge_dicts
from ibutsu_server.util import serialize
from ibutsu_server.util.projects import get_project_id
from pymongo import ASCENDING
from pymongo.errors import OperationFailure
def add_widget_config(widget_config=None):
"""Create a new widget config
:param widget_config: The widget_config to save
:type widget_config: dict | bytes
:rtype: WidgetConfig
"""
if not connexion.request.is_json:
return "Bad request, JSON required", 400
widget_config = connexion.request.json
if widget_config["widget"] not in WIDGET_TYPES.keys():
return "Bad request, widget type does not exist", 400
# add default weight of 10
if not widget_config.get("weight"):
widget_config["weight"] = 10
# Look up the project id
if widget_config.get("project"):
widget_config["project"] = get_project_id(widget_config["project"])
# default to make views navigable
if widget_config.get("type") == "view" and not widget_config.get("navigable"):
widget_config["navigable"] = "true"
mongo.widget_config.insert_one(widget_config)
widget_config = serialize(widget_config)
return widget_config, 201
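# Example request body for add_widget_config above (field values are hypothetical; "widget"
# must be one of the keys defined in WIDGET_TYPES):
#   {"widget": "jenkins-heatmap", "title": "My heatmap", "params": {}, "project": "my-project"}
# A missing "weight" defaults to 10 and a named project is resolved to its id before the insert.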
def get_widget_config(id_):
"""Get a widget
:param id: The ID of the widget
:type id: str
:rtype: Report
"""
widget_config = mongo.widget_config.find_one({"_id": ObjectId(id_)})
return serialize(widget_config)
def get_widget_config_list(filter_=None, page=1, page_size=25):
"""Get a list of widgets
:param filter_: A list of filters to apply
:type filter_: list
:param page: Set the page of items to return, defaults to 1
:type page: int
:param page_size: Set the number of items per page, defaults to 25
:type page_size: int
:rtype: ReportList
"""
filters = {}
if filter_:
for filter_string in filter_:
filter_obj = generate_filter_object(filter_string)
if filter_obj:
filters.update(filter_obj)
# Update the project_id filter to account for unset project ids
if "project" in filters:
filters["$or"] = [
{"project": {"$exists": False}},
{"project": {"$eq": None}},
{"project": filters["project"]},
]
del filters["project"]
offset = (page * page_size) - page_size
total_items = mongo.widget_config.count(filters)
total_pages = (total_items // page_size) + (1 if total_items % page_size > 0 else 0)
widgets = mongo.widget_config.find(
filters, skip=offset, limit=page_size, sort=[("weight", ASCENDING)]
)
return {
"widgets": [serialize(widget) for widget in widgets],
"pagination": {
"page": page,
"pageSize": page_size,
"totalItems": total_items,
"totalPages": total_pages,
},
}
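# Worked example of the pagination math above (hypothetical numbers): page=3, page_size=25
# gives offset = 3 * 25 - 25 = 50, so documents 51-75 are returned; with total_items=101,
# total_pages = 101 // 25 + 1 = 5.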
def update_widget_config(id_):
"""Updates a single widget config
:param id: ID of widget to update
:type id: int
:param body: Result
:type body: dict
:rtype: Result
"""
if not connexion.request.is_json:
return "Bad request, JSON required", 400
widget_config = connexion.request.get_json()
if widget_config.get("widget") and widget_config["widget"] not in WIDGET_TYPES.keys():
return "Bad request, widget type does not exist", 400
# Look up the project id
if widget_config.get("project"):
widget_config["project"] = get_project_id(widget_config["project"])
existing_widget_config = mongo.widget_config.find_one({"_id": ObjectId(id_)})
# add default weight of 10
if not existing_widget_config.get("weight"):
existing_widget_config["weight"] = 10
# default to make views navigable
if widget_config.get("type") == "view" and not widget_config.get("navigable"):
widget_config["navigable"] = "true"
merge_dicts(existing_widget_config, widget_config)
mongo.widget_config.replace_one({"_id": ObjectId(id_)}, widget_config)
return serialize(widget_config)
def delete_widget_config(id_):
"""Deletes a widget
:param id: ID of the widget to delete
:type id: str
:rtype: tuple
"""
try:
mongo.widget_config.delete_one({"_id": ObjectId(id_)})
return "OK", 200
except OperationFailure:
return "Not Found", 404
```
#### File: backend/ibutsu_server/__init__.py
```python
from importlib import import_module
from pathlib import Path
from connexion import App
from dynaconf import FlaskDynaconf
from flask import redirect
from flask import request
from flask_cors import CORS
from ibutsu_server.encoder import JSONEncoder
FRONTEND_PATH = Path("/app/frontend")
def get_app():
app = App(__name__, specification_dir="./openapi/")
app.app.json_encoder = JSONEncoder
app.add_api(
"openapi.yaml", arguments={"title": "Ibutsu"}, base_path="/api", pythonic_params=True
)
CORS(app.app)
FlaskDynaconf(app.app)
@app.route("/")
def index():
return redirect("/api/ui/", code=302)
@app.route("/admin/run-task", methods=["POST"])
def run_task():
params = request.get_json(force=True, silent=True)
if not params:
return "Bad request", 400
task_path = params.get("task")
task_params = params.get("params", {})
if not task_path:
return "Bad request", 400
task_module, task_name = task_path.split(".", 2)
try:
mod = import_module(f"ibutsu_server.tasks.{task_module}")
except ImportError:
return "Not found", 404
if not hasattr(mod, task_name):
return "Not found", 404
task = getattr(mod, task_name)
task.delay(**task_params)
return "Accepted", 202
return app
```
#### File: ibutsu_server/models/group_list.py
```python
from __future__ import absolute_import
from typing import List
from ibutsu_server import util
from ibutsu_server.models.base_model_ import Model
from ibutsu_server.models.group import Group
from ibutsu_server.models.pagination import Pagination
class GroupList(Model):
"""NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
Do not edit the class manually.
"""
def __init__(self, groups=None, pagination=None): # noqa: E501
"""GroupList - a model defined in OpenAPI
:param groups: The groups of this GroupList. # noqa: E501
:type groups: List[Group]
:param pagination: The pagination of this GroupList. # noqa: E501
:type pagination: Pagination
"""
self.openapi_types = {"groups": List[Group], "pagination": Pagination}
self.attribute_map = {"groups": "groups", "pagination": "pagination"}
self._groups = groups
self._pagination = pagination
@classmethod
def from_dict(cls, dikt) -> "GroupList":
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The GroupList of this GroupList. # noqa: E501
:rtype: GroupList
"""
return util.deserialize_model(dikt, cls)
@property
def groups(self):
"""Gets the groups of this GroupList.
:return: The groups of this GroupList.
:rtype: List[Group]
"""
return self._groups
@groups.setter
def groups(self, groups):
"""Sets the groups of this GroupList.
:param groups: The groups of this GroupList.
:type groups: List[Group]
"""
self._groups = groups
@property
def pagination(self):
"""Gets the pagination of this GroupList.
:return: The pagination of this GroupList.
:rtype: Pagination
"""
return self._pagination
@pagination.setter
def pagination(self, pagination):
"""Sets the pagination of this GroupList.
:param pagination: The pagination of this GroupList.
:type pagination: Pagination
"""
self._pagination = pagination
```
#### File: ibutsu_server/models/group.py
```python
from __future__ import absolute_import
from ibutsu_server import util
from ibutsu_server.models.base_model_ import Model
class Group(Model):
"""NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
Do not edit the class manually.
"""
def __init__(self, id=None, name=None):
"""Group - a model defined in OpenAPI
:param id: The id of this Group.
:type id: str
:param name: The name of this Group.
:type name: str
"""
self.openapi_types = {"id": str, "name": str}
self.attribute_map = {"id": "id", "name": "name"}
self._id = id
self._name = name
@classmethod
def from_dict(cls, dikt) -> "Group":
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The Group of this Group.
:rtype: Group
"""
return util.deserialize_model(dikt, cls)
@property
def id(self):
"""Gets the id of this Group.
Unique ID of the project
:return: The id of this Group.
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this Group.
Unique ID of the project
:param id: The id of this Group.
:type id: str
"""
self._id = id
@property
def name(self):
"""Gets the name of this Group.
The name of the group
:return: The name of this Group.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this Group.
The name of the group
:param name: The name of this Group.
:type name: str
"""
self._name = name
```
#### File: ibutsu_server/models/result_list.py
```python
from __future__ import absolute_import
from typing import List
from ibutsu_server import util
from ibutsu_server.models.base_model_ import Model
from ibutsu_server.models.pagination import Pagination
from ibutsu_server.models.result import Result
class ResultList(Model):
"""NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
Do not edit the class manually.
"""
def __init__(self, results=None, pagination=None): # noqa: E501
"""ResultList - a model defined in OpenAPI
:param results: The results of this ResultList. # noqa: E501
:type results: List[Result]
:param pagination: The pagination of this ResultList. # noqa: E501
:type pagination: Pagination
"""
self.openapi_types = {"results": List[Result], "pagination": Pagination}
self.attribute_map = {"results": "results", "pagination": "pagination"}
self._results = results
self._pagination = pagination
@classmethod
def from_dict(cls, dikt) -> "ResultList":
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The ResultList of this ResultList. # noqa: E501
:rtype: ResultList
"""
return util.deserialize_model(dikt, cls)
@property
def results(self):
"""Gets the results of this ResultList.
:return: The results of this ResultList.
:rtype: List[Result]
"""
return self._results
@results.setter
def results(self, results):
"""Sets the results of this ResultList.
:param results: The results of this ResultList.
:type results: List[Result]
"""
self._results = results
@property
def pagination(self):
"""Gets the pagination of this ResultList.
:return: The pagination of this ResultList.
:rtype: Pagination
"""
return self._pagination
@pagination.setter
def pagination(self, pagination):
"""Sets the pagination of this ResultList.
:param pagination: The pagination of this ResultList.
:type pagination: Pagination
"""
self._pagination = pagination
```
#### File: ibutsu_server/models/run.py
```python
from __future__ import absolute_import
from ibutsu_server import util
from ibutsu_server.models.base_model_ import Model
class Run(Model):
"""NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
Do not edit the class manually.
"""
def __init__(self, id=None, duration=None, summary=None, metadata=None):
"""test run - a model defined in Swagger
:param id: The id of this test run.
:type id: str
:param duration: The duration of this test run.
:type duration: float
:param summary: The summary of this test run.
:type summary: Dict[str, str]
:param metadata: Extra metadata
:type metadata: Dict[str, str]
"""
self.openapi_types = {"id": str, "duration": float, "summary": object, "metadata": object}
self.attribute_map = {
"id": "id",
"duration": "duration",
"summary": "summary",
"metadata": "metadata",
}
self._id = id
self._duration = duration
self._summary = summary
self._metadata = metadata
@classmethod
def from_dict(cls, dikt) -> "Run":
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The test run of this test run.
:rtype: Run
"""
return util.deserialize_model(dikt, cls)
@property
def id(self):
"""Gets the id of this Run.
Unique id
:return: The id of this Run.
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this Run.
Unique id
:param id: The id of this Run.
:type id: str
"""
self._id = id
@property
def duration(self):
"""Gets the duration of this test run.
Duration of test in seconds.
:return: The duration of this test run.
:rtype: float
"""
return self._duration
@duration.setter
def duration(self, duration):
"""Sets the duration of this test run.
Duration of test in seconds.
:param duration: The duration of this test run.
:type duration: float
"""
self._duration = duration
@property
def summary(self):
"""Gets the summary of this test run.
:return: The summary of this test run.
:rtype: object
"""
return self._summary
@summary.setter
def summary(self, summary):
"""Sets the summary of this test run.
:param summary: The summary of this test run.
:type summary: object
"""
self._summary = summary
@property
def metadata(self):
"""Gets the metadata of this test run.
:return: The metadata of this test run.
:rtype: object
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this test run.
:param metadata: The metadata of this test run.
:type metadata: object
"""
self._metadata = metadata
```
#### File: ibutsu_server/tasks/db.py
```python
import time
from datetime import datetime
from datetime import timedelta
from bson import ObjectId
from bson.errors import InvalidId
from dynaconf import settings
from ibutsu_server.mongo import mongo
from ibutsu_server.tasks.queues import task
from ibutsu_server.tasks.results import add_result_start_time
from ibutsu_server.tasks.runs import update_run as update_run_task
from ibutsu_server.util import serialize
from kombu.exceptions import OperationalError
from pymongo import DESCENDING
from redis import Redis
from redis.exceptions import LockError
""" Tasks for DB related things"""
LOCK_EXPIRE = 1
@task
def create_runs_from_results():
# 1. get all the runs
runs_to_create = mongo.results.aggregate([{"$group": {"_id": "$metadata.run"}}])
# 2. loop over all the runs
for run_id in runs_to_create:
# first check if the run exists already
_id = run_id["_id"]
try:
if mongo.runs.find_one({"_id": ObjectId(_id)}):
continue
except InvalidId:
continue
run_dict = {
"_id": ObjectId(_id),
}
# 3. Create the run in Ibutsu
mongo.runs.insert_one(run_dict)
run_dict = serialize(run_dict)
# 4. Start the update task
update_run_task.apply_async((run_dict["id"],), countdown=5)
@task
def add_start_time_to_results():
""" Add the field 'start_time' to all the results. For this we create a task for each run. """
for run in mongo.runs.find(sort=[("start_time", DESCENDING)]):
run = serialize(run)
try:
add_result_start_time.apply_async((run["id"],), countdown=5)
except OperationalError:
pass
@task
def _add_project_metadata(run, project_id):
""" Update all runs and results to add the 'metadata.project' field"""
redis_client = Redis.from_url(settings["CELERY_BROKER_URL"])
try:
# Get a lock so that we don't run this task concurrently
with redis_client.lock(f"update-run-lock-{run['id']}", blocking_timeout=LOCK_EXPIRE):
# add project metadata to the run
if not run.get("metadata"):
run["metadata"] = {}
run["metadata"]["project"] = project_id
mongo.runs.replace_one({"_id": ObjectId(run["id"])}, run)
results = mongo.results.find(
{"metadata.run": run["id"], "metadata.project": {"$exists": False}}
)
for result in results:
result = serialize(result)
# add project metadata to the result
if not result.get("metadata"):
result["metadata"] = {}
result["metadata"]["project"] = project_id
mongo.results.replace_one({"_id": ObjectId(result["id"])}, result)
except LockError:
# If this task is locked, discard it so that it doesn't clog up the system
pass
@task
def add_project_metadata_to_objects(project_name="insights-qe"):
""" Add IQE Project Metadata to historical DB objects. """
project_id = serialize(mongo.projects.find_one({"name": project_name})).get("id")
if not project_id:
return
for run in mongo.runs.find(
{"metadata.project": {"$exists": False}}, sort=[("start_time", DESCENDING)]
):
run = serialize(run)
try:
_add_project_metadata.apply_async((run, project_id), countdown=5)
except OperationalError:
pass
@task
def _delete_old_files(filename, max_date):
""" Delete all files uploaded before the max_date """
try:
redis_client = Redis.from_url(settings["CELERY_BROKER_URL"])
if not isinstance(max_date, datetime):
max_date = datetime.fromisoformat(max_date)
try:
# Get a lock so that we don't run this task concurrently
with redis_client.lock(f"delete-file-lock-{filename}", blocking_timeout=LOCK_EXPIRE):
for file in mongo.fs.find({"filename": filename, "uploadDate": {"$lt": max_date}}):
mongo.fs.delete(file._id)
except LockError:
# If this task is locked, discard it so that it doesn't clog up the system
pass
except Exception:
# we don't want to continually retry this task
return
@task
def prune_old_files(months=5):
""" Delete artifact files older than specified months (here defined as 4 weeks). """
try:
if isinstance(months, str):
months = int(months)
if months < 2:
# we don't want to remove files more recent than 2 months
return
files_to_delete = ["traceback.log", "screenshot.png", "iqe.log"]
delta = timedelta(weeks=months * 4).total_seconds()
current_time = time.time()
timestamp_in_sec = current_time - delta
# get datetime obj
max_date = datetime.fromtimestamp(timestamp_in_sec)
# send out the tasks
for filename in files_to_delete:
try:
_delete_old_files.apply_async((filename, max_date), countdown=5)
except OperationalError:
pass
except Exception:
# we don't want to continually retry this task
return
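# Worked example of the cutoff math above (hypothetical value): months=5 gives
# timedelta(weeks=20).total_seconds() = 12096000 seconds (about 140 days), so max_date is
# roughly 140 days before the current time.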
@task
def delete_large_files(limit=256 * 1024):
""" Delete 'iqe.log' files larger than the limit, defaults to 256 KiB"""
try:
if isinstance(limit, str):
limit = int(limit)
if limit < (256 * 1024):
# we don't want to remove files smaller than 256 KiB
return
redis_client = Redis.from_url(settings["CELERY_BROKER_URL"])
try:
# Get a lock so that we don't run this task concurrently
with redis_client.lock(f"delete-file-lock-{limit}", blocking_timeout=LOCK_EXPIRE):
for file in mongo.fs.find({"length": {"$gt": limit}, "filename": "iqe.log"}):
mongo.fs.delete(file._id)
except LockError:
# If this task is locked, discard it so that it doesn't clog up the system
pass
except Exception:
# we don't want to continually retry this task
return
```
#### File: ibutsu_server/tasks/results.py
```python
from bson import ObjectId
from dynaconf import settings
from ibutsu_server.mongo import mongo
from ibutsu_server.tasks.queues import task
from ibutsu_server.util import serialize
from redis import Redis
from redis.exceptions import LockError
LOCK_EXPIRE = 1
@task
def add_result_start_time(run_id):
""" Update all results in a run to add the 'start_time' field to a result"""
redis_client = Redis.from_url(settings["CELERY_BROKER_URL"])
try:
# Get a lock so that we don't run this task concurrently
with redis_client.lock(f"update-run-lock-{run_id}", blocking_timeout=LOCK_EXPIRE):
run = mongo.runs.find_one({"_id": ObjectId(run_id)})
if not run:
return
results = mongo.results.find({"metadata.run": run_id})
for result in results:
result = serialize(result)
if not result.get("start_time"):
result["start_time"] = result.get("starttime")
mongo.results.replace_one({"_id": ObjectId(result["id"])}, result)
except LockError:
# If this task is locked, discard it so that it doesn't clog up the system
pass
```
#### File: ibutsu_server/tasks/runs.py
```python
import time
from datetime import datetime
from bson import ObjectId
from dynaconf import settings
from ibutsu_server.mongo import mongo
from ibutsu_server.tasks.queues import task
from pymongo import ASCENDING
from redis import Redis
from redis.exceptions import LockError
LOCK_EXPIRE = 1
METADATA_TO_COPY = ["component", "env", "project", "jenkins", "tags"]
def _copy_result_metadata(result, metadata, key):
if not metadata.get(key) and result.get("metadata") and result["metadata"].get(key):
metadata[key] = result["metadata"][key]
@task
def update_run(run_id):
"""Update the run summary from the results"""
redis_client = Redis.from_url(settings["CELERY_BROKER_URL"])
try:
# Get a lock so that we don't run this task concurrently
with redis_client.lock(f"update-run-lock-{run_id}", blocking_timeout=LOCK_EXPIRE):
key_map = {"failed": "failures", "error": "errors", "skipped": "skips"}
run = mongo.runs.find_one({"_id": ObjectId(run_id)})
if not run:
return
# sort results by start_time ascending so the first result gives the run's earliest start time
results = mongo.results.find({"metadata.run": run_id}, sort=[("start_time", ASCENDING)])
summary = {"errors": 0, "failures": 0, "skips": 0, "tests": 0}
run_duration = 0.0
metadata = run.get("metadata") or {}
for counter, result in enumerate(results):
if counter == 0:
if not run.get("start_time"):
run["start_time"] = result.get("start_time", result.get("starttime"))
if not run.get("created"):
run["created"] = datetime.fromtimestamp(
run.get("start_time") or time.time()
).isoformat()
summary["tests"] += 1
key = key_map.get(result["result"], None)
if key:
summary[key] += 1
if result.get("duration"):
run_duration += result["duration"]
if not run.get("source") and result.get("source"):
run["source"] = result["source"]
for key in METADATA_TO_COPY:
_copy_result_metadata(result, metadata, key)
run["summary"] = summary
run["metadata"] = metadata
if run_duration:
run["duration"] = run_duration
mongo.runs.replace_one({"_id": ObjectId(run_id)}, run)
except LockError:
# If this task is locked, discard it so that it doesn't clog up the system
pass
```
#### File: ibutsu_server/test/test_artifact_controller.py
```python
from __future__ import absolute_import
from unittest import skip
from unittest.mock import MagicMock
from unittest.mock import patch
from bson import ObjectId
from ibutsu_server.test import BaseTestCase
from six import BytesIO
class MockArtifact(object):
def __init__(self, id_, result_id, filename, additional_metadata, content_type, file_contents):
self._id = ObjectId(id_)
self.filename = filename
self.content_type = content_type
self.metadata = {"resultId": result_id, "additionalMetadata": additional_metadata}
self._file_contents = file_contents
def read(self):
return self._file_contents
MOCK_ID = "507f1f77bcf86cd799439011"
MOCK_ARTIFACT = MockArtifact(
MOCK_ID, "cd7994f77bcf8639011507f1", "filename", {"key": "{}"}, "text/plain", "file_contents"
)
class TestArtifactController(BaseTestCase):
"""ArtifactController integration test stubs"""
def setUp(self):
"""Set up a fake MongoDB object"""
self.mongo_patcher = patch("ibutsu_server.controllers.artifact_controller.mongo")
self.mock_mongo = self.mongo_patcher.start()
self.mock_mongo.fs = MagicMock()
self.mock_mongo.fs.find.return_value = [MOCK_ARTIFACT]
self.mock_mongo.fs.upload_from_stream.return_value = ObjectId(MOCK_ID)
def tearDown(self):
"""Teardown the mocks"""
self.mongo_patcher.stop()
def test_delete_artifact(self):
"""Test case for delete_artifact
Delete an artifact
"""
headers = {}
response = self.client.open(
"/api/artifact/{id}".format(id=MOCK_ID), method="DELETE", headers=headers
)
assert self.mock_mongo.fs.delete.called_once_with(ObjectId(MOCK_ID))
self.assert_200(response, "Response body is : " + response.data.decode("utf-8"))
def test_download_artifact(self):
"""Test case for download_artifact
Download an artifact
"""
headers = {"Accept": "application/octet-stream"}
response = self.client.open(
"/api/artifact/{id}/download".format(id="5d9230bb10b3f82ce80760fd"),
method="GET",
headers=headers,
)
self.assert_200(response, "Response body is : " + response.data.decode("utf-8"))
def test_get_artifact(self):
"""Test case for get_artifact
Get a single artifact
"""
headers = {"Accept": "application/json"}
response = self.client.open(
"/api/artifact/{id}".format(id="5d9230bb10b3f82ce80760fd"),
method="GET",
headers=headers,
)
self.assert_200(response, "Response body is : " + response.data.decode("utf-8"))
def test_get_artifact_list(self):
"""Test case for get_artifact_list
Get a (filtered) list of artifacts
"""
query_string = [("resultId", "result_id_example"), ("page", 56), ("pageSize", 56)]
headers = {"Accept": "application/json"}
response = self.client.open(
"/api/artifact", method="GET", headers=headers, query_string=query_string
)
self.assert_200(response, "Response body is : " + response.data.decode("utf-8"))
@skip("multipart/form-data not supported by Connexion")
def test_upload_artifact(self):
"""Test case for upload_artifact
Uploads a test run artifact
"""
headers = {"Accept": "application/json", "Content-Type": "multipart/form-data"}
data = dict(
result_id="result_id_example",
filename="filename_example",
file=(BytesIO(b"some file data"), "file.txt"),
additional_metadata=None,
)
response = self.client.open(
"/api/artifact",
method="POST",
headers=headers,
data=data,
content_type="multipart/form-data",
)
self.assert_201(response, "Response body is : " + response.data.decode("utf-8"))
```
#### File: ibutsu_server/test/test_report_controller.py
```python
from __future__ import absolute_import
from unittest.mock import MagicMock
from unittest.mock import patch
from bson import ObjectId
from flask import json
from ibutsu_server.test import BaseTestCase
MOCK_ID = "cd7994f77bcf8639011507f1"
MOCK_PARAMS = {"type": "csv", "source": "local"}
MOCK_REPORT = {
"_id": ObjectId(MOCK_ID),
"id": MOCK_ID,
"filename": "report.csv",
"mimetype": "text/csv",
"url": "",
"parameters": MOCK_PARAMS,
"created": "2019-09-30T22:08:30.205319",
}
MOCK_CSV = {"func": MagicMock()}
class TestReportController(BaseTestCase):
"""ReportController integration test stubs"""
def setUp(self):
"""Set up some mocked objects"""
self.mongo_patcher = patch("ibutsu_server.controllers.report_controller.mongo")
self.mocked_mongo = self.mongo_patcher.start()
self.mocked_mongo.reports.count.return_value = 1
self.mocked_mongo.reports.find_one.return_value = MOCK_REPORT
self.mocked_mongo.reports.find.return_value = [MOCK_REPORT]
def tearDown(self):
"""Tear down mocks"""
self.mongo_patcher.stop()
def test_add_report(self):
"""Test case for add_report
Create a new report
"""
body = {"type": "csv", "source": "local"}
headers = {"Accept": "application/json", "Content-Type": "application/json"}
with patch.dict("ibutsu_server.controllers.report_controller.REPORTS", {"csv": MOCK_CSV}):
response = self.client.open(
"/api/report",
method="POST",
headers=headers,
data=json.dumps(body),
content_type="application/json",
)
self.assert_201(response, "Response body is : " + response.data.decode("utf-8"))
def test_get_report(self):
"""Test case for get_report
Get a report
"""
headers = {"Accept": "application/json"}
response = self.client.open(
"/api/report/{id}".format(id=MOCK_ID), method="GET", headers=headers
)
self.assert_200(response, "Response body is : " + response.data.decode("utf-8"))
def test_get_report_list(self):
"""Test case for get_report_list
Get a list of reports
"""
query_string = [("page", 56), ("pageSize", 56)]
headers = {"Accept": "application/json"}
response = self.client.open(
"/api/report", method="GET", headers=headers, query_string=query_string
)
self.assert_200(response, "Response body is : " + response.data.decode("utf-8"))
```
#### File: ibutsu_server/test/test_run_controller.py
```python
from __future__ import absolute_import
from unittest import skip
from unittest.mock import MagicMock
from unittest.mock import patch
from bson import ObjectId
from flask import json
from ibutsu_server.test import BaseTestCase
from six import BytesIO
MOCK_ID = "cd7994f77bcf8639011507f1"
MOCK_RUN = {
"_id": ObjectId(MOCK_ID),
"id": MOCK_ID,
"duration": 540.05433,
"summary": {"errors": 1, "failures": 3, "skips": 0, "tests": 548},
}
class TestRunController(BaseTestCase):
"""RunController integration test stubs"""
def setUp(self):
"""Set up a fake MongoDB object"""
self.mongo_patcher = patch("ibutsu_server.controllers.run_controller.mongo")
self.mock_mongo = self.mongo_patcher.start()
self.mock_mongo.runs = MagicMock()
self.mock_mongo.runs.count.return_value = 1
self.mock_mongo.runs.find_one.return_value = MOCK_RUN
self.mock_mongo.runs.find.return_value = [MOCK_RUN]
self.task_patcher = patch("ibutsu_server.controllers.run_controller.update_run_task")
self.mock_update_run_task = self.task_patcher.start()
def tearDown(self):
"""Teardown the mocks"""
self.mongo_patcher.stop()
self.task_patcher.stop()
def test_add_run(self):
"""Test case for add_run
Create a run
"""
run = {
"id": "cd7994f77bcf8639011507f1",
"duration": 540.05433,
"summary": {"errors": 1, "failures": 3, "skips": 0, "tests": 548},
}
headers = {"Accept": "application/json", "Content-Type": "application/json"}
response = self.client.open(
"/api/run",
method="POST",
headers=headers,
data=json.dumps(run),
content_type="application/json",
)
self.mock_update_run_task.apply_async.assert_called_once_with((MOCK_ID,), countdown=5)
self.assert_201(response, "Response body is : " + response.data.decode("utf-8"))
def test_get_run(self):
"""Test case for get_run
Get a single run by ID
"""
headers = {"Accept": "application/json"}
response = self.client.open(
"/api/run/{id}".format(id="5d92316a10b3f82ce8076107"), method="GET", headers=headers
)
self.assert_200(response, "Response body is : " + response.data.decode("utf-8"))
def test_get_run_list(self):
"""Test case for get_run_list
Get a list of the test runs
"""
query_string = [("page", 56), ("pageSize", 56)]
headers = {"Accept": "application/json"}
response = self.client.open(
"/api/run", method="GET", headers=headers, query_string=query_string
)
self.assert_200(response, "Response body is : " + response.data.decode("utf-8"))
@skip("multipart/form-data not supported by Connexion")
def test_import_run(self):
"""Test case for import_run
Import a JUnit XML file
"""
headers = {"Accept": "application/json", "Content-Type": "multipart/form-data"}
data = dict(xml_file=(BytesIO(b"some file data"), "file.txt"))
response = self.client.open(
"/api/run/import",
method="POST",
headers=headers,
data=data,
content_type="multipart/form-data",
)
self.assert_200(response, "Response body is : " + response.data.decode("utf-8"))
def test_update_run(self):
"""Test case for update_run
Update a single run
"""
run = {
"id": "cd7994f77bcf8639011507f1",
"duration": 540.05433,
"summary": {"errors": 1, "failures": 3, "skips": 0, "tests": 548},
}
headers = {"Accept": "application/json", "Content-Type": "application/json"}
response = self.client.open(
"/api/run/{id}".format(id="cd7994f77bcf8639011507f1"),
method="PUT",
headers=headers,
data=json.dumps(run),
content_type="application/json",
)
self.mock_update_run_task.delay.assert_called_once_with(MOCK_ID)
self.assert_200(response, "Response body is : " + response.data.decode("utf-8"))
```
#### File: ibutsu_server/widgets/jenkins_heatmap.py
```python
from bson import ObjectId
from ibutsu_server.mongo import mongo
from pymongo.errors import OperationFailure
NO_RUN_TEXT = "None"
NO_PASS_RATE_TEXT = "Build failed"
def _calculate_slope(x_data):
"""Calculate the trend slope of the data
:param x_data: A list of the result percentages, e.g. [98, 54, 97, 99]
:type x_data: list
:rtype float:
"""
if all(x == 100 for x in x_data):
return 100
y_data = list(range(len(x_data)))
x_avg = sum(x_data) / len(x_data)
y_avg = sum(y_data) / len(y_data)
try:
slope = sum([(x - x_avg) * (y - y_avg) for x, y in zip(x_data, y_data)]) / sum(
[(x - x_avg) ** 2 for x in x_data]
)
except ZeroDivisionError:
slope = 0
return slope
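# Worked example for _calculate_slope (hypothetical pass rates, not real data):
# x_data = [50, 100] gives y_data = [0, 1], x_avg = 75, y_avg = 0.5, so
# slope = ((-25 * -0.5) + (25 * 0.5)) / ((-25) ** 2 + 25 ** 2) = 25 / 1250 = 0.02,
# i.e. a slightly upward trend.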
def _get_heatmap(job_name, build_number, builds, group_field, count_skips, project=None):
"""Run the aggregation to get the Jenkins heatmap report"""
# Get the run IDs for the requested range of Jenkins builds
build_min = build_number - (builds - 1)
build_max = build_number + 1
build_range = [str(bnum) for bnum in range(build_min, build_max)]
aggregation = [
{
"$match": {
"metadata.jenkins.job_name": job_name,
"metadata.jenkins.build_number": {"$in": build_range},
}
},
{
"$group": {
"_id": "$metadata.run",
"build_number": {"$first": "$metadata.jenkins.build_number"},
}
},
]
if project:
aggregation[0]["$match"].update({"metadata.project": project})
cursor = mongo.results.aggregate(aggregation)
runs = [run for run in cursor]
run_to_build = {str(run["_id"]): run["build_number"] for run in runs}
# Figure out the pass rates for each run
fail_fields = ["$summary.errors", "$summary.failures"]
if count_skips:
fail_fields.append("$summary.skips")
pipeline = [
{"$match": {"_id": {"$in": [ObjectId(run["_id"]) for run in runs]}}},
{
"$project": {
"_id": True,
"metadata": True,
"pass_rate": {
"$multiply": [
{
"$cond": {
"if": {"$eq": ["$summary.tests", 0]},
"then": 0,
"else": {
"$divide": [
{"$subtract": ["$summary.tests", {"$add": fail_fields}]},
"$summary.tests",
]
},
}
},
100,
]
},
}
},
{
"$group": {
"_id": f"${group_field}",
"pass_rate": {"$push": "$pass_rate"},
"run_ids": {"$push": "$_id"},
}
},
]
# Now calculate the slopes (angle of the trend line, essentially)
aggr = [r for r in mongo.runs.aggregate(pipeline)]
heatmap = {
run["_id"]: [(_calculate_slope(run["pass_rate"]), 0)]
+ [
(pass_rate, str(run_id), run_to_build[str(run_id)])
for pass_rate, run_id in zip(run["pass_rate"], run["run_ids"])
]
for run in aggr
if run["_id"] is not None
}
return heatmap, build_range
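# Example of the pass_rate projection used above (hypothetical run summary): with tests=100,
# errors=1 and failures=3 (and skips not counted), pass_rate = (100 - 4) / 100 * 100 = 96.0.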
def _pad_heatmap(heatmap, build_range):
"""Pad Jenkins runs that are not present with Null"""
padded_dict = {}
if not heatmap:
return heatmap
for group in heatmap.keys():
# skip first item in list which contains slope info
run_list = heatmap[group][1:]
padded_run_list = []
completed_runs = {run[2]: run for run in run_list}
for build in build_range:
if build not in completed_runs.keys():
padded_run_list.append((NO_PASS_RATE_TEXT, NO_RUN_TEXT, build))
else:
padded_run_list.append(completed_runs[build])
# add the slope info back in
padded_run_list.insert(0, heatmap[group][0])
# write to the padded_dict
padded_dict[group] = padded_run_list
return padded_dict
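# Example of the padding above (hypothetical data): with build_range = ["7", "8"] and a group
# whose runs only cover build "8", build "7" is filled in as ("Build failed", "None", "7"),
# while the slope tuple stays at index 0 of the list.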
def get_jenkins_heatmap(
job_name, builds, group_field, sort_field="starttime", count_skips=False, project=None
):
"""Generate JSON data for a heatmap of Jenkins runs"""
# Get latest build number
filters = {"metadata.jenkins.job_name": job_name}
if project:
filters.update({"metadata.project": project})
results = mongo.results.find(filters, sort=[(sort_field, -1)], limit=1)
build_number = int(results[0]["metadata"]["jenkins"]["build_number"])
try:
heatmap, build_range = _get_heatmap(
job_name, build_number, builds, group_field, count_skips, project
)
except OperationFailure:
# Probably a divide by zero exception, roll back one on the build number and try again
build_number -= 1
heatmap, build_range = _get_heatmap(
job_name, build_number, builds, group_field, count_skips, project
)
# do some postprocessing -- fill empty runs with null
heatmap = _pad_heatmap(heatmap, build_range)
return {"heatmap": heatmap}
```
#### File: ibutsu_server/widgets/jenkins_job_analysis.py
```python
from ibutsu_server.constants import HEATMAP_MAX_BUILDS
from ibutsu_server.widgets.jenkins_job_view import get_jenkins_job_view
def get_jenkins_line_chart(job_name, builds, group_field="build_number", project=None):
data = {"duration": {}}
jobs = get_jenkins_job_view(
filter_=f"job_name={job_name}", page_size=builds, project=project
).get("jobs")
# first determine whether duration differs from total_execution_time for any job
run_had_multiple_components = any(
[job.get("total_execution_time") != job.get("duration") for job in jobs]
)
if run_had_multiple_components:
data["total_execution_time"] = {}
# now format the data
for job in jobs:
data_id = job.get(group_field)
data["duration"][data_id] = round(job.get("duration") / (60 * 60), 2) # convert s to hrs
if run_had_multiple_components:
data["total_execution_time"][data_id] = round(
job.get("total_execution_time") / (60 * 60), 2
)
return data
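# Example of the unit conversion above (hypothetical value): a job duration of 5400 seconds
# becomes round(5400 / 3600, 2) = 1.5 hours on the line chart.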
def get_jenkins_bar_chart(job_name, builds, group_field="build_number", project=None):
data = {"passed": {}, "skipped": {}, "error": {}, "failed": {}}
jobs = get_jenkins_job_view(
filter_=f"job_name={job_name}", page_size=builds, project=project
).get("jobs")
for job in jobs:
data_id = job.get(group_field)
data["passed"][data_id] = job["summary"].get("passes")
data["skipped"][data_id] = job["summary"].get("skips")
data["error"][data_id] = job["summary"].get("errors")
data["failed"][data_id] = job["summary"].get("failures")
return data
def get_jenkins_analysis_data(job_name, builds, group_field="metadata.component", project=None):
heatmap_params = {
"job_name": job_name,
"builds": min(builds, HEATMAP_MAX_BUILDS),
"group_field": group_field,
"count_skips": True,
"sort_field": "start_time",
}
barchart_params = {
"job_name": job_name,
"builds": builds,
}
if project:
heatmap_params["project"] = project
barchart_params["project"] = project
linechart_params = barchart_params.copy()
return {
"barchart_params": barchart_params,
"heatmap_params": heatmap_params,
"linechart_params": linechart_params,
}
``` |
{
"source": "john-dupuy/widgetastic.patternfly4",
"score": 2
} |
#### File: src/widgetastic_patternfly4/table.py
```python
import six
from widgetastic.log import create_item_logger
from widgetastic.widget import Table
from widgetastic.widget import TableColumn
from widgetastic.widget import TableRow
from widgetastic.widget import Text
from widgetastic.widget import Widget
from widgetastic.widget.table import resolve_table_widget
class HeaderColumn(TableColumn):
"""Represents a cell in the header row."""
def __locator__(self):
return "(./td|./th)[{}]".format(self.position + 1)
@property
def is_sortable(self):
"""Returns true of the column is sortable."""
return "pf-c-table__sort" in self.browser.classes(self)
@property
def sorting_order(self):
"""Returns current sorting order as a string."""
return self.browser.get_attribute("aria-sort", self)
def sort(self, order="ascending"):
"""Sorts the column according to the supplied "ascending" or "descending"."""
if order not in ("ascending", "descending"):
raise ValueError("order should be either 'ascending' or 'descending'")
while self.sorting_order != order:
self.click()
class HeaderRow(TableRow):
Column = HeaderColumn
def __init__(self, parent, logger=None):
Widget.__init__(self, parent, logger=logger)
def __locator__(self):
return "./thead/tr"
def __repr__(self):
return "{}({!r})".format(type(self).__name__, self.parent)
def __getitem__(self, item):
if isinstance(item, six.string_types):
index = self.table.header_index_mapping[self.table.ensure_normal(item)]
elif isinstance(item, int):
index = item
else:
raise TypeError("row[] accepts only integers and strings")
return self.Column(self, index, logger=create_item_logger(self.logger, item))
def read(self):
"""Returns the values of the headers of the HeaderRow object."""
return self.parent.headers
class PatternflyTableRow(TableRow):
"""
Extends TableRow to support having a 'th' tag within the row
"""
HEADER_IN_ROW = "./th[1]"
TABLE_COLUMN_CLS = TableColumn
@property
def has_row_header(self):
"""Returns a boolean detailing if the Table Row has a header."""
return len(self.browser.elements(self.HEADER_IN_ROW)) > 0
def __getitem__(self, item):
if isinstance(item, six.string_types):
index = self.table.header_index_mapping[self.table.ensure_normal(item)]
elif isinstance(item, int):
index = item
else:
raise TypeError("row[] accepts only integers and strings")
# We need to do adjustments if a <th> tag exists inside the row...
# Typically the layout is: <td>, <th>, <td>, <td>, <td>, and so on...
if self.has_row_header:
if index == 1:
# We assume the header entry always sits at position 1. Return a TableColumn for it.
# Pass position '0' since the Column __locator__ uses 'position + 1'
return self.TABLE_COLUMN_CLS(self, 0, logger=create_item_logger(self.logger, item))
if index > 1:
# Adjust the index for td objects that exist beyond the th so xpath is valid
index = index - 1
# After adjusting the index, call the original __getitem__ to get our TableColumn item
return super(PatternflyTableRow, self).__getitem__(index)
class PatternflyTable(Table):
"""Represents the Patternfly table.
https://www.patternfly.org/v4/documentation/react/components/table
"""
ROWS = "./tbody/tr[./td]"
HEADERS = "./thead/tr/th|./tr/th|./thead/tr/td"
Row = PatternflyTableRow
header_row = HeaderRow()
@property
def _is_header_in_body(self):
"""Override this to return False.
Some PF4 tables have a 'header cell' (a <th>) inside each body row, which would cause
Table._is_header_in_body to incorrectly return 'True'
"""
return False
def sort_by(self, column, order):
"""Sets the sort order for the supplied column by name, and "ascending/descending"."""
header = self.header_row[column]
header.sort(order)
def _toggle_select_all(self, value, column):
header = self.header_row[column]
header.fill(value)
def select_all(self, column=0):
"""Selects all the rows."""
self._toggle_select_all(True, column)
def deselect_all(self, column=0):
"""Deselects all the rows."""
self._toggle_select_all(False, column)
class ExpandableTableHeaderColumn(TableColumn):
"""
Used for special cases where a <th> appears as a column in ExpandableTable.
"""
def __locator__(self):
"""Override the locator to look inside the first 'tr' within the tbody"""
return "./tr[1]/th[{}]".format(self.position + 1)
class RowNotExpandable(Exception):
def __init__(self, row):
self.row = row
def __str__(self):
return "Row is not expandable: {}".format(repr(self.row))
class ExpandableTableRow(PatternflyTableRow):
"""Represents a row in the table.
If subclassing and also changing the Column class, do not forget to set the Column to the new
class.
Args:
index: Position of the row in the table.
"""
ROW = "./tr[1]"
EXPANDABLE_CONTENT = "./tr[2]"
# Override these values inherited from PatternflyTableRow...
HEADER_IN_ROW = "./tr[1]/th[1]"
TABLE_COLUMN_CLS = ExpandableTableHeaderColumn
def __init__(self, parent, index, content_view=None, logger=None):
super(ExpandableTableRow, self).__init__(parent, index, logger=logger)
content_parent = Text(parent=self, locator=self.EXPANDABLE_CONTENT)
if content_view:
self.content = resolve_table_widget(content_parent, content_view)
else:
self.content = content_parent
@property
def is_displayed(self):
"""Returns a boolean detailing if the Table Row is displayed."""
return self.browser.is_displayed(locator=self.ROW)
@property
def is_expandable(self):
"""Returns a boolean detailing if the table row is expandable."""
return self[0].widget.is_displayed
def _check_expandable(self):
if not self.is_expandable:
raise RowNotExpandable(self)
@property
def is_expanded(self):
"""Returns a boolean detailing if the table row has been expanded."""
self._check_expandable()
return self.browser.is_displayed(locator=self.EXPANDABLE_CONTENT)
def expand(self):
"""Expands the table row."""
self._check_expandable()
if not self.is_expanded:
self[0].widget.click()
self.content.wait_displayed()
def collapse(self):
"""Collapses the table row."""
self._check_expandable()
if self.is_expanded:
self[0].widget.click()
def read(self):
"""Returns a text representation of the table row."""
result = super(ExpandableTableRow, self).read()
# Remove the column with the "expand" button in it
if 0 in result and not result[0]:
del result[0]
return result
class ExpandableTable(PatternflyTable):
"""
The patternfly 4 expandable table has the following outline:
.. code-block:: html
<table>
<thead>
<tbody>
<tr>The row always on display.</tr>
<tr>The "expandable" content viewed by clicking the arrow button</tr>
</tbody>
<tbody>
<tr>Next row...</tr>
<tr>Next row's expandable content...</tr>
Therefore, we modify the behavior of Table here to look for rows based on 'tbody'
tags instead of 'tr' tags. We use a custom class, ExpandableTableRow, which treats
the first <tr> tag as a normal table row (if you call row.read(), it will read
this row -- also table column widgets will apply to this row, etc. etc.), but it
will treat the second <tr> tag as a Text widget, or a parent for a custom defined View
"""
ROWS = "./tbody"
ROW_RESOLVER_PATH = "/table/tbody"
ROW_AT_INDEX = "./tbody[{0}]"
COLUMN_RESOLVER_PATH = "/tr[0]/td"
COLUMN_AT_POSITION = "./tr[1]/td[{0}]"
ROW_TAG = "tbody"
HEADERS = "./thead/tr/th|./thead/tr/td"
Row = ExpandableTableRow
def __init__(self, *args, **kwargs):
"""Extend init of Table
Automatically add the 'expand' button widget as column 0.
Provide additional kwarg for 'content_view', which is used to pass in a WidgetDescriptor
to be used as the Widget/View for the expanded content of each row.
"""
column_widgets = kwargs.get("column_widgets")
self.content_view = kwargs.pop("content_view", None)
col_widget = Text('./button[contains(@class, "pf-c-button")]')
if column_widgets and 0 not in column_widgets:
# Do not override column 0 if the user defined it during init
kwargs["column_widgets"][0] = col_widget
elif not column_widgets:
kwargs["column_widgets"] = {0: col_widget}
super(ExpandableTable, self).__init__(*args, **kwargs)
def _create_row(self, parent, index, logger=None):
return self.Row(parent, index, self.content_view, logger)
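# A minimal usage sketch for ExpandableTable (all names below are hypothetical and not part
# of this module):
#
#     class RowDetails(View):
#         message = Text(".//div[@class='details']")
#
#     class MyPage(View):
#         table = ExpandableTable('.//table[@id="expandable"]', content_view=RowDetails)
#
#     row = MyPage(browser).table[0]
#     row.expand()                  # clicks the arrow button widget in column 0
#     details = row.content.read()  # reads the RowDetails view for this row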
```
#### File: src/widgetastic_patternfly4/tabs.py
```python
from widgetastic.utils import ParametrizedLocator
from widgetastic.widget import View
class Tab(View):
"""Represents the Patternfly Tab widget.
Selects itself automatically when any child widget gets accessed, ensuring that the widget is
visible.
https://www.patternfly.org/v4/documentation/react/components/tabs
"""
# The text on the tab. Can be omitted if it is the same as the tab class name capitalized
TAB_NAME = None
# Locator of the Tab selector
TAB_LOCATOR = ParametrizedLocator(
'.//div[contains(@class, "pf-c-tabs")]/ul'
"/li[button[normalize-space(.)={@tab_name|quote}]]"
)
ROOT = ParametrizedLocator(
".//section[@aria-labelledby=string("
"preceding-sibling::div/ul/li/button[normalize-space(.)={@tab_name|quote}]/@id)]"
"|"
".//section[@id=string(../preceding-sibling::div/ul/li"
"/button[normalize-space(.)={@tab_name|quote}]/@aria-controls)]"
)
@property
def tab_name(self):
"""Returns the tab name as a string."""
return self.TAB_NAME or type(self).__name__.replace("_", " ").capitalize()
def is_active(self):
"""Returns a boolean detailing of the tab is active."""
return "pf-m-current" in self.parent_browser.classes(self.TAB_LOCATOR)
@property
def is_displayed(self):
"""Returns a boolean detailing of the tab is displayed."""
return self.parent_browser.is_displayed(self.TAB_LOCATOR)
def click(self):
"""Clicks the tab."""
return self.parent_browser.click(self.TAB_LOCATOR)
def select(self):
"""Selects the tab (checks if active already first)."""
if not self.is_active():
self.logger.info("Opening the tab %s", self.tab_name)
self.click()
def child_widget_accessed(self, widget):
# Select the tab
self.select()
def __repr__(self):
return "<Tab {!r}>".format(self.tab_name)
```
#### File: widgetastic.patternfly4/testing/conftest.py
```python
import os
import pytest
from selenium import webdriver
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from widgetastic.browser import Browser
@pytest.fixture(scope="session")
def browser_name():
return os.environ["BROWSER"]
@pytest.fixture(scope="module")
def selenium(browser_name):
if browser_name == "firefox":
driver = webdriver.Remote(desired_capabilities=DesiredCapabilities.FIREFOX)
elif browser_name == "chrome":
caps = DesiredCapabilities.CHROME.copy()
caps["chromeOptions"] = {"args": ["disable-dev-shm-usage", "no-sandbox"]}
driver = webdriver.Remote(desired_capabilities=caps)
yield driver
driver.quit()
@pytest.fixture(scope="module")
def browser(selenium, request):
name = request.module.__name__.split("_")[1]
category = getattr(request.module, "CATEGORY", "components")
url = f"https://patternfly-react.surge.sh/patternfly-4/documentation/react/{category}/{name}"
selenium.maximize_window()
selenium.get(url)
return Browser(selenium)
```
#### File: widgetastic.patternfly4/testing/test_dropdown.py
```python
import pytest
from widgetastic.widget import View
from widgetastic_patternfly4 import Dropdown
from widgetastic_patternfly4 import DropdownItemDisabled
from widgetastic_patternfly4 import DropdownItemNotFound
from widgetastic_patternfly4 import GroupDropdown
@pytest.fixture
def view(browser):
class TestView(View):
ROOT = "(.//div[@id='ws-react-c-dropdown-basic'])[1]"
dropdown_txt_locator = Dropdown("Dropdown")
dropdown_custom_locator = Dropdown(locator=".//div[contains(@class, 'pf-c-dropdown')]")
dropdown_default_locator = Dropdown()
return TestView(browser)
@pytest.fixture(
params=["dropdown_txt_locator", "dropdown_custom_locator", "dropdown_default_locator"]
)
def dropdown(view, request):
return getattr(view, request.param)
@pytest.fixture()
def group_dropdown(browser):
return GroupDropdown(
browser,
locator=(
".//div[@id='ws-react-c-dropdown-with-groups']"
"/div[contains(@class, 'pf-c-dropdown')]"
),
)
def test_dropdown_is_displayed(dropdown):
assert dropdown.is_displayed
def test_enabled_dropdown(dropdown):
assert dropdown.is_enabled
def test_dropdown_items(dropdown):
assert dropdown.items == [
"Link",
"Action",
"Disabled Link",
"Disabled Action",
"",
"Separated Link",
"Separated Action",
]
assert dropdown.has_item("Action")
assert not dropdown.has_item("Non existing items")
assert dropdown.item_enabled("Action")
assert not dropdown.item_enabled("Disabled Link")
def test_dropdown_open(dropdown):
assert not dropdown.is_open
dropdown.open()
assert dropdown.is_open
dropdown.close()
assert not dropdown.is_open
def test_dropdown_item_select(dropdown):
dropdown.item_select("Action")
assert not dropdown.is_open
with pytest.raises(DropdownItemDisabled):
dropdown.item_select("Disabled Link")
with pytest.raises(DropdownItemNotFound):
dropdown.item_select("Non existing items")
def test_group_dropdown(group_dropdown):
assert group_dropdown.is_displayed
assert group_dropdown.is_enabled
assert group_dropdown.items == [
"Link",
"Action",
"Group 2 Link",
"Group 2 Action",
"Group 3 Link",
"Group 3 Action",
]
assert group_dropdown.has_item("Group 2 Link")
assert group_dropdown.item_enabled("Group 3 Action")
assert group_dropdown.groups == ["Group 2", "Group 3"]
group_dropdown.item_select("Link")
group_dropdown.item_select("Group 3 Link", group_name="Group 3")
with pytest.raises(DropdownItemNotFound):
group_dropdown.item_select("Group 3 Link", group_name="Group 2")
``` |
{
"source": "johndwwe/Jarves",
"score": 3
} |
#### File: johndwwe/Jarves/john-jarves.py
```python
import os  # standard library
import pyttsx3  # text-to-speech engine: pip install pyttsx3
import datetime  # used to get the current date and time
import speech_recognition as sr  # pip install SpeechRecognition
import subprocess  # standard library, used to open and close programs
import wikipedia  # pip install wikipedia
engine = pyttsx3.init()
voices = engine.getProperty('voices')
engine.setProperty('voice', voices[1].id)
def speak(audio):
engine.say(audio)
engine.runAndWait()
def login():
pass
def time():
Time = datetime.datetime.now().strftime("%I:%M:%p")
speak(Time)
def date():
year = datetime.datetime.now().year
month = datetime.datetime.now().month
date = datetime.datetime.now().day
speak(date)
speak(month)
speak(year)
def wishme():
speak("Wellcome <NAME>")
speak("the current time is")
time()
speak("the current date is")
date()
hour = datetime.datetime.now().hour
if hour >= 6 and hour < 12:
    speak("Good morning bro!")
elif hour >= 12 and hour < 18:
    speak("Good afternoon bro!")
elif hour >= 18 and hour < 24:
    speak("Good evening bro!")
else:
    speak("Good night bro!")
speak("it's me")
speak("Amanda")
speak("at your service.")
speak("please tell me how can i help you?")
def takeCommand():
r = sr.Recognizer()
with sr.Microphone() as source:
print("Listening...")
r.pause_threshold = 1
audio = r.listen(source)
try:
print("Recongnizning...")
query = r.recognize_google(audio, language='en-in')
print(query)
except Exception as e:
print(e)
return "None"
return query
wishme()
# Track launched programs so the "close ..." commands don't fail if nothing was opened yet
chrome = microsoft = youtube = google = notepad = None
while True:
query = takeCommand().lower()
if 'wikipedia' in query:
speak("Searching wikipedia...")
query = query.replace("wikipedia", "")
result = wikipedia.summary(query, sentences=4)
speak("four sentences i will tell you")
print(result)
speak(result)
elif 'bye' in query:
speak("bye bye bro")
exit()
elif 'exit' in query:
speak("bye bye bro")
exit()
elif 'open browser' in query:
speak('which browser')
speak('chrome. or')
speak('Microsoft edge')
elif 'open chrome' in query:
speak('chrome is opening')
chrome = subprocess.Popen(['C:\\Program Files\\Google\\Chrome\\Application\\chrome.exe'])
elif 'close chrome' in query:
    speak('closing chrome')
    if chrome:
        chrome.terminate()
elif 'open microsoft edge' in query:
speak('microsoft Edge is opening')
microsoft = subprocess.Popen(['C:\\Program Files (x86)\\Microsoft\\Edge\\Application\\msedge.exe'])
elif 'close microsoft edge' in query:
    speak('microsoft edge is closing')
    if microsoft:
        microsoft.terminate()
elif 'open youtube' in query:
speak('youtube is opening')
youtube = subprocess.Popen(['C:\\Program Files\\Google\\Chrome\\Application\\chrome.exe', 'youtube.com'])
elif 'close youtube' in query:
    speak('closing youtube')
    if youtube:
        youtube.terminate()
elif 'open google' in query:
speak('google is opening')
google = subprocess.Popen(['C:\\Program Files\\Google\\Chrome\\Application\\chrome.exe', 'google.com'])
elif 'close google' in query:
        speak('closing google')
google.terminate()
elif 'what is your name' in query:
speak('my name is amanda')
elif 'your name' in query:
speak('my name is amanda')
elif 'what your name' in query:
speak('my name is amanda')
elif 'open notepad' in query:
speak('opening notepad')
notepad = subprocess.Popen(['C:\\Windows\\system32\\notepad.exe'])
elif 'close notepad' in query:
        speak('closing notepad')
notepad.terminate()
elif 'close' in query:
speak('bye bye john')
exit()
``` |
{
"source": "johne53/MB3Lilv",
"score": 2
} |
#### File: waflib/extras/autowaf.py
```python
import glob
import os
import subprocess
import sys
import time
from waflib import Build, Context, Logs, Options, Utils
from waflib.TaskGen import feature, before, after
global g_is_child
g_is_child = False
# Only run autowaf hooks once (even if sub projects call several times)
global g_step
g_step = 0
global line_just
line_just = 40
# Compute dependencies globally
# import preproc
# preproc.go_absolute = True
# Test context that inherits build context to make configuration available
class TestContext(Build.BuildContext):
"Run tests"
cmd = 'test'
fun = 'test'
@feature('c', 'cxx')
@after('apply_incpaths')
def include_config_h(self):
self.env.append_value('INCPATHS', self.bld.bldnode.abspath())
def set_options(opt, debug_by_default=False, test=False):
"Add standard autowaf options if they havn't been added yet"
global g_step
if g_step > 0:
return
opts = opt.get_option_group('Configuration options')
# Standard directory options
opts.add_option('--bindir', type='string',
help="executable programs [default: PREFIX/bin]")
opts.add_option('--configdir', type='string',
help="configuration data [default: PREFIX/etc]")
opts.add_option('--datadir', type='string',
help="shared data [default: PREFIX/share]")
opts.add_option('--includedir', type='string',
help="header files [default: PREFIX/include]")
opts.add_option('--libdir', type='string',
help="libraries [default: PREFIX/lib]")
opts.add_option('--mandir', type='string',
help="manual pages [default: DATADIR/man]")
opts.add_option('--docdir', type='string',
help="HTML documentation [default: DATADIR/doc]")
# Build options
if debug_by_default:
opts.add_option('--optimize', action='store_false', default=True,
dest='debug', help="build optimized binaries")
else:
opts.add_option('-d', '--debug', action='store_true', default=False,
dest='debug', help="build debuggable binaries")
opts.add_option('--pardebug', action='store_true', default=False,
dest='pardebug',
help="build debug libraries with D suffix")
opts.add_option('-s', '--strict', action='store_true', default=False,
dest='strict',
help="use strict compiler flags and show all warnings")
opts.add_option('-S', '--ultra-strict', action='store_true', default=False,
dest='ultra_strict',
help="use extremely strict compiler flags (likely noisy)")
opts.add_option('--docs', action='store_true', default=False, dest='docs',
help="build documentation (requires doxygen)")
# Test options
if test:
test_opts = opt.add_option_group('Test options', '')
opts.add_option('-T', '--test', action='store_true', dest='build_tests',
help='build unit tests')
opts.add_option('--no-coverage', action='store_true',
dest='no_coverage',
help='do not instrument code for test coverage')
test_opts.add_option('--wrapper', type='string',
dest='test_wrapper',
help='command prefix for tests (e.g. valgrind)')
g_step = 1
def add_flags(opt, flags):
for name, desc in flags.items():
opt.add_option('--' + name, action='store_true',
dest=name.replace('-', '_'), help=desc)
def get_check_func(conf, lang):
if lang == 'c':
return conf.check_cc
elif lang == 'cxx':
return conf.check_cxx
else:
Logs.error("Unknown header language `%s'" % lang)
def check_header(conf, lang, name, define='', mandatory=True):
"Check for a header"
check_func = get_check_func(conf, lang)
if define != '':
check_func(header_name=name,
define_name=define,
mandatory=mandatory)
else:
check_func(header_name=name, mandatory=mandatory)
def check_function(conf, lang, name, **args):
"Check for a function"
header_names = Utils.to_list(args['header_name'])
includes = ''.join(['#include <%s>\n' % x for x in header_names])
fragment = '''
%s
int main() { return !(void(*)())(%s); }
''' % (includes, name)
check_func = get_check_func(conf, lang)
args['msg'] = 'Checking for %s' % name
check_func(fragment=fragment, **args)
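# Illustrative sketch (not part of the original file): how a project's wscript
# configure() might call check_function(). The header and function names below
# are assumptions chosen for the example, not requirements of this module.
#
#     def configure(conf):
#         autowaf.check_function(conf, 'c', 'posix_memalign',
#                                header_name='stdlib.h',
#                                define_name='HAVE_POSIX_MEMALIGN',
#                                mandatory=False)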
def nameify(name):
return (name.replace('/', '_').replace('++', 'PP')
.replace('-', '_').replace('.', '_'))
def define(conf, var_name, value):
conf.define(var_name, value)
conf.env[var_name] = value
def check_pkg(conf, name, **args):
"Check for a package iff it hasn't been checked for yet"
if args['uselib_store'].lower() in conf.env['AUTOWAF_LOCAL_LIBS']:
return
class CheckType:
OPTIONAL = 1
MANDATORY = 2
var_name = 'CHECKED_' + nameify(args['uselib_store'])
check = var_name not in conf.env
mandatory = 'mandatory' not in args or args['mandatory']
if not check and 'atleast_version' in args:
# Re-check if version is newer than previous check
checked_version = conf.env['VERSION_' + name]
if checked_version and checked_version < args['atleast_version']:
check = True
if not check and mandatory and conf.env[var_name] == CheckType.OPTIONAL:
# Re-check if previous check was optional but this one is mandatory
check = True
if check:
found = None
pkg_var_name = 'PKG_' + name.replace('-', '_')
pkg_name = name
if conf.env.PARDEBUG:
args['mandatory'] = False # Smash mandatory arg
found = conf.check_cfg(package=pkg_name + 'D',
args="--cflags --libs", **args)
if found:
pkg_name += 'D'
if mandatory:
args['mandatory'] = True # Unsmash mandatory arg
if not found:
found = conf.check_cfg(package=pkg_name, args="--cflags --libs",
**args)
if found:
conf.env[pkg_var_name] = pkg_name
if 'atleast_version' in args:
conf.env['VERSION_' + name] = args['atleast_version']
if mandatory:
conf.env[var_name] = CheckType.MANDATORY
else:
conf.env[var_name] = CheckType.OPTIONAL
if not conf.env.MSVC_COMPILER and 'system' in args and args['system']:
includes = conf.env['INCLUDES_' + nameify(args['uselib_store'])]
for path in includes:
if 'COMPILER_CC' in conf.env:
conf.env.append_value('CFLAGS', ['-isystem', path])
if 'COMPILER_CXX' in conf.env:
conf.env.append_value('CXXFLAGS', ['-isystem', path])
conf.env.append_value('CXXFLAGS', ['-isystem', '/usr/local/include'])
def normpath(path):
if sys.platform == 'win32':
return os.path.normpath(path).replace('\\', '/')
else:
return os.path.normpath(path)
def configure(conf):
global g_step
if g_step > 1:
return
def append_cxx_flags(flags):
conf.env.append_value('CFLAGS', flags)
conf.env.append_value('CXXFLAGS', flags)
if Options.options.docs:
conf.load('doxygen')
try:
conf.load('clang_compilation_database')
except Exception:
pass
prefix = normpath(os.path.abspath(os.path.expanduser(conf.env['PREFIX'])))
conf.env['DOCS'] = Options.options.docs and conf.env.DOXYGEN
conf.env['DEBUG'] = Options.options.debug or Options.options.pardebug
conf.env['PARDEBUG'] = Options.options.pardebug
conf.env['PREFIX'] = prefix
def config_dir(var, opt, default):
if opt:
conf.env[var] = normpath(opt)
else:
conf.env[var] = normpath(default)
opts = Options.options
config_dir('BINDIR', opts.bindir, os.path.join(prefix, 'bin'))
config_dir('SYSCONFDIR', opts.configdir, os.path.join(prefix, 'etc'))
config_dir('DATADIR', opts.datadir, os.path.join(prefix, 'share'))
config_dir('INCLUDEDIR', opts.includedir, os.path.join(prefix, 'include'))
config_dir('LIBDIR', opts.libdir, os.path.join(prefix, 'lib'))
datadir = conf.env['DATADIR']
config_dir('MANDIR', opts.mandir, os.path.join(datadir, 'man'))
config_dir('DOCDIR', opts.docdir, os.path.join(datadir, 'doc'))
if Options.options.debug:
if conf.env['MSVC_COMPILER']:
conf.env['CFLAGS'] = ['/Od', '/Z7', '/MTd', '/FS']
conf.env['CXXFLAGS'] = ['/Od', '/Z7', '/MTd', '/FS']
conf.env['LINKFLAGS'] = ['/DEBUG', '/MANIFEST']
else:
conf.env['CFLAGS'] = ['-O0', '-g']
conf.env['CXXFLAGS'] = ['-O0', '-g']
else:
if conf.env['MSVC_COMPILER']:
append_cxx_flags(['/MD', '/FS', '/DNDEBUG'])
else:
append_cxx_flags(['-DNDEBUG'])
if conf.env.MSVC_COMPILER:
Options.options.no_coverage = True
append_cxx_flags(['/nologo',
'/FS',
'/DNDEBUG',
'/D_CRT_SECURE_NO_WARNINGS',
'/experimental:external',
'/external:W0',
'/external:anglebrackets'])
conf.env.append_value('LINKFLAGS', '/nologo')
if Options.options.strict or Options.options.ultra_strict:
ms_strict_flags = ['/Wall',
'/wd4061',
'/wd4200',
'/wd4514',
'/wd4571',
'/wd4625',
'/wd4626',
'/wd4706',
'/wd4710',
'/wd4820',
'/wd5026',
'/wd5027',
'/wd5045']
conf.env.append_value('CFLAGS', ms_strict_flags)
conf.env.append_value('CXXFLAGS', ms_strict_flags)
conf.env.append_value('CXXFLAGS', ['/EHsc'])
else:
if Options.options.ultra_strict:
Options.options.strict = True
conf.env.append_value('CFLAGS', ['-Wredundant-decls',
'-Wstrict-prototypes',
'-Wmissing-prototypes',
'-Wcast-qual'])
conf.env.append_value('CXXFLAGS', ['-Wcast-qual'])
if Options.options.strict:
conf.env.append_value('CFLAGS', ['-pedantic', '-Wshadow'])
if conf.env.DEST_OS != "darwin":
conf.env.append_value('LINKFLAGS', ['-Wl,--no-undefined'])
conf.env.append_value('CXXFLAGS', ['-Wnon-virtual-dtor',
'-Woverloaded-virtual'])
append_cxx_flags(['-Wall',
'-Wcast-align',
'-Wextra',
'-Wmissing-declarations',
'-Wno-unused-parameter',
'-Wstrict-overflow',
'-Wundef',
'-Wwrite-strings',
'-fstrict-overflow'])
# Add less universal flags after checking they work
extra_flags = ['-Wlogical-op',
'-Wsuggest-attribute=noreturn',
'-Wunsafe-loop-optimizations']
if conf.check_cc(cflags=['-Werror'] + extra_flags, mandatory=False,
msg="Checking for extra C warning flags"):
conf.env.append_value('CFLAGS', extra_flags)
if 'COMPILER_CXX' in conf.env:
if conf.check_cxx(cxxflags=['-Werror'] + extra_flags,
mandatory=False,
msg="Checking for extra C++ warning flags"):
conf.env.append_value('CXXFLAGS', extra_flags)
if not conf.env['MSVC_COMPILER']:
append_cxx_flags(['-fshow-column'])
conf.env.NO_COVERAGE = True
conf.env.BUILD_TESTS = False
try:
conf.env.BUILD_TESTS = Options.options.build_tests
conf.env.NO_COVERAGE = Options.options.no_coverage
if not Options.options.no_coverage:
# Set up unit test code coverage
if conf.is_defined('CLANG'):
for cov in [conf.env.CC[0].replace('clang', 'llvm-cov'),
'llvm-cov']:
if conf.find_program(cov, var='LLVM_COV', mandatory=False):
break
else:
conf.check_cc(lib='gcov', define_name='HAVE_GCOV',
mandatory=False)
except Exception:
pass # Test options do not exist
# Define version in configuration
appname = getattr(Context.g_module, Context.APPNAME, 'noname')
version = getattr(Context.g_module, Context.VERSION, '0.0.0')
defname = appname.upper().replace('-', '_').replace('.', '_')
define(conf, defname + '_VERSION', version)
conf.env.prepend_value('CFLAGS', '-I' + os.path.abspath('.'))
conf.env.prepend_value('CXXFLAGS', '-I' + os.path.abspath('.'))
g_step = 2
def display_summary(conf, msgs=None):
global g_is_child
if not g_is_child:
display_msg(conf, "Install prefix", conf.env['PREFIX'])
if 'COMPILER_CC' in conf.env:
display_msg(conf, "C Flags", ' '.join(conf.env['CFLAGS']))
if 'COMPILER_CXX' in conf.env:
display_msg(conf, "C++ Flags", ' '.join(conf.env['CXXFLAGS']))
display_msg(conf, "Debuggable", bool(conf.env['DEBUG']))
display_msg(conf, "Build documentation", bool(conf.env['DOCS']))
if msgs is not None:
display_msgs(conf, msgs)
def set_c_lang(conf, lang):
"Set a specific C language standard, like 'c99' or 'c11'"
if conf.env.MSVC_COMPILER:
# MSVC has no hope or desire to compile C99, just compile as C++
conf.env.append_unique('CFLAGS', ['/TP'])
else:
flag = '-std=%s' % lang
conf.check(cflags=['-Werror', flag],
msg="Checking for flag '%s'" % flag)
conf.env.append_unique('CFLAGS', [flag])
def set_cxx_lang(conf, lang):
"Set a specific C++ language standard, like 'c++11', 'c++14', or 'c++17'"
if conf.env.MSVC_COMPILER:
if lang != 'c++14':
lang = 'c++latest'
conf.env.append_unique('CXXFLAGS', ['/std:%s' % lang])
else:
flag = '-std=%s' % lang
conf.check(cxxflags=['-Werror', flag],
msg="Checking for flag '%s'" % flag)
conf.env.append_unique('CXXFLAGS', [flag])
def set_modern_c_flags(conf):
"Use the most modern C language available"
if 'COMPILER_CC' in conf.env:
if conf.env.MSVC_COMPILER:
# MSVC has no hope or desire to compile C99, just compile as C++
conf.env.append_unique('CFLAGS', ['/TP'])
else:
for flag in ['-std=c11', '-std=c99']:
if conf.check(cflags=['-Werror', flag], mandatory=False,
msg="Checking for flag '%s'" % flag):
conf.env.append_unique('CFLAGS', [flag])
break
def set_modern_cxx_flags(conf, mandatory=False):
"Use the most modern C++ language available"
if 'COMPILER_CXX' in conf.env:
if conf.env.MSVC_COMPILER:
conf.env.append_unique('CXXFLAGS', ['/std:c++latest'])
else:
for lang in ['c++14', 'c++1y', 'c++11', 'c++0x']:
flag = '-std=%s' % lang
if conf.check(cxxflags=['-Werror', flag], mandatory=False,
msg="Checking for flag '%s'" % flag):
conf.env.append_unique('CXXFLAGS', [flag])
break
def set_local_lib(conf, name, has_objects):
var_name = 'HAVE_' + nameify(name.upper())
define(conf, var_name, 1)
if has_objects:
if type(conf.env['AUTOWAF_LOCAL_LIBS']) != dict:
conf.env['AUTOWAF_LOCAL_LIBS'] = {}
conf.env['AUTOWAF_LOCAL_LIBS'][name.lower()] = True
else:
if type(conf.env['AUTOWAF_LOCAL_HEADERS']) != dict:
conf.env['AUTOWAF_LOCAL_HEADERS'] = {}
conf.env['AUTOWAF_LOCAL_HEADERS'][name.lower()] = True
def append_property(obj, key, val):
if hasattr(obj, key):
setattr(obj, key, getattr(obj, key) + val)
else:
setattr(obj, key, val)
def use_lib(bld, obj, libs):
abssrcdir = os.path.abspath('.')
libs_list = libs.split()
for l in libs_list:
in_headers = l.lower() in bld.env['AUTOWAF_LOCAL_HEADERS']
in_libs = l.lower() in bld.env['AUTOWAF_LOCAL_LIBS']
if in_libs:
append_property(obj, 'use', ' lib%s ' % l.lower())
append_property(obj, 'framework', bld.env['FRAMEWORK_' + l])
if in_headers or in_libs:
if bld.env.MSVC_COMPILER:
inc_flag = '/I' + os.path.join(abssrcdir, l.lower())
else:
inc_flag = '-iquote ' + os.path.join(abssrcdir, l.lower())
for f in ['CFLAGS', 'CXXFLAGS']:
if inc_flag not in bld.env[f]:
bld.env.prepend_value(f, inc_flag)
else:
append_property(obj, 'uselib', ' ' + l)
@feature('c', 'cxx')
@before('apply_link')
def version_lib(self):
if self.env.DEST_OS == 'win32':
self.vnum = None # Prevent waf from automatically appending -0
if self.env['PARDEBUG']:
applicable = ['cshlib', 'cxxshlib', 'cstlib', 'cxxstlib']
if [x for x in applicable if x in self.features]:
self.target = self.target + 'D'
def set_lib_env(conf, name, version):
"Set up environment for local library as if found via pkg-config."
NAME = name.upper()
major_ver = version.split('.')[0]
pkg_var_name = 'PKG_' + name.replace('-', '_') + '_' + major_ver
lib_name = '%s-%s' % (name, major_ver)
if conf.env.PARDEBUG:
lib_name += 'D'
conf.env[pkg_var_name] = lib_name
conf.env['INCLUDES_' + NAME] = ['${INCLUDEDIR}/%s-%s' % (name, major_ver)]
conf.env['LIBPATH_' + NAME] = [conf.env.LIBDIR]
conf.env['LIB_' + NAME] = [lib_name]
conf.define(NAME + '_VERSION', version)
def set_line_just(conf, width):
global line_just
line_just = max(line_just, width)
conf.line_just = line_just
def display_header(title):
global g_is_child
if g_is_child:
Logs.pprint('BOLD', title)
def display_msg(conf, msg, status=None, color=None):
color = 'CYAN'
if type(status) == bool and status:
color = 'GREEN'
status = 'yes'
elif type(status) == bool and not status or status == "False":
color = 'YELLOW'
status = 'no'
Logs.pprint('BOLD', '%s' % msg.ljust(conf.line_just), sep='')
Logs.pprint('BOLD', ":", sep='')
Logs.pprint(color, status)
def display_msgs(conf, msgs):
for k, v in msgs.items():
display_msg(conf, k, v)
def link_flags(env, lib):
return ' '.join(map(lambda x: env['LIB_ST'] % x,
env['LIB_' + lib]))
def compile_flags(env, lib):
return ' '.join(map(lambda x: env['CPPPATH_ST'] % x,
env['INCLUDES_' + lib]))
def set_recursive():
global g_is_child
g_is_child = True
def is_child():
global g_is_child
return g_is_child
def build_pc(bld, name, version, version_suffix, libs, subst_dict={}):
"""Build a pkg-config file for a library.
name -- uppercase variable name (e.g. 'SOMENAME')
version -- version string (e.g. '1.2.3')
version_suffix -- name version suffix (e.g. '2')
libs -- string/list of dependencies (e.g. 'LIBFOO GLIB')
"""
pkg_prefix = bld.env['PREFIX']
if pkg_prefix[-1] == '/':
pkg_prefix = pkg_prefix[:-1]
target = name.lower()
if version_suffix != '':
target += '-' + version_suffix
if bld.env['PARDEBUG']:
target += 'D'
target += '.pc'
libdir = bld.env['LIBDIR']
if libdir.startswith(pkg_prefix):
libdir = libdir.replace(pkg_prefix, '${exec_prefix}')
includedir = bld.env['INCLUDEDIR']
if includedir.startswith(pkg_prefix):
includedir = includedir.replace(pkg_prefix, '${prefix}')
obj = bld(features='subst',
source='%s.pc.in' % name.lower(),
target=target,
install_path=os.path.join(bld.env['LIBDIR'], 'pkgconfig'),
exec_prefix='${prefix}',
PREFIX=pkg_prefix,
EXEC_PREFIX='${prefix}',
LIBDIR=libdir,
INCLUDEDIR=includedir)
if type(libs) != list:
libs = libs.split()
subst_dict[name + '_VERSION'] = version
subst_dict[name + '_MAJOR_VERSION'] = version[0:version.find('.')]
for i in libs:
subst_dict[i + '_LIBS'] = link_flags(bld.env, i)
lib_cflags = compile_flags(bld.env, i)
if lib_cflags == '':
lib_cflags = ' '
subst_dict[i + '_CFLAGS'] = lib_cflags
obj.__dict__.update(subst_dict)
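# Illustrative sketch (not in the original file): a typical call from a wscript's
# build() function, reusing the placeholder names from the docstring above.
#
#     autowaf.build_pc(bld, 'SOMENAME', '1.2.3', '2', 'LIBFOO GLIB')
#
# With PARDEBUG unset this substitutes somename.pc.in into somename-2.pc and
# installs it under ${LIBDIR}/pkgconfig.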
def build_dir(name, subdir):
if is_child():
return os.path.join('build', name, subdir)
else:
return os.path.join('build', subdir)
def make_simple_dox(name):
"Clean up messy Doxygen documentation after it is built"
name = name.lower()
NAME = name.upper()
try:
top = os.getcwd()
os.chdir(build_dir(name, 'doc/html'))
page = 'group__%s.html' % name
if not os.path.exists(page):
return
for i in [
['%s_API ' % NAME, ''],
['%s_DEPRECATED ' % NAME, ''],
['group__%s.html' % name, ''],
[' ', ''],
[r'<script.*><\/script>', ''],
[r'<hr\/><a name="details" id="details"><\/a><h2>.*<\/h2>', ''],
[r'<link href=\"tabs.css\" rel=\"stylesheet\" type=\"text\/css\"\/>',
''],
[r'<img class=\"footer\" src=\"doxygen.png\" alt=\"doxygen\"\/>',
'Doxygen']]:
os.system("sed -i 's/%s/%s/g' %s" % (i[0], i[1], page))
os.rename('group__%s.html' % name, 'index.html')
for i in (glob.glob('*.png') +
glob.glob('*.html') +
glob.glob('*.js') +
glob.glob('*.css')):
if i != 'index.html' and i != 'style.css':
os.remove(i)
os.chdir(top)
os.chdir(build_dir(name, 'doc/man/man3'))
for i in glob.glob('*.3'):
os.system("sed -i 's/%s_API //' %s" % (NAME, i))
for i in glob.glob('_*'):
os.remove(i)
os.chdir(top)
except Exception as e:
Logs.error("Failed to fix up %s documentation: %s" % (name, e))
def build_dox(bld, name, version, srcdir, blddir, outdir='', versioned=True):
"""Build Doxygen API documentation"""
if not bld.env['DOCS']:
return
# Doxygen paths in are relative to the doxygen file, not build directory
if is_child():
src_dir = os.path.join(srcdir, name.lower())
else:
src_dir = srcdir
subst_tg = bld(features='subst',
source='doc/reference.doxygen.in',
target='doc/reference.doxygen',
install_path='',
name='doxyfile')
subst_dict = {
name + '_VERSION': version,
name + '_SRCDIR': os.path.abspath(src_dir),
name + '_DOC_DIR': ''
}
subst_tg.__dict__.update(subst_dict)
subst_tg.post()
docs = bld(features='doxygen',
doxyfile='doc/reference.doxygen')
docs.post()
outname = name.lower()
if versioned:
outname += '-%d' % int(version[0:version.find('.')])
bld.install_files(
os.path.join('${DOCDIR}', outname, outdir, 'html'),
bld.path.get_bld().ant_glob('doc/html/*'))
for i in range(1, 8):
bld.install_files('${MANDIR}/man%d' % i,
bld.path.get_bld().ant_glob('doc/man/man%d/*' % i,
excl='**/_*'))
def build_version_files(header_path, source_path, domain, major, minor, micro):
"""Generate version code header"""
header_path = os.path.abspath(header_path)
source_path = os.path.abspath(source_path)
text = "int " + domain + "_major_version = " + str(major) + ";\n"
text += "int " + domain + "_minor_version = " + str(minor) + ";\n"
text += "int " + domain + "_micro_version = " + str(micro) + ";\n"
try:
o = open(source_path, 'w')
o.write(text)
o.close()
except IOError:
Logs.error('Failed to open %s for writing\n' % source_path)
sys.exit(-1)
text = "#ifndef __" + domain + "_version_h__\n"
text += "#define __" + domain + "_version_h__\n"
text += "extern const char* " + domain + "_revision;\n"
text += "extern int " + domain + "_major_version;\n"
text += "extern int " + domain + "_minor_version;\n"
text += "extern int " + domain + "_micro_version;\n"
text += "#endif /* __" + domain + "_version_h__ */\n"
try:
o = open(header_path, 'w')
o.write(text)
o.close()
except IOError:
Logs.warn('Failed to open %s for writing\n' % header_path)
sys.exit(-1)
return None
def build_i18n_pot(bld, srcdir, dir, name, sources, copyright_holder=None):
Logs.info('Generating pot file from %s' % name)
pot_file = '%s.pot' % name
cmd = ['xgettext',
'--keyword=_',
'--keyword=N_',
'--keyword=S_',
'--from-code=UTF-8',
'-o', pot_file]
if copyright_holder:
cmd += ['--copyright-holder="%s"' % copyright_holder]
cmd += sources
Logs.info('Updating ' + pot_file)
subprocess.call(cmd, cwd=os.path.join(srcdir, dir))
def build_i18n_po(bld, srcdir, dir, name, sources, copyright_holder=None):
pwd = os.getcwd()
os.chdir(os.path.join(srcdir, dir))
pot_file = '%s.pot' % name
po_files = glob.glob('po/*.po')
for po_file in po_files:
cmd = ['msgmerge',
'--update',
po_file,
pot_file]
Logs.info('Updating ' + po_file)
subprocess.call(cmd)
os.chdir(pwd)
def build_i18n_mo(bld, srcdir, dir, name, sources, copyright_holder=None):
pwd = os.getcwd()
os.chdir(os.path.join(srcdir, dir))
po_files = glob.glob('po/*.po')
for po_file in po_files:
mo_file = po_file.replace('.po', '.mo')
cmd = ['msgfmt',
'-c',
'-f',
'-o',
mo_file,
po_file]
Logs.info('Generating ' + po_file)
subprocess.call(cmd)
os.chdir(pwd)
def build_i18n(bld, srcdir, dir, name, sources, copyright_holder=None):
build_i18n_pot(bld, srcdir, dir, name, sources, copyright_holder)
build_i18n_po(bld, srcdir, dir, name, sources, copyright_holder)
build_i18n_mo(bld, srcdir, dir, name, sources, copyright_holder)
def cd_to_build_dir(ctx, appname):
top_level = (len(ctx.stack_path) > 1)
if top_level:
os.chdir(os.path.join('build', appname))
else:
os.chdir('build')
def cd_to_orig_dir(ctx, child):
if child:
os.chdir(os.path.join('..', '..'))
else:
os.chdir('..')
def bench_time():
if hasattr(time, 'perf_counter'): # Added in Python 3.3
return time.perf_counter()
else:
return time.time()
def pre_test(ctx, appname, dirs=['src']):
Logs.pprint('GREEN', '\n[==========] Running %s tests' % appname)
if not hasattr(ctx, 'autowaf_tests_total'):
ctx.autowaf_tests_start_time = bench_time()
ctx.autowaf_tests_total = 0
ctx.autowaf_tests_failed = 0
ctx.autowaf_local_tests_total = 0
ctx.autowaf_local_tests_failed = 0
ctx.autowaf_tests = {}
ctx.autowaf_tests[appname] = {'total': 0, 'failed': 0}
cd_to_build_dir(ctx, appname)
if not ctx.env.NO_COVERAGE:
diropts = ''
for i in dirs:
diropts += ' -d ' + i
clear_log = open('lcov-clear.log', 'w')
try:
try:
# Clear coverage data
subprocess.call(('lcov %s -z' % diropts).split(),
stdout=clear_log, stderr=clear_log)
except Exception:
Logs.warn('Failed to run lcov, no coverage report generated')
finally:
clear_log.close()
class TestFailed(Exception):
pass
def post_test(ctx, appname, dirs=['src'], remove=['*boost*', 'c++*']):
if not ctx.env.NO_COVERAGE:
diropts = ''
for i in dirs:
diropts += ' -d ' + i
coverage_log = open('lcov-coverage.log', 'w')
coverage_lcov = open('coverage.lcov', 'w')
coverage_stripped_lcov = open('coverage-stripped.lcov', 'w')
try:
try:
base = '.'
if g_is_child:
base = '..'
# Generate coverage data
lcov_cmd = 'lcov -c %s -b %s' % (diropts, base)
if ctx.env.LLVM_COV:
lcov_cmd += ' --gcov-tool %s' % ctx.env.LLVM_COV[0]
subprocess.call(lcov_cmd.split(),
stdout=coverage_lcov, stderr=coverage_log)
# Strip unwanted stuff
subprocess.call(
['lcov', '--remove', 'coverage.lcov'] + remove,
stdout=coverage_stripped_lcov, stderr=coverage_log)
# Generate HTML coverage output
if not os.path.isdir('coverage'):
os.makedirs('coverage')
subprocess.call(
'genhtml -o coverage coverage-stripped.lcov'.split(),
stdout=coverage_log, stderr=coverage_log)
except Exception:
Logs.warn('Failed to run lcov, no coverage report generated')
finally:
coverage_stripped_lcov.close()
coverage_lcov.close()
coverage_log.close()
duration = (bench_time() - ctx.autowaf_tests_start_time) * 1000.0
total_tests = ctx.autowaf_tests[appname]['total']
failed_tests = ctx.autowaf_tests[appname]['failed']
passed_tests = total_tests - failed_tests
Logs.pprint('GREEN', '\n[==========] %d tests from %s ran (%d ms total)' % (
total_tests, appname, duration))
if not ctx.env.NO_COVERAGE:
Logs.pprint('GREEN', '[----------] Coverage: <file://%s>'
% os.path.abspath('coverage/index.html'))
Logs.pprint('GREEN', '[ PASSED ] %d tests' % passed_tests)
if failed_tests > 0:
Logs.pprint('RED', '[ FAILED ] %d tests' % failed_tests)
raise TestFailed('Tests from %s failed' % appname)
Logs.pprint('', '')
top_level = (len(ctx.stack_path) > 1)
if top_level:
cd_to_orig_dir(ctx, top_level)
def run_test(ctx,
appname,
test,
desired_status=0,
dirs=['src'],
name='',
header=False,
quiet=False):
"""Run an individual test.
`test` is either a shell command string, or a list of [name, return status]
for displaying tests implemented in the calling Python code.
"""
ctx.autowaf_tests_total += 1
ctx.autowaf_local_tests_total += 1
ctx.autowaf_tests[appname]['total'] += 1
out = (None, None)
if type(test) == list:
name = test[0]
returncode = test[1]
elif callable(test):
returncode = test()
else:
s = test
if isinstance(test, type([])):
s = ' '.join(test)
if header and not quiet:
Logs.pprint('Green', '\n[ RUN ] %s' % s)
cmd = test
if Options.options.test_wrapper:
cmd = Options.options.test_wrapper + ' ' + test
if name == '':
name = test
proc = subprocess.Popen(cmd, shell=True,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out = proc.communicate()
returncode = proc.returncode
success = desired_status is None or returncode == desired_status
if success:
if not quiet:
Logs.pprint('GREEN', '[ OK ] %s' % name)
else:
Logs.pprint('RED', '[ FAILED ] %s' % name)
ctx.autowaf_tests_failed += 1
ctx.autowaf_local_tests_failed += 1
ctx.autowaf_tests[appname]['failed'] += 1
if type(test) != list and not callable(test):
Logs.pprint('RED', test)
if Options.options.verbose and type(test) != list and not callable(test):
sys.stdout.write(out[0].decode('utf-8'))
sys.stderr.write(out[1].decode('utf-8'))
return (success, out)
def tests_name(ctx, appname, name='*'):
if name == '*':
return appname
else:
return '%s.%s' % (appname, name)
def begin_tests(ctx, appname, name='*'):
ctx.autowaf_local_tests_failed = 0
ctx.autowaf_local_tests_total = 0
ctx.autowaf_local_tests_start_time = bench_time()
Logs.pprint('GREEN', '\n[----------] %s' % (
tests_name(ctx, appname, name)))
class Handle:
def __enter__(self):
pass
def __exit__(self, type, value, traceback):
end_tests(ctx, appname, name)
return Handle()
def end_tests(ctx, appname, name='*'):
duration = (bench_time() - ctx.autowaf_local_tests_start_time) * 1000.0
total = ctx.autowaf_local_tests_total
failures = ctx.autowaf_local_tests_failed
if failures == 0:
Logs.pprint('GREEN', '[----------] %d tests from %s (%d ms total)' % (
ctx.autowaf_local_tests_total, tests_name(ctx, appname, name), duration))
else:
Logs.pprint('RED', '[----------] %d/%d tests from %s (%d ms total)' % (
total - failures, total, tests_name(ctx, appname, name), duration))
def run_tests(ctx,
appname,
tests,
desired_status=0,
dirs=['src'],
name='*',
headers=False):
begin_tests(ctx, appname, name)
diropts = ''
for i in dirs:
diropts += ' -d ' + i
for i in tests:
run_test(ctx, appname, i, desired_status, dirs, i, headers)
end_tests(ctx, appname, name)
def run_ldconfig(ctx):
should_run = (ctx.cmd == 'install' and
not ctx.env['RAN_LDCONFIG'] and
ctx.env['LIBDIR'] and
'DESTDIR' not in os.environ and
not Options.options.destdir)
if should_run:
try:
Logs.info("Waf: Running `/sbin/ldconfig %s'" % ctx.env['LIBDIR'])
subprocess.call(['/sbin/ldconfig', ctx.env['LIBDIR']])
ctx.env['RAN_LDCONFIG'] = True
except Exception:
pass
def get_rdf_news(name,
in_files,
top_entries=None,
extra_entries=None,
dev_dist=None):
import rdflib
from time import strptime
doap = rdflib.Namespace('http://usefulinc.com/ns/doap#')
dcs = rdflib.Namespace('http://ontologi.es/doap-changeset#')
rdfs = rdflib.Namespace('http://www.w3.org/2000/01/rdf-schema#')
foaf = rdflib.Namespace('http://xmlns.com/foaf/0.1/')
rdf = rdflib.Namespace('http://www.w3.org/1999/02/22-rdf-syntax-ns#')
m = rdflib.ConjunctiveGraph()
try:
for i in in_files:
m.parse(i, format='n3')
except Exception:
Logs.warn('Error parsing data, unable to generate NEWS')
return
proj = m.value(None, rdf.type, doap.Project)
for f in m.triples([proj, rdfs.seeAlso, None]):
if f[2].endswith('.ttl'):
m.parse(f[2], format='n3')
entries = {}
for r in m.triples([proj, doap.release, None]):
release = r[2]
revision = m.value(release, doap.revision, None)
date = m.value(release, doap.created, None)
blamee = m.value(release, dcs.blame, None)
changeset = m.value(release, dcs.changeset, None)
dist = m.value(release, doap['file-release'], None)
if not dist:
Logs.warn('No file release for %s %s' % (proj, revision))
dist = dev_dist
if revision and date and blamee and changeset:
entry = {}
entry['name'] = str(name)
entry['revision'] = str(revision)
entry['date'] = strptime(str(date), '%Y-%m-%d')
entry['status'] = 'stable' if dist != dev_dist else 'unstable'
entry['dist'] = str(dist)
entry['items'] = []
for i in m.triples([changeset, dcs.item, None]):
item = str(m.value(i[2], rdfs.label, None))
entry['items'] += [item]
if dist and top_entries is not None:
if not str(dist) in top_entries:
top_entries[str(dist)] = {'items': []}
top_entries[str(dist)]['items'] += [
'%s: %s' % (name, item)]
if extra_entries and dist:
for i in extra_entries[str(dist)]:
entry['items'] += extra_entries[str(dist)]['items']
entry['blamee_name'] = str(m.value(blamee, foaf.name, None))
entry['blamee_mbox'] = str(m.value(blamee, foaf.mbox, None))
entries[(str(date), str(revision))] = entry
else:
Logs.warn('Ignored incomplete %s release description' % name)
return entries
def write_news(entries, out_file):
import textwrap
from time import strftime
if len(entries) == 0:
return
news = open(out_file, 'w')
for e in sorted(entries.keys(), reverse=True):
entry = entries[e]
news.write('%s (%s) %s;\n' % (entry['name'], entry['revision'], entry['status']))
for item in entry['items']:
wrapped = textwrap.wrap(item, width=79)
news.write('\n * ' + '\n '.join(wrapped))
news.write('\n\n --')
news.write(' %s <%s>' % (entry['blamee_name'],
entry['blamee_mbox'].replace('mailto:', '')))
news.write(' %s\n\n' % (
strftime('%a, %d %b %Y %H:%M:%S +0000', entry['date'])))
news.close()
def write_posts(entries, meta, out_dir, status='stable'):
"write news posts in Pelican Markdown format"
from time import strftime
try:
os.mkdir(out_dir)
except Exception:
pass
for i in entries:
entry = entries[i]
revision = i[1]
if entry['status'] != status:
continue
date_str = strftime('%Y-%m-%d', entry['date'])
datetime_str = strftime('%Y-%m-%d %H:%M', entry['date'])
path = os.path.join(out_dir, '%s-%s-%s.md' % (
date_str, entry['name'], revision.replace('.', '-')))
post = open(path, 'w')
title = entry['title'] if 'title' in entry else entry['name']
post.write('Title: %s %s\n' % (title, revision))
post.write('Date: %s\n' % datetime_str)
post.write('Slug: %s-%s\n' % (entry['name'], revision.replace('.', '-')))
for k in meta:
post.write('%s: %s\n' % (k, meta[k]))
post.write('\n')
url = entry['dist']
if entry['status'] == status:
post.write('[%s %s](%s) has been released.' % (
(entry['name'], revision, url)))
if 'description' in entry:
post.write(' ' + entry['description'])
post.write('\n')
if (len(entry['items']) > 0 and
not (len(entry['items']) == 1 and
entry['items'][0] == 'Initial release')):
post.write('\nChanges:\n\n')
for i in entry['items']:
post.write(' * %s\n' % i)
post.close()
def get_blurb(in_file):
"Get the first paragram of a Markdown formatted file, skipping the title"
f = open(in_file, 'r')
f.readline() # Title
f.readline() # Title underline
f.readline() # Blank
out = ''
line = f.readline()
while len(line) > 0 and line != '\n':
out += line.replace('\n', ' ')
line = f.readline()
return out.strip()
def get_news(in_file, entry_props={}):
"""Get NEWS entries in the format expected by write_posts().
Properties that should be set on every entry can be passed in
`entry_props`. If `entry_props` has a 'dist_pattern' value, it is used to
set the 'dist' entry of entries by substituting the version number.
"""
    import re
    from email.utils import parsedate  # rfc822 was removed in Python 3
f = open(in_file, 'r')
entries = {}
while True:
# Read header line
head = f.readline()
        matches = re.compile(r'([^ ]*) \((.*)\) ([a-zA-Z]*);').match(head)
if matches is None:
break
entry = {}
entry['name'] = matches.group(1)
entry['revision'] = matches.group(2)
entry['status'] = matches.group(3)
entry['items'] = []
if 'dist_pattern' in entry_props:
entry['dist'] = entry_props['dist_pattern'] % entry['revision']
# Read blank line after header
if f.readline() != '\n':
raise SyntaxError('expected blank line after NEWS header')
def add_item(item):
if len(item) > 0:
entry['items'] += [item.replace('\n', ' ').strip()]
# Read entries for this revision
item = ''
line = ''
while line != '\n':
line = f.readline()
if line.startswith(' * '):
add_item(item)
item = line[3:].lstrip()
else:
item += line.lstrip()
add_item(item)
# Read footer line
foot = f.readline()
matches = re.compile(' -- (.*) <(.*)> (.*)').match(foot)
        entry['date'] = parsedate(matches.group(3))
entry['blamee_name'] = matches.group(1)
entry['blamee_mbox'] = matches.group(2)
entry.update(entry_props)
entries[(entry['date'], entry['revision'])] = entry
# Skip trailing blank line before next entry
f.readline()
f.close()
return entries
def news_to_posts(news_file, entry_props, post_meta, default_post_dir):
post_dir = os.getenv('POST_DIR')
if not post_dir:
post_dir = default_post_dir
sys.stderr.write('POST_DIR not set in environment, writing to %s\n' % post_dir)
else:
sys.stderr.write('writing posts to %s\n' % post_dir)
entries = get_news(news_file, entry_props)
write_posts(entries, post_meta, post_dir)
def run_script(cmds):
for cmd in cmds:
subprocess.check_call(cmd, shell=True)
def release(name, version, dist_name=None):
if dist_name is None:
dist_name = name.lower()
dist = '%s-%s.tar.bz2' % (dist_name or name.lower(), version)
try:
os.remove(dist)
os.remove(dist + '.sig')
except Exception:
pass
    status = subprocess.check_output('git status --porcelain', shell=True).decode()
    if status:
        Logs.error('error: git working copy is dirty\n' + status)
        raise Exception('git working copy is dirty')
    head = subprocess.check_output('git show -s --oneline', shell=True).decode()
    head_summary = head[8:].strip().lower()
expected_summary = '%s %s' % (name.lower(), version)
if head_summary != expected_summary:
raise Exception('latest commit "%s" does not match "%s"' % (
head_summary, expected_summary))
run_script(['./waf configure --docs',
'./waf',
'./waf distcheck',
'./waf posts',
'gpg -b %s' % dist,
'git tag -s v%s -m "%s %s"' % (version, name, version)])
``` |
{
"source": "johneastman/Project-Eyebot",
"score": 3
} |
#### File: Project-Eyebot/image_classification/data_collection.py
```python
import win32api
import cv2
import numpy as np
from PIL import ImageGrab
import time
import sys
import os
from collections import OrderedDict
import direct_keys # direct_keys.py
sys.path.insert(0, "..")
BOUNDING_BOX = (8, 32, 808, 482)
PAUSE_KEY = "P" # <-- Set the key that pauses the program here
SHOW_WINDOW = False
PAUSED = True
def is_key_pressed(key):
"""Check if a key is pressed."""
return win32api.GetAsyncKeyState(ord(key)) != 0
def capture_screen(bounding_box):
"""Capture the screen and process the image."""
screen = np.array(ImageGrab.grab(bbox=bounding_box))
screen = cv2.cvtColor(screen, cv2.COLOR_BGR2GRAY)
screen = cv2.resize(screen, (75, 133))
return screen
def get_current_checkpoint():
"""Get the current checkpoint value.
If recording happens over multiple sessions, this ensures that previous
data will not get overwritten.
"""
checkpoints = [file for file in os.listdir("checkpoints/") if file.endswith(".npy")]
if len(checkpoints) == 0:
# No checkpoint files found. Start at 0.
return 0
# Get the id of each checkpoint and return the largest one plus 1 (for
# the new checkpoint).
checkpoints = [checkpoint.rsplit(".", 1)[0] for checkpoint in checkpoints]
max_checkpoint = max(int(checkpoint.rsplit("_", 1)[1]) for checkpoint in checkpoints)
return max_checkpoint + 1
class Output:
"""This formats output hot-key arrays for each keys in the input."""
def __init__(self, keys):
"""Initialize hot-key arrays for each key in 'keys'.
Each array is the length of 'keys'. In each array, only one "bit" can
be on for each, which is determined by matching the key with its index
in the array of keys. Example:
keys = ["W", "S", "A", "D", "NIL"]
# The index of "W" is 0, so the zeroth element in the array
# for W will be 1, and the remaining elements will be 0.
W = [1, 0, 0, 0, 0]
# The index of "S" is 1, so the second element in the array
# for S will be 1, and the remaining elements will be 0.
S = [0, 1, 0, 0, 0]
"""
self.keys = keys
self.num_keys = len(keys)
# The order with which keys are pressed is important.
self.keys_output = OrderedDict()
for i, key in enumerate(keys):
self.keys_output[key] = [int(i == keys.index(key))
for i in range(self.num_keys)]
def get_output(self):
"""Return the key-mouse pair that is active."""
# Get the direction the mouse is moving (left, right, or not moving)
mouse_direction = direct_keys.get_mouse_direction()
for (key, mouse_dir), value in self.keys_output.items():
            # Each dict key is a (keyboard key, mouse direction) tuple, where the
            # direction is "left", "right", or "none"
try:
# Find the key-direction pair that matches the currently-pressed
# key and the current mouse direction and return the associated
# hot-key array.
if is_key_pressed(key) and mouse_dir == mouse_direction:
return value
            # NIL is not a key; it denotes that no key was pressed. Because this
            # implementation of 'is_key_pressed' requires a single character, a
            # TypeError will be raised when 'NIL' is passed as a parameter.
except TypeError:
                # The second element of the key tuple is the mouse direction
return self.keys_output[("NIL", mouse_direction)]
if __name__ == "__main__":
# Pair each input (keys, mouse) combination with a one-hot array
input_combinations = [("W", "right"), ("W", "left"), ("W", "none"),
("A", "right"), ("A", "left"), ("A", "none"),
("S", "right"), ("S", "left"), ("S", "none"),
("D", "right"), ("D", "left"), ("D", "none"),
("NIL", "right"), ("NIL", "left"), ("NIL", "none")]
output_object = Output(input_combinations)
training_data = []
checkpoint = get_current_checkpoint()
print(checkpoint)
filename_template = "checkpoints/navigation_training_data_{}.npy"
filename = filename_template.format(checkpoint)
print(f"Program is paused. Press {PAUSE_KEY} to begin")
while True:
# Pause the script from recording
if is_key_pressed(PAUSE_KEY):
PAUSED = not PAUSED
if PAUSED:
print("PAUSED")
else:
print("UNPAUSED")
time.sleep(1)
if not PAUSED:
screen = capture_screen(BOUNDING_BOX)
output = output_object.get_output()
training_data.append([screen, output])
# Save a checkpoint of the data
if len(training_data) == 1000:
print(f"saving {filename} to disk...", end="")
np.save(filename, training_data)
training_data = []
checkpoint += 1
# Update the filename for the next checkpoint file
filename = filename_template.format(checkpoint)
print("done")
# Only show what has been recorded if this setting is "True"
if SHOW_WINDOW:
cv2.imshow("window", screen)
if cv2.waitKey(25) & 0xFF == ord("q"):
cv2.destroyAllWindows()
break
``` |
{
"source": "johnebehr/tseu_sandbox",
"score": 2
} |
#### File: app/util/settings.py
```python
import os
class Settings():
PROJECT_TITLE: str = "TSEU Sandbox Application"
PROJECT_VERSION: str = "0.0.1"
# DATABASE_DRIVER = os.environ["DATABASE_DRIVER"]
# DATABASE_USERNAME = os.environ["DATABASE_USERNAME"]
# DATABASE_HOST = os.environ["DATABASE_HOST"]
# DATABASE_PASSWORD = os.environ["DATABASE_PASSWORD"]
# DATABASE_NAME = os.environ["DATABASE_NAME"]
def __init__(self) -> None:
self.project_title: str = "TSEU Sandbox Application"
self.project_version: str = "0.0.1"
self.database_driver: str = os.environ["DATABASE_DRIVER"]
self.database_username: str = os.environ["DATABASE_USERNAME"]
self.database_host: str = os.environ["DATABASE_HOST"]
self.database_password: str = os.environ["DATABASE_PASSWORD"]
self.database_name: str = os.environ["DATABASE_NAME"]
def get_alchemy_dict(self) -> dict:
alchemy_dict = {
"drivername": self.database_driver,
"username": self.database_username,
"host": self.database_host,
"password":<PASSWORD>,
"database":self.database_name
}
return alchemy_dict
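    # Sketch (not part of the original file): the keys of the returned dict line up
    # with the keyword arguments of sqlalchemy.engine.URL.create, so a connection
    # URL could plausibly be built like this:
    #
    #     from sqlalchemy.engine import URL
    #     url = URL.create(**Settings().get_alchemy_dict())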
# def get_alchemy_dict(self) -> dict:
# alchemy_dict = {
# "drivername": self.DATABASE_DRIVER,
# "username": self.DATABASE_USERNAME,
# "host": self.DATABASE_HOST,
# "password":<PASSWORD>.DATABASE_PASSWORD,
# "database":self.DATABASE_NAME
# }
# return alchemy_dict
``` |
{
"source": "johneblake/crepuscular",
"score": 3
} |
#### File: crepuscular/reader/etf_reader.py
```python
import os
import csv
import requests
def read_lines(csv_lines):
"""read lines from csv file"""
items = []
count = 0
for line in csv_lines:
if count > 0:
if line:
items.append((line[0], line[1]))
count += 1
return items
def get_etf():
"""Download csv from nasdaq of etf or read existing file"""
if os.path.exists("etf.csv"):
with open("etf.csv", "r") as csvfile:
csv_lines = csv.reader(csvfile)
return read_lines(csv_lines)
else:
download = requests.get("http://www.nasdaq.com/investing/etfs/etf-finder-results.aspx?download=Yes")
decoded_content = download.content.decode(download.encoding)
with open('etf.csv','w') as file:
file.write(decoded_content)
csv_lines = csv.reader(decoded_content.splitlines())
return read_lines(csv_lines)
``` |
{
"source": "johned0/EdwardsLab",
"score": 3
} |
#### File: EdwardsLab/AuthorInformation/parse_addresses.py
```python
import os
import sys
import argparse
from author import Author, Address
import operator
from collections import OrderedDict
def parse_file(filename):
"""
parse a file and create a set of authors
:param filename: file to parse
:return: set of author elements
"""
authors = set()
firstline = True # ignore the first line
with open(filename, 'r') as f:
for l in f:
if firstline:
firstline = False
continue
p = l.rstrip().split("\t")
if len(p) < 15:
sys.stderr.write("ERROR: Malformed: {}\t{}\n".format(len(p), p))
continue
auth = Author(p[2])
try:
if p[1]:
auth.orcid = p[1]
if p[3]:
auth.lastname = p[3]
auth.lastnamelower = p[3].lower()
if p[4]:
auth.firstname = p[4]
auth.firstnamelower = p[4].lower()
if p[5]:
auth.middleinitial = p[5]
if p[6]:
auth.email = p[6].replace(' ', '')
if p[14]:
auth.order = int(p[14])
if p[15]:
auth.contribution = p[15]
primary = Address()
if p[7]:
primary.department = p[7]
if p[8]:
primary.institution = p[8]
if p[9]:
primary.street = p[9]
if p[10]:
primary.city = p[10]
if p[11]:
primary.state = p[11]
if p[12]:
primary.zip = p[12]
if p[13]:
primary.country = p[13]
auth.primaryaddress = primary
secondary = Address()
if len(p) > 17 and p[17]:
secondary.department = p[17]
if len(p) > 18 and p[18]:
secondary.institution = p[18]
if len(p) > 19 and p[19]:
secondary.street = p[19]
if len(p) > 20 and p[20]:
secondary.city = p[20]
if len(p) > 21 and p[21]:
secondary.state = p[21]
if len(p) > 22 and p[22]:
secondary.zip = p[22]
if len(p) > 23 and p[23]:
secondary.country = p[23]
if secondary.is_valid():
auth.secondaryaddress = secondary
except Exception as err:
sys.stderr.write("Error parsing {}: {}\n".format(p[2], err))
continue
authors.add(auth)
return authors
def test_validity(authors):
"""
Test whether the author information is valid
:param authors: the set of authors
:return: nothing
"""
abbs = set()
for a in authors:
if a.abbreviation.lower() in abbs:
sys.stderr.write("FATAL: {} is a duplicate abbrevation\n".format(a.abbreviation))
abbs.add(a.abbreviation.lower())
if not a.is_valid():
a.verbose = True
print("{}\t{}\t{}".format(a.abbreviation, a.get_name(), a.is_valid()))
def check_spellings(authors):
"""
Check for potential misspellings based on institutions, departments, and zip codes
:param authors: the list of authors
:return:
"""
# find the set of different addresses
addresses = {}
allinst = {}
allzip = {}
for a in sorted(authors, key=operator.attrgetter('order', 'lastnamelower', 'firstnamelower')):
pa = a.primaryaddress.get_address()
if pa not in addresses:
addresses[pa] = {}
addresses[pa]['institution'] = a.primaryaddress.institution
if a.primaryaddress.institution not in allinst:
allinst[a.primaryaddress.institution] = 1
else:
allinst[a.primaryaddress.institution] += 1
addresses[pa]['zip'] = a.primaryaddress.zip
if a.primaryaddress.zip not in allzip:
allzip[a.primaryaddress.zip] = 1
else:
allzip[a.primaryaddress.zip] += 1
if a.secondaryaddress.is_valid():
sa = a.secondaryaddress.get_address()
if sa not in addresses:
addresses[sa] = {}
addresses[sa]['institution'] = a.secondaryaddress.institution
if a.secondaryaddress.institution not in allinst:
allinst[a.secondaryaddress.institution] = 1
else:
allinst[a.secondaryaddress.institution] += 1
addresses[sa]['zip'] = a.secondaryaddress.zip
if a.secondaryaddress.zip not in allzip:
allzip[a.secondaryaddress.zip] = 1
else:
allzip[a.secondaryaddress.zip] += 1
sys.stderr.write("Duplicates by institution\n")
for i in allinst:
if not i:
continue
if allinst[i] < 2:
continue
sys.stderr.write("\n")
for a in addresses:
if addresses[a]['institution'] == i:
sys.stderr.write("{}\n".format(a))
sys.stderr.write("\n\n\nDuplicates by zip\n")
for z in allzip:
if not z:
continue
if allzip[z] < 2:
continue
sys.stderr.write("\n")
for a in addresses:
if addresses[a]['zip'] == z:
sys.stderr.write("{}\n".format(a))
def print_author_list(authors):
"""
Print the list of all authors.
:param authors: the set of authors
:return:
"""
# the list of addresses as we add them. This becomes the order
addresses = []
a: Author
sys.stdout.write("<p>")
for a in sorted(authors, key=operator.attrgetter('order', 'lastnamelower', 'firstnamelower')):
output = a.get_name()
pa = a.primaryaddress.get_address()
if pa not in addresses:
addresses.append(pa)
addidx = addresses.index(pa) + 1
if a.secondaryaddress.is_valid():
sa = a.secondaryaddress.get_address()
if sa not in addresses:
addresses.append(sa)
oidx = addresses.index(sa) + 1
addidx = "{},{}".format(addidx, oidx)
output += "<sup>{}</sup>, ".format(addidx)
sys.stdout.write(output)
sys.stdout.write("</p>\n\n\n")
for i, j in enumerate(addresses):
print("<sup>{}</sup>{}<br>".format(i+1,j))
def print_author_contributions(authors):
"""
Print the author contribution list
:param authors:
:return:
"""
contribs = OrderedDict()
a: Author
for a in sorted(authors, key=operator.attrgetter('order', 'lastnamelower', 'firstnamelower')):
c = a.contribution
thisc = ''.join([c[0].lower(), c[1:]]) # convert the first letter to lower case as it will be in a sentence
if thisc not in contribs:
contribs[thisc] = []
contribs[thisc].append(a.abbreviation)
sys.stdout.write("<p> </p><h1>Author Contributions</h1><p> </p>\n")
for c in contribs:
output = ", ".join(map(str, sorted(contribs[c])))
output += " {}".format(c)
sys.stdout.write("{}. ".format(output))
sys.stdout.write("</p>\n\n\n")
def nature_form(authors):
"""
Print a text list of authors to be cut and pasted for nature
First Name
Middle Initial
Last Name
email
Institution
City
Country
:param authors: the set of authors
:return:
"""
counter=0
for a in sorted(authors, key=operator.attrgetter('order', 'lastnamelower', 'firstnamelower')):
        if a.middleinitial is None:
a.middleinitial = ""
print(counter)
print("\n".join(map(str, [a.firstname, a.middleinitial, a.lastname, a.email,
a.primaryaddress.institution, a.primaryaddress.city,
a.primaryaddress.country])))
print("\n\n")
counter+=1
def science_list(authors):
"""
Print a list of the authors for science. This is easy, first, last, email
:param authors: the set of authors
:return:
"""
for a in sorted(authors, key=operator.attrgetter('order', 'lastnamelower', 'firstnamelower')):
print("\t".join(map(str, [a.firstname, a.lastname, a.email])))
def comma_list(authors):
"""
Print a list of the authors with first MI last,
:param authors: the set of authors
:return:
"""
for a in sorted(authors, key=operator.attrgetter('order', 'lastnamelower', 'firstnamelower')):
if a.middleinitial:
print(" ".join(map(str, [a.firstname, a.middleinitial + ".", a.lastname])), end=", ")
else:
print(" ".join(map(str, [a.firstname, a.lastname])), end=", ")
def email_list(authors):
"""
Print a list suitable for emailing all colleagues.
:param authors: the set of authors
:return:
"""
for a in sorted(authors, key=operator.attrgetter('order', 'lastnamelower', 'firstnamelower')):
sys.stdout.write("{} {} <{}>, ".format(a.firstname, a.lastname, a.email))
print()
def orcid_list(authors):
"""
Print the author list in the correct order, but print their orcid
:param authors: the set of authors
:return:
"""
for a in sorted(authors, key=operator.attrgetter('order', 'lastnamelower', 'firstnamelower')):
orcid = a.orcid
nm = a.get_name()
if orcid:
print(f"{orcid}\t{nm}")
else:
sys.stderr.write("No ORCID for {}\n".format(nm))
print(nm)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Parse the author information from our google doc for the crAssphage paper")
parser.add_argument('-f', help='Google doc of author information', required=True)
parser.add_argument('-t', help="test validity of the author information", action='store_true')
parser.add_argument('-d', help='check for duplicate entries', action='store_true')
parser.add_argument('-o', help='print the author list as ORCids in the correct order', action='store_true')
parser.add_argument('-n', help='print the author list suitable for cutting and pasting to nature', action='store_true')
parser.add_argument('-s', help='print the author list to add to the science bulk upload', action='store_true')
parser.add_argument('-c', help='print the author list comma separated', action='store_true')
parser.add_argument('-e', help='print the author list to use sending emails', action='store_true')
parser.add_argument('-v', help='verbose output', action="store_true")
args = parser.parse_args()
authors = parse_file(args.f)
if args.d:
check_spellings(authors)
sys.exit(0)
if args.t:
test_validity(authors)
sys.exit(0)
if args.n:
nature_form(authors)
sys.exit(0)
if args.s:
science_list(authors)
sys.exit(0)
if args.c:
comma_list(authors)
print()
sys.exit(0)
if args.e:
email_list(authors)
sys.exit(0)
if args.o:
orcid_list(authors)
sys.exit(0)
print_author_list(authors)
print_author_contributions(authors)
```
#### File: EdwardsLab/bam/bam2fastq_paired.py
```python
import argparse
import os
import sys
import pysam
from roblib import read_fasta, bcolors
__author__ = '<NAME>'
def list2ids(listf, verbose=False):
""" Read a list of ids, one per line
:param listf: file of ids
:param verbose: more output
:return: a set of IDS
"""
if verbose:
sys.stderr.write(f"{bcolors.GREEN} Reading IDs from text file: {listf}{bcolors.ENDC}")
i=set()
with open(listf, 'r') as f:
for l in f:
i.add(l.strip().split(" ")[0])
return i
def fasta2ids(faf, verbose=False):
"""
Extract IDs from a fasta file
:param faf: fasta file
:param verbose: more output
:return: a set of IDS
"""
if verbose:
sys.stderr.write(f"{bcolors.GREEN} Reading IDs from fasta file: {faf}{bcolors.ENDC}")
f = read_fasta(faf, whole_id=False)
return set(f.keys())
def qual2fastq(quals, verbose=False):
"""
Convert a list of quality scores to a single fastq line
:param quals: A list of quality scores
:type quals: list
:return: A fastq quality string
:rtype: str
"""
quality = [chr(q + 33) for q in quals]
return "".join(quality)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Convert bam to fastq')
parser.add_argument('-b', help='bam file', required=True)
parser.add_argument('-l', help='file to output fastq left reads', required=True)
parser.add_argument('-r', help='file to output fastq right reads', required=True)
parser.add_argument('-i', help='list of ids (one per line) to keep')
parser.add_argument('-f', help='fasta file of sequences whose ids to keep')
    parser.add_argument('-v', help='verbose output', action='store_true')
args = parser.parse_args()
bamfile = pysam.AlignmentFile(args.b, "rb")
i = None
    if args.i:
        i = list2ids(args.i, args.v)
if args.f:
if i:
i.update(fasta2ids(args.f, args.v))
else:
i = fasta2ids(args.f, args.v)
if args.v:
sys.stderr.write(f"{bcolors.BLUE} Writing to {args.l} and {args.r}{bcolors.ENDC}")
with open(args.l, 'w') as left:
with open(args.r, 'w') as right:
for read in bamfile.fetch(until_eof=True):
                if i and read.query_name not in i:
continue
# construct the output string
                if read.query_qualities:
                    ostr = "@{}\n{}\n+\n{}\n".format(read.query_name, read.query_sequence, qual2fastq(read.query_qualities))
                else:
                    # no stored qualities; write an empty quality line to keep four-line records
                    ostr = "@{}\n{}\n+\n\n".format(read.query_name, read.query_sequence)
                if read.query_name.endswith("1") or read.query_name.endswith("l"):
left.write(ostr)
else:
right.write(ostr)
```
#### File: EdwardsLab/bam/fastq_pairs.py
```python
import os
import re
import sys
from roblib import sequences
def clean_fastq(file1, file2):
"""
Make a set of cleaned pairs and unpaired reads. If all the reads are paired we do not do anything
:param file1: first fastq file
:type file1: str
:param file2: second fastq file
:type file2: str
:return: A string for the output using -1 -2 and -U for unpaired reads
:rtype: str
"""
sys.stderr.write("Checking " + file1 + " and " + file2 + "\n")
seq1 = {}
seq2 = {}
for (sid, label, seq, qual) in sequences.stream_fastq(file1):
sid = re.sub('@', '', sid)
        sid = re.sub(r'\.[12]$', '', sid)
sid = re.sub('/[12]$', '', sid)
seq1[sid] = "@" + label + "\n" + seq + "\n+\n" + qual + "\n"
for (sid, label, seq, qual) in sequences.stream_fastq(file2):
sid = re.sub('@', '', sid)
        sid = re.sub(r'\.[12]$', '', sid)
sid = re.sub('/[12]$', '', sid)
seq2[sid] = "@" + label + "\n" + seq + "\n+\n" + qual + "\n"
seq1set = set(seq1.keys())
seq2set = set(seq2.keys())
sys.stderr.write(
"File 1: " + file1 + " seqs: " + str(len(seq1set)) + " File 2: " + file2 + " seqs: " + str(len(seq2set)) + "\n")
# are there reads in one but not the other?
s1unique = seq1set.difference(seq2set)
s2unique = seq2set.difference(seq1set)
ret = ' -1 ' + file1 + ' -2 ' + file2
if len(s1unique) > 0 or len(s2unique) > 0:
file1 = file1.replace('.gz', '')
file2 = file2.replace('.gz', '')
sys.stderr.write("Rewriting " + file1 + " and " + file2 + "\n")
# we have to make new files
file1clean = file1.replace('.fastq', '.clean.fastq')
if file1clean == file1:
file1clean = file1.replace('.fq', '.clean.fq')
if file1clean == file1:
file1clean = file1 + ".clean.fastq"
file1unique = file1.replace('.fastq', '.unique.fastq')
if file1unique == file1:
file1unique = file1.replace('.fq', '.unique.fastq')
if file1unique == file1:
file1unique = file1 + '.unique.fastq'
file2clean = file2.replace('.fastq', '.clean.fastq')
if file2clean == file2:
file2clean = file2.replace('.fq', '.clean.fq')
if file2clean == file2:
file2clean = file2 + ".clean.fastq"
file2unique = file2.replace('.fastq', '.unique.fastq')
if file2unique == file2:
file2unique = file2.replace('.fq', '.unique.fastq')
if file2unique == file2:
file2unique = file2 + '.unique.fastq'
file1unique = file1unique.replace('.gz', '')
file2unique = file2unique.replace('.gz', '')
file1clean = file1clean.replace('.gz', '')
file2clean = file2clean.replace('.gz', '')
ret = " -1 " + file1clean + " -2 " + file2clean
try:
out1 = open(file1clean, 'w')
out2 = open(file2clean, 'w')
for sid in seq1set.intersection(seq2set):
out1.write(seq1[sid])
out2.write(seq2[sid])
out1.close()
out2.close()
except IOError as e:
print "I/O error({0}): {1}".format(e.errno, e.strerror)
if len(s1unique) > 0:
ret = ret + " -U " + file1unique
try:
out = open(file1unique, 'w')
for sid in s1unique:
out.write(seq1[sid])
out.close()
except IOError as e:
print "I/O error({0}): {1}".format(e.errno, e.strerror)
if len(s2unique) > 0:
ret = ret + " -U " + file2unique
try:
out = open(file2unique, 'w')
for sid in s2unique:
out.write(seq2[sid])
out.close()
except IOError as e:
print "I/O error({0}): {1}".format(e.errno, e.strerror)
return ret
import argparse
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Given a directory of fastq files, figure out the pairs and check if they are ok. The only requirement is the fastq files have _ between the name and id")
parser.add_argument('-d', help='Directory of fastq files', required=True)
args = parser.parse_args()
files = {}
for f in os.listdir(args.d):
sid = f.split('_')[0]
if sid not in files:
files[sid] = set()
files[sid].add(f)
for s in files:
if len(files[s]) == 1:
print(args.d + "/" + files[s].pop())
elif len(files[s]) == 2:
outstr = clean_fastq(os.path.join(args.d, files[s].pop()), os.path.join(args.d, files[s].pop()))
print(outstr)
else:
sys.stderr.write("Apparently more than two files for " + s + " ==> " + " ".join(files))
```
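The pairing above depends on normalising read IDs so that mate suffixes (.1/.2 or /1//2) do not prevent a match between the two files. A minimal sketch of that normalisation, with made-up read IDs:
```python
import re

def normalise_read_id(sid):
    """Strip a leading @ and a trailing .1/.2 or /1//2 mate suffix from a read id."""
    sid = re.sub('@', '', sid)
    sid = re.sub(r'\.[12]$', '', sid)
    sid = re.sub(r'/[12]$', '', sid)
    return sid

# both mates collapse to the same key, so they can be paired up
assert normalise_read_id('@read7/1') == normalise_read_id('@read7/2') == 'read7'
```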
#### File: EdwardsLab/bin/clustering.py
```python
import os
import sys
import argparse
import scipy
import scipy.cluster.hierarchy as sch
def parse_text_file(tf):
"""
Parse a text file and return an n-choose-2 array of the elements. The array returned has the distance from the first
element to all other elements, and then the second element to n-1 elements (all but the first), and then the
third element to n-2 elements (all but the first & second) and so on.
:param tf: Text file with [a, b, distance]
:type tf: str
:return: n-choose-2 array of the data.
:rtype: array
"""
data = {}
ks = set()
with open(tf, 'r') as fin:
for l in fin:
p=l.strip().split("\t")
ks.add(p[0])
ks.add(p[1])
if p[0] not in data:
data[p[0]]={}
if p[1] not in data:
data[p[1]] = {}
data[p[0]][p[1]] = float(p[2])/100
data[p[1]][p[0]] = float(p[2])/100
allkeys = list(ks)
allkeys.sort()
nct = []
for i in range(len(allkeys)):
for j in range(i+1, len(allkeys)):
nct.append(data[allkeys[i]][allkeys[j]])
return nct
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Cluster genes based on %id with cutoffs")
parser.add_argument('-t', help='file with [a, b, distance] separated by tabs', required=True)
    parser.add_argument('-o', help='clusters output file name. One {cutoff : cluster assignments} line is written per distance threshold', required=True)
args = parser.parse_args()
matrix = parse_text_file(args.t)
L = sch.linkage(matrix, method='complete')
out = open(args.o, 'w')
for i in range(101):
ind = sch.fcluster(L, i/100.0, 'distance')
out.write("{" + str(i) + " : " + str(ind) + "},\n")
print("{}\t{}".format(100-i, max(ind)))
```
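For reference, sch.linkage expects exactly the condensed (n-choose-2) distance vector that parse_text_file builds. A tiny worked example with three invented items:
```python
import scipy.cluster.hierarchy as sch

# condensed distances for items (x, y, z): d(x,y)=0.1, d(x,z)=0.9, d(y,z)=0.8
condensed = [0.1, 0.9, 0.8]
L = sch.linkage(condensed, method='complete')
# cutting at 0.5 groups x and y together and leaves z on its own
clusters = sch.fcluster(L, 0.5, 'distance')
print(clusters)  # e.g. [1 1 2]
```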
#### File: EdwardsLab/bin/correlations.py
```python
import os
import sys
import argparse
from itertools import combinations
from scipy.stats.stats import pearsonr
import matplotlib.pyplot as plt
def read_data(f, h, c):
"""
Read the file and store the data.
:param f: the file to read
:param h: whether the file contains headers
:param c: whether the first column is a label or data
:return:
"""
data = []
headers = []
firstline = True
start = 1
if c:
start = 0
with open(f,'r') as fin:
for l in fin:
p=l.strip().split("\t")
if firstline:
if h:
headers = p[start:]
for i in range(start, len(p)):
if not h:
headers.append(i)
data.append([])
firstline = False
                # only skip the rest of the loop when the first line is a header; otherwise it is data
                if h:
                    continue
for i in range(start, len(p)):
data[i-start].append(float(p[i]))
return data, headers
def pairwise(data, headers):
"""
    Calculate and print the pairwise Pearson correlation for every pair of columns
    :param data: list of data columns (each a list of floats)
    :param headers: list of column labels
    :return:
"""
cols = range(len(headers))
for i, j in combinations(cols, 2):
pearson, p = pearsonr(data[i], data[j])
print("{}\t{}\t{}\t{}".format(headers[i], headers[j], pearson, p))
def plot_pairs(data, headers):
    """
    Draw a scatter plot for each pair of columns. Note that the 2x2 subplot grid
    only has room for four pairwise comparisons.
    :param data: list of data columns (each a list of floats)
    :param headers: list of column labels
    :return:
    """
    cols = range(len(headers))
    f, axarr = plt.subplots(2, 2)
pltx = 0
plty = 0
for i, j in combinations(cols, 2):
axarr[pltx, plty].plot(data[i], data[j], 'ro')
axarr[pltx, plty].set_title('{} versus {}'.format(headers[i], headers[j]))
pltx += 1
if pltx == 2:
pltx = 0
plty = 1
plt.tight_layout()
plt.show()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Generate all pairwise correlations between the data")
parser.add_argument('-f', help='file of data with data in columns', required=True)
parser.add_argument('-l', help='first line is a header line (will be used in output)', action='store_true')
    parser.add_argument('-c', help='first column is data and should be included (default: the first column contains labels that are discarded)', action='store_true')
    parser.add_argument('-v', help='verbose output', action='store_true')
args = parser.parse_args()
data, headers = read_data(args.f, args.l, args.c)
pairwise(data, headers)
plot_pairs(data, headers)
```
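pearsonr returns the correlation coefficient and its two-sided p-value, which is what pairwise() unpacks. A quick illustration on two made-up columns:
```python
from scipy.stats import pearsonr

col_a = [1.0, 2.0, 3.0, 4.0]
col_b = [2.1, 3.9, 6.2, 8.0]  # roughly 2 * col_a
r, p = pearsonr(col_a, col_b)
print("{:.3f}\t{:.3g}".format(r, p))  # r is close to 1.0 for strongly correlated columns
```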
#### File: EdwardsLab/bin/cpgs.py
```python
import os
import sys
import argparse
from roblib import bcolors
from roblib import stream_fastq
def countcpgs(fqfile):
"""
Count the CpGs in a file
:param fqfile: the fastq file
:return:
"""
count = {}
for seqid, header, seq, qual in stream_fastq(fqfile):
cg = seq.count('CG')
count[cg] = count.get(cg, 0) + 1
return count
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Count CGs in a fastq file')
parser.add_argument('-f', help='fastq file', required=True)
parser.add_argument('-v', help='verbose output', action='store_true')
args = parser.parse_args()
count = countcpgs(args.f)
for c in sorted(list(count.keys())):
print(f"{c}\t{count[c]}")
```
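countcpgs relies on str.count (which counts non-overlapping matches) and the dict.get idiom to build a histogram. The same pattern in isolation, on invented reads:
```python
reads = ["ACGCGT", "TTTT", "CGCGCG"]
histogram = {}
for seq in reads:
    cg = seq.count('CG')           # 2, 0 and 3 CpGs respectively
    histogram[cg] = histogram.get(cg, 0) + 1
print(histogram)  # {2: 1, 0: 1, 3: 1}
```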
#### File: EdwardsLab/bin/crAss_contig_correlations.py
```python
import os
import sys
from numpy import isnan
from scipy.stats.stats import pearsonr
import argparse
__author__ = '<NAME>'
def merge_clust(c1, c2, inclust, clustermembers):
if c2 < c1:
[c1, c2] = [c2, c1]
for c in clustermembers[c2]:
clustermembers[c1].add(c)
inclust[c] = c1
clustermembers.pop(c2)
return inclust, clustermembers
def notzero(x): return x > 0
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='calculate pairwise pearson correlations between contigs and then cluster them')
    parser.add_argument('-d', help='data table with contigs in rows and occurrence in columns', required=True)
parser.add_argument('-s', help='minimum number of non-zero samples (default=all samples)', type=int)
parser.add_argument('-r', help='minimum number of reads (row total) for a sample to be included (default=all rows)', type=int)
parser.add_argument('-m', help='minimum Pearson correlation to be printed out', type=float)
args = parser.parse_args()
data = {}
headers = []
with open(args.d, 'r') as f:
for l in f:
p=l.strip().split("\t")
if headers == []:
headers = p
else:
tmp = list(map(int, p[1:]))
if args.r and sum(tmp) < args.r:
continue
data[p[0]] = tmp
allcontigs = list(data.keys())
allcontigs.sort()
if not args.s:
args.s = len(headers)
    # only keep contigs that are non-zero in at least args.s samples
nonzero = []
for c in allcontigs:
nz = list(filter(notzero, data[c]))
if len(nz) >= args.s:
nonzero.append(c)
sys.stderr.write("Before filtering we had {} contigs, after filtering to remove samples with at least {} non zero values we have {} contigs\n".format(len(allcontigs), args.s, len(nonzero)))
for i in range(len(nonzero)):
cfr = nonzero[i]
for j in range(i+1, len(nonzero)):
cto = nonzero[j]
dist = pearsonr(data[cfr], data[cto])[0]
            if not isnan(dist) and (args.m is None or dist > args.m):
print("{}\t{}\t{}".format(cfr, cto, dist))
```
#### File: EdwardsLab/bin/dump_all_tables.py
```python
import argparse
import sqlite3
import pandas as pd
def to_csv(filename):
db = sqlite3.connect(filename)
cursor = db.cursor()
cursor.execute("SELECT name FROM sqlite_master WHERE type='table';")
tables = cursor.fetchall()
for table_name in tables:
table_name = table_name[0]
table = pd.read_sql_query("SELECT * from %s" % table_name, db)
table.to_csv(table_name + '.csv', index_label='index', encoding='utf-8')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Dump the contents of an SQL file to CSV. This was taken from http://stackoverflow.com/questions/305378/get-list-of-tables-db-schema-dump-etc-in-sqlite-databases')
parser.add_argument('-d', help='SQLlite database file', required=True)
args = parser.parse_args()
to_csv(args.d)
```
#### File: EdwardsLab/bin/environment_violin_lot.py
```python
import os
import sys
import argparse
import gzip
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
def plot(f, figf, freqmin):
freq = {}
with open(f, 'r') as i:
for l in i:
p=l.strip().split("\t")
if 'human' in p[0]:
p[0]='human'
if 'human' in p[1]:
p[1]='human'
if p[0] < p[1]:
(g1, g2) = (p[0], p[1])
else:
(g2, g1) = (p[0], p[1])
if g1 not in freq:
freq[g1] = {}
if g2 not in freq[g1]:
freq[g1][g2] = []
freq[g1][g2].append(float(p[2]))
labels = []
scores = []
sames = []
count = 1
ticks = []
for g1 in freq.keys():
for g2 in freq[g1].keys():
if len(freq[g1][g2]) < freqmin:
continue
labels.append("{}-{}".format(g1, g2))
scores.append(freq[g1][g2])
if g1 == g2:
sames.append(1)
else:
sames.append(0)
ticks.append(count)
count += 1
fig = plt.figure()
ax = fig.add_subplot(111)
# ax.boxplot(alldata)
vp = ax.violinplot(scores, ticks, showmeans=True)
for i, j in enumerate(vp['bodies']):
if sames[i]:
j.set_color('red')
ax.set_xlabel("Environments")
ax.set_ylabel("Genome Distances")
ax.set_xticks(ticks)
ax.set_xticklabels(labels, rotation='vertical')
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
fig.set_facecolor('white')
plt.tight_layout()
#plt.show()
fig.savefig(figf)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Draw a voilin plot of same and different environments")
parser.add_argument('-f', help='file of environments and distances', required=True)
parser.add_argument('-o', help='output image file', default='out.png')
parser.add_argument('-m', help='Minimum frequency (default=100000)', default=100000)
parser.add_argument('-v', help='verbose output')
args = parser.parse_args()
plot(args.f, args.o, int(args.m))
```
#### File: EdwardsLab/bin/plot_pairwise_percents.py
```python
import json
import matplotlib.pyplot as plt
def median(lst):
sortedLst = sorted(lst)
lstLen = len(lst)
index = (lstLen - 1) // 2 # // is the floor division
if (lstLen % 2):
return sortedLst[index]
else:
return (sortedLst[index] + sortedLst[index + 1]) / 2.0
filename = '/home/redwards/Desktop/all_pairwise.json'
with open(filename, 'r') as f:
data = json.load(f)
tax = ['kingdom', 'phylum', 'class', 'order', 'family', 'genus', 'species', 'strain']
alldata = []
for t in tax:
    # data[t] = map(float, data[t])
    floatdata = list(map(float, data[t]))
    alldata.append(floatdata)
    print("{}\t{}\t{}\t{}".format(t, len(floatdata), 1.0*sum(floatdata)/len(floatdata), median(floatdata)))
fig = plt.figure()
ax = fig.add_subplot(111)
#ax.boxplot(alldata)
tax.insert(0, "")
ax.violinplot(alldata, showmeans=True)
ax.set_xlabel("Phylogeny")
ax.set_ylabel("Average percent identity")
ax.set_xticklabels(tax)
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
fig.set_facecolor('white')
plt.show()
```
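The hand-rolled median above takes the middle element for odd-length lists and the mean of the two central elements for even-length lists. Two sanity checks with invented values:
```python
def median(lst):
    sortedLst = sorted(lst)
    lstLen = len(lst)
    index = (lstLen - 1) // 2
    if lstLen % 2:
        return sortedLst[index]
    return (sortedLst[index] + sortedLst[index + 1]) / 2.0

assert median([3, 1, 2]) == 2          # odd length: middle value
assert median([4, 1, 3, 2]) == 2.5     # even length: mean of the two middle values
```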
#### File: EdwardsLab/bin/resample.py
```python
import os
import sys
import argparse
import matplotlib.pyplot as plt
from random import shuffle
def resample(size, percent, tries):
if percent > 1:
percent /= 100
# define an array of size size
data = [i for i in range(size)]
# where we put the results as a cumulative total
iterations = []
seen = set()
for t in range(tries):
# randomize the array
shuffle(data)
# see if we have seen percent things
new = 0
resampsize = int(size * percent)
# sys.stderr.write("resampling " + str(resampsize) + " from " + str(size) + "\n")
for i in range(resampsize):
if data[i] not in seen:
seen.add(data[i])
new += 1
if not iterations:
iterations.append(new)
else:
iterations.append(new+iterations[-1])
# now just plot the number of new things as a cumulative total
plt.plot(iterations)
plt.ylabel('New numbers seen')
plt.xlabel('Iteration')
plt.show()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Resample a list of numbers to see the new things seen")
parser.add_argument('-s', help='Size of array to resample from (size of dataset)', type=int, required=True)
parser.add_argument('-p', help='Percent to resample at each iteration (float)', type=float, required=True)
parser.add_argument('-i', help='Number of iterations to run', type=int, required=True)
args = parser.parse_args()
resample(args.s, args.p, args.i)
```
#### File: EdwardsLab/blast/blast_to_network.py
```python
import os
import sys
import argparse
import networkx as nx
def load_blast(blastf, maxeval=None, minper=None, minlen=None, verbose=False):
"""
Load a blast file into the network
:param blastf: blast tab separated file to read
:param maxeval: maximum E value to include in the network
:param minper: minimum percent to include in the network
:param minlen: minimum alignment length to include in the network
:param verbose: add more output
    :return: a networkx Graph object
    :rtype: networkx.Graph
"""
G = nx.Graph()
with open(blastf, 'r') as f:
for l in f:
p=l.strip().split("\t")
if p[0] == p[1]:
continue
if p[1] in G and p[0] in G[p[1]]:
continue
if maxeval and float(p[10]) > maxeval:
continue
if minper and float(p[2]) < minper:
continue
if minlen and float(p[3]) < minlen:
continue
weight = (float(p[2])/100) * float(p[3])
if verbose:
sys.stderr.write("Addding edge: {} -> {} : {}\n".format(p[0], p[1], weight))
G.add_edges_from([(p[0], p[1], {'weight' : weight})])
return G
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Create a network from blast tab separated output")
parser.add_argument('-b', help='blast output file (required)', required=True)
parser.add_argument('-e', help='maximum e-value', type=float)
parser.add_argument('-p', help='minimum percent identity', type=float)
parser.add_argument('-l', help='minimum alignment length', type=int)
parser.add_argument('-v', help='verbose output', action="store_true")
args = parser.parse_args()
net = load_blast(args.b, maxeval=args.e, minper=args.p, minlen=args.l, verbose=args.v)
print("Network:\n\tEdges: {}\n\tNodes: {}\n".format(len(net.edges), len(net.nodes)))
for c in nx.connected_components(net):
print(c)
sys.exit()
seen = set()
for n in net.nodes:
if n in seen:
continue
thisa = set(net.adj[n])
thisa.add(n)
seen.update(thisa)
print("{}\n".format(thisa))
```
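Once the edges are loaded, nx.connected_components yields one set of node ids per component, which is what the script prints. A toy graph with invented contig names shows the shape of the output:
```python
import networkx as nx

G = nx.Graph()
G.add_edges_from([
    ("contig1", "contig2", {"weight": 95.0}),
    ("contig2", "contig3", {"weight": 88.5}),
    ("contig4", "contig5", {"weight": 99.0}),
])
for component in nx.connected_components(G):
    print(component)
# {'contig1', 'contig2', 'contig3'}
# {'contig4', 'contig5'}
```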
#### File: EdwardsLab/blast/filter_fastq_by_blast.py
```python
import os
import sys
import argparse
from roblib import stream_blast_results, stream_fastq, bcolors
__author__ = '<NAME>'
def read_blast(blastf, maxeval, minlen, minid, verbose=False):
"""
Read the blast file and only keep those matches that exceed our parameters.
:param blastf: blast file to read
    :param maxeval: maximum E value to keep
:param minlen: minimum length to keep
:param minid: minimum % id to keep
:param verbose: more output
:return: a set of matches
"""
results = set()
for b in stream_blast_results(blastf, verbose=verbose):
if b.alignment_length < minlen:
continue
if b.evalue > maxeval:
continue
if b.percent_id < minid:
continue
results.add(b.query)
return results
def filter_fastq(fqf, br, matchout=None, nomatchout=None, verbose=False):
"""
Filter the fastq file and print out matches or no matches
:param fqf: The fastq file to filter
:param br: the set of query blast results
:param matchout: The file to write matches to
:param nomatchout: the file to write no matches to
:param verbose: more output
:return: nothing
"""
    mo = open(matchout, 'w') if matchout else None
    nmo = open(nomatchout, 'w') if nomatchout else None
matches = 0
nonmatches = 0
for sid, allid, seq, qual in stream_fastq(fqf):
if sid in br:
if matchout:
mo.write(f"@{allid}\n{seq}\n+\n{qual}\n")
matches += 1
else:
if nomatchout:
nmo.write(f"@{allid}\n{seq}\n+\n{qual}\n")
nonmatches += 1
sys.stderr.write(f"{bcolors.GREEN}FINISHED:{bcolors.ENDC} Sequences Matched: {matches} Sequences without match {nonmatches}\n")
if __name__ == "__main__":
maxeval = 1e-10
minlen = 50
minid = 75
parser = argparse.ArgumentParser(description='Filter fastq files based on blast results')
parser.add_argument('-f', help='fastq file to filter', required=True)
parser.add_argument('-b', help='blast output file (using -outfmt 6 std)', required=True)
parser.add_argument('-m', help='file to write the sequences that match the blast file to')
parser.add_argument('-n', help='file to write the sequences that DO NOT match the blast file to')
    parser.add_argument('-e', help='Maximum E value cut off (default={})'.format(maxeval), type=float, default=maxeval)
    parser.add_argument('-l', help='Minimum alignment length cut off (default={})'.format(minlen), type=int, default=minlen)
    parser.add_argument('-i', help='Minimum percent id cut off (default={})'.format(minid), type=float, default=minid)
parser.add_argument('-v', help='verbose output', action='store_true')
args = parser.parse_args()
if not args.m and not args.n:
sys.stderr.write(f"{bcolors.FAIL}ERROR: Either -m or -n must be specified{bcolors.ENDC}\n")
sys.exit(-1)
b = read_blast(args.b, args.e, args.l, args.i, args.v)
if not args.m:
args.m = None
if not args.n:
args.n = None
filter_fastq(args.f, b, args.m, args.n, args.v)
```
#### File: EdwardsLab/blast/summarize_blast.py
```python
import os
import sys
import argparse
from roblib import stream_fasta, stream_blast_results
def seq_lengths(fafile, verbose=False):
"""
Read the sequence length from a fasta file
:param fafile: the fasta file to read
:param verbose: more output
:return: a dict of sequence id and length
"""
length = {}
for i,s in stream_fasta(fafile):
length[i] = len(s)
return length
def summarize_blast(fafile, blfile, verbose=False):
"""
Summarize blast hits
:param fafile: the query fasta file
:param blfile: the blast output file
:param verbose: more output
:return:
"""
seqlens = seq_lengths(fafile, verbose)
for b in stream_blast_results(blfile, verbose=verbose):
"blech blech blech"
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="")
parser.add_argument('-b', help='blast output file', required=True)
parser.add_argument('-f', help='fasta query file', required=True)
parser.add_argument('-v', help='verbose output', action="store_true")
    args = parser.parse_args()
    summarize_blast(args.f, args.b, args.v)
```
#### File: EdwardsLab/bwt/generate_table.py
```python
import os
import sys
import argparse
__author__ = '<NAME>'
def printarr(arr):
"""
print the array
:param arr:
:return:
"""
print("\n".join(arr))
def rotate(s):
"""
Rotate the string
:param s:
:return:
"""
d=[]
d.append(s)
for i in reversed(range(1, len(s))):
d.append(s[i:] + s[:i])
return d
def count_ends(d):
"""
    count the number of adjacent rotations whose last character is the same
:param d:
:return:
"""
con=0
for i in range(len(d)-1):
if d[i][-1] == d[i+1][-1]:
con+=1
print("{} consecutive letters".format(con))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Generate all the rotations of a sequence')
parser.add_argument('-s', help='string to rotate', required=True)
parser.add_argument('-b', help="character to start the string with (defualt = no character)", default=None)
parser.add_argument('-e', help='Character to end the string with (default = $)', default='$')
parser.add_argument('-c', help='change case', action='store_true')
parser.add_argument('-v', help='verbose output', action='store_true')
args = parser.parse_args()
s = args.s + args.e
if args.b:
s = args.b + s
if args.c:
s=s.lower()
d = rotate(s)
printarr(d)
count_ends(d)
print("\nAfter sorting:")
d = sorted(d)
printarr(d)
count_ends(d)
```
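The rotate/sort pair above is the heart of the Burrows-Wheeler transform: sort all rotations and read off the last column. A compact, self-contained sketch on a made-up string:
```python
def rotations(s):
    """All rotations of s, one per starting position."""
    return [s[i:] + s[:i] for i in range(len(s))]

s = "banana$"
table = sorted(rotations(s))
bwt = "".join(row[-1] for row in table)
print(bwt)  # annb$aa -- the Burrows-Wheeler transform of banana$
```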
#### File: EdwardsLab/cartopy/crAssphage_cophenetic.py
```python
import os
import sys
import argparse
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import matplotlib.cm as cmx
import math
import cartopy.crs as ccrs
import re
def get_lon_lat(idf, maxtoget=50000):
"""
Get the longitude and latitude of different ids. Note that we have longitude first to work with cartopy
:param idf: the id.map file
    :param maxtoget: the maximum number of ids to get. This is just for debugging
:return:
"""
lonlat = {}
count = 0
global verbose
with open(idf, 'r') as fin:
for l in fin:
if count > maxtoget:
break
count+=1
            s=re.search(r'latitude=(\S+)\]', l)
if not s:
sys.stderr.write("No latitude in {}".format(l))
continue
lat=s.group(1)
            s = re.search(r'longitude=(\S+)\]', l)
if not s:
sys.stderr.write("No longitude in {}".format(l))
continue
lon = s.group(1)
p=l.split("\t")
lonlat[p[0]] = (float(lon), float(lat))
return lonlat
def latlon2distance(lat1, long1, lat2, long2, miles=False):
"""Convert two coordinates to distance.
This is an approximation since the earth is not spherical, but accuracy is <100m, especially for close points
This code was taken from http://www.johndcook.com/python_longitude_latitude.html
Latitude is measured in degrees north of the equator; southern locations have negative latitude.
Similarly, longitude is measured in degrees east of the Prime Meridian. A location 10deg west of
the Prime Meridian, for example, could be expressed as either 350deg east or as -10deg east.
Arguments: lat1, long1; lat2, long2; miles is a boolean. If you want miles set it to true. Else set it to false
"""
global verbose
if lat1 == lat2 and long1 == long2:
return 0
# Convert latitude and longitude to
# spherical coordinates in radians.
degrees_to_radians = math.pi / 180.0
# phi = 90 - latitude
phi1 = (90.0 - lat1) * degrees_to_radians
phi2 = (90.0 - lat2) * degrees_to_radians
# theta = longitude
theta1 = long1 * degrees_to_radians
theta2 = long2 * degrees_to_radians
# Compute spherical distance from spherical coordinates.
# For two locations in spherical coordinates
# (1, theta, phi) and (1, theta, phi)
# cosine( arc length ) =
# sin phi sin phi' cos(theta-theta') + cos phi cos phi'
# distance = rho * arc length
cos = (math.sin(phi1) * math.sin(phi2) * math.cos(theta1 - theta2) + math.cos(phi1) * math.cos(phi2))
try:
arc = math.acos(cos)
except Exception as err:
sys.stderr.write("There was an err: {} trying to take the acos of ({})\n".format(err, cos))
arc=0
# Remember to multiply arc by the radius of the earth
# in your favorite set of units to get length.
#
# To convert to miles multiple arc by 3960
# To convert to kilometers multiply arc by 6373
if miles:
arc *= 3960
else:
arc *= 6373
return arc
def closest_dna_dist(matrixfile):
"""
Read the matrix file and get the id of the point with the closest distance that is not ourself
    :param matrixfile: The cophenetic matrix file to read
:return: a dict of a node and its closest leaf
"""
global verbose
if verbose:
sys.stderr.write("Getting closest distances\n")
distances = {}
with open(matrixfile, 'r') as f:
l = f.readline()
ids = l.rstrip().split("\t")
for i,name in enumerate(ids):
if i == 0:
continue
distances[name] = {}
for l in f:
data = l.rstrip().split("\t")
for i,dist in enumerate(data):
if i == 0:
continue
distances[data[0]][ids[i]] = float(dist)
distances[ids[i]][data[0]] = float(dist)
closest = {}
for d in distances:
closest[d] = {}
for k in sorted(distances[d], key=distances[d].get):
if k == d:
continue
closest[d][k] = distances[d][k]
break
if verbose:
sys.stderr.write("Done\n")
return closest
def plotmap(ll, dd, outputfile, maxdist=1, maxlinewidth=3):
"""
Plot the map of the dna distances and lat longs
:param ll: The lon-lats
:param dd: The distances to use
:param outputfile: The file name to write the image to
:param maxdist: The maximum distance that we will scale to be maxlinewidth
:return:
"""
global verbose
ax = plt.axes(projection=ccrs.Robinson())
# make the map global rather than have it zoom in to
# the extents of any plotted data
ax.set_global()
ax.stock_img()
ax.coastlines()
## color the lines based on the maximum distance value
jet = cm = plt.get_cmap('jet')
cNorm = colors.Normalize(vmin=0, vmax=maxdist)
scalarMap = cmx.ScalarMappable(norm=cNorm, cmap=jet)
# Using contourf to provide my colorbar info, then clearing the figure
Z = [[0, 0], [0, 0]]
levels = range(0, int(100 * maxdist) + 10, 10)
CS3 = plt.contourf(Z, levels, cmap=jet)
# plt.clf()
# NOTE: longitude before latitude!!
# plt.plot([sdlon, brislon], [sdlat, brislat], color='blue', linewidth=2, transform=ccrs.Geodetic())
# plot the circles for each sample site
# markerfacecolor="None",
for lonlat in ll.values():
plt.plot(lonlat[0], lonlat[1], 'o', color='Black', alpha=0.25, markersize=4, transform=ccrs.PlateCarree())
for idx1 in dd:
for idx2 in dd[idx1]:
# this should only happen when we do best DNA distances
if idx1 not in ll:
sys.stderr.write("NO Lat/Lon for {}\n".format(idx1))
continue
if idx2 not in ll:
sys.stderr.write("NO Lat/Lon for {}\n".format(idx2))
continue
if verbose:
sys.stderr.write("Distance between {} and {}: {}\n".format(idx1, idx2, latlon2distance(ll[idx1][1], ll[idx1][0], ll[idx2][1], ll[idx2][0])))
linewidth = dd[idx1][idx2]
linewidth = linewidth/maxdist * maxlinewidth
#colorVal = scalarMap.to_rgba(dd[idx1][idx2])
plt.plot([ll[idx1][0], ll[idx2][0]], [ll[idx1][1], ll[idx2][1]], color='Red', linewidth=linewidth, alpha=0.1, transform=ccrs.Geodetic())
if latlon2distance(ll[idx1][1], ll[idx1][0], ll[idx2][1], ll[idx2][0]) < 100:
if verbose:
sys.stderr.write("Adding a circle for {} and {}\n".format(ll[idx1][0], ll[idx1][1]))
plt.plot(ll[idx1][0], ll[idx1][1], 'o', color='Red', alpha=0.1, markersize=2,
transform=ccrs.PlateCarree())
# plt.colorbar(CS3)
#plt.show()
plt.savefig(outputfile)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Plot a map using ete and lat lon')
parser.add_argument('-i', help='id.map file with lat/lon information', required=True)
parser.add_argument('-m', help='cophenetic map file with same ids as id.map', required=True)
parser.add_argument('-o', help='output file name', required=True)
parser.add_argument('-v', help='verbose output', action='store_true')
args = parser.parse_args()
global verbose
verbose = False
if args.v:
verbose = True
lonlat = get_lon_lat(args.i)
# dist = best_dna_dist(get_dna_distance(args.t))
dist = closest_dna_dist(args.m)
plotmap(lonlat, dist, args.o)
```
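As a sanity check on the spherical-law-of-cosines formula in latlon2distance, two points one degree apart on the equator should be about 111 km apart (2 * pi * 6373 / 360 is roughly 111.2 km). A self-contained version of the same calculation (the function name here is mine):
```python
import math

def spherical_distance_km(lat1, lon1, lat2, lon2, radius_km=6373.0):
    """Great-circle distance via the spherical law of cosines (same approach as latlon2distance)."""
    d2r = math.pi / 180.0
    phi1, phi2 = (90.0 - lat1) * d2r, (90.0 - lat2) * d2r
    theta1, theta2 = lon1 * d2r, lon2 * d2r
    cos = (math.sin(phi1) * math.sin(phi2) * math.cos(theta1 - theta2)
           + math.cos(phi1) * math.cos(phi2))
    return math.acos(min(1.0, max(-1.0, cos))) * radius_km

print(spherical_distance_km(0, 0, 0, 1))  # ~111.2 km for one degree of longitude at the equator
```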
#### File: EdwardsLab/crAssphage/extract_pcr_regions.py
```python
import argparse
import os
import sys
import pysam
__author__ = '<NAME>'
"""
Extract the PCR regions from indexed bam alignments. These are regions that we have asked people to pull out, and
we will extract the sequences from those regions
Current regions:
Primer sequences: Position on JQ995537 (the original genbank record)
Primer A:
Fwd: CTGATAGTATGATTGGTAAT Position 25634 .. 25653
Rev: ATAAGTTCTCCAACTATCTT Position complement(26945 .. 26964)
Primer B:
Fwd: CCAGTATCTCCATAAGCATC Position 33709 .. 33728
Rev: GTGAGGGCGGAATAGCTA Position complement(35045 .. 35062)
Primer C:
Fwd: GCAACAGGAGTAGTAAAATCTC Position 43820 .. 43841
Rev: GCTCCTGTTAATCCTGATGTTA Position complement(45036 .. 45057)
"""
locations = {
'JQ995537': {
'PrimerA': (25633, 26964),
'PrimerB': (33708, 35062),
'PrimerC': (43819, 45057)
}
}
def print_query_regions(bam):
"""
Print the regions from the query sequences that overlap our regions of interest
:param bam: The bam object from pysam
:type bam: pysam.AlignmentFile
:return:
:rtype:
"""
for template in locations:
for primer in locations[template]:
start, end = locations[template][primer]
for read in bam.fetch(reference=template, start=start, end=end):
# this is an AlignedSegment: http://pysam.readthedocs.org/en/latest/api.html#pysam.AlignedSegment
# sys.stderr.write("Primer: {} ({} .. {}). Found a region for {} ({} .. {}) -> ({} .. {})\n".format(
# primer, start, end, read.query_name, read.query_alignment_start, read.query_alignment_end,
# read.reference_start, read.reference_end
# ))
# this checks for sequences that overlap the start and end (none do in the Ondrej data set
# if read.reference_start <= start and read.reference_end >= stop:
# sys.stderr.write("Primer: {} ({} .. {}). Found a region for {} ({} .. {}) -> ({} .. {})\n".format(
# primer, start, stop, read.query_name, read.query_alignment_start, read.query_alignment_end,
# read.reference_start, read.reference_end
# ))
# get just the sequence that maps to the region
seq = read.query_sequence
beg_offset = None
end_offset = None
if read.reference_start < start:
beg_offset = start - read.reference_start - 1
if read.reference_end > end:
end_offset = len(seq) - (read.reference_end - end)
if beg_offset and end_offset:
seq = seq[beg_offset:end_offset]
elif beg_offset:
seq = seq[beg_offset:]
elif end_offset:
seq = seq[:end_offset]
print(">{} {} {} {}\n{}".format(read.query_name, primer, read.reference_start, read.reference_end, seq))
def print_pileup(bam):
"""
Print the information about the pileup
:param bam: the bam object from pysam
:type bam: pysam.AlignmentFile
:return:
:rtype:
"""
for template in locations:
for primer in locations[template]:
start, end = locations[template][primer]
for p in bam.pileup(reference=template, start=start, end=end, truncate=True):
bases = {}
for pilups in p.pileups:
if pilups.query_position:
bp = pilups.alignment.query_sequence[pilups.query_position]
else:
bp = '-'
bases[bp] = bases.get(bp, 0) + 1
sys.stdout.write("{} : {} -> {}\n".format(p.reference_name, p.reference_pos, str(bases)))
def print_consensus(bam):
"""
Print the consensus sequence about the pileup
:param bam: the bam object from pysam
:type bam: pysam.AlignmentFile
:return:
:rtype:
"""
for template in locations:
for primer in locations[template]:
start, end = locations[template][primer]
cseq = []
for p in bam.pileup(reference=template, start=start, end=end, truncate=True):
bases = {}
for pilups in p.pileups:
if pilups.query_position:
bp = pilups.alignment.query_sequence[pilups.query_position]
bases[bp] = bases.get(bp, 0) + 1
# if we don't have any bases at this position, add an N
if not bases:
bases['N'] = 1
bps = sorted(bases, key=bases.get, reverse=True)
# text = ""
# for b in bps:
# text += " " + b + ": " + str(bases[b])
# sys.stdout.write("{} : {} -> {}\n".format(p.reference_name, p.reference_pos, text))
# make the consensus seq
cseq.append(bps[0])
print(">{} {} {} {}\n{}".format(primer, template, start, end, ''.join(cseq)))
def print_alignment(bam, output_dir, filename, min_read_length=0, min_alignment_frac=0):
"""
Print an alignment of all matching pileups
:param output_dir: The output directory to store the results in. We will make a sub directory for each PCR region
:type output_dir: str
:param bam: the bam object from pysam
    :type bam: pysam.AlignmentFile
    :param filename: the file name to use for each per-primer alignment file
    :param min_read_length: minimum read length for a read to be included in the alignment
    :param min_alignment_frac: minimum fraction of the read that must be aligned for inclusion
    :return:
    :rtype:
"""
if not os.path.exists(output_dir):
os.mkdir(output_dir)
for template in locations:
for primer in locations[template]:
if not os.path.exists(os.path.join(output_dir, primer)):
os.mkdir(os.path.join(output_dir, primer))
start, end = locations[template][primer]
# print("ALIGNMENT: {} FROM {} TO {}\n".format(primer, start, end))
# This is a failed attempt to get the reference sequence for this region, but I am not sure that this
# is even possible from a BAM file, since each read will have a unique alignment to the reference
# refseq = ['-' for i in range(start, end)]
# for aln in bam.fetch(reference=template, start=start, end=end, until_eof=True):
# posns = aln.get_reference_positions()
# seq = aln.get_reference_sequence()
# if len(posns) > len(seq):
# sys.stderr.write("There are more positions {} than sequences {}\n".format(len(posns), len(seq)))
# continue
# for i in range(len(posns)):
# if posns[i] - start > len(refseq) -1:
# sys.stderr.write("Too many positions\n")
# if i > len(seq)-1:
# sys.stderr.write("Too many seq\n")
# refseq[posns[i]-start] = seq[i]
#
# print("{}_{} {}".format(template, primer, ''.join(refseq)))
alignment = {}
for p in bam.pileup(reference=template, start=start, end=end, truncate=True):
for pilups in p.pileups:
if pilups.alignment.query_name not in alignment:
alignment_frac = 1.0 * pilups.alignment.query_alignment_length/ pilups.alignment.query_length
if pilups.alignment.query_length > min_read_length and alignment_frac > min_alignment_frac:
alignment[pilups.alignment.query_name] = ['-' for idx in range(start, end+1)]
for p in bam.pileup(reference=template, start=start, end=end, truncate=True):
rp = p.reference_pos
idx = rp - start
for pilups in p.pileups:
if pilups.query_position:
posn = pilups.query_position - start
# sys.stderr.write("Posn: {} Q.position: {} start: {} end: {} len: {}\n".format(posn, pilups.query_position, start, end, end-start))
if pilups.alignment.query_name in alignment:
# if the read is too short, we don't include it here!
alignment[pilups.alignment.query_name][idx] = pilups.alignment.query_sequence[pilups.query_position]
# don't want to carry on if we have no sequences
if len(alignment) < 10:
continue
# find the longest name
longest_name = 0
for n in alignment:
if len(n) > longest_name:
longest_name = len(n)
longest_name += 5
# I want to sort by the number of -'s at the beginning of the sequence
beginning_gaps = {}
for n in alignment:
gap = 0
while (gap < len(alignment[n]) and alignment[n][gap] == '-'):
gap += 1
beginning_gaps[n] = gap
out = open(os.path.join(output_dir, primer, filename), 'w')
for n in sorted(alignment.keys(), key=beginning_gaps.get):
# this is for "pylip style output"
# out.write(n)
# out.write(" " * (longest_name - len(n)))
# out.write(''.join(alignment[n]) + "\n")
out.write(">{}\n{}\n".format(n, ''.join(alignment[n])))
out.close()
#print("\n\n")
def list_sequences(bam):
"""
List the sequences involved and whether they are forward or reverse
:param bam: the bam object from pysam
:type bam: pysam.AlignmentFile
:return:
:rtype:
"""
for template in locations:
for primer in locations[template]:
start, end = locations[template][primer]
print("\nALIGNMENT: {} FROM {} TO {}\n".format(primer, start, end))
for read in bam.fetch(reference=template, start=start, end=end):
print("{}\t{}\t{}".format(primer, read.query_name, read.is_reverse))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Extract PCRd regions from BAM files')
parser.add_argument('-b', help='bam file', required=True)
parser.add_argument('-q', help='print query regions. This is a fasta output of all sequences that are in the region', action='store_true')
parser.add_argument('-p', help='print pileup. Prints debug information about each position in the pileup', action='store_true')
parser.add_argument('-c', help='print consensus sequence. Prints a single sequence for each region', action='store_true')
parser.add_argument('-a', help='print alignment. Prints an alignment for each region.', action='store_true')
parser.add_argument('-l', help='list read ids and whether they are reversed', action='store_true')
parser.add_argument('-d', help='output directory to write the alignments to (required with -a)')
parser.add_argument('-m', help='minimum alignment length for inclusion in output. Default=0', default=0, type=int)
parser.add_argument('-f', help='minimum fraction (i.e. <=1) of read length in alignment. Default=0', default=0, type=float)
parser.add_argument('-v', help='verbose output')
args = parser.parse_args()
filename = os.path.split(args.b)[-1]
filename = filename.replace('.bam', '.aln')
bam = pysam.AlignmentFile(args.b, 'rb')
if args.q:
print_query_regions(bam)
if args.p:
print_pileup(bam)
if args.c:
print_consensus(bam)
if args.a:
if not args.d:
sys.exit("You must provide an output directory with -a flag")
print_alignment(bam, args.d, filename, args.m, args.f)
if args.l:
list_sequences(bam)
```
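The consensus call at each pileup column reduces to counting the observed bases and keeping the most frequent one, with N as a fallback when nothing aligns. The same logic in isolation, on an invented column:
```python
# base counts observed at one pileup column
bases = {'A': 7, 'G': 2, '-': 1}
if not bases:          # no coverage at this column
    bases['N'] = 1
# sort base calls by count, highest first, and keep the winner
bps = sorted(bases, key=bases.get, reverse=True)
consensus_base = bps[0]
print(consensus_base)  # A
```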
#### File: EdwardsLab/crAssphage/plot_genotypes.py
```python
import sys
import argparse
from roblib import sequences
import re
import matplotlib.pyplot as plt
def replace_leading_trailing(seq):
"""
Replace leading and trailing Ns with -s
:param seq: the sequence
:type seq: str
:return: the sequence with leading trailing N's replaced
:rtype: str
"""
validbase = {"A", "G", "C", "T"}
lastbase = 0
inseq = False
newseq = []
for i in range(len(seq)):
newseq.append("")
for (i, j) in enumerate(seq):
if j in validbase:
inseq = True
newseq[i] = j
lastbase = i
elif inseq:
newseq[i] = j
elif j == "N":
newseq[i] = "-"
else:
newseq[i] = j
    newnewseq = newseq[0:lastbase + 1]
for i in range(lastbase+1, len(newseq)):
if newseq[i] == "N":
newnewseq.append("-")
else:
newnewseq.append(newseq[i])
return "".join(newnewseq)
# data is going to be an array where each element is a hash
# the hash has to have the following elements:
# seq : this is the sequence and will be mutable - we will change the seqeunce
# ids : this is a list of seqid that contribute to this sequence
count=[]
total = []
# we initially set this up to be 10kb. Hopefully no sequences longer than that!
for i in range(10000):
count.append({"A": 0, "G": 0, "T": 0, "C": 0, "N": 0})
total.append(0)
longestseq = 0
firstrun = True
bases = {'A', 'T', 'G', 'C', 'N'}
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Extract genotypes from metagenomes by concatenating matching sequences")
parser.add_argument('-f', help='fasta sequence alignment file', required=True)
parser.add_argument('-n', help='minimum number of sequences a genotype must be in (default = 1)', default=1, type=int)
parser.add_argument('-b', help='plot start position', type=int)
parser.add_argument('-e', help='plot end position', type=int)
parser.add_argument('-c', help='cutoff to print the values (e.g. 0.8)', type=float)
parser.add_argument('-p', help='make the plot', action="store_true")
parser.add_argument('-a', help='print all results by base (the default is to sort the possibilities)', action="store_true")
args = parser.parse_args()
# to start we are just going to merge identical sequences
byseq = {}
for (seqid, seq) in sequences.stream_fasta(args.f):
seq = seq.upper() # make sure we have only upper case
seq = seq.strip() # strip of leading/trailing whitespace
#seq = seq.replace('N', '-') # remove any N's in the sequence
seq = replace_leading_trailing(seq)
seq = seq.rstrip('-') # remove all the trailing -s
keep_seq = True
for k in seq:
if k not in bases and k != "-":
if keep_seq:
sys.stderr.write("Skipped {} becuase it has base {}\n".format(seqid, k))
keep_seq = False
if not keep_seq:
continue
if len(seq) > longestseq:
longestseq = len(seq)
for (i, j) in enumerate(seq):
if j in bases:
count[i][j] = count[i][j] + 1
total[i] += 1
# now lets just make a plot of the data
startseq = 0
if args.e:
longestseq = args.e
if args.b:
startseq = args.b
if args.p:
fig = plt.figure()
ax = fig.add_subplot(111)
x = range(startseq, longestseq)
toprint = []
for base in "A", "T", "G", "C", "N":
counts = []
for i in range(startseq,longestseq):
if not total[i]:
continue
if args.c and 1.0 * count[i][base] / total[i] < args.c and 1 - (1.0 * count[i][base] / total[i]) < args.c:
toprint.append(i)
counts.append(1.0 * count[i][base]/total[i])
if args.p:
ax.plot(x, counts, label=base)
if args.c and toprint and not args.a:
for p in toprint:
results = []
for base in "A", "T", "G", "C", "N":
results.append(1.0 * count[p][base] / total[p])
results.sort(reverse=True)
print("\t".join(map(str, results)))
if args.a and toprint:
print("A\tG\tT\tC\tN\n")
for p in toprint:
print("{}\t{}\t{}\t{}\t{}\t{}".format(1.0 * count[p]["A"] / total[p],
1.0 * count[p]["G"] / total[p],
1.0 * count[p]["T"] / total[p],
1.0 * count[p]["C"] / total[p],
1.0 * count[p]["N"] / total[p],
total[p]))
if args.p:
ax.legend()
fig.set_facecolor('white')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
plt.show()
```
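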
#### File: EdwardsLab/crAssphage/print_ondrej_pcr_regions.py
```python
import argparse
import os
import sys
import pysam
__author__ = '<NAME>'
"""
This is just a version of extract-pcr-regions adapted to print out information from Ondrej Cinek.
Mainly it is the consensus header. You can probably delete this file!
"""
locations = {
'JQ995537': {
'A': (25634, 26964),
'B': (33709, 35062),
'C': (43820, 45057)
}
}
def print_consensus(bam, sname):
"""
Print the consensus sequence about the pileup
:param bam: the bam object from pysam
    :type bam: pysam.AlignmentFile
:return:
:rtype:
"""
for template in locations:
for primer in locations[template]:
start, end = locations[template][primer]
cseq = []
for p in bam.pileup(reference=template, start=start, end=end, truncate=True):
bases = {}
for pilups in p.pileups:
if pilups.query_position:
bp = pilups.alignment.query_sequence[pilups.query_position]
bases[bp] = bases.get(bp, 0) + 1
# if we don't have any bases at this position, add an N
if not bases:
bases['N'] = 1
bps = sorted(bases, key=bases.get, reverse=True)
# text = ""
# for b in bps:
# text += " " + b + ": " + str(bases[b])
# sys.stdout.write("{} : {} -> {}\n".format(p.reference_name, p.reference_pos, text))
# make the consensus seq
cseq.append(bps[0])
header = 'Cinek_{}_primer{}.20151120 [name=Cinek lab sample {} primer {} 20151120] '.format(sname, primer, sname, primer)
header += '[date=20151120] [latitude=61.505] [longitude=23.815] [note=coordinates are hospital location]'
if cseq:
print(">{}\n{}".format(header, ''.join(cseq)))
# >Brouns_A.20151106 [name=Brouns lab primer A 20151106] [date=20151106] [latitude=52.180646] [longitude=5.9478529] [altitude=36m]
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Extract PCRd regions from BAM files')
parser.add_argument('-b', help='bam file', required=True)
parser.add_argument('-v', help='verbose output')
args = parser.parse_args()
bam = pysam.AlignmentFile(args.b, 'rb')
sname = args.b.split('.')[0]
# print_query_regions(bam)
# print_pileup(bam)
print_consensus(bam, sname)
```
#### File: EdwardsLab/deconvolute_minion_reads/split_fastq.py
```python
import os
import sys
import argparse
import re
from dateutil.parser import parse
from datetime import timedelta
from fastq import stream_fastq
def write_fastq(fqf, outs, outdir):
"""
Write the sequences to a set of fastq files
:param fqf: the input fastq file with the original sequences
:param outs: the sets of sequences for each id
:param outdir: the output directory to write the sequences to
:return:
"""
if not os.path.exists(outdir):
os.mkdir(outdir)
outputs = {}
for o in outs:
outputs[o] = open(os.path.join(outdir, o + ".fastq"), 'w')
remapped = {}
for o in outs:
for seqid in outs[o]:
remapped[seqid] = o
for seqid, header, seq, qualscores in stream_fastq(fqf):
if seqid not in remapped:
sys.stderr.write("Error: found sequence {} that we don't know where to write{}\n".format(seqid))
outputs[remapped[seqid]].write("@{}\n{}\n+\n{}\n".format(header, seq, qualscores))
for o in outputs:
outputs[o].close()
def split_fastq(fqf, times):
"""
Split the fastq file based on the times and dates
:param fqf: fastq file to parse
:param times: dictionary of times
:return: a dictionary of time ids and the list of sequences in that time
"""
seqs = {"other" : set(), "before" : set(), "after" : set()}
alltimes = []
for t in times:
seqs[t] = set()
alltimes.append(times[t][0])
alltimes.append(times[t][1])
earliest = min(alltimes)
latest = max(alltimes)
newest = None
newestseq = None
oldest = None
oldestseq = None
for seqid, header, seq, qualscores in stream_fastq(fqf):
m = re.search("start_time=([\w\:\-]+)", header)
if not m:
sys.stderr.write("No start time was detected in {}\n".format(header))
continue
try:
seqstart = parse(m.groups()[0])
except ValueError as v:
sys.stderr.write("Can't parse date time from: {}\n".format(m.groups()[0]))
continue
if seqstart < earliest:
seqs['before'].add(seqid)
continue
if seqstart > latest:
seqs['after'].add(seqid)
continue
if not newest or seqstart < newest:
newest = seqstart
newestseq = seqid
if not oldest or seqstart > oldest:
oldest = seqstart
oldestseq = seqid
added = False
for t in times:
if seqstart > times[t][0] and seqstart <= times[t][1]:
added = True
seqs[t].add(seqid)
break
if not added:
seqs['other'].add(seqid)
sys.stderr.write("Newest sequence: {} at {}\nOldest sequence: {} at {}\n".format(
newestseq, newest, oldestseq, oldest
))
return seqs
def parse_times(timefile, ztoffset):
"""
Parse the times from the time separation file
:param timefile: the file to parse
:param ztoffset: the difference from zulu time
:return: a dict of IDs and times
"""
times = {}
lastid = None
with open(timefile, 'r') as fin:
for l in fin:
p=l.strip().split("\t")
try:
starttime = parse(p[1])
except:
sys.stderr.write("Error: could not parse start time from {}\n".format(p[1]))
continue
if ztoffset:
                starttime = starttime + timedelta(hours=ztoffset)
times[p[0]] = [starttime, 0]
if lastid:
times[lastid][1] = starttime
lastid = p[0]
times[lastid][1] = times[lastid][0] + timedelta(hours=48)
for t in times:
sys.stderr.write("Time: {} From: {} To: {}\n".format(t, times[t][0], times[t][1]))
return times
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Deconvolute a fastq file")
parser.add_argument('-f', help='fastq file to read', required=True)
parser.add_argument('-t', help='timestamp file', required=True)
parser.add_argument('-z', help='offset from zulu time. This number will be added to Z so use -8 on the west coast of US. Default: use Z time', type=int, default=0)
parser.add_argument('-o', help='output directory to write fastq files to. If not provided sequences not written')
parser.add_argument('-v', help='verbose output', action="store_true")
args = parser.parse_args()
times = parse_times(args.t, args.z)
seqs = split_fastq(args.f, times)
if args.o:
write_fastq(args.f, seqs, args.o)
for t in seqs:
sys.stdout.write("{}\t{}\n".format(t, "; ".join(seqs[t])))
```
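parse_times and split_fastq both lean on dateutil.parser.parse to turn timestamps into datetime objects and on timedelta to apply the -z offset. A small sketch with an invented nanopore-style start_time:
```python
from dateutil.parser import parse
from datetime import timedelta

# typical ISO-8601 start_time as found in a read header (made-up value)
start = parse("2018-03-14T09:26:53Z")
# applying a -8 hour offset, as the -z option does for US west coast times
local = start + timedelta(hours=-8)
print(start.isoformat(), local.isoformat())
```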
#### File: EdwardsLab/GregFrederickson/parse_xls_to_taxonomy.py
```python
import os
import sys
import argparse
from openpyxl import load_workbook
from taxon import connect_to_db, get_taxonomy, get_taxid_for_name
__author__ = '<NAME>'
taxonomy_str = {}
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
def parse_sheet(xlsfile, verbose=True, nocolor=False):
"""
Read the xls file and extract the data we want
:param xlsfile: the file to read
:param verbose: more output
:param nocolor: no color ouput
:return:
"""
correct_names = {"Clostridium beijerincki": "Clostridium beijerinckii",
"Caldicellulosiruptor accharolyticus": "Caldicellulosiruptor saccharolyticus",
"Chlorobium vvibrioforme": "Chlorobium vibrioforme", "Exiguobacterium UNDEF": "Exiguobacterium",
"Psychrobacter arcticum": "Psychrobacter arcticus",
"Shewanella putefaciens": "Shewanella putrefaciens",
"Magnetococcus sp.": "Magnetococcus sp. PL-5-10",
"Alkaliphillus metalliredigenes": "Geobacter metallireducens",
"Alkalilimnicola ehrlichei": "Alkalilimnicola ehrlichii",
"Silicibacter sp.": "Silicibacter sp. 1S17",
"Psychrobacter cryopegella": "Psychrobacter cryohalolentis"}
data = {}
taxa = set()
wb = load_workbook(filename = xlsfile, read_only=True)
    for s in wb.sheetnames:  # get_sheet_names() was removed in openpyxl 3.x
headers = []
ws=wb[s]
data[s] = {}
if verbose:
if nocolor:
sys.stderr.write(f"Parsing: {s}\n")
else:
sys.stderr.write(f"{bcolors.OKGREEN}Parsing: {s}{bcolors.ENDC}\n")
for row in ws.rows:
if not headers:
for i,j in enumerate(row):
if 0 == i and not j.value:
headers.append('taxon')
                    elif j.value:
headers.append(j.value.lower())
else:
headers.append(None)
for col in ['gp', 'mr', 'mp', 'blastn']:
if col not in headers:
if nocolor:
sys.stderr.write(f"ERROR: no column named {col} in {s}\n")
else:
sys.stderr.write(f"{bcolors.WARNING}ERROR:{bcolors.ENDC}: no column named {col} in {s}\n")
continue
# ignore empty rows
if None == row[0].value:
continue
# save the first 8 columns
taxonname = None
for i,j in enumerate(row):
if i > 9:
break
if 0 == i:
taxonname = j.value
spaces = taxonname.count(" ")
if spaces > 1:
spacesp = taxonname.split(" ")
taxonname = spacesp[0] + " " + spacesp[1]
if taxonname in correct_names:
taxonname = correct_names[taxonname]
taxa.add(taxonname)
data[s][taxonname] = {}
else:
if not headers[i]:
continue
if not taxonname:
if nocolor:
sys.stderr.write(f"FATAL: no taxonomy name\n")
else:
sys.stderr.write(f"{bcolors.FAIL}FATAL:{bcolors.ENDC}: no taxonomy name\n")
sys.exit(-1)
data[s][taxonname][headers[i]] = j.value
return data, taxa
def resolve_taxonomy(tid, conn, verbose=False, nocolor=False):
"""
Convert the taxonomy id to a tab separated string
:param tid: the taxonomy object
:param conn: the database connection
:param verbose: more output
:param nocolor: no color ouput
:return: a string representing the taxonomy
"""
global taxonomy_str
if tid in taxonomy_str:
return taxonomy_str[tid]
wanted_levels = ['superkingdom', 'phylum', 'class', 'order', 'family', 'genus', 'species']
rnk = ['', '', '', '', '', '', '']
t, n = get_taxonomy(tid, conn)
while t.parent != 1 and t.taxid != 1:
if t.rank in wanted_levels:
rnk[wanted_levels.index(t.rank)] = n.scientific_name
t, n = get_taxonomy(t.parent, conn)
taxonomy_str[tid] = "\t".join(rnk)
return taxonomy_str[tid]
def add_taxonomy(taxa, verbose=False, nocolor=False):
"""
Add the taxonomy to our data structure
:param taxa: the set of taxa to look up
:param verbose: more output
:return:
"""
tids = {}
taxonomy = {-1 : ['', '', '', '', '', '', '']}
db = connect_to_db("/data/ncbi/taxonomy.sqlite3")
if not db:
sys.stderr.write(f"{bcolors.FAIL}FATAL: could not connect to database{bcolors.ENDC}\n")
sys.exit(-1)
for t in taxa:
tid = get_taxid_for_name(t, db)
if tid:
taxonomy[t] = resolve_taxonomy(tid, db)
        else:
            sys.stderr.write(f"{bcolors.WARNING}ERROR: No taxonomy id for {t}{bcolors.ENDC}\n")
            tid = -1
            # record an empty taxonomy string so print_out does not hit a KeyError
            taxonomy[t] = "\t".join(taxonomy[-1])
tids[t] = tid
return tids, taxonomy
def print_out(data, tids, taxonomy, verbose=False, nocolor=False):
"""
Print all the data out
:param data: Out data hash
:param tids: the taxonomy ids
:param taxonomy: the taxonomy itself
:param verbose: more output
    :param nocolor: no color output
:return:
"""
print("\t".join(['Sample', 'TaxID', 'superkingdom', 'phylum', 'class', 'order', 'family', 'genus', 'species', 'gp', 'mr', 'mp']))
for s in data:
for t in data[s]:
tid = tids[t]
tax = taxonomy[t]
tr = []
for col in ['gp', 'mr', 'mp']:
tr.append(data[s][t][col])
truth = "\t".join(map(str, tr))
sys.stdout.write(f"{s}\t{tid}\t{tax}\t{t}\t{truth}\n")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Parse an excel sheet of taxa and add the NCBI taxonomy to each row')
parser.add_argument('-f', help='excel file', required=True)
parser.add_argument('-n', help='no color output', action='store_true')
parser.add_argument('-v', help='verbose output', action='store_true')
args = parser.parse_args()
data, taxa = parse_sheet(args.f, args.v, args.n)
tids, taxonomy = add_taxonomy(taxa, args.v, args.n)
print_out(data, tids, taxonomy, args.v, args.n)
```
#### File: EdwardsLab/jplacer/parse_rename_write.py
```python
import os
import sys
import argparse
import json
import re
from ete3 import Tree
from ete3.parser.newick import NewickError
from taxon import get_taxonomy_db, get_taxonomy
def load_jplacer(jpf):
"""
load the jplacer file and return the tree
:param jpf: The jplacer file
:return: the data structure of the tree
"""
with open(jpf, 'r') as f:
data = json.load(f)
return data
def explore_jplacer(data):
"""
Parse a jplacer data structure
:param the data structure from the jplacer file:
:return:
"""
# print("{}\n".format(data.keys()))
# sys.exit()
print("{}".format(data['fields']))
print("{}".format(data['placements']))
sys.exit(0)
"""
d1 = data['placements'][0]
print("{}".format(d1))
"""
for d in data['placements']:
if 'nm' in d:
print("{}".format(d))
break
"""
for d in data['placements']:
print("\n".join(d.keys()))
"""
def clean_newick_id(name):
"""
Return a version of name suitable for placement in a newick file
:param name: The name to clean up
:return: a name with no colons, spaces, etc
"""
name = name.replace(' ', '_')
name = name.replace(':', '_')
return name
def get_placements(data, distmeasure):
"""
Get the placements and return a dict with the keys being the edge numbers where to do
the insertions and the values being a dict of nodes to insert at that point and their distances.
    For this purpose we multiply the distance measure by the modifier for each entry
TODO: we have not implemented the approach for a single insertion as we don't have an example of that (yet!)
:param data: the parsed jplacer tree
:param distmeasure: the distance measure to use
:return: a dict of placement edge_numbers and sets of ids to add
"""
# first make sure the tree fields are in the correct order!
posn = data['fields'].index('edge_num')
if distmeasure not in data['fields']:
sys.stderr.write("Crap. We do not have {} in our possible fields: {}\n".format(distmeasure, data['fields']))
sys.exit(-1)
distn = data['fields'].index(distmeasure)
placements = {}
for pl in data['placements']:
multiplier = {}
if 'n' in pl:
sys.stderr.write("Crap, not sure what to do because I've never seen an example. You should be able to figure out from what I did with nm\n")
sys.exit(-1)
if 'nm' in pl:
for i in pl['nm']:
multiplier[i[0].replace(' ', '_')] = i[1]
for p in pl['p']:
edge_num = p[posn]
distance = p[distn]
if edge_num not in placements:
placements[edge_num] = {}
for thisid in multiplier:
newid = clean_newick_id(thisid)
placements[edge_num][newid] = multiplier[thisid] * distance * 1.0
return placements
def parse_jplacer_tree(data):
"""
Extract the tree from the jplacer data structure and make it into an ete3 object
:param data: the jplacer data structure
:return:
"""
try:
tree = Tree(data['tree'], quoted_node_names=True, format=1)
except NewickError as n:
tt = re.sub(r'(\:[\d\.]+){\d+}', r'\1', data['tree'])
tt = re.sub(r'{\d+};$', ';', tt)
tree = Tree(tt, quoted_node_names=True, format=1)
return tree
def find_a_node(tree, nodeid):
"""
Find a specific node in the tree
:param tree: the tree to search
:param nodeid: the node id to look for. This should be a string
:type nodeid: str
:return:
"""
print("Traversing and looking for {{{}}}".format(nodeid))
for t in tree.traverse("preorder"):
if "{{{}}}".format(nodeid) in t.name:
print("Found {}".format(t.name))
t.add_child(name="NEW CHILD")
print(tree.write(format=1))
def insert_new_nodes(tree, placements, namesf=None, verbose=False):
"""
Insert the new nodes in the tree at the correct place
:param tree: The phylogenetic tree
:param placements: the list of edges to place in the right place
:return:
"""
added = set()
addednames = set()
for t in tree.traverse("postorder"):
        m = re.search(r'{(\d+)}', t.name)
if not m:
continue
thisid = int(m.groups()[0])
if thisid in placements:
added.add(thisid)
for n in placements[thisid]:
addednames.add(n)
t.add_child(name=n, dist=placements[thisid][n])
if verbose:
for p in placements:
if p not in added:
sys.stderr.write("We did not find a node to add {}\n".format(p))
if namesf:
with open(namesf, 'w') as namesout:
for p in addednames:
namesout.write("{}\n".format(p))
return tree
def rename_leaves_taxids(tree):
"""
Rename the leaf nodes with just the NCBI taxonomy ID if we have it
:param tree: the tree to rename
:return: the tree with renamed leaves
"""
for n in tree.get_leaves():
m = re.search(r'\[(\d+)\]', n.name)
if m:
n.name = m.groups()[0]
return tree
def rename_nodes_ncbi(tree, verbose=False):
"""
Rename the nodes based on everything below me
"""
    # connect to the SQL database
c = get_taxonomy_db()
wanted_levels = ['superkingdom', 'phylum', 'class', 'order', 'family', 'genus', 'species', 'subspecies']
wanted_levels.reverse() # too lazy to write in reverse :)
taxonomy = {}
# first get all the leaves and their parents. This is just to speed things up ... maybe
for l in tree.get_leaves():
        m = re.search(r'\[(\d+)\]', l.name)
if not m:
if verbose:
sys.stderr.write("No taxid in {}\n".format(l.name))
continue
tid = m.groups()[0]
taxonomy[l.name] = {}
t,n = get_taxonomy(tid, c)
if not t:
continue
while t.parent != 1 and t.taxid != 1:
if t.rank in wanted_levels:
taxonomy[l.name][t.rank] = n.scientific_name
t,n = get_taxonomy(t.parent, c)
    # now traverse every node that is not a leaf and see if we can come up with a
# unique name for the node!
if verbose:
sys.stderr.write("Traversing the tree to rename the nodes\n")
for n in tree.traverse("preorder"):
if n.is_leaf():
continue
taxs = {w:set() for w in wanted_levels}
for l in n.get_leaves():
if l.name not in taxonomy:
continue
for w in wanted_levels:
if w in taxonomy[l.name]:
taxs[w].add(taxonomy[l.name][w])
# which is the LOWEST level with a single taxonomy
for w in wanted_levels:
if len(taxs[w]) == 1:
newname = "{} r_{}".format(taxs[w].pop(), w)
if verbose:
sys.stderr.write("Changing name from: {} to {}\n".format(n.name, newname))
n.name = newname
break
return tree
def reroot_tree(tree, verbose=False):
"""
Reroot the tree between bacteria and archaea.
This will only work after renaming the leaves on the tree.
:param tree: the tree
"""
didreroot = False
if verbose:
sys.stderr.write("rerooting the tree\n")
for n in tree.traverse("preorder"):
childs = n.get_children()
if verbose:
cname = ""
for c in childs:
cname += "| {} |".format(c.name)
sys.stderr.write("{}\t{}\t{}\n".format(len(childs), n.name, cname))
if len(childs) == 2:
if ("Archaea r_superkingdom" in childs[0].name and "Eukaryota r_superkingdom" in childs[1].name) or ("Archaea r_superkingdom" in childs[1].name and "Eukaryota r_superkingdom" in childs[0].name):
tree.set_outgroup(n)
if verbose:
sys.stderr.write("Rerooted on {}\n".format(n.name))
didreroot = True
break
if "Bacteria r_superkingdom" in childs[0].name and "Archaea r_superkingdom" in childs[1].name:
tree.set_outgroup(childs[0])
if verbose:
sys.stderr.write("Rerooted on {}\n".format(childs[0].name))
didreroot = True
break
if "Bacteria r_superkingdom" in childs[1].name and "Archaea r_superkingdom" in childs[0].name:
tree.set_outgroup(childs[1])
if verbose:
sys.stderr.write("Rerooted on {}\n".format(childs[1].name))
didreroot = True
break
if not didreroot:
for n in tree.traverse("preorder"):
if "Bacteria r_superkingdom" in n.name:
tree.set_outgroup(n)
if verbose:
sys.stderr.write("Rerooted on {} because it is bacteria\n".format(n.name))
break
return tree
def write_leaves(tree, outputf):
"""
Write a list of all the leaves, one line per leaf.
:param tree: the tree
:param outputf: the file to write
:return:
"""
with open(outputf, 'w') as out:
for n in tree.get_leaves():
out.write("{}\n".format(n.name))
def write_tree(tree, outputf):
"""
Write the tree to a file.
:param tree: The tree to write
:param outputf: The output filename
:return:
"""
tree.write(outfile=outputf, format=1)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Parse a jplacer file')
parser.add_argument('-j', help='jplacer file', required=True)
parser.add_argument('-o', help='output file to write the tree to', required=True)
parser.add_argument('-n', help='names file for nodes that were placed onto the tree. If provided we write out those names')
parser.add_argument('-l', help='filename for a file with a list of all leaf names of the final tree')
    parser.add_argument('-d', help='Distance (can be either distal_length (default) or pendant_length)', default='distal_length')
parser.add_argument('-v', help='verbose output', action='store_true')
args = parser.parse_args()
if args.d not in ['distal_length', 'pendant_length']:
        sys.stderr.write("sorry, at the moment the -d option must be either distal_length or pendant_length\n")
sys.exit()
data = load_jplacer(args.j)
tree = parse_jplacer_tree(data)
# explore_jplacer(data)
# find_a_node(tree, "2161")
placements = get_placements(data, args.d)
tree = insert_new_nodes(tree, placements, args.n, args.v)
#tree = rename_leaves_taxids(tree)
tree = rename_nodes_ncbi(tree, args.v)
tree = reroot_tree(tree, args.v)
write_tree(tree, args.o)
if args.l:
write_leaves(tree, args.l)
```
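The node-renaming logic above reduces to: gather each leaf's taxonomy, then label the internal node with the most specific rank on which every leaf agrees. A standalone sketch of that rule on made-up leaf taxonomies (no ete3 or NCBI lookup involved):
```python
# Illustrative sketch of the "lowest rank with a single taxonomy" rule used above.
# The leaf taxonomies are hard-coded toy data rather than coming from get_taxonomy().
wanted_levels = ['superkingdom', 'phylum', 'class', 'order', 'family', 'genus', 'species']
wanted_levels.reverse()  # check the most specific rank first
leaf_taxonomy = {
    'leafA': {'superkingdom': 'Bacteria', 'phylum': 'Proteobacteria', 'genus': 'Escherichia'},
    'leafB': {'superkingdom': 'Bacteria', 'phylum': 'Proteobacteria', 'genus': 'Salmonella'},
}
# collect every value seen at each rank across the leaves under the node
taxs = {w: set() for w in wanted_levels}
for leaf, ranks in leaf_taxonomy.items():
    for w in wanted_levels:
        if w in ranks:
            taxs[w].add(ranks[w])
# the first (most specific) rank with exactly one value names the internal node
for w in wanted_levels:
    if len(taxs[w]) == 1:
        print("{} r_{}".format(taxs[w].pop(), w))  # -> "Proteobacteria r_phylum"
        break
```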
#### File: EdwardsLab/jplacer/rename_tree.py
```python
import os
import sys
import argparse
import json
import re
from ete3 import Tree
from ete3.parser.newick import NewickError
from taxon import get_taxonomy_db, get_taxonomy
def load_jplacer(jpf, verbose=False):
"""
load the jplacer file and return the tree
:param jpf: The jplacer file
:return: the data structure of the tree
"""
with open(jpf, 'r') as f:
data = json.load(f)
return data
def parse_jplacer_tree(data, verbose=False):
"""
Extract the tree from the jplacer data structure and make it into an ete3 object
:param data: the jplacer data structure
:return:
"""
try:
tree = Tree(data['tree'], quoted_node_names=True, format=1)
except NewickError as n:
tt = re.sub(r'(\:[\d\.]+){\d+}', r'\1', data['tree'])
tt = re.sub(r'{\d+};$', ';', tt)
tree = Tree(tt, quoted_node_names=True, format=1)
return tree
def rename_nodes(tree, verbose=False):
"""
Rename the nodes based on everything below me
"""
    # connect to the SQL database
c = get_taxonomy_db()
wanted_levels = ['superkingdom', 'phylum', 'class', 'order', 'family', 'genus', 'species', 'subspecies']
wanted_levels.reverse() # too lazy to write in reverse :)
taxonomy = {}
# first get all the leaves and their parents. This is just to speed things up ... maybe
for l in tree.get_leaves():
        m = re.search(r'\[(\d+)\]', l.name)
if not m:
if verbose:
sys.stderr.write("No taxid in {}\n".format(l.name))
continue
tid = m.groups()[0]
taxonomy[l.name] = {}
t,n = get_taxonomy(tid, c)
if not t:
continue
while t.parent != 1 and t.taxid != 1:
if t.rank in wanted_levels:
taxonomy[l.name][t.rank] = n.scientific_name
t,n = get_taxonomy(t.parent, c)
    # now traverse every node that is not a leaf and see if we can come up with a
# unique name for the node!
sys.stderr.write("Traversing the tree\n")
for n in tree.traverse("preorder"):
if n.is_leaf():
continue
sys.stderr.write("Checking {}\n".format(n.name))
taxs = {w:set() for w in wanted_levels}
for l in n.get_leaves():
if l.name not in taxonomy:
continue
for w in wanted_levels:
if w in taxonomy[l.name]:
taxs[w].add(taxonomy[l.name][w])
# which is the LOWEST level with a single taxonomy
for w in wanted_levels:
if len(taxs[w]) == 1:
                newname = "{} r_{}".format(taxs[w].pop(), w)
                if verbose:
                    sys.stderr.write("Changing name from: {} to {}\n".format(n.name, newname))
                n.name = newname
break
return tree
def reroot_tree(tree):
"""
Reroot the tree between bacteria and archaea.
This will only work after renaming the leaves on the tree.
:param tree: the tree
"""
sys.stderr.write("rerooting\n")
for n in tree.traverse("preorder"):
childs = n.get_children()
cname = ""
for c in childs:
cname += "| {} |".format(c.name)
sys.stderr.write("{}\t{}\t{}\n".format(len(childs), n.name, cname))
if len(childs) == 2:
if ("Archaea r_superkingdom" in childs[0].name and "Eukaryota r_superkingdom" in childs[1].name) or ("Archaea r_superkingdom" in childs[1].name and "Eukaryota r_superkingdom" in childs[0].name):
tree.set_outgroup(n)
sys.stderr.write("Rerooted on {}\n".format(n.name))
break
if "Bacteria r_superkingdom" in childs[0].name and "Archaea r_superkingdom" in childs[1].name:
tree.set_outgroup(childs[0])
sys.stderr.write("Rerooted on {}\n".format(childs[0].name))
break
if "Bacteria r_superkingdom" in childs[1].name and "Archaea r_superkingdom" in childs[0].name:
tree.set_outgroup(childs[1])
sys.stderr.write("Rerooted on {}\n".format(childs[1].name))
break
return tree
def write_tree(tree, outputf):
"""
Write the tree to a file.
:param tree: The tree to write
:param outputf: The output filename
:return:
"""
tree.write(outfile=outputf, format=1)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Parse a jplacer file')
parser.add_argument('-j', help='jplacer file', required=True)
parser.add_argument('-o', help='output file to write the tree to', required=True)
parser.add_argument('-v', help='verbose', action='store_true')
args = parser.parse_args()
data = load_jplacer(args.j, args.v)
tree = parse_jplacer_tree(data, args.v)
tree = rename_nodes(tree, args.v)
tree = reroot_tree(tree)
write_tree(tree, args.o)
```
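parse_jplacer_tree() above falls back to stripping the jplacer {edge_number} tags when ete3 rejects the raw Newick string. The same regex cleanup on a made-up example string:
```python
import re
# A toy jplacer-style Newick string with {edge_number} tags (made up for illustration).
raw = "((A:0.1{0},B:0.2{1}):0.05{2},C:0.3{3}):0{4};"
# Drop the {N} tag that follows each branch length, keeping the length itself.
clean = re.sub(r'(\:[\d\.]+){\d+}', r'\1', raw)
# Also drop a trailing {N} right before the semicolon (a no-op for this example).
clean = re.sub(r'{\d+};$', ';', clean)
print(clean)  # ((A:0.1,B:0.2):0.05,C:0.3):0;
```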
#### File: EdwardsLab/kmers/kmer_entropy3.3.py
```python
import os
import sys
import argparse
import gzip
import subprocess
import json
from itertools import product
from math import log2
from roblib.rob_error import FastqFormatError  # raised by stream_fastq(); defined in roblib/rob_error.py
def rc(dna):
"""
Reverse complement a DNA sequence
:param dna: The DNA sequence
:type dna: str
:return: The reverse complement of the DNA sequence
:rtype: str
"""
complements = str.maketrans('acgtrymkbdhvACGTRYMKBDHV', 'tgcayrkmvhdbTGCAYRKMVHDB')
rcseq = dna.translate(complements)[::-1]
return rcseq
def stream_fasta(fastafile, whole_id=True):
"""
Stream a fasta file, one read at a time. Saves memory!
:param fastafile: The fasta file to stream
:type fastafile: str
:param whole_id: Whether to return the whole id (default) or just up to the first white space
:type whole_id:bool
:return:A single read
:rtype:str, str
"""
try:
if fastafile.endswith('.gz'):
f = gzip.open(fastafile, 'rt')
elif fastafile.endswith('.lrz'):
f = subprocess.Popen(['/usr/bin/lrunzip', '-q', '-d', '-f', '-o-', fastafile], stdout=subprocess.PIPE).stdout
else:
f = open(fastafile, 'r')
except IOError as e:
sys.stderr.write(str(e) + "\n")
        sys.stderr.write("Message: \n" + str(e.strerror) + "\n")
sys.exit("Unable to open file " + fastafile)
posn = 0
while f:
# first line should start with >
idline = f.readline()
if not idline:
break
if not idline.startswith('>'):
sys.exit("Do not have a fasta file at: {}".format(idline))
if not whole_id:
idline = idline.split(" ")[0]
idline = idline.strip().replace('>', '', 1)
posn = f.tell()
line = f.readline()
seq = ""
while not line.startswith('>'):
seq += line.strip()
posn = f.tell()
line = f.readline()
if not line:
break
f.seek(posn)
yield idline, seq
def stream_fastq(fqfile):
"""Read a fastq file and provide an iterable of the sequence ID, the
    full header, the sequence, and the quality scores.
Note that the sequence ID is the header up until the first space,
while the header is the whole header.
"""
if fqfile.endswith('.gz'):
qin = gzip.open(fqfile, 'rt')
else:
qin = open(fqfile, 'r')
linecounter = 0
while True:
header = qin.readline()
linecounter += 1
if not header:
break
if not header.startswith("@"):
            raise FastqFormatError("The file does not appear to be a four-line fastq file at line {}".format(linecounter))
header = header.strip()
seqidparts = header.split(' ')
seqid = seqidparts[0]
seqid = seqid.replace('@', '')
seq = qin.readline().strip()
linecounter += 1
qualheader = qin.readline()
if not qualheader.startswith("+"):
            raise FastqFormatError("The file does not appear to be a four-line fastq file at line {}".format(linecounter))
linecounter += 1
qualscores = qin.readline().strip()
linecounter += 1
header = header.replace('@', '', 1)
if len(qualscores) != len(seq):
            raise FastqFormatError("The sequence and qual scores are not the same length at line {}".format(linecounter))
yield seqid, header, seq, qualscores
def count_kmers(faf, type, k, jsonout=None, verbose=False):
"""
Count the kmers
:param faf: fasta file
:param type: str either fasta or fastq
:param k: kmer size
:param verbose: more output
:return: a dict of kmers
"""
if verbose:
sys.stderr.write("Counting kmers (k={}) in {}\n".format(k, faf))
kmers = {}
if type == "fasta":
for id, seq in stream_fasta(faf):
rcseq = rc(seq)
posn = 0
while posn < len(seq) - k - 1:
kmers[seq[posn:posn+k]] = kmers.get(seq[posn:posn+k], 0) + 1
kmers[rcseq[posn:posn + k]] = kmers.get(rcseq[posn:posn + k], 0) + 1
posn += 1
if type == "fastq":
for id, fullid, seq, qual in stream_fastq(faf):
rcseq = rc(seq)
posn = 0
while posn < len(seq) - k - 1:
kmers[seq[posn:posn+k]] = kmers.get(seq[posn:posn+k], 0) + 1
kmers[rcseq[posn:posn + k]] = kmers.get(rcseq[posn:posn + k], 0) + 1
posn += 1
if jsonout:
with open(jsonout, 'w') as out:
json.dump({faf : kmers}, out)
if verbose:
sys.stderr.write("\tDone counting kmers\n")
return kmers
def shannon(kmers, verbose=False):
"""
Calculate the shannon entropy
:param kmers: the kmer dictionary
:param verbose: more output
:return: the shannon entropy of the kmers
"""
if verbose:
sys.stderr.write("Calculating Shannon's Entropy\n")
t = sum(kmers.values())
H = 0
for x in kmers:
H += (kmers[x] / t) * (log2(kmers[x]/t))
return -H
def evenness(kmers, H=None, verbose=False):
"""
Calculate the evenness
:param kmers: the kmer dictionary
:param H: shannon entropy (optional). If provided, we won't recalculate
:param verbose: more output
:return: the evenness of the kmers
"""
if not H:
H = shannon(kmers, verbose)
S = len(kmers.keys())
return H/log2(S)
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Count the kmers in a file and report entropy and evenness')
parser.add_argument('-f', help='fasta file to count the entropy/evenness')
parser.add_argument('-q', help='fastq file to count the entropy/evenness')
parser.add_argument('-k', help='kmer size', required=True, type=int)
parser.add_argument('-j', help='json output for kmer counts')
parser.add_argument('-v', help='verbose output', action='store_true')
args = parser.parse_args()
if args.f:
kmers = count_kmers(args.f, 'fasta', args.k, args.j, args.v)
elif args.q:
kmers = count_kmers(args.q, 'fastq', args.k, args.j, args.v)
else:
sys.stderr.write("FATAL: Please supply either a fasta file or a fastq file\n")
sys.exit(-1)
H = shannon(kmers, args.v)
e = evenness(kmers, H, args.v)
if args.f:
print("{}\t{}\t{}\t{}".format(args.f, args.k, H, e))
else:
print("{}\t{}\t{}\t{}".format(args.q, args.k, H, e))
```
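As a quick sanity check on shannon() and evenness(), here is the same arithmetic worked on a tiny hand-made k-mer table; a perfectly uniform table should give evenness of exactly 1.0:
```python
from math import log2
# Toy k-mer counts (made up): four k-mers seen equally often.
kmers = {'AC': 5, 'CG': 5, 'GT': 5, 'TA': 5}
t = sum(kmers.values())                       # total observations = 20
H = -sum((c / t) * log2(c / t) for c in kmers.values())
S = len(kmers)                                # richness = number of distinct k-mers
E = H / log2(S)                               # evenness
print(H)  # 2.0  (log2 of 4 equally likely outcomes)
print(E)  # 1.0  (perfectly even)
```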
#### File: EdwardsLab/kmers/kmer_entropy_sa.py
```python
import os
import sys
import argparse
import gzip
from math import log2
BLUE = '\033[94m'
GREEN = '\033[92m'
ENDC = '\033[0m'
RED = '\033[91m'
def rc(dna):
"""
Reverse complement a DNA sequence
:param dna: The DNA sequence
:type dna: str
:return: The reverse complement of the DNA sequence
:rtype: str
"""
complements = str.maketrans('acgtrymkbdhvACGTRYMKBDHV', 'tgcayrkmvhdbTGCAYRKMVHDB')
rcseq = dna.translate(complements)[::-1]
return rcseq
def stream_fastq(fqfile):
"""Read a fastq file and provide an iterable of the sequence ID, the
    full header, the sequence, and the quality scores.
Note that the sequence ID is the header up until the first space,
while the header is the whole header.
"""
if fqfile.endswith('.gz'):
qin = gzip.open(fqfile, 'rt')
else:
qin = open(fqfile, 'r')
while True:
header = qin.readline()
if not header:
break
header = header.strip()
seqidparts = header.split(' ')
seqid = seqidparts[0]
seqid = seqid.replace('@', '')
seq = qin.readline()
seq = seq.strip()
qualheader = qin.readline()
qualscores = qin.readline().strip()
header = header.replace('@', '', 1)
yield seqid, header, seq, qualscores
def stream_fasta(fastafile):
"""
Stream a fasta file, one read at a time. Saves memory!
:param fastafile: The fasta file to stream
:type fastafile: str
:return:The ID, and a single read
:rtype:str, str
"""
if not os.path.exists(fastafile):
sys.stderr.write(f"{RED}FATAL: {fastafile} does not exist\n{ENDC}")
sys.exit(2)
try:
if fastafile.endswith('.gz'):
f = gzip.open(fastafile, 'rt')
else:
f = open(fastafile, 'r')
except IOError as e:
sys.stderr.write(str(e) + "\n")
        sys.stderr.write("Message: \n" + str(e.strerror) + "\n")
sys.exit("Unable to open file " + fastafile)
posn = 0
while f:
# first line should start with >
idline = f.readline()
if not idline:
break
if not idline.startswith('>'):
sys.exit("Do not have a fasta file at: {}".format(idline))
idline = idline.strip().replace('>', '', 1)
posn = f.tell()
line = f.readline()
seq = ""
while not line.startswith('>'):
seq += line.strip()
posn = f.tell()
line = f.readline()
if not line:
break
f.seek(posn)
yield idline, seq
def count_kmers_fastq(faf, k, verbose=False):
"""
Count the kmers
:param faf: fasta file
:param k: kmer size
:param verbose: more output
:return: a dict of kmers
"""
if verbose:
sys.stderr.write(f"{GREEN}Counting kmers (k={k}) in {faf}{ENDC}\n")
if not os.path.exists(faf):
sys.stderr.write(f"{RED}FATAL: {faf} does not exist\n{ENDC}")
sys.exit(2)
kmers = {}
for id, header, seq, qual in stream_fastq(faf):
rcseq = rc(seq)
posn = 0
while posn < len(seq) - k - 1:
kmers[seq[posn:posn+k]] = kmers.get(seq[posn:posn+k], 0) + 1
kmers[rcseq[posn:posn + k]] = kmers.get(rcseq[posn:posn + k], 0) + 1
posn += 1
if verbose:
sys.stderr.write(f"{BLUE}\tDone counting kmers (k={k}) in {faf}{ENDC}\n")
return kmers
def count_kmers(faf, k, verbose=False):
"""
Count the kmers
:param faf: fasta file
:param k: kmer size
:param verbose: more output
:return: a dict of kmers
"""
if verbose:
sys.stderr.write(f"{GREEN}Counting kmers (k={k}) in {faf}{ENDC}\n")
if not os.path.exists(faf):
sys.stderr.write(f"{RED}FATAL: {faf} does not exist\n{ENDC}")
sys.exit(2)
kmers = {}
for id, seq in stream_fasta(faf):
rcseq = rc(seq)
posn = 0
while posn < len(seq) - k - 1:
kmers[seq[posn:posn+k]] = kmers.get(seq[posn:posn+k], 0) + 1
kmers[rcseq[posn:posn + k]] = kmers.get(rcseq[posn:posn + k], 0) + 1
posn += 1
if verbose:
sys.stderr.write(f"{BLUE}\tDone counting kmers (k={k}) in {faf}{ENDC}\n")
return kmers
def shannon(kmers, verbose=False):
"""
Calculate the shannon entropy
:param kmers: the kmer dictionary
:param verbose: more output
:return: the shannon entropy of the kmers
"""
if verbose:
sys.stderr.write(f"{GREEN}Calculating Shannon's Entropy{ENDC}\n")
t = sum(kmers.values())
H = 0
for x in kmers:
H += (kmers[x] / t) * (log2(kmers[x]/t))
return -H
def evenness(kmers, H=None, verbose=False):
"""
Calculate the evenness
:param kmers: the kmer dictionary
:param H: shannon entropy (optional). If provided, we won't recalculate
:param verbose: more output
:return: the evenness of the kmers
"""
if not H:
H = shannon(kmers, verbose)
S = len(kmers.keys())
return H/log2(S), log2(S)
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Count the kmers in a file and report entropy and evenness')
parser.add_argument('-f', help='fasta file to count the entropy/evenness')
    parser.add_argument('-q', help='fastq file to count the entropy/evenness')
parser.add_argument('-k', help='kmer size', required=True, type=int)
parser.add_argument('-t', help='print field titles in output', action='store_true')
parser.add_argument('-v', help='verbose output', action='store_true')
args = parser.parse_args()
filename = None
if args.q:
kmers = count_kmers_fastq(args.q, args.k, args.v)
filename = args.q
elif args.f:
kmers = count_kmers(args.f, args.k, args.v)
filename = args.f
else:
sys.stderr.write(f"{RED}FATAL: Either -f (fasta) or -q (fastq) must be specified\n{ENDC}")
sys.exit(2)
H = shannon(kmers, args.v)
e,s = evenness(kmers, H, args.v)
if args.t:
print("File\tK-mer size\tShannon's Entropy\tRichness\tEvenness")
print(f"{filename}\t{args.k}\t{H}\t{s}\t{e}")
```
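The rc() helper above leans entirely on str.maketrans, with the IUPAC ambiguity codes included in the translation table. A standalone check on toy sequences:
```python
# Same translation-table trick as rc() above, shown on its own.
complements = str.maketrans('acgtrymkbdhvACGTRYMKBDHV', 'tgcayrkmvhdbTGCAYRKMVHDB')
def rc(dna):
    """Reverse complement, including IUPAC ambiguity codes."""
    return dna.translate(complements)[::-1]
print(rc("ACGT"))     # ACGT  (palindromic)
print(rc("GATTACA"))  # TGTAATC
print(rc("ACGN"))     # NCGT  (characters not in the table, like N, pass through)
```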
#### File: EdwardsLab/kyle/kmerbias.py
```python
import os
import sys
import argparse
from itertools import combinations
def repN(n, original, sample, khash, verbose=False):
"""
Permute the string, replacing each individual occurrence of N with {A, G, C, T}
:param n: The test string
:param original: The original string
:param sample: The specific sample
:param khash: our dict of dicts
:param verbose: more output, but it is written to stderr
:return:
"""
if "N" not in n:
return khash
for base in ["A", "C", "G", "T"]:
a = n.replace('N', base, 1)
if "N" in a:
repN(a, original, sample, khash, verbose)
else:
khash[sample][original] += khash[sample][a]
if verbose:
sys.stderr.write(f"{a}\t{original}\n")
return khash
def read_kmer_counts(infile, verbose=False):
"""
Read the k-mer counts file and create a dict of dicts
:param infile: the file to read. This should be the output from GetNucFrequency_PerSeq_varK.pl
:param verbose: more output, but it is written to stderr
:return: a dict of dicts
"""
khash = {}
with open(infile, 'r') as f:
names = f.readline().strip().split("\t")
# define an empty set of dicts
khash = {x:{} for x in names[1:]}
for l in f:
# convert this to an array
p = l.strip().split("\t")
key = p[0]
for i in range(1, len(p)):
khash[names[i]][key] = float(p[i])
return khash
def process_all_kmers(kmers, khash, verbose=False):
"""
Process and normalize all the k-mers
:param kmers: an array of all the k-mers
:param khash: our dict of dicts we've parsed out
:param verbose: more output, but it is written to stderr
:return:
"""
samples = sorted(khash.keys())
print("Matrix\t{}".format("\t".join(samples)))
for k in kmers:
sys.stdout.write("{}".format(k))
for s in samples:
if k not in khash[s]:
sys.stdout.write("\t0")
continue
ksize = len(k)
score = 1
current = 0
for i in range(ksize, 0, -1):
for c in combinations(range(ksize), i):
testk = ""
startEnd = c[-1] - c[0] + 1
size = len(c)
if verbose:
                        sys.stderr.write(f"c: {c} startEnd is {startEnd} and size is {size}\n")
if startEnd > size:
# For each possible K-mer you must add the probabilities and that sum is the cell by which you will multiply.
count = c[0]
t = 0
numN = 0
while t < len(c):
if c[t] == count:
testk += k[c[t]] # append the letter
count += 1
t += 1
else:
testk += 'N'
count += 1
numN += 1
# Now you have to define the Freq value with N based on the sum of all the N components
if testk not in khash[s]:
khash[s][testk] = 0
khash = repN(testk, testk, s, khash, verbose)
else:
if verbose:
sys.stderr.write(f"Getting testk from: {c[0]} to {c[-1]} ")
for t in range(c[0], c[-1]+1):
testk += k[t]
if verbose:
sys.stderr.write(f"{t} ({testk}) ")
if verbose:
sys.stderr.write("\n")
if verbose:
sys.stderr.write(f"At this point: s: {s} testk: {testk} score: {score} khash: {khash[s][testk]} current: {current}\n")
if 0 == current:
score *= khash[s][testk]
else:
score = score / khash[s][testk]
if verbose:
sys.stderr.write(f"At this point: s: {s} testk: {testk} score: {score} khash: {khash[s][testk]} current: {current}\n")
if 0 == current:
current = 1
else:
current = 0
sys.stdout.write(f"\t{score}")
sys.stdout.write("\n")
__author__ = '<NAME>'
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Save Kyles butt')
parser.add_argument('-f', help='output file from GetNucFrequency_PerSeq_varK.pl', required=True)
parser.add_argument('-k', help='kmers to test. You can either specify multiple -k or use a comma separated list', required=True, action='append')
parser.add_argument('-v', help='verbose output', action='store_true')
args = parser.parse_args()
khash = read_kmer_counts(args.f)
# figure out all the kmers
allkmers = set()
for k in args.k:
allkmers.update(k.split(","))
process_all_kmers(allkmers, khash, args.v)
```
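repN() above recursively expands each N in a k-mer pattern to A/C/G/T so that the ambiguous pattern's frequency can be summed from its concrete k-mers. The expansion step on its own, without the score bookkeeping:
```python
def expand_n(pattern):
    """Return every concrete k-mer obtained by replacing each N with A, C, G or T."""
    if 'N' not in pattern:
        return [pattern]
    out = []
    for base in 'ACGT':
        out.extend(expand_n(pattern.replace('N', base, 1)))
    return out
print(expand_n('ANT'))
# ['AAT', 'ACT', 'AGT', 'ATT']
print(len(expand_n('NN')))
# 16
```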
#### File: EdwardsLab/ncbi/genbank_phages_via_ftp.py
```python
import StringIO
from ftplib import FTP
import gzip
from Bio import SeqIO
r = StringIO.StringIO()
def read_data(data):
r.write(data)
ftp = FTP('ftp.ncbi.nlm.nih.gov')
ftp.login()
ftp.cwd('genbank/')
ftp.retrbinary('RETR gbphg3.seq.gz', r.write)
r.seek(0)
for seq in SeqIO.parse(gzip.GzipFile(fileobj=r), 'genbank'):
    print(seq.id + "\t" + seq.description)  # "description" is assumed; the original line was cut off after "seq."
```
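The script above is written for Python 2 (StringIO). Under Python 3 the same download-and-parse pattern would look roughly like the sketch below, using io.BytesIO instead; it has not been run against the live FTP server here and assumes Biopython is installed:
```python
# Python 3 sketch of the same idea: pull a gzipped GenBank file over FTP into
# memory and stream records out of it.
import gzip
import io
from ftplib import FTP
from Bio import SeqIO
buf = io.BytesIO()
ftp = FTP('ftp.ncbi.nlm.nih.gov')
ftp.login()
ftp.cwd('genbank/')
ftp.retrbinary('RETR gbphg3.seq.gz', buf.write)
ftp.quit()
buf.seek(0)
text = io.TextIOWrapper(gzip.GzipFile(fileobj=buf))  # decompress and decode on the fly
for seq in SeqIO.parse(text, 'genbank'):
    print(seq.id, seq.description, sep="\t")
```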
#### File: EdwardsLab/ncbi/parse_sra.py
```python
import os
import sys
from bs4 import BeautifulSoup
import argparse
import roblib
__author__ = '<NAME>'
def parse_run(filename, verbose):
"""
Parse a run.xml file. We mainly retrieve the SRA ID(s) from this file
:param filename: the path and filename of the file to parse
:type filename: str
:param verbose: print more output
:type verbose: bool
:return: A set of SRA run IDs
:rtype: set
"""
sra_ids = set()
if verbose:
sys.stderr.write("Parsing run file: {}\n".format(filename))
soup = BeautifulSoup(open(filename, 'r'), 'xml')
for r in soup.RUN_SET.find_all("RUN"):
for p in r.find_all("PRIMARY_ID"):
sra_ids.add(p.text)
return sra_ids
def parse_sample(filename, verbose):
"""
Parse a sample.xml file. We mainly retrieve the metadata from this file
:param filename: the path and filename of the file to parse
:type filename: str
:param verbose: print more output
:type verbose: bool
:return: A dict of metadata
:rtype: dict
:raises: KeyError
"""
data = {}
if verbose:
sys.stderr.write("Parsing sample file: {}\n".format(filename))
soup = BeautifulSoup(open(filename, 'r'), 'xml')
for sample in soup.SAMPLE_SET.find_all("SAMPLE"):
# get the identifier
identifiers = sample.find("IDENTIFIERS")
if identifiers:
data['primary_id'] = identifiers.find("PRIMARY_ID").text
else:
raise KeyError("FATAL: no IDENTIFIERS tag found in {}".format(filename))
# get the title
title = sample.find("TITLE")
if title:
data['title'] = title.text
# get the sample information
si = sample.find('SAMPLE_NAME')
if si:
sin = si.find('SCIENTIFIC_NAME')
if sin:
data['scientific_name'] = sin.text
sin = si.find('TAXON_ID')
if sin:
data['taxon_id'] = sin.text
xrefs= []
for sls in sample.find_all("SAMPLE_LINKS"):
for sl in sls.find_all("SAMPLE_LINK"):
for xr in sl.find_all("XREF_LINK"):
xrefs.append(xr.find("DB").text + "|" + xr.find("ID").text)
data['xref'] = "; ".join(xrefs)
for sas in sample.find_all("SAMPLE_ATTRIBUTES"):
for sa in sas.find_all("SAMPLE_ATTRIBUTE"):
tag = sa.find("TAG")
val = sa.find("VALUE")
if tag and val:
data[tag.text] = val.text
elif tag:
sys.stderr.write("Found a tag {} for {} but no value\n".format(tag, filename))
elif val:
sys.stderr.write("Found a value {} for {} but no tag\n".format(val, filename))
return data
def parse_directory(directory, verbose):
"""
Parse a directory and print out the data from the XML files therein
:param directory: The path of the directory to parse
:type directory: str
:param verbose: print more output
:type verbose: bool
:return: dictionary of results
:rtype: dict
"""
if not os.path.exists(directory):
raise IOError("FATAL: {} does not exist".format(directory))
files = os.listdir(directory)
runxmlfile = None
samplefile = None
# find the run.xml file
for f in files:
if f.endswith('run.xml'):
if runxmlfile:
sys.stderr.write("Crap, have two run.xml files\n")
runxmlfile = f
if f.endswith("sample.xml"):
if samplefile:
sys.stderr.write("Crap, have two sample.xml files\n")
samplefile = f
if not samplefile or not runxmlfile:
return None
data = {}
if samplefile:
data = parse_sample(os.path.join(directory, samplefile), verbose)
else:
sys.stderr.write("No sample.xml file for {}\n".format(directory))
if runxmlfile:
data['sra_ids'] = "; ".join(parse_run(os.path.join(directory, runxmlfile), verbose))
else:
sys.stderr.write("No run.xml file for {}\n".format(directory))
return data
def parse_parent_directory(directory, verbose):
"""
Parse the upper level parent directory of directories
:param directory: directory name - the directory of directories
:type directory: str
:param verbose: print more output
:type verbose: bool
:return:
:rtype:
"""
if not os.path.exists(directory):
raise IOError("FATAL: {} does not exist".format(directory))
files = os.listdir(directory)
results = {}
tags = set()
for f in files:
if os.path.isdir(os.path.join(directory, f)):
res = parse_directory(os.path.join(directory, f), verbose)
if not res:
continue
results[f] = res
for k in res:
tags.add(k)
else:
sys.stderr.write("Skipped {} because it is not a directory\n".format(f))
alltags = ['primary_id', 'title', 'scientific_name', 'taxon_id', 'xref', 'sra_ids']
tags.difference_update(set(alltags))
[alltags.append(x) for x in sorted(tags)]
alltags = [roblib.ascii_clean(x) for x in alltags]
print("DIRECTORY\t"+"\t".join(alltags))
for readid in results:
sys.stdout.write(readid)
for t in alltags:
sys.stdout.write("\t" + str(roblib.ascii_clean(results[readid].get(t, ""))))
sys.stdout.write("\n")
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Parse a directory of directories of metadata. You can download the metadata tarball from ftp://ftp-trace.ncbi.nlm.nih.gov/sra/reports/Metadata/')
parser.add_argument('-d', help='directory of directories of XML files', required=True)
parser.add_argument('-v', help='verbose output', action='store_true')
args = parser.parse_args()
parse_parent_directory(args.d, args.v)
```
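parse_run() and parse_sample() are plain BeautifulSoup lookups over the SRA XML. A self-contained illustration on a tiny made-up run.xml fragment (BeautifulSoup's 'xml' parser requires lxml to be installed):
```python
from bs4 import BeautifulSoup
# A stripped-down, made-up run.xml fragment for illustration only.
xml = """
<RUN_SET>
  <RUN><IDENTIFIERS><PRIMARY_ID>SRR0000001</PRIMARY_ID></IDENTIFIERS></RUN>
  <RUN><IDENTIFIERS><PRIMARY_ID>SRR0000002</PRIMARY_ID></IDENTIFIERS></RUN>
</RUN_SET>
"""
soup = BeautifulSoup(xml, 'xml')
sra_ids = set()
for r in soup.RUN_SET.find_all("RUN"):
    for p in r.find_all("PRIMARY_ID"):
        sra_ids.add(p.text)
print(sorted(sra_ids))  # ['SRR0000001', 'SRR0000002']
```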
#### File: EdwardsLab/ncbi/tax2spreadsheet.py
```python
import taxon
taxa=taxon.read_nodes()
names,blastname = taxon.read_names()
divs = taxon.read_divisions()
want = ['species', 'genus', 'family', 'order', 'class', 'phylum', 'kingdom']
def printtaxa(i):
bn=names[i].name
if i in blastname:
bn=blastname[i].name
level={}
node = i
while taxa[node].parent != '1' and node != '1':
if taxa[node].rank in want:
level[taxa[node].rank]=names[node].name
node=taxa[node].parent
print("{}\t{}".format(i, bn), end="")
for l in want:
if l in level:
print("\t{}".format(level[l]), end="")
else:
print("\t-", end="")
print("")
# levels: species genus family order class phylum kingdom
print ("id\tname", end="")
for l in want:
print("\t{}".format(l), end="")
print("")
for i in taxa:
if taxa[i].rank == "species":
printtaxa(i)
```
#### File: EdwardsLab/percent_pairwise_identity/pairwise_percent_ids.py
```python
import os
import sys
import argparse
def read_fasta(filename):
seq = {}
seqid = None
prot = ''
with open(filename, 'r') as f:
for l in f:
if l.startswith('>'):
if seqid:
seq[seqid] = prot
prot = ''
seqid = l.strip().replace('>', '')
else:
prot += l.strip()
seq[seqid] = prot
return seq
def pairwise(seqs):
    allseqs = sorted(seqs.keys())
for i in range(len(allseqs)):
for j in range(len(allseqs)):
maxp = max(len(seqs[allseqs[i]]), len(seqs[allseqs[j]]))
same = 0
diff = 0
for p in range(maxp):
if seqs[allseqs[i]][p] == '-' and seqs[allseqs[j]][p] == '-':
                    continue
if seqs[allseqs[i]][p] == seqs[allseqs[j]][p]:
same += 1
else:
diff += 1
print("{}\t{}\t{}".format(allseqs[i], allseqs[j], (1.0 * same/(same+diff))*100))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Calculate the percent pairwise ID between all pairs of sequences")
parser.add_argument('-f', help='protein fasta file', required=True)
args = parser.parse_args()
sq = read_fasta(args.f)
pairwise(sq)
```
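The per-pair arithmetic in pairwise() is just matches over compared columns. The same calculation on two short, already-aligned toy sequences:
```python
# Two toy pre-aligned sequences (same length, '-' marks a gap).
a = "MK-LV"
b = "MKALV"
same = diff = 0
for x, y in zip(a, b):
    if x == '-' and y == '-':
        continue          # skip columns that are gaps in both sequences
    if x == y:
        same += 1
    else:
        diff += 1
print(100.0 * same / (same + diff))  # 80.0  (4 of 5 compared columns match)
```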
#### File: EdwardsLab/roblib/rob_error.py
```python
class Error(Exception):
"""
Base class for exceptions in this module.
"""
pass
class SequencePairError(Error):
"""
Exception raised for sequences not being paired properly.
:param message: explanation of the error
"""
def __init__(self, message):
self.message = message
class FastqFormatError(Error):
"""
Exception raised for sequences not being paired properly.
:param message: explanation of the error
"""
def __init__(self, message):
self.message = message
```
#### File: EdwardsLab/roblib/translate.py
```python
import sys
"""
Translate and back translate DNA to protein
"""
aa_1_to_3_letter ={"A" : "Ala", "C" : "Cys", "D" : "Asp", "E" : "Glu", "F" : "Phe", "G" : "Gly", "H" : "His",
"I" : "Ile", "K" : "Lys", "L" : "Leu", "M" : "Met", "N" : "Asn", "P" : "Pro", "Q" : "Gln",
"R" : "Arg", "S" : "Ser", "T" : "Thr", "V" : "Val", "W" : "Trp", "Y" : "Tyr", "*" : "Stop"}
aa_1_letter_order = "A C D E F G H I K L M N P Q R S T V W Y".split() # Alpha by 1 letter
aa_3_letter_order = "A R N D C Q E G H I L K M F P S T W Y V".split() # PAM matrix order
aa_n_codon_order = "L R S A G P T V I C D E F H K N Q Y M W".split()
genetic_code = {
# DNA version
"TTT": 'F', "TCT": 'S', "TAT": 'Y', "TGT": 'C',
"TTC": 'F', "TCC": 'S', "TAC": 'Y', "TGC": 'C',
"TTA": 'L', "TCA": 'S', "TAA": '*', "TGA": '*',
"TTG": 'L', "TCG": 'S', "TAG": '*', "TGG": 'W',
"CTT": 'L', "CCT": 'P', "CAT": 'H', "CGT": 'R',
"CTC": 'L', "CCC": 'P', "CAC": 'H', "CGC": 'R',
"CTA": 'L', "CCA": 'P', "CAA": 'Q', "CGA": 'R',
"CTG": 'L', "CCG": 'P', "CAG": 'Q', "CGG": 'R',
"ATT": 'I', "ACT": 'T', "AAT": 'N', "AGT": 'S',
"ATC": 'I', "ACC": 'T', "AAC": 'N', "AGC": 'S',
"ATA": 'I', "ACA": 'T', "AAA": 'K', "AGA": 'R',
"ATG": 'M', "ACG": 'T', "AAG": 'K', "AGG": 'R',
"GTT": 'V', "GCT": 'A', "GAT": 'D', "GGT": 'G',
"GTC": 'V', "GCC": 'A', "GAC": 'D', "GGC": 'G',
"GTA": 'V', "GCA": 'A', "GAA": 'E', "GGA": 'G',
"GTG": 'V', "GCG": 'A', "GAG": 'E', "GGG": 'G',
# The following ambiguous encodings are not necessary, but
# speed up the processing of some ambiguous triplets:
"TTY": 'F', "TCY": 'S', "TAY": 'Y', "TGY": 'C',
"TTR": 'L', "TCR": 'S', "TAR": '*',
"TCN": 'S',
"CTY": 'L', "CCY": 'P', "CAY": 'H', "CGY": 'R',
"CTR": 'L', "CCR": 'P', "CAR": 'Q', "CGR": 'R',
"CTN": 'L', "CCN": 'P', "CGN": 'R',
"ATY": 'I', "ACY": 'T', "AAY": 'N', "AGY": 'S',
"ACR": 'T', "AAR": 'K', "AGR": 'R',
"ACN": 'T',
"GTY": 'V', "GCY": 'A', "GAY": 'D', "GGY": 'G',
"GTR": 'V', "GCR": 'A', "GAR": 'E', "GGR": 'G',
"GTN": 'V', "GCN": 'A', "GGN": 'G'
}
amino_acid_codons_DNA = {
"L": "TTA TTG CTA CTG CTT CTC".split(),
"R": "AGA AGG CGA CGG CGT CGC".split(),
"S": "AGT AGC TCA TCG TCT TCC".split(),
"A": "GCA GCG GCT GCC".split(),
"G": "GGA GGG GGT GGC".split(),
"P": "CCA CCG CCT CCC".split(),
"T": "ACA ACG ACT ACC".split(),
"V": "GTA GTG GTT GTC".split(),
"I": "ATA ATT ATC".split(),
"C": "TGT TGC".split(),
"D": "GAT GAC".split(),
"E": "GAA GAG".split(),
"F": "TTT TTC".split(),
"H": "CAT CAC".split(),
"K": "AAA AAG".split(),
"N": "AAT AAC".split(),
"Q": "CAA CAG".split(),
"Y": "TAT TAC".split(),
"M": "ATG".split(),
"U": "TGA".split(),
"W": "TGG".split(),
"l": "tta ttg cta ctg ctt ctc".split(),
"r": "aga agg cga cgg cgt cgc".split(),
"s": "agt agc tca tcg tct tcc".split(),
"a": "gca gcg gct gcc".split(),
"g": "gga ggg ggt ggc".split(),
"p": "cca ccg cct ccc".split(),
"t": "aca acg act acc".split(),
"v": "gta gtg gtt gtc".split(),
"i": "ata att atc".split(),
"c": "tgt tgc".split(),
"d": "gat gac".split(),
"e": "gaa gag".split(),
"f": "ttt ttc".split(),
"h": "cat cac".split(),
"k": "aaa aag".split(),
"n": "aat aac".split(),
"q": "caa cag".split(),
"y": "tat tac".split(),
"m": "atg".split(),
"u": "tga".split(),
"w": "tgg".split(),
'*': "TAA TAG TGA".split()
}
def translate_dna(sequence, verbose=False):
"""
Translate a DNA sequence and return a protein string
:param sequence: The DNA sequence to translate
:param verbose: More output
:return: a protein string
"""
posn=0
trans=""
    while posn <= len(sequence) - 3:
        codon = sequence[posn:posn+3]
        posn += 3
        if codon not in genetic_code:
            sys.stderr.write("Unknown codon: {}\n".format(codon))
            trans += "X"
            continue
        trans += genetic_code[codon]
return trans
def print_codon_table(ambiguous=False):
"""
Print the codon usage table
:param ambiguous: Print codons with ambiguous bases
:return:
"""
c = sorted(genetic_code.keys())
for codon in c:
test = codon.replace('A', '')
test = test.replace('G', '')
test = test.replace('C', '')
test = test.replace('T', '')
if test and not ambiguous:
continue
print("{}\t{}".format(codon, aa_1_to_3_letter[genetic_code[codon]]))
```
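translate_dna() steps through the sequence three bases at a time and looks each codon up in the table. A compact worked example using only a few entries from the table above:
```python
# A handful of codons from the table above, enough to translate the examples.
genetic_code = {"ATG": 'M', "AAA": 'K', "TGG": 'W', "TAA": '*'}
def translate(seq):
    protein = ""
    for posn in range(0, len(seq) - 2, 3):          # step through whole codons only
        codon = seq[posn:posn + 3]
        protein += genetic_code.get(codon, "X")     # X for anything not in the table
    return protein
print(translate("ATGAAATGGTAA"))  # MKW*
print(translate("ATGNNAAAA"))     # MXK  (ambiguous middle codon becomes X)
```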
#### File: EdwardsLab/thea/orf_evidence.py
```python
import os
import sys
import argparse
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
def read_ids(idf):
"""
Read the ID file
:param idf: an id file that has HOW\tGene
:return: a hash of gene->how called
"""
ids={}
with open(idf, 'r') as f:
for l in f:
p = l.strip().split("\t")
if p[1] in ids:
sys.stderr.write('Error: found {} more than once in {}\n'.format(p[1], idf))
ids[p[1]]=p[0]
return ids
def normalized_bitscore(m8file):
"""
Parse a rapsearch m8 file and calculate the normalized bit score for each entry
:param m8file:
:return: a hash of subject id->normalized bit score
"""
nbs = {}
with open(m8file, 'r') as f:
for l in f:
if l.startswith("#"):
continue
# Fields: Query Subject identity aln-len mismatch gap-openings q.start q.end s.start s.end log(e-value) bit-score
p = l.strip().split("\t")
if len(p) != 12:
                sys.stderr.write("Skipped apparently broken line in {}: {}".format(m8file, l))
continue
s = p[1]
l = int(p[3])
b = float(p[11])
nbs[p[1]] = b/l
# nbs[p[1]] = b
return nbs
def read_directory(dir):
"""
Read a directory of files and put all the data in memory (eek!)
:param dir: Directory of files to read
:return:
"""
data = {}
for d in os.listdir(dir):
nbs = normalized_bitscore(os.path.join(dir, d))
data.update(nbs)
return data
def separate(nbs, ids):
"""
Separate the normalized bitscore into all predictions, thea predictions, no predictions
:param nbs: hash of normalized bit score
:param ids: hash of IDs
:return:
"""
allp = []
thea = []
nonp = []
for i in nbs:
if i not in ids:
continue
if ids[i] == "ANY":
allp.append(nbs[i])
elif ids[i] == "THEA":
thea.append(nbs[i])
elif ids[i] == "NONE":
nonp.append(nbs[i])
else:
sys.stderr.write("Not really sure what {} is for {}\n".format(ids[i], i))
return allp, thea, nonp
def plot_nbs(allp, thea, nop, figf):
"""
Plot the data for all predictions, theapredictions and nopredictions
:param allp: all predictions
:param thea: thea predictions
:param nop: orf predictions (not called)
:param figf: the output file name for the figure
:return:
"""
alldata = [allp, thea, nop]
labels = ["All", "THEA", "None"]
sys.stderr.write("Lengths: All: {} THEA: {} NONE: {}\n".format(len(allp), len(thea), len(nop)))
fig = plt.figure()
# add space at the bottom for the labels
ax = fig.add_subplot(111)
#ax.boxplot(alldata)
ax.violinplot(alldata, [1,2,3], showmeans=True)
ax.set_xlabel("Predictions")
ax.set_ylabel("Normalized bit score")
ax.set_xticks([1,2,3])
ax.set_xticklabels(labels)
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
fig.set_facecolor('white')
# plt.show()
fig.savefig(figf)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Calculate the normalized bit score")
parser.add_argument('-m', help='m8 output file from rapsearch')
parser.add_argument('-d', help='directory of m8 files')
parser.add_argument('-i', help='ids file', required=True)
parser.add_argument('-f', help='figure output file name', default="fig.png")
parser.add_argument('-v', help='verbose output')
args = parser.parse_args()
ids = read_ids(args.i)
nbs = {}
if args.m:
nbs = normalized_bitscore(args.m)
elif args.d:
nbs = read_directory(args.d)
else:
sys.stderr.write("ERROR: either -d or -m must be supplied. Use -h for help")
sys.exit(-1)
a, t, n = separate(nbs, ids)
plot_nbs(a,t,n,args.f)
```
#### File: EdwardsLab/trees/dist_matrix.py
```python
import sys
import itertools
from ete3 import Tree
try:
t = Tree()
t.populate(int(sys.argv[1]), random_branches=True)
except ValueError:
print >>sys.stderr, 'loading', sys.argv[1]
t = Tree(sys.argv[1])
lineages = {}
for tip in t:
lin = []
n = tip
while n.up:
lin.append(n)
n = n.up
lineages[tip.name] = set(lin)
matrix = {}
def get_dist(a, b):
if a == b:
return 0.0
try:
return matrix[(a, b)]
except KeyError:
return matrix[(b, a)]
for tip_a, tip_b in itertools.permutations(lineages.keys(), 2):
d = sum([n.dist for n in lineages[tip_a] ^ lineages[tip_b]])
matrix[(tip_a, tip_b)] = d
#if len(matrix) % 10000 == 0:
# print >>sys.stderr, len(matrix)
leaves = t.get_leaf_names()
print '\t'.join(['#names'] + leaves)
for tip_a in leaves:
row = [tip_a]
for tip_b in leaves:
row.append(get_dist(tip_a, tip_b))
print '\t'.join(map(str, row))
# test
import random
s = random.sample(matrix.keys(), 1000)
for a,b in s:
d0 = get_dist(a, b)
d1 = t.get_distance(a, b)
if round(d0, 8) != round(d1, 8):
print >>sys.stderr, a, b, d0, d1
```
#### File: EdwardsLab/trees/tree_to_pairwisedistance.py
```python
import os
import sys
import argparse
from itertools import combinations
from ete3 import Tree
def make_dists(treefile, printone, verbose):
"""
Create pairwise distances from a tree file
:param treefile: the tree file to parse
:param printone: if true we only print one copy of the pair (ie. A -> B). If false we print A->B and B->A
:param verbose: make some additional output
:return:
"""
tree = Tree(treefile)
leaves = tree.get_leaves()
paths = {x:set() for x in leaves}
# get the paths going up the tree
# we get all the nodes up to the last one and store them in a set
if verbose:
sys.stderr.write("Precalculating distances\n")
for n in leaves:
if n.is_root():
continue
movingnode = n
while not movingnode.is_root():
paths[n].add(movingnode)
movingnode = movingnode.up
# now we want to get all pairs of nodes using itertools combinations. We need AB AC etc but don't need BA CA
leaf_distances = {x.name:{} for x in leaves}
if verbose:
sys.stderr.write("Iterating over the leaves\n")
for (leaf1, leaf2) in combinations(leaves, 2):
# figure out the unique nodes in the path
uniquenodes = paths[leaf1] ^ paths[leaf2]
distance = sum(x.dist for x in uniquenodes)
if printone:
if leaf1.name < leaf2.name:
print("{}\t{}\t{}".format(leaf1.name, leaf2.name, distance))
else:
print("{}\t{}\t{}".format(leaf2.name, leaf1.name, distance))
else:
print("{}\t{}\t{}".format(leaf1.name, leaf2.name, distance))
print("{}\t{}\t{}".format(leaf2.name, leaf1.name, distance))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Convert a tree into a distance matrix')
parser.add_argument('-t', help='Tree file', required=True)
parser.add_argument('-p', help='Print one direction (A->B). Default is to print A->B and B->A', action='store_true')
parser.add_argument('-v', help='Verbose output. (Mostly progress)', action='store_true')
args = parser.parse_args()
make_dists(args.t, args.p, args.v)
``` |
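The core trick in make_dists() (and in dist_matrix.py above) is that the distance between two leaves equals the sum of branch lengths over the symmetric difference of their root-ward paths. The same idea on a tiny hand-written tree, reusing ete3 as the script does:
```python
from ete3 import Tree
# A small hand-written tree: A and B share an internal node, C hangs off the root.
t = Tree("((A:1,B:2):0.5,C:3);")
# Root-ward path of every leaf, excluding the root itself (as in make_dists above).
paths = {}
for leaf in t.get_leaves():
    node, path = leaf, set()
    while not node.is_root():
        path.add(node)
        node = node.up
    paths[leaf.name] = path
def dist(x, y):
    # Branches unique to one path or the other are exactly the branches between the leaves.
    return sum(n.dist for n in paths[x] ^ paths[y])
print(dist("A", "B"))            # 3.0
print(dist("A", "C"))            # 4.5
print(t.get_distance("A", "C"))  # 4.5 (ete3's own answer agrees)
```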
{
"source": "JohnEdChristensen/NiggliOptimize",
"score": 3
} |
#### File: NiggliOptimize/spHNF_manip/matrix_tools.py
```python
import numpy
import itertools
import os
def calculate_transform(transform, basis):
# basis = numpy.transpose(basis)
transformed_matrix = numpy.dot(transform, basis)
# return numpy.transpose(transformed_matrix).tolist()
return transformed_matrix
def create_nxn_matrices(n):
"""
Returns a list of all possible nxn matrices with coefficients of
-1, 0, or 1
list contains 3^n elements
"""
matrix_form = []
matrices_list = itertools.product({-1, 0, 1}, repeat=(n * n))
for matrix in matrices_list:
matrix_form.append(numpy.reshape(numpy.array(matrix), (n, n)))
print "Created " + str(len(matrix_form)) + " Matrices"
return matrix_form
def create_matrix(s1, s2, s3):
"""
    takes in 3 strings of 3 numbers separated by whitespace
returns a 3x3 matrix
"""
row1 = s1.split()
row1 = [float(i) for i in row1]
row2 = s2.split()
row2 = [float(i) for i in row2]
row3 = s3.split()
row3 = [float(i) for i in row3]
matrix = [row1, row2, row3]
return matrix
def det_is_n(expected_determinant, m):
"""
returns a 1 if nxn matrix m has a determinant equal to expectedDeterminant
returns a zero otherwise
"""
det = numpy.linalg.det(m)
return det == expected_determinant
def save_matrix(matrix_in, write_location):
numpy.savetxt(write_location, numpy.vstack(matrix_in), fmt='%-8f')
print "Saved " + str(len(matrix_in)) + " Matrices to " + write_location
def edit_struct_enum(struct_path, matrix):
with open(struct_path, 'r') as struct_enum:
struct_data = struct_enum.readlines()
struct_data[2] = str(matrix[0][0]) + "\t" + str(matrix[0][1]) + "\t" + str(matrix[0][2]) + "\n"
struct_data[3] = str(matrix[1][0]) + "\t" + str(matrix[1][1]) + "\t" + str(matrix[1][2]) + "\n"
struct_data[4] = str(matrix[2][0]) + "\t" + str(matrix[2][1]) + "\t" + str(matrix[2][2]) + "\n"
with open(struct_path, 'w') as struct_enum:
struct_enum.writelines(struct_data)
def read_pg_out(pgx_path):
pgx_path = open(pgx_path, "r")
size = float(pgx_path.readline().split()[2])
if size == 2 or size == 4 or size == 48 or size == 12:
size = 2
elif size == 8 or size == 16:
size = 3
else:
print "ERROR PG SIZE UNKNOWN: " + str(size)
matrix_list = []
for i in range(0, int(size)):
pgx_path.readline()
row1 = pgx_path.readline()
row2 = pgx_path.readline()
row3 = pgx_path.readline()
matrix_list.append(create_matrix(row1, row2, row3))
int_matrix_list = []
for e in matrix_list:
int_matrix_list.append(matrix_float_to_int(e))
return int_matrix_list
def matrix_float_to_int(matrix):
for i in range(0, 3):
for j in range(0, 3):
matrix[i][j] = int(round(matrix[i][j], 0))
return matrix
def generate_pg(basis): # pragma: no cover
edit_struct_enum("struct_enum.in", basis)
os.system("pg.x > pgx_out.txt")
return read_pg_out("pgx_out.txt")
def check_similarities(m_list1, m_list2):
similarities = 0
n = 0
for i in range(0, len(m_list1)):
for j in range(n, len(m_list2)):
n += 1
if numpy.array_equal(m_list1[i], m_list2[j]):
similarities += 1
return similarities
def find_equivalent_basis(basis, degenerate_label): # pragma: no cover
"""
takes in basis and the path to the degenerate point groups and transformations,
finds which index of point group is equal to the PG of the basis and then returns that index number
of the transform list
"""
pg = generate_pg(basis)
transform_data = numpy.loadtxt("Data/NiggliTransforms/" + degenerate_label + "_Transformed.txt")
transform_data = numpy.reshape(transform_data, (6960, 3, 3))
pg_data = numpy.loadtxt("Data/NiggliPGs/" + degenerate_label + "_PGs.txt")
size = len(pg_data) / 6960 / 3
pg_data = numpy.reshape(pg_data, (6960, size, 3, 3))
for i in range(0, len(pg_data)):
if numpy.array_equal(pg, pg_data[i]):
return transform_data[i]
return 0
def load_pg_list(label):
pg_data = numpy.loadtxt("Data/NiggliPGs/" + label + "_PGs.txt")
size = len(pg_data) / 6960 / 3
pg_data = numpy.reshape(pg_data, (6960, size, 3, 3))
return pg_data
def load_transform_list(label):
transform_data = numpy.loadtxt("Data/NiggliTransforms/" + label + "_Transformed.txt")
#size = len(transform_data) / 6960 / 3
transform_data = numpy.reshape(transform_data, (6960, 3, 3))
return transform_data
def is_one(pg):
is_one = 1
for i in pg:
for k in i:
for j in k:
for l in k:
if l != 1 and l != -1 and l != 0:
is_one = 0
return is_one
def get_URT(pg):
""" takes in 2 or 3 3x3 matricies
returns one if all have 0's in the upper rigth of the matrix
returns a 0 otherwise
"""
size = len(pg)
if size == 2:
condition12 = (pg[0][0][1] == 0) and (pg[1][0][1] == 0)
condition13 = (pg[0][0][2] == 0) and (pg[1][0][2] == 0)
condition23 = (pg[0][1][2] == 0) and (pg[1][1][2] == 0)
if condition12 and condition13 and condition23:
return 1
if size == 3:
condition12 = (pg[0][0][1] == 0) and (pg[1][0][1] == 0) and (pg[2][0][1] == 0)
condition13 = (pg[0][0][2] == 0) and (pg[1][0][2] == 0) and (pg[2][0][2] == 0)
condition23 = (pg[0][1][2] == 0) and (pg[1][1][2] == 0) and (pg[2][1][2] == 0)
if condition12 and condition13 and condition23:
return 1
return 0
def get_simple_pgs(pg):
    """ takes in 2 or 3 3x3 matrices
returns one if all have 0's in positions x12 and x13
returns a 0 otherwise
"""
size = len(pg)
if size == 2:
condition12 = (pg[0][0][1] == 0) and (pg[1][0][1] == 0)
condition13 = (pg[0][0][2] == 0) and (pg[1][0][2] == 0)
if condition12 and condition13:
return 1
if size == 3:
condition12 = (pg[0][0][1] == 0) and (pg[1][0][1] == 0) and (pg[2][0][1] == 0)
condition13 = (pg[0][0][2] == 0) and (pg[1][0][2] == 0) and (pg[2][0][2] == 0)
if condition12 and condition13:
return 1
return 0
```
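create_nxn_matrices() plus det_is_n() amount to enumerating every 3x3 matrix over {-1, 0, 1} and keeping those with a chosen determinant. The same filter in a few lines (3^9 = 19683 candidates, so it runs quickly); the target determinant of 1 is arbitrary here:
```python
import itertools
import numpy
# Enumerate all 3x3 matrices with entries in {-1, 0, 1} and keep those with det == 1.
target = 1
kept = []
for entries in itertools.product((-1, 0, 1), repeat=9):
    m = numpy.reshape(numpy.array(entries), (3, 3))
    if round(numpy.linalg.det(m)) == target:   # round() guards against floating-point noise
        kept.append(m)
print(len(kept))   # how many candidates have the target determinant
print(kept[0])     # one example matrix
```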
#### File: NiggliOptimize/tests/test_sphnf.py
```python
import pytest
import numpy as np
"""
def test_mono_39():
from pg_comp.base_mono import *
with open("tests/test_output/base_mono_1_200_n.out","r") as f:
n_500 = int(f.readline().strip())
srHNFs = []
for n in range(1,201):
temp = base_mono_37_39(n)
for t in temp:
if len(t) >0:
srHNFs.append(t)
assert len(srHNFs) == n_500
brute = []
with open("tests/test_output/base_mono_39_1_200_srHNFs.out","r") as f:
HNF = []
for line in f:
if len(line.strip().split()) == 0:
brute.append(HNF)
HNF = []
else:
HNF.append([int(i) for i in line.strip().split()])
for t in srHNFs:
assert t in brute
def test_mono_29():
from pg_comp.base_mono import *
with open("tests/test_output/base_mono_1_200_n.out","r") as f:
n_500 = int(f.readline().strip())
srHNFs = []
for n in range(1,201):
temp = base_mono_29_30(n)
for t in temp:
if len(t) >0:
srHNFs.append(t)
assert len(srHNFs) == n_500
brute = []
with open("tests/test_output/base_mono_29_1_200_srHNFs.out","r") as f:
HNF = []
for line in f:
if len(line.strip().split()) == 0:
brute.append(HNF)
HNF = []
else:
HNF.append([int(i) for i in line.strip().split()])
for t in srHNFs:
assert t in brute
def test_mono_28():
from pg_comp.base_mono import *
with open("tests/test_output/base_mono_1_200_n.out","r") as f:
n_500 = int(f.readline().strip())
srHNFs = []
for n in range(1,201):
temp = base_mono_28(n)
for t in temp:
if len(t) >0:
srHNFs.append(t)
assert len(srHNFs) == n_500
brute = []
with open("tests/test_output/base_mono_28_1_200_srHNFs.out","r") as f:
HNF = []
for line in f:
if len(line.strip().split()) == 0:
brute.append(HNF)
HNF = []
else:
HNF.append([int(i) for i in line.strip().split()])
for t in srHNFs:
assert t in brute
"""
``` |
{
"source": "johnedstone/how-to-use-pyvmomi",
"score": 2
} |
#### File: pyvmomi_restapi/vm_helpers/utils.py
```python
import logging
from django.conf import settings
logger = logging.getLogger(settings.PROJECT_LOGGING)
STATE_CHOICES = [choice[0] for choice in settings.STATE_CHOICES]
CHOICES = " or ".join(STATE_CHOICES)
def check_state(request):
detail = {}
state_data = request.data.get('state', None)
iso_path_data = request.data.get('iso_path', None)
vmname_data = request.data.get('vmname', None)
vsphere_service_data = request.data.get('vsphere_service', None)
logger.info('state_data: {}'.format(state_data))
if not state_data:
detail['state'] = ["""This field is required: {} """.format(CHOICES)]
if not vmname_data:
detail['vmname'] = ["""This field is required"""]
if not vsphere_service_data:
detail['vsphere_service'] = ["""This field is required"""]
if state_data and state_data not in STATE_CHOICES:
detail['state'] = ["""{} is not a valid choice: {} """.format(state_data, CHOICES)]
if state_data and state_data == 'mount' and not iso_path_data:
detail['iso_path'] = ["""This field is required when state = mount"""]
if state_data and state_data == 'umount' and iso_path_data:
detail['iso_path'] = ["""This field is not required when state = umount"""]
return detail
# vim: ai et ts=4 sw=4 sts=4 nu ru
``` |
{
"source": "Johne-DuChene/data_science_learning_app",
"score": 3
} |
#### File: data_science_learning_app/data_science_app/app.py
```python
from flask import Flask
# initialize the app
app = Flask(__name__)
# execute iris function at /iris route
@app.route("/iris")
def iris():
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
X, y = load_iris(return_X_y=True)
clf = LogisticRegression(
random_state = 42,
solver="lbfgs",
multi_class="multinomial"
).fit(X, y)
return str(clf.predict(X[:2, :]))
``` |
{
"source": "Johne-DuChene/spotify_notebook",
"score": 3
} |
#### File: spotify_notebook/main/main.py
```python
import spotipy
from spotipy.oauth2 import SpotifyOAuth
# to get environment variables.
import os
from dotenv import load_dotenv
# We'll be using the defaultdict datatype from the collections
# module as it never raises a key error.
from collections import defaultdict
import pandas as pd
# read our .env file (if present)
load_dotenv()
# I pull my credentials from my environment.
SPOTIPY_CLIENT_ID = os.getenv("SPOTIPY_CLIENT_ID")
SPOTIPY_CLIENT_SECRET = os.getenv("CLIENTSECRET")
SPOTIPY_REDIRECT_URI = os.getenv("SPOTIPY_REDIRECT_URI")
# this is essentially our cursor object. We specify the auth manager and pass in our
# credentials, which should be saved as environment variables.
sp = spotipy.Spotify(auth_manager = SpotifyOAuth(client_id = SPOTIPY_CLIENT_ID,
client_secret = SPOTIPY_CLIENT_SECRET, redirect_uri = SPOTIPY_REDIRECT_URI,
scope = "user-read-recently-played"))
# this function is to be used in case a song is absent from the df.
def find_song(name, year):
    '''Look up a song's audio features by name and year and write them to song_data.csv.'''
song_data = defaultdict()
# here we execute a query with the SpotiPy search function. The q parameter accepts a string
# and can be one of the following values: album, artist, track, year, upc,
# tag:hipster, tag:new, isrc, and genre.
results = sp.search(q = 'track: {} year: {}'.format(name, year), limit = 1)
# if null, return nothing.
if results['tracks']["items"] == []:
return None
# results is a dictionary which can be indexed
results = results['tracks']["items"][0]
# id is pulled
id = results["id"]
# now we use the id to query the api for audio features
audio_features = sp.audio_features(id)[0]
song_data['name'] = [name]
song_data['year'] = [year]
song_data['explicit'] = [int(results['explicit'])]
song_data['duration_ms'] = [results['duration_ms']]
song_data['popularity'] = [results['popularity']]
for key, value in audio_features.items():
song_data[key] = value
return pd.DataFrame(song_data).to_csv("song_data.csv")
results = sp.current_user_recently_played()
for idx, item in enumerate(results['items']):
track = item['track']
print(idx, track['artists'][0]['name'], " – ", track['name'])
``` |
{
"source": "johnegarza/AGFusion",
"score": 3
} |
#### File: AGFusion/agfusion/cli.py
```python
from os.path import split, exists, join
from os import mkdir, remove
import argparse
import gzip
import shutil
from future.standard_library import install_aliases
install_aliases()
from urllib.request import urlopen
from urllib.error import HTTPError
import pyensembl
import agfusion
from agfusion import exceptions
from agfusion.utils import AGFUSION_DB_URL, AVAILABLE_ENSEMBL_SPECIES, GENOME_SHORTCUTS
def list_available_databases():
"""
List the available databases that can be downloaded.
"""
print('\n')
print(
'{:<10}\t\t{:<5}\t\t{:<20}'
.format('Species', 'Release', 'Shortcut(s)')
)
for species, releases in AVAILABLE_ENSEMBL_SPECIES.items():
for release in releases:
shortcut = []
for genome, data in GENOME_SHORTCUTS.items():
if species in data and release in data:
shortcut.append(genome)
print(
'{:<10}\t\t{:<5}\t\t{:<20}'
.format(species, release, ','.join(shortcut))
)
exit()
def downloaddb(args):
"""
Download the AGFusion database from github
"""
if args.genome is not None:
if args.genome not in GENOME_SHORTCUTS:
print('Invalid genome shortcut! Use -a to see available shortcuts.')
exit()
else:
species = GENOME_SHORTCUTS[args.genome][0]
release = str(GENOME_SHORTCUTS[args.genome][1])
else:
if args.species is None or args.release is None:
print("Specify --species and --release or --genome!")
exit()
species = args.species
release = str(args.release)
file_path = join(
args.dir,
'agfusion.' + species + '.' + release + '.db.gz')
print("Downloading the AGFusion database to {}...".format(file_path))
db_url = AGFUSION_DB_URL + species + '.' + release + '.db.gz'
try:
response = urlopen(db_url)
except HTTPError:
        print("Was unable to download the file {}!".format(db_url))
exit()
fout = open(file_path, 'wb')
fout.write(response.read())
fout.close()
with gzip.open(file_path, 'rb') as f_in, open(file_path.replace('.gz', ''), 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
remove(file_path)
def annotate(gene5prime, junction5prime, gene3prime, junction3prime,
agfusion_db, pyensembl_data, args, outdir=None, colors=None,
rename=None, scale=None, batch_out_dir=None):
"""
Annotate the gene fusion
"""
fusion = agfusion.Fusion(
gene5prime=gene5prime,
gene5primejunction=junction5prime,
gene3prime=gene3prime,
gene3primejunction=junction3prime,
db=agfusion_db,
pyensembl_data=pyensembl_data,
protein_databases=args.protein_databases,
noncanonical=args.noncanonical
)
if batch_out_dir is not None:
outdir = join(
batch_out_dir,
fusion.gene5prime.gene.name + '-' +
str(junction5prime) + '_' +
fusion.gene3prime.gene.name + '-' +
str(junction3prime)
)
fusion.save_transcript_cdna(
out_dir=outdir,
middlestar=args.middlestar
)
fusion.save_transcript_cds(
out_dir=outdir,
middlestar=args.middlestar
)
fusion.save_proteins(
out_dir=outdir,
middlestar=args.middlestar
)
fusion.save_images(
out_dir=outdir,
file_type=args.type,
scale=scale,
colors=colors,
rename=rename,
fontsize=args.fontsize,
height=args.height,
width=args.width,
dpi=args.dpi,
no_domain_labels=args.no_domain_labels,
plot_WT=args.WT,
exclude=args.exclude_domain
)
fusion.save_tables(out_dir=outdir)
def batch_mode(args, agfusion_db, pyensembl_data, rename, colors):
"""
Batch mode for annotation fusions from output from a fusion-finding
algorithm
"""
if not exists(args.out):
mkdir(args.out)
else:
agfusion_db.logger.warn(
'Output directory {} already exists! Overwriting...'
.format(args.out)
)
if args.algorithm in agfusion.parsers:
for fusion in agfusion.parsers[args.algorithm](args.file,
agfusion_db.logger):
try:
annotate(
gene5prime=fusion['gene5prime'],
junction5prime=fusion['gene5prime_junction'],
gene3prime=fusion['gene3prime'],
junction3prime=fusion['gene3prime_junction'],
agfusion_db=agfusion_db,
pyensembl_data=pyensembl_data,
args=args,
colors=colors,
rename=rename,
scale=None,
batch_out_dir=args.out
)
except exceptions.GeneIDException as e:
agfusion_db.logger.error(e)
except exceptions.JunctionException as e:
agfusion_db.logger.error(e)
except exceptions.TooManyGenesException as e:
agfusion_db.logger.error(e)
else:
agfusion_db.logger.error(
('\'{}\' is not an available option for -a! Choose one of the ' +
'following: {}.').format(
args.algorithm,
','.join(agfusion.parsers.keys())
)
)
exit()
def builddb(args):
"""
Build a AGFusion database
"""
agfusion_db = agfusion.AGFusionDBBManager(
args.dir,
args.species,
args.release,
args.pfam,
args.server
)
agfusion_db.logger.info('Fetching alternative gene names...')
agfusion_db.fetch_gene_names()
agfusion_db.logger.info('Fetching transcript tables...')
agfusion_db.fetch_transcript_table()
agfusion_db.fetch_refseq_table()
agfusion_db.logger.info('Fetching protein annotation data...')
agfusion_db.fetch_protein_annotation()
def add_common_flags(parser):
"""
Add commaond line flags that are common to multiple sub parsers
"""
parser.add_argument(
'-db',
'--database',
type=str,
required=True,
help='Path to the AGFusion database (e.g. --db /path/to/agfusion.homo_sapiens.87.db)'
)
parser.add_argument(
'-o',
'--out',
type=str,
required=True,
help='Directory to save results'
)
parser.add_argument(
'-nc',
'--noncanonical',
action='store_true',
required=False,
default=False,
help='(Optional) Include non-canonical gene transcripts ' +
'in the analysis (default False).'
)
parser.add_argument(
'--protein_databases',
type=str,
required=False,
nargs='+',
default=['pfam', 'tmhmm'],
help='(Optional) Space-delimited list of one or more protein ' +
'feature databases to include when visualizing proteins. ' +
'Options are: pfam, smart, superfamily, tigrfam, pfscan (Prosite_profiles), ' +
'tmhmm (i.e. transmembrane), seg (low_complexity regions), ncoils ' +
'(coiled coil regions), prints, ' +
'pirsf, and signalp (signal peptide regions) ' +
'(default: --protein_databases pfam and tmhmm).'
)
parser.add_argument(
'--recolor',
type=str,
required=False,
default=None,
action='append',
help='(Optional) Re-color a domain. Provide the original name of ' +
'the domain then your color (semi-colon delimited, all in ' +
        'quotes). Can specify --recolor multiple times, once for each domain ' +
        '(e.g. --recolor \"Pkinase_Tyr;blue\" --recolor \"I-set;#006600\").')
parser.add_argument(
'--rename',
type=str,
required=False,
default=None,
action='append',
help='(Optional) Rename a domain. Provide the original name of ' +
'the domain then your new name (semi-colon delimited, ' +
        'all in quotes). Can specify --rename multiple times, once for ' +
        'each domain (e.g. --rename \"Pkinase_Tyr;Kinase\").')
parser.add_argument(
'--exclude_domain',
type=str,
required=False,
default=[],
nargs='+',
help='(Optional) Exclude a certain domain(s) from plotting ' +
'by providing a space-separated list of domain names.')
parser.add_argument(
'--type',
type=str,
required=False,
default='png',
help='(Optional) Image file type (png, jpeg, pdf). Default: png')
parser.add_argument(
'-w',
'--width',
type=int,
required=False,
default=10,
help='(Optional) Image width in inches (default 10).')
parser.add_argument(
'-ht',
'--height',
type=int,
required=False,
default=3,
help='(Optional) Image file height in inches (default 3).')
parser.add_argument(
'--dpi',
type=int,
required=False,
default=None,
help='(Optional) Dots per inch.')
parser.add_argument(
'--fontsize',
type=int,
required=False,
default=12,
help='(Optional) Fontsize (default 12).')
parser.add_argument(
'--WT',
action='store_true',
required=False,
        help='(Optional) Include this to plot wild-type architectures ' +
'of the 5\' and 3\' genes')
parser.add_argument(
'-ms',
'--middlestar',
action='store_true',
required=False,
help='(Optional) Insert a * at the junction position for the ' +
'cdna, cds, and protein sequences (default False).')
parser.add_argument(
'-ndl',
'--no_domain_labels',
action='store_true',
required=False,
help='(Optional) Do not label domains.')
parser.add_argument(
'--debug',
default=False,
action='store_true',
help='(Optional) Enable debugging logging.'
)
def main():
"""
Main function for processing command line options
"""
parser = argparse.ArgumentParser(
description='Annotate Gene Fusion (AGFusion)'
)
subparsers = parser.add_subparsers(
help='AGFusion programs.',
dest="subparser_name")
annotate_parser = subparsers.add_parser(
'annotate',
help='Annotate and visualize a single fusion.')
annotate_parser.add_argument(
'-g5',
'--gene5prime',
type=str,
required=True,
help='5\' gene partner'
)
annotate_parser.add_argument(
'-g3',
'--gene3prime',
type=str,
required=True,
help='3\' gene partner'
)
annotate_parser.add_argument(
'-j5',
'--junction5prime',
type=int,
required=True,
        help='Genomic location of the predicted fusion junction for the 5\' gene partner. ' +
'The 1-based position that is the last nucleotide included in ' +
'the fusion before the junction.'
)
annotate_parser.add_argument(
'-j3',
'--junction3prime',
type=int,
required=True,
        help='Genomic location of the predicted fusion junction for the 3\' gene partner. ' +
'The 1-based position that is the first nucleotide included in ' +
'the fusion after the junction.'
)
add_common_flags(annotate_parser)
annotate_parser.add_argument(
'--scale',
type=int,
required=False,
default=-1,
help='(Optional) Set maximum width (in amino acids) of the ' +
'figure to rescale the fusion (default: max length of ' +
'fusion product)')
# batch file parser
batch_parser = subparsers.add_parser(
'batch',
help='Annotate fusions from an output file from a fusion ' +
'finding algorithm.')
batch_parser.add_argument(
'-f',
'--file',
type=str,
required=True,
help='Output file from fusion-finding algorithm.'
)
batch_parser.add_argument(
'-a',
'--algorithm',
type=str,
required=True,
help='The fusion-finding algorithm. Can be one of the following: ' +
', '.join(agfusion.parsers.keys()) + '.'
)
add_common_flags(batch_parser)
# download database
database_parser = subparsers.add_parser(
'download',
help='Download database for a reference genome.')
database_parser.add_argument(
'-d',
'--dir',
type=str,
default='',
        help='(Optional) Directory the database will be downloaded ' +
             'to (defaults to the current working directory).')
database_parser.add_argument(
'-g',
'--genome',
type=str,
default=None,
        help='Specify the genome shortcut (e.g. hg19). To see all ' +
'available shortcuts run \'agfusion download -a\'. Either ' +
'specify this or --species and --release.')
database_parser.add_argument(
'-s',
'--species',
type=str,
default=None,
help='The species (e.g. homo_sapiens).')
database_parser.add_argument(
'-r',
'--release',
type=int,
default=None,
help='The ensembl release (e.g. 87).')
database_parser.add_argument(
'-a',
'--available',
action='store_true',
required=False,
help='List available species and ensembl releases.')
# build database parser
build_database_parser = subparsers.add_parser(
'build',
help='Build database for a reference genome.')
build_database_parser.add_argument(
'-d',
'--dir',
type=str,
required=True,
help='Directory to write database file to.'
)
build_database_parser.add_argument(
'-s',
'--species',
type=str,
required=True,
help='The species (e.g. homo_sapiens).'
)
build_database_parser.add_argument(
'-r',
'--release',
type=int,
required=True,
help='The ensembl release (e.g. 87).'
)
build_database_parser.add_argument(
'--pfam',
type=str,
required=True,
help='File containing PFAM ID mappings.'
)
build_database_parser.add_argument(
'--server',
type=str,
required=False,
default='ensembldb.ensembl.org',
        help='(Optional) Ensembl server (default ensembldb.ensembl.org)'
)
# agfusion version number
parser.add_argument(
'-v',
'--version',
action='version',
version=agfusion.__version__
)
args = parser.parse_args()
if args.subparser_name == 'build':
builddb(args)
exit()
elif args.subparser_name == 'download':
if args.available:
list_available_databases()
else:
downloaddb(args)
exit()
# single or batch mode
if not exists(args.out):
mkdir(args.out)
# if user does not specify a sqlite database then use the one provided
# by the package
db_file = split(args.database)[1]
species = db_file.split('.')[1]
release = db_file.split('.')[2]
assert species in AVAILABLE_ENSEMBL_SPECIES, 'unsupported species!'
agfusion_db = agfusion.AGFusionDB(args.database, debug=args.debug)
agfusion_db.build = species + '_' + str(release)
# get the pyensembl data
pyensembl_data = pyensembl.EnsemblRelease(release, species)
try:
pyensembl_data.db
except ValueError:
agfusion_db.logger.error(
"Missing pyensembl data. Run pyensembl install --release " +
"{} --species {}".format(release, species)
)
exit()
# parse the re-coloring and re-naming
colors = {}
rename = {}
if args.rename is not None:
for i in args.rename:
pair = i.split(';')
            assert len(pair) == 2, "Did not properly specify --rename"
if pair[0] in rename:
agfusion_db.logger.warn(
"WARNING - you rename {} twice."
.format(pair[0])
)
rename[pair[0]] = pair[1]
if args.recolor is not None:
for i in args.recolor:
pair = i.split(';')
            assert len(pair) == 2, "Did not properly specify --recolor"
if pair[0] in colors:
agfusion_db.logger.warn(
"You specified colors for {} twice."
.format(pair[0])
)
if pair[0] in rename:
colors[rename[pair[0]]] = pair[1]
else:
colors[pair[0]] = pair[1]
# check image file type is valid
if args.type not in ['png', 'pdf', 'jpeg']:
agfusion_db.logger.error(
"ERROR - provided an incorrect image file type: {}."
.format(args.type)
)
exit()
if args.subparser_name == 'annotate':
annotate(
gene5prime=args.gene5prime,
junction5prime=args.junction5prime,
gene3prime=args.gene3prime,
junction3prime=args.junction3prime,
agfusion_db=agfusion_db,
pyensembl_data=pyensembl_data,
args=args,
outdir=args.out,
colors=colors,
rename=rename,
scale=args.scale
)
elif args.subparser_name == 'batch':
batch_mode(args, agfusion_db, pyensembl_data, rename, colors)
```
#### File: AGFusion/agfusion/exceptions.py
```python
class DataBaseError(Exception):
    def __init__(self, e):
        Exception.__init__(self, e)
        self.e = e
class GeneIDException(Exception):
    def __init__(self, gene):
        Exception.__init__(
            self,
            "No Ensembl ID found for {}! Check its spelling and if you are "
            "using the right genome build.".format(gene)
        )
class TooManyGenesException(Exception):
    def __init__(self, gene, ids, build):
        Exception.__init__(
            self,
            "Multiple Ensembl IDs found matching {}: {} for genome {}."
            " Specify which Ensembl ID.".format(gene, ', '.join(ids), build)
        )
class JunctionException(Exception):
    def __init__(self, gene, junction):
        Exception.__init__(
            self,
            "Junction {} not within {} gene boundaries!"
            .format(junction, gene)
        )
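# A brief usage sketch (not part of the original module) showing how callers
# typically surface these errors; the gene symbol below is just an example.
if __name__ == '__main__':
    try:
        raise GeneIDException('KIF5B')
    except GeneIDException as error:
        print(error)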
``` |
{
"source": "johnehunt/computationalthinking",
"score": 4
} |
#### File: computationalthinking/week5/draw-mulitple-squares.py
```python
import turtle
def setup():
""" Provide the config for the screen """
turtle.title('Multiple Squares Animation')
turtle.setup(100, 100, 0, 0)
turtle.hideturtle()
def draw_square(size):
""" Draw a square in the current direction """
turtle.forward(size)
turtle.right(90)
turtle.forward(size)
turtle.right(90)
turtle.forward(size)
turtle.right(90)
turtle.forward(size)
setup()
for _ in range(0, 12):
draw_square(50)
# Rotate the starting direction
turtle.right(120)
# Add this so that the window will close when clicked on
turtle.exitonclick()
```
#### File: computationalthinking/week5/exceptions.py
```python
def my_function(x, y):
"""
A simple function to divide x by y
"""
print('my_function in')
solution = x / y
print('my_function out')
return solution
print('Starting')
# This unguarded call would raise ZeroDivisionError and stop the script before
# any of the handled examples below run, so it is commented out here.
# print(my_function(6, 0))
try:
print('Before my_function')
result = my_function(6, 0)
print(result)
print('After my_function')
except:
print('oops')
print('-' * 20)
try:
print('Before my_function')
result = my_function(6, 0)
print(result)
print('After my_function')
except ZeroDivisionError:
print('oops')
print('-' * 20)
try:
print('Before my_function')
result = my_function(6, 0)
print(result)
print('After my_function')
except ZeroDivisionError as exp:
print(exp)
print('oops')
print('Done')
print('-' * 20)
try:
print('Before my_function')
result = my_function(6, 2)
print(result)
print('After my_function')
except ZeroDivisionError as exp:
print(exp)
print('oops')
else:
print('All OK')
print('-' * 20)
try:
print('At start')
result = my_function(6, 2)
print(result)
except ZeroDivisionError as e:
print(e)
else:
print('Everything worked OK')
finally:
print('Always runs')
print('-' * 20)
try:
result = my_function(6, 0)
print(result)
except Exception as e:
print(e)
print('-' * 20)
try:
print('Before my_function')
result = my_function(6, 0)
print(result)
print('After my_function')
except ZeroDivisionError as exp:
print(exp)
print('oops')
except ValueError as exp:
print(exp)
print('oh dear')
except:
print('That is it')
print('-' * 20)
try:
print('Before my_function')
result = my_function(6, 0)
print(result)
print('After my_function')
finally:
print('Always printed')
number = 0
input_accepted = False
while not input_accepted:
user_input = input('Please enter a number')
if user_input.isnumeric():
number = int(user_input)
input_accepted = True
else:
try:
number = float(user_input)
input_accepted = True
except ValueError:
print('Needs to be a number')
print(number)
``` |
{
"source": "johnehunt/Python3Intro",
"score": 4
} |
#### File: Python3Intro/09-operator-overloading/Quantity.py
```python
class Quantity:
def __init__(self, value=0):
self.value = value
# Define a set of methods to provide operator functionality
def __add__(self, other):
new_value = self.value + other.value
return Quantity(new_value)
def __sub__(self, other):
new_value = self.value - other.value
return Quantity(new_value)
def __mul__(self, other):
if isinstance(other, int):
new_value = self.value * other
else:
new_value = self.value * other.value
return Quantity(new_value)
def __pow__(self, other):
new_value = self.value ** other.value
return Quantity(new_value)
def __truediv__(self, other):
if isinstance(other, int):
new_value = self.value / other
else:
new_value = self.value / other.value
return Quantity(new_value)
def __floordiv__(self, other):
new_value = self.value // other.value
return Quantity(new_value)
def __mod__(self, other):
new_value = self.value % other.value
return Quantity(new_value)
def __eq__(self, other):
return self.value == other.value
def __ne__(self, other):
return self.value != other.value
def __ge__(self, other):
return self.value >= other.value
def __gt__(self, other):
return self.value > other.value
def __lt__(self, other):
return self.value < other.value
def __le__(self, other):
return self.value <= other.value
def __str__(self):
return 'Quantity[' + str(self.value) + ']'
print('Starting')
q1 = Quantity(5)
q2 = Quantity(10)
print('q1 =', q1, ', q2 =', q2)
q3 = q1 + q2
print('q3 =', q3)
print('q2 - q1 =', q2 - q1)
print('q1 * q2 =', q1 * q2)
print('q1 / q2 =', q1 / q2)
print('q1 < q2: ', q1 < q2)
print('q3 > q2: ', q3 > q2)
print('q3 == q1: ', q3 == q1)
print('q1 * 2', q1 * 2)
print('q2 / 2', q2 / 2)
print('Done')
```
#### File: Python3Intro/12-protocols/quantity.py
```python
class Quantity:
def __init__(self, value=0):
self.value = value
def __add__(self, other):
new_value = self.value + other.value
return Quantity(new_value)
def __sub__(self, other):
new_value = self.value - other.value
return Quantity(new_value)
def __mul__(self, other):
new_value = self.value * other.value
return Quantity(new_value)
def __pow__(self, other):
new_value = self.value ** other.value
return Quantity(new_value)
def __truediv__(self, other):
new_value = self.value / other.value
return Quantity(new_value)
def __floordiv__(self, other):
new_value = self.value // other.value
return Quantity(new_value)
def __mod__(self, other):
new_value = self.value % other.value
return Quantity(new_value)
def __eq__(self, other):
return self.value == other.value
def __ne__(self, other):
return self.value != other.value
def __ge__(self, other):
return self.value >= other.value
def __gt__(self, other):
return self.value > other.value
def __lt__(self, other):
return self.value < other.value
def __le__(self, other):
return self.value <= other.value
def __str__(self):
return 'Quantity[' + str(self.value) + ']'
def main():
q1 = Quantity(5)
q2 = Quantity(10)
print('q1 =', q1, ', q2 =', q2)
q3 = q1 + q2
print('q3 =', q3)
print('q1 * q2 =', q1 * q2)
print('q1 / q2 =', q1 / q2)
print('q1 < q2: ', q1 < q2)
print('q3 > q2: ', q3 > q2)
print('q3 == q1: ', q3 == q1)
if __name__ == '__main__':
main()
```
#### File: Python3Intro/16-testing/test_calculator.py
```python
import pytest
from calculator import Calculator
@pytest.fixture(scope='session', autouse=True)
def session_scope_fixture():
print('session_scope_fixture')
@pytest.fixture(scope='module', autouse=True)
def module_scope_fixture():
print('module_scope_fixture')
@pytest.fixture(scope='class', autouse=True)
def class_scope_fixture():
print('class_scope_fixture')
@pytest.fixture
def calculator():
""" Returns a Calculator instance """
print('calculator fixture')
return Calculator()
def test_initial_value(calculator):
assert calculator.total == 0
def test_add_one(calculator):
calculator.set(1)
calculator.add()
assert calculator.total == 1
def test_subtract_one(calculator):
calculator.set(1)
calculator.sub()
assert calculator.total == -1
def test_add_one_and_one(calculator):
calculator.set(1)
calculator.add()
calculator.set(1)
calculator.add()
assert calculator.total == 2
@pytest.mark.parametrize('input1,input2,expected', [
(3, 1, 4),
(3, 2, 5),
])
def test_calculator_add_operation(calculator, input1, input2, expected):
calculator.set(input1)
calculator.add()
calculator.set(input2)
calculator.add()
assert calculator.total == expected
@pytest.mark.skip(reason='not implemented yet')
def test_calculator_multiply(calculator):
calculator.multiply(2, 3)
assert calculator.total == 6
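# For reference (the course's calculator.py is not shown here, so this is only
# the interface implied by the tests above, not its actual source):
#   class Calculator:
#       total           -> running total, starts at 0
#       set(value)      -> stage a value
#       add() / sub()   -> add/subtract the staged value to/from total
#       multiply(a, b)  -> not implemented yet (see the skipped test)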
``` |
{
"source": "johnehunt/Python3introLabs",
"score": 4
} |
#### File: Python3introLabs/12-testing/number_guess_game.py
```python
import random
import traceback
from constants import MIN_VALUE, MAX_VALUE, MAX_NUMBER_OF_GUESSES
from players import Player, ComputerPlayer
from utils import get_user_yes_or_no
class NumberGuessGameException(Exception):
""" Class representing errors in the number guess game"""
def __init__(self, msg):
super().__init__(msg)
def welcome_message():
print('Welcome to the number guess game')
def display_instructions():
response = get_user_yes_or_no('Do you want to see the instructions?: ')
if response == 'y':
print("You have to guess a number between", MIN_VALUE, "and", MAX_VALUE)
print("You can play as many times as you like")
def game_over_message():
print('Game Over')
def get_player():
player = None
computer_plays = get_user_yes_or_no("Do you want the computer to play? ")
if computer_plays == 'y':
player = ComputerPlayer(MAX_VALUE)
else:
name = input('Please enter your name: ')
print('', name, '')
if name == '':
raise NumberGuessGameException('Invalid Name')
player = Player(name)
return player
def play_game():
""" Defines main loop controlling game"""
player = get_player()
keep_playing = True
while keep_playing:
# Initialise the players history of guesses
player.reset_history()
# Initialise the number to be guessed
number_to_guess = random.randint(MIN_VALUE, MAX_VALUE)
# Initialise the number of tries the player has made
player.reset_guess_count()
# Obtain their initial guess
guess = player.make_a_guess()
while number_to_guess != guess:
print('Sorry wrong number')
# Check to see they have not exceeded the maximum
# number of attempts if so break out of loop otherwise
# give the user come feedback
if player.guess_count == MAX_NUMBER_OF_GUESSES:
break
elif guess < number_to_guess:
print(player.name, 'your guess was lower than the number')
else:
print(player.name, 'your guess was higher than the number')
# Obtain their next guess and increment number of attempts
guess = player.make_a_guess()
# Check to see if they did guess the correct number
if number_to_guess == guess:
print('Well done', player.name, 'won!')
print('You took', player.guess_count, 'goes to complete the game')
else:
            print('Sorry -', player.name, 'you lose')
print('The number you needed to guess was',
number_to_guess)
print('The length of the player history is', len(player))
print(player.name, 'your guesses were:')
player.print_history()
        play_again = get_user_yes_or_no('Do you want ' + player.name + ' to play again? (y/n) ')
if play_again == 'n':
keep_playing = False
# Start the program assume this is the main module
if __name__ == "__main__":
try:
welcome_message()
display_instructions()
play_game()
except NumberGuessGameException as exp:
print('A problem was encountered within the program')
print(exp)
# printing stack trace
traceback.print_exc()
game_over_message()
```
#### File: Python3introLabs/XX-operators/distances2.py
```python
class DistanceException(Exception):
def __init__(self, message, value):
super().__init__(message)
self.message = message
self.value = value
def __str__(self):
return f'DistanceException - {self.message} caused by {self.value}'
class Distance:
def __init__(self, value=0):
self.value = value
def __add__(self, other):
if isinstance(other, int):
new_value = self.value + other
elif isinstance(other, Distance):
new_value = self.value + other.value
else:
raise DistanceException('Incorrect Type', other)
return Distance(new_value)
def __sub__(self, other):
if isinstance(other, int):
            new_value = self.value - other
elif isinstance(other, Distance):
new_value = self.value - other.value
else:
raise DistanceException('Incorrect Type', other)
return Distance(new_value)
def __truediv__(self, amount):
if isinstance(amount, int):
new_value = self.value / amount
else:
raise DistanceException('Incorrect Type', amount)
return Distance(new_value)
def __floordiv__(self, amount):
if isinstance(amount, int):
new_value = self.value // amount
else:
raise DistanceException('Incorrect Type', amount)
return Distance(new_value)
def __mul__(self, amount):
if isinstance(amount, int):
new_value = self.value * amount
else:
raise DistanceException('Incorrect Type', amount)
return Distance(new_value)
def __str__(self):
return f'Distance[{self.value}]'
try:
d1 = Distance(6)
d2 = Distance(3)
print(d1 + d2)
print(d1 - d2)
print(d1 / 2)
print(d2 // 2)
print(d2 * 2)
print(d1 + 'John')
except DistanceException as exp:
print('-' * 25)
print(exp)
print(exp.message)
print(exp.value)
``` |
{
"source": "johnehunt/PythonCleanCode",
"score": 3
} |
#### File: PythonCleanCode/03-class-design/Game.py
```python
class Game:
""" Represents main Game class"""
def __init__(self):
self.state = 'Ready'
self.score = 0
self.objects_found = []
self.keys = {}
self.messages = []
self.player = None
def play(self):
self.new_attribute = 1
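        # Presumably the clean-code point of this example: creating an
        # attribute outside __init__ hides part of the object's state and is
        # generally best avoided.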
```
#### File: 07-interfaces/ABCs/PrinterMixin.py
```python
from abc import ABC
class PrinterMixin(ABC):
def print_me(self):
print(self)
class IDPrinterMixin(ABC):
def print_id(self):
print(self.id)
class Person(object):
def __init__(self, name):
self.name = name
class Employee(Person, PrinterMixin, IDPrinterMixin):
def __init__(self, name, age, id):
super().__init__(name)
self.age = age
self.id = id
def __str__(self):
return 'Employee(' + self.id + ')' + self.name + '[' + str(self.age) + ']'
def main():
e = Employee('Megan', 21, 'MS123')
print(e)
e.print_me()
e.print_id()
if __name__ == '__main__':
main()
```
#### File: PythonCleanCode/08-functions/args_and_kwargs.py
```python
def printall(*args, **kwargs):
# Handle the args values
print('args:')
for arg in args:
print(arg)
print('-' * 20)
# Handle the key value pairs in kwargs
print('kwargs:')
for arg in kwargs.values():
print(arg)
printall(1, 2, 3, a="John", b="Hunt")
```
#### File: PythonCleanCode/08-functions/fail_fast.py
```python
def apply(operation, value1, value2):
if operation == "+":
if value1 is not None:
if value2 is not None:
return value1 + value2
elif operation == '-':
if value1 is not None:
if value2 is not None:
return value1 - value2
raise TypeError("Unknown operation: " + operation)
def apply_ff(operation, value1, value2):
if value1 is None or value2 is None:
        raise ValueError('Values must not be None')
if operation != '+' and operation != '-':
raise TypeError("Unknown operation: " + operation)
if operation == "+":
return value1 + value2
elif operation == '-':
return value1 - value2
try:
print(apply('+', 3, 4))
print(apply('*', 3, 4))
except TypeError as error:
print(error)
try:
print(apply_ff('+', 3, 4))
print(apply_ff('*', 3, 4))
except TypeError as error:
print(error)
def get_property(name):
if name == 'title':
return 'App'
return None
def max_connections():
conn_property = get_property('max-connections')
if conn_property is None:
return 10
else:
        return int(conn_property)
def max_connections_ff():
conn_property = get_property('max-connections')
if conn_property is None:
raise ValueError("Missing property max connections")
else:
        return int(conn_property)
print('max_connections():', max_connections())
try:
print('max_connections_ff():', max_connections_ff())
except ValueError as error:
print(error)
```
#### File: PythonCleanCode/08-functions/make_complex.py
```python
def make_complex1(*args):
x, y = args
return dict(**locals())
def make_complex2(x, y):
return {'x': x, 'y': y}
print(make_complex1(5, 6))
print(make_complex2(5, 6))
```
#### File: 09-functional-programming/higherorderfunctions/higher-order1.py
```python
def apply(x, function):
result = function(x)
return result
def mult(y):
return y * 10.0
print(apply(5, mult))
```
#### File: 09-functional-programming/higherorderfunctions/Person.py
```python
class Person:
def __init__(self, name, age):
self.name = name
self.age = age
def __str__(self):
return 'Person(' + self.name + ', ' + str(self.age) + ')'
```
#### File: PythonCleanCode/A-profiling/memory_usage3.py
```python
from memprof import *
class Person:
def __init__(self, name, hours, hourly_rate):
self.name = name
self.hours = hours
self.hourly_rate = hourly_rate
def __str__(self):
        return 'Person ' + self.name + ' worked ' + str(self.hours) + ' @ ' + str(self.hourly_rate)
def calculate_pay(person):
x = person.hours * person.hourly_rate
return x
@memprof
def get_pay():
employees = [Person('Phoebe', i, 6.50) for i in list(range(0, 1000))]
numbers = [n * n * n for n in list(range(0, 100000))]
total = sum([calculate_pay(p) for p in employees])
return numbers, total
if __name__ == '__main__':
print(get_pay())
```
#### File: PythonCleanCode/C-logging/logging_example10.py
```python
import logging
# Define a filter subclass
class MyFilter(logging.Filter):
def filter(self, record):
if 'John' in record.msg:
return False
else:
return True
logging.basicConfig(format='%(asctime)s %(message)s', level=logging.DEBUG)
# Set up the filter on the logger
logger = logging.getLogger(__name__)
logger.addFilter(MyFilter())
# Application code with logging
logger.debug('This is to help with debugging')
logger.info('This is information on John')
``` |
{
"source": "johnehunt/python-covid-data-analysis",
"score": 3
} |
#### File: python-covid-data-analysis/scikitlearn_data_prediction/evaluate_classifiers.py
```python
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsRegressor
from sklearn.tree import DecisionTreeRegressor
DATA_SET = 'merged_covid_data.csv'
TARGET_VARIABLE = 'retail_and_recreation_change'
FEATURES_SET = ['hospitalCases', 'newAdmissions', 'newCasesByPublishDate']  # a list (not a set) so pandas keeps a stable column order
def run_model(label,
model,
training_feature_set,
training_target_attribute,
test_feature_set,
test_target_attribute):
model.fit(training_feature_set, training_target_attribute)
# Determine the metrics - against the training data
pred_train_rf = model.predict(training_feature_set)
trainingRMSE = np.sqrt(mean_squared_error(training_target_attribute, pred_train_rf))
trainingRSquared = r2_score(training_target_attribute, pred_train_rf)
trainingRSquared *= 100
# Determine the metrics based on the test dataset
pred_test = model.predict(test_feature_set)
testingRMSE = np.sqrt(mean_squared_error(test_target_attribute, pred_test))
testingRSquared = r2_score(test_target_attribute, pred_test)
testingRSquared *= 100
print(f'Testing {label} against Training data')
print(f'Training RMSE - {trainingRMSE:.1f}')
print(f'Training R-squared - {trainingRSquared:.1f}%')
print(f'Testing {label} against test data')
print(f'Testing RMSE - {testingRMSE:.1f}')
print(f'Testing R-squared - {testingRSquared:.1f}%')
def print_menu(title, options, prompt):
while True:
print(title)
length_of_options = len(options)
for row in range(length_of_options):
print(f'\t{row + 1} {options[row]}')
user_option = input(prompt)
if not user_option.isnumeric():
print('Input must be a number')
else:
user_option = int(user_option)
if user_option < 0 or user_option > length_of_options:
print(f'Selection must be in the range 1 to {length_of_options}')
else:
break
return user_option
def save_classifier(regressor):
import pickle
    input_yes_no = input('Do you want to save the classifier for later use? ')
if input_yes_no.lower() == 'y':
filename = input("Please input the name for the classifier file: ")
if not filename.endswith('.pkl'):
filename += '.pkl'
        # Save the classifier; a context manager ensures the file is closed
        with open(filename, "wb") as file:
            pickle.dump(regressor, file)
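# A minimal counterpart sketch (not in the original script) showing how a
# pickled classifier could be reloaded later; the filename is a placeholder.
def load_classifier(filename='classifier.pkl'):
    import pickle
    with open(filename, 'rb') as file:
        return pickle.load(file)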
print(f'Loading - {DATA_SET}')
# Load the merged data set
df = pd.read_csv(DATA_SET)
print('-' * 25)
print('Partition the Data - into train and test')
print('training data 80%, test data 20%')
train, test = train_test_split(df, test_size=0.2)
print(f'Size of training data {len(train)}')
print(f'Size of testing data {len(test)}')
training_features = train[FEATURES_SET].values
training_target = train[TARGET_VARIABLE].values
test_features = test[FEATURES_SET].values
test_target = test[TARGET_VARIABLE].values
print('-' * 25)
model = None
option = print_menu('Classifier Selection Menu',
['KNN',
'Decision Tree',
'Random Forest'],
'Please select an option: ')
if option == 1:
print('Build a KNN Classifier')
print('=' * 20)
# Use n nearest neighbors to predict the value of a future data point
print(f'KNeighborsRegressor(n_neighbors=3)')
model = KNeighborsRegressor(n_neighbors=3)
run_model('KNN', model, training_features, training_target, test_features, test_target)
elif option == 2:
print('Build a Decision Tree')
print('=' * 20)
MAX_DEPTH = 4
print(f'DecisionTreeRegressor(max_depth={MAX_DEPTH}, min_samples_leaf=0.13, random_state=3)')
model = DecisionTreeRegressor(max_depth=MAX_DEPTH, min_samples_leaf=0.13, random_state=3)
run_model('Decision Tree', model, training_features, training_target, test_features,
test_target)
elif option == 3:
print('Build a Random Forest')
print('=' * 20)
# You could explore additional settings for Random Forest
SIZE_OF_FOREST = 500
print(f'RandomForestRegressor(max_depth=4, n_estimators={SIZE_OF_FOREST})')
model = RandomForestRegressor(max_depth=4, n_estimators=SIZE_OF_FOREST)
run_model('Random Forest', model, training_features, training_target, test_features,
test_target)
save_classifier(model)
print('Done')
``` |
{
"source": "johnehunt/python-datastructures",
"score": 4
} |
#### File: python-datastructures/abstractdatatypes/deque.py
```python
class Deque:
"""A Deque ADT in Python.
    Also known as a double-ended queue, it is an ordered collection
    of items similar to a queue.
It has two ends, a front and a rear, and the items
remain positioned in the collection.
"""
def __init__(self):
self.data = []
def is_empty(self):
return self.data == []
def add_front(self, item):
self.data.append(item)
def add_rear(self, item):
self.data.insert(0,item)
def remove_front(self):
return self.data.pop()
def remove_back(self):
return self.data.pop(0)
def size(self):
return len(self.data)
def clear(self):
self.data = []
def __str__(self):
return 'Deque' + str(self.data)
# Implement the length protocol
def __len__(self):
return self.size()
# Implement the iterable protocol
def __iter__(self):
temp = self.data.copy()
temp.reverse()
return iter(temp)
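# A quick usage sketch (not part of the original module) showing the ADT in use.
if __name__ == '__main__':
    d = Deque()
    d.add_front('a')
    d.add_front('b')              # 'b' is now at the front
    d.add_rear('c')               # 'c' is now at the rear
    print(d, 'size:', len(d))     # Deque['c', 'a', 'b'] size: 3
    print(d.remove_front())       # 'b'
    print(d.remove_back())        # 'c'
    for item in d:                # iterates from front to rear
        print(item)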
```
#### File: python-datastructures/searching/bubble_sort.py
```python
print('-' * 36)
print('Example of the Bubble Sort Algorithm')
print('-' * 36)
def bubble_sort(data_list):
pass_number = 0
for i in range(len(data_list) - 1, 0, -1):
pass_number += 1
print(f'Pass {pass_number} through the data')
for j in range(0, i):
if data_list[j] > data_list[j + 1]:
temp = data_list[j]
data_list[j] = data_list[j + 1]
data_list[j + 1] = temp
print(f'\tData at end of pass {pass_number}.{j} - {data_list}')
data = [93, 77, 54, 20, 17]
print('Initial data list -', data)
bubble_sort(data)
print('Final data list -', data)
# Note the effect of each pass through the data
# Where n = 5 (length of list of data)
# Pass 1 => n - 1 comparisons
# Pass 2 => n - 2 comparisons
# Pass 3 => n - 3 comparisons
# Pass 4 => n - 4 comparisons
```
#### File: python-datastructures/searching/insertion_sort.py
```python
print('-' * 40)
print('Example of the Insertion Sort Algorithm')
print('-' * 40)
def insertion_sort(data_list):
# Loop over the whole data list
for index in range(1, len(data_list)):
# Current starting position in unsorted list
current_value = data_list[index]
print(f'Current value {current_value} at index {index}')
position = index
# Find where in the sorted sublist the value should be placed
while position > 0 and data_list[position - 1] > current_value:
print(f'\tmoving {data_list[position -1]} at {position -1} to {position}')
data_list[position] = data_list[position - 1]
position = position - 1
print(f'\tsetting {position} to {current_value}')
data_list[position] = current_value
data = [93, 15, 77, 54, 20]
print('Initial data list -', data)
insertion_sort(data)
print('Final data list -', data)
```
#### File: python-datastructures/trees/list_based_tree.py
```python
test_tree = ['a', # root
['b', # left subtree
['d', [], []],
['e', [], []]],
['c', # right subtree
['f', [], []],
[]]
]
print('tree', test_tree)
print('left subtree = ', test_tree[1])
print('root = ', test_tree[0])
print('right subtree = ', test_tree[2])
# Functions to make it easier to work with trees
def create_tree(r):
return [r, [], []]
def insert_left(root, new_branch):
t = root.pop(1)
if len(t) > 1:
root.insert(1, [new_branch, t, []])
else:
root.insert(1, [new_branch, [], []])
return root
def insert_right(root, new_branch):
t = root.pop(2)
if len(t) > 1:
root.insert(2, [new_branch, [], t])
else:
root.insert(2, [new_branch, [], []])
return root
def get_root_value(root):
return root[0]
def set_root_value(root, new_value):
root[0] = new_value
def get_left_child(root):
return root[1]
def get_right_child(root):
return root[2]
# Program to exercise functions defined above
list_tree = create_tree(3)
insert_left(list_tree, 4)
insert_left(list_tree, 5)
insert_right(list_tree, 6)
insert_right(list_tree, 7)
print(list_tree)
l = get_left_child(list_tree)
print(l)
set_root_value(l, 9)
print(list_tree)
insert_left(l, 11)
print(list_tree)
print(get_right_child(get_right_child(list_tree)))
```
#### File: python-datastructures/trees/node_based_tree.py
```python
class BinaryTreeNode:
def __init__(self, root_value):
self.key = root_value
self.left_child = None
self.right_child = None
def insert_left(self, new_node):
if self.left_child is None:
self.left_child = BinaryTreeNode(new_node)
else:
t = BinaryTreeNode(new_node)
t.left_child = self.left_child
self.left_child = t
def insert_right(self, new_node):
if self.right_child is None:
self.right_child = BinaryTreeNode(new_node)
else:
t = BinaryTreeNode(new_node)
t.right_child = self.right_child
self.right_child = t
def get_right_child(self):
return self.right_child
def get_left_child(self):
return self.left_child
def set_root_value(self, obj):
self.key = obj
def get_root_value(self):
return self.key
def __str__(self):
return 'BinaryTreeNode(' + str(self.key) + \
', left child empty: ' + str(self.left_child is None) + \
', right child empty: ' + str(self.right_child is None) + \
')'
# Simple program illustrating use of BinaryTreeNode class
tree = BinaryTreeNode('a')
print(tree.get_root_value())
print(tree)
print(tree.get_left_child())
tree.insert_left('b')
print(tree.get_left_child())
print(tree.get_left_child().get_root_value())
tree.insert_right('c')
print(tree.get_right_child())
print(tree.get_right_child().get_root_value())
tree.get_right_child().set_root_value('hello')
print(tree.get_right_child().get_root_value())
print(tree)
``` |
{
"source": "johnehunt/PythonIntroDS",
"score": 4
} |
#### File: PythonIntroDS/06-functions/function4.py
```python
def greeter(*names):
for name in names:
print('Welcome', name)
greeter('John', 'Denise', 'Phoebe', 'Adam', 'Gryff', 'Jasmine')
```
#### File: PythonIntroDS/07-further-functions/filter-and-map.py
```python
data = [1, 3, 5, 2, 7, 4, 10]
print('data:', data)
def is_even(i):
return i % 2 == 0
# Filter for even numbers and use map to add 10
new_data = list(map(lambda i: i + 10, filter(is_even, data)))
print('new_data:', new_data)
```
#### File: PythonIntroDS/07-further-functions/reduce-examples.py
```python
from functools import reduce
data = [1, 3, 5, 2, 7, 4, 10]
# Using a lambda
result = reduce(lambda total, value: total + value, data)
print(f'reduce using lambda: {result}')
# Using a named function
def generate_total(running_total, value):
return running_total + value
result = reduce(generate_total, data)
print(f'reduce using named function: {result}')
result = reduce(generate_total, data, 10)
print(f'reduce using named function (start 10): {result}')
# Simpler version is to use sum
result = sum(data)
print(f'sum of data: {result}')
result = sum(data, 10)
print(f'sum of data (start 10): {result}')
```
#### File: PythonIntroDS/08-classes/class-with-comment.py
```python
class Person:
""" An example class to hold a persons name and age"""
def __init__(self, name, age):
self.name = name
self.age = age
def __str__(self):
return self.name + ' is ' + str(self.age)
p1 = Person('John', 36)
p2 = Person('Phoebe', 21)
print(p1.__doc__)
```
#### File: PythonIntroDS/08-classes/str_person.py
```python
class Person:
def __init__(self, name, age):
self.name = name
self.age = age
def __str__(self):
return self.name + ' is ' + str(self.age)
p1 = Person('John', 36)
p2 = Person('Phoebe', 21)
print(p1)
print(p2)
print('-' * 25)
px = p1
print(p1)
print(px)
print('id(p1):', id(p1))
print('id(px):', id(px))
```
#### File: PythonIntroDS/11-modules/main3.py
```python
from utils import *
def main():
printer(default_shape)
shape = Shape("circle")
printer(shape)
if __name__ == "__main__":
main()
```
#### File: PythonIntroDS/12-errors/exceptions.py
```python
def run_calculation(x):
x / 0
def print_value(i, data_list):
print(data_list[i])
def print_alt_value(i, data_list):
    if i >= len(data_list):
raise ValueError('Invalid length ' + str(i))
print(data_list[i])
def my_function(x, y):
print('my_function in')
result = x / y
print('my_function out')
return result
def f2():
print('f2 in')
function_bang()
print('f2 out')
def function_bang():
print('function_bang in')
raise ValueError('Bang!')
print('function_bang out')
class InvalidAgeException(Exception):
""" Valid Ages must be between 0 and 120 """
def __init__(self, value):
self.value = value
def __str__(self):
return 'InvalidAgeException(' + str(self.value) + ')'
class DivideByYWhenZeroException(Exception):
""" Sample Exception class"""
def divide(x, y):
try:
result = x / y
print(result)
except Exception as e:
raise DivideByYWhenZeroException from e
class Person:
""" An example class to hold a persons name and age"""
def __init__(self, name, age):
self.name = name
self._age = age
def __str__(self):
return self.name + ' is ' + str(self._age)
def set_age(self, value):
print('In set_age method(', value, ')')
        if isinstance(value, int) and 0 < value < 120:
self._age = value
else:
raise InvalidAgeException(value)
def main():
# divide(6, 0)
print('Starting')
try:
print('Before my_function')
my_function(6, 2)
print('After my_function')
except ZeroDivisionError as exp:
print('oops')
else:
print('All OK')
print('Done')
try:
my_function(6, 0)
except ZeroDivisionError as e:
print(e)
else:
print('Everything worked OK')
finally:
print('Always runs')
values = [1, 2, 3, 4]
try:
print_alt_value(7, values)
except Exception as e:
print(e)
values = [1, 2, 3, 4]
try:
print_value(2, values)
print_value(3, values)
except IndexError as e:
print('Exception: ', e)
else:
print('All OK')
finally:
print('Always runs')
try:
print(divide(3, 0))
except Exception as e:
print(e)
try:
p1 = Person("Jasmine", 22)
p1.set_age(-1)
except InvalidAgeException as exp:
print(f'Exception: {exp}')
try:
function_bang()
except ValueError as ve:
print(ve)
raise
if __name__ == "__main__":
main()
```
#### File: PythonIntroDS/13-testing/TestCalculatorClass.py
```python
import pytest
from calculator import Calculator
@pytest.fixture
def calculator():
""" Returns a Calculator instance """
print('calculator fixture')
return Calculator()
class TestCalculatorClass:
def setup_class(cls):
print('\nstartup once per class')
def teardown_class(cls):
print('\nteardown once per class')
def setup_method(self):
print('\nstartup per test')
def teardown_method(self):
print('\nteardown per test')
# Tests follow from here
def test_initial_value(self, calculator):
assert calculator.total == 0, "initial value not set"
def test_add_one(self, calculator):
calculator.set(1)
calculator.add()
assert calculator.total == 1
def test_add_2_and_3(self, calculator):
calculator.set(3)
calculator.add()
calculator.set(2)
calculator.add()
assert calculator.total == 5, "add 2+3 does not equal 5"
```
#### File: PythonIntroDS/13-testing/test_calculator_func.py
```python
from calculator import add
def setup_module():
print('module set up')
def teardown_module():
print('module teardown')
def setup_function():
print('function set up')
def teardown_function():
print('function teardown')
def test_add_one_and_one():
result = add(1, 1)
assert result == 2
```
#### File: PythonIntroDS/x-generators/even_number_generator_example.py
```python
def evens_up_to(limit):
value = 0
while value <= limit:
yield value
value += 2
for i in evens_up_to(6):
print(i, end=', ')
print('\n', '-' * 20)
for i in evens_up_to(4):
print('i:', i)
print('\t', end='')
for j in evens_up_to(6):
print('j:', j, end=', ')
print('')
# Don't have to use in a loop can explicitly
# request the next value
evens = evens_up_to(4)
print(next(evens), end=', ')
print(next(evens), end=', ')
print(next(evens))
``` |
{
"source": "johnehunt/PythonIntroLabsDS",
"score": 4
} |
#### File: PythonIntroLabsDS/08-operators/main.py
```python
CELSIUS = "Celsius"
FAHRENHEIT = "Fahrenheit"
class TemperatureReading:
""" Class representing temperature info.
It includes comparison operators
which allow temperatures to be compared as well as added and subtracted.
"""
def __init__(self, temp, date, location, scale):
self.temp = temp
self.date = date
self.location = location
self.scale = scale
def convert(self):
""" convert the temperature to a different scale """
print(self.scale)
print(CELSIUS)
print(self.scale == CELSIUS)
if self.scale == CELSIUS:
return TemperatureReading(celsius_to_fahrenheit(self.temp),
self.date,
self.location,
FAHRENHEIT)
else:
return TemperatureReading(fahrenheit_to_celsius(self.temp),
self.date,
self.location,
CELSIUS)
def __add__(self, other):
if isinstance(other, int) or isinstance(other, float):
new_value = self.temp + other
else:
new_value = self.temp + other.temp
return TemperatureReading(new_value, self.date, self.location, self.scale)
def __sub__(self, other):
if isinstance(other, int) or isinstance(other, float):
new_value = self.temp - other
else:
new_value = self.temp - other.temp
return TemperatureReading(new_value, self.date, self.location, self.scale)
def __eq__(self, other):
return self.temp == other.temp
def __ne__(self, other):
return self.temp != other.temp
def __ge__(self, other):
return self.temp >= other.temp
def __gt__(self, other):
return self.temp > other.temp
def __lt__(self, other):
return self.temp < other.temp
def __le__(self, other):
return self.temp <= other.temp
def __str__(self):
return 'TemperatureReading[' + self.scale + '](' + str(self.temp) + ' on ' + str(self.date) + ' at ' + str(
self.location) + ')'
def average(data):
if isinstance(data[0], int):
return sum(data) / len(data)
else:
raw_data = list(map(lambda r: r.temp, data))
return sum(raw_data) / len(raw_data)
def minimum(data, index=0):
result = None
if index == 0:
data_slice = data
else:
data_slice = data[index:]
for item in data_slice:
if result is None:
result = item
elif result.temp > item.temp:
result = item
return result
def maximum(data, index=0):
result = None
if index == 0:
data_slice = data
else:
data_slice = data[index:]
for item in data_slice:
if result is None:
result = item
elif result.temp < item.temp:
result = item
return result
def data_range(data):
return minimum(data), maximum(data)
def extract_readings(reading):
return reading.temp
def median(data):
sorted_data = sorted(data)
data_length = len(data)
index = (data_length - 1) // 2
if data_length % 2:
return sorted_data[index]
else:
return (sorted_data[index] + sorted_data[index + 1]) / 2.0
def celsius_to_fahrenheit(celsius):
return (celsius * 9 / 5) + 32
def fahrenheit_to_celsius(fahrenheit):
return (fahrenheit - 32) * 5/9
# Set up the data the data file
readings = [
TemperatureReading(13.5, '01/05/20', 'London', 'Celsius'),
TemperatureReading(12.6, '02/05/20', 'London', 'Celsius'),
TemperatureReading(15.3, '03/05/20', 'London', 'Celsius'),
TemperatureReading(12.2, '04/05/20', 'London', 'Celsius'),
TemperatureReading(16.6, '05/05/20', 'London', 'Celsius'),
TemperatureReading(14.6, '05/05/20', 'London', 'Celsius'),
TemperatureReading(15.6, '05/05/20', 'London', 'Celsius')
]
print('All Temperature Readings:')
print(*readings, sep=", ")
# Convert all the temperatures from Celsius to fahrenheit
fahrenheit_temperatures = list(map(lambda r: celsius_to_fahrenheit(r.temp), readings))
print('Fahrenheit Temperatures:', fahrenheit_temperatures)
# Obtain just the temperatures, dates and the indexes for each value
temperatures = list(map(lambda r: r.temp, readings))
print('Temperatures:', temperatures)
dates = list(map(lambda r: r.date, readings))
print('Dates:', dates)
# Find all temperatures above 14.0
higher_temperatures = list(filter(lambda r: r.temp > 14.0, readings))
print('Temperatures above 14.0:', *higher_temperatures)
# Find minimum, maximum etc in readings
print('Min temp in list =', minimum(readings))
print('Max temp in list =', maximum(readings))
print('Average temperature = {:.2f}'.format(average(readings)))
print('Median temperature value =', median(readings))
readings_range = data_range(readings)
print('Range of temperatures from ', str(readings_range[0].temp) + ' to ' + str(readings_range[1].temp))
# Add temperatures together
new_temperature = TemperatureReading(13.5, '01/05/20', 'London', 'Celsius') + TemperatureReading(15.5, '01/05/20', 'London', 'Celsius')
print('Add two temperatures', new_temperature)
# Add an int to a temperature
new_temperature = TemperatureReading(13.5, '01/05/20', 'London', 'Celsius') + 5
print('Add a temperature and a int', new_temperature)
# Add a float to a temperature
new_temperature = TemperatureReading(13.5, '01/05/20', 'London', 'Celsius') + 5.5
print('Add a temperature and a float', new_temperature)
another_temperature = TemperatureReading(13.5, '01/05/20', 'London', 'Celsius')
print(new_temperature > another_temperature)
print(new_temperature >= another_temperature)
print(new_temperature == another_temperature)
print(new_temperature != another_temperature)
print(new_temperature < another_temperature)
print(new_temperature <= another_temperature)
print('Done')
```
#### File: PythonIntroLabsDS/xx-matplotlib/loader.py
```python
import csv
from readings import TemperatureReading
def load_data(filename):
data = []
print('Loading file', filename)
with open(filename, newline='') as csvfile:
reader = csv.reader(csvfile)
for row in reader:
row_length = len(row)
if row_length != 4:
                print('Error in data (row length is not 4):', row)
print('In line:', reader.line_num)
else:
temp = float(row[0])
scale = row[1]
date = row[2]
location = row[3]
reading = TemperatureReading(temp, date, location, scale)
data.append(reading)
print('Finished reading file')
return data
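# For reference (inferred from the parsing above): each row of the CSV file is
# expected to hold four fields, e.g.
#   13.5,Celsius,01/05/20,London
# i.e. temperature, scale, date, location.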
```
#### File: PythonIntroLabsDS/xx-matplotlib/main.py
```python
import matplotlib.pyplot as pyplot
from loader import load_data
from readings import TemperatureReading
from readings import RainfallReading
from readings import InvalidTemperatureException
from utils import *
def main():
# Load the data file
readings = load_data('data.csv')
print('All Temperature Readings:')
print(*readings, sep=", ")
# Convert all the temperatures from Celsius to fahrenheit
fahrenheit_temperatures = list(map(lambda r: celsius_to_fahrenheit(r.value), readings))
print('Fahrenheit Temperatures:', fahrenheit_temperatures)
# Find all temperatures above 14.0
higher_temperatures = list(filter(lambda r: r.value > 14.0, readings))
print('Temperatures above 14.0:', higher_temperatures)
# Find minimum, maximum etc in readings
print('Min temp in list =', minimum(readings))
print('Max temp in list =', maximum(readings))
print('Average temperature = {:.2f}'.format(average(readings)))
print('Median temperature value =', median(readings))
readings_range = data_range(readings)
print('Range of temperatures from ', str(readings_range[0].value) + ' to ' + str(readings_range[1].value))
# Add temperatures together
new_temperature = TemperatureReading(13.5, '01/05/20', 'London', 'Celsius') + TemperatureReading(15.5, '01/05/20',
'London',
'Celsius')
print('Add two temperatures', new_temperature)
new_temperature = TemperatureReading(13.5, '01/05/20', 'London', 'Celsius') + 5
print('Add a temperature and a int', new_temperature)
new_temperature = TemperatureReading(13.5, '01/05/20', 'London', 'Celsius') + 5.5
print('Add a temperature and a float', new_temperature)
# Working with Rainfall readings
rainfall_readings = [
RainfallReading(2.0, '01/05/20', '11:00', 'London'),
RainfallReading(2.6, '02/05/20', '11:30', 'London'),
RainfallReading(2.3, '03/05/20', '11:00', 'London'),
RainfallReading(3.2, '04/05/20', '12:00', 'London'),
RainfallReading(1.6, '05/05/20', '10:45', 'London')
]
print('All Rainfall Readings:')
print(*rainfall_readings, sep=", ")
print(f'Average rainfall {average(rainfall_readings)}')
try:
new_temperature = TemperatureReading(13.5, '01/05/20', 'London', 'Celsius') + '5.5'
except InvalidTemperatureException as e:
print(e)
# Obtain just the temperatures, dates and the indexes for each value
temperatures = list(map(lambda r: r.value, readings))
print('Temperatures:', temperatures)
dates = list(map(lambda r: r.date, readings))
print('Dates:', dates)
# Set the size of the graph
pyplot.figure(figsize=(10, 8))
# Generate a range for the indexes of the bar chart
index = range(len(readings))
# Set up the bar chart
pyplot.bar(index, temperatures, tick_label=dates)
    pyplot.xticks(rotation=75)
pyplot.ylabel('Temperature')
pyplot.xlabel('Dates')
# Display the chart
pyplot.show()
print('Done')
if __name__ == '__main__':
main()
```
#### File: PythonIntroLabsDS/xx-matplotlib/utils.py
```python
def average(data):
if isinstance(data[0], int):
return sum(data) / len(data)
else:
raw_data = list(map(lambda r: r.value, data))
return sum(raw_data) / len(raw_data)
def minimum(data, index=0):
if index == 0:
data_slice = data
else:
data_slice = data[index:]
return min(data_slice)
def maximum(data, index=0):
if index == 0:
data_slice = data
else:
data_slice = data[index:]
return max(data_slice)
def data_range(data):
return minimum(data), maximum(data)
def median(data):
sorted_data = sorted(data)
data_length = len(data)
index = (data_length - 1) // 2
if data_length % 2:
return sorted_data[index]
else:
return (sorted_data[index] + sorted_data[index + 1]) / 2.0
def celsius_to_fahrenheit(celsius):
return (celsius * 9 / 5) + 32
def fahrenheit_to_celsius(fahrenheit):
return (fahrenheit - 32) * 5/9
``` |
{
"source": "johnekent/camera-utils",
"score": 3
} |
#### File: camera-utils/camutil/camera_fswebcam.py
```python
import subprocess
import logging
"""this is the version for fswebcam
It just wraps the command.
"""
class Camera(object):
def __init__(self):
logging.info("Created")
def _make_cmd(self, filename, device=None, resolution=None, brightness=None, contrast=None, zoom=None):
cmd = ['fswebcam']
# these would be nice for tests
if device:
cmd.append('-d')
cmd.append(device)
if resolution:
cmd.append('-r')
cmd.append(resolution)
# controls
if brightness:
cmd.append('-s')
cmd.append('brightness={}%'.format(brightness))
if contrast:
cmd.append('-s')
cmd.append('contrast={}%'.format(contrast))
if zoom:
cmd.append('-s')
cmd.append('zoom, absolute={}'.format(zoom))
cmd.append('--jpeg')
cmd.append('100')
cmd.append(filename)
return cmd
def take_picture(self, filename, device=None, resolution=None, brightness=None, contrast=None, zoom=None):
cmd = self._make_cmd(filename,
device=device,
resolution=resolution,
brightness=brightness,
contrast=contrast,
zoom=zoom)
logging.info("cmd = {}".format(str(cmd)))
status = subprocess.run(cmd)
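# A short usage sketch (not part of the original module); the device path and
# resolution below are examples and depend on the attached webcam.
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    cam = Camera()
    cam.take_picture('capture.jpg', device='/dev/video0',
                     resolution='1280x720', brightness=60)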
``` |
{
"source": "John-Ellis/derpLernin",
"score": 3
} |
#### File: derpLernin/bin/calibrate_camera.py
```python
import argparse
from pathlib import Path
import cv2
import numpy as np
from derp.camera import Camera
import derp.util
def live_calibrate(camera, pattern_shape, n_matches_needed):
""" Find calibration parameters as the user moves a checkerboard in front of the camera """
print("Looking for %s checkerboard" % (pattern_shape,))
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
example_3d = np.zeros((pattern_shape[0] * pattern_shape[1], 3), np.float32)
example_3d[:, :2] = np.mgrid[0 : pattern_shape[1], 0 : pattern_shape[0]].T.reshape(-1, 2)
points_3d = []
points_2d = []
while len(points_3d) < n_matches_needed:
ret, frame = camera.cap.read()
assert ret
gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
ret, corners = cv2.findCirclesGrid(
gray_frame, pattern_shape, flags=cv2.CALIB_CB_ASYMMETRIC_GRID
)
cv2.imshow("camera", frame)
if ret:
points_3d.append(example_3d.copy())
points_2d.append(corners)
print("Found calibration %i of %i" % (len(points_3d), n_matches_needed))
drawn_frame = cv2.drawChessboardCorners(frame, pattern_shape, corners, ret)
cv2.imshow("calib", drawn_frame)
cv2.waitKey(10)
ret, camera_matrix, distortion_coefficients, _, _ = cv2.calibrateCamera(
points_3d, points_2d, gray_frame.shape[::-1], None, None
)
assert ret
return camera_matrix, distortion_coefficients
def live_undistort(camera, camera_matrix, distortion_coefficients):
""" Using a given calibration matrix, display the distorted, undistorted, and cropped frame"""
scaled_camera_matrix, roi = cv2.getOptimalNewCameraMatrix(
camera_matrix, distortion_coefficients, camera.size, 1, camera.size
)
while True:
ret, frame = camera.cap.read()
assert ret
distorted_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
undistorted_frame = cv2.undistort(
distorted_frame, camera_matrix, distortion_coefficients, None, scaled_camera_matrix,
)
roi_x, roi_y, roi_w, roi_h = roi
cropped_frame = undistorted_frame[roi_y : roi_y + roi_h, roi_x : roi_x + roi_w]
cv2.imshow("distorted %s" % (distorted_frame.shape,), distorted_frame)
cv2.imshow("undistorted %s" % (undistorted_frame.shape,), undistorted_frame)
cv2.imshow("cropped %s" % (cropped_frame.shape,), cropped_frame)
cv2.waitKey(10)
def main():
"""
Calibrate the live camera and optionally do a live display of the results
"""
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("config", type=Path, help="camera config path")
parser.add_argument("--height", type=int, default=4)
parser.add_argument("--width", type=int, default=10)
parser.add_argument("--count", type=int, default=10)
parser.add_argument("--view", action="store_true")
args = parser.parse_args()
config = {"camera": derp.util.load_config(args.config)}
camera = Camera(config)
pattern_shape = (args.height, args.width)
camera_matrix, distortion_coefficients = live_calibrate(camera, pattern_shape, args.count)
print(camera_matrix)
print(distortion_coefficients)
if args.view:
live_undistort(camera, camera_matrix, distortion_coefficients)
cv2.destroyAllWindows()
if __name__ == "__main__":
main()
```
#### File: derpLernin/bin/drive.py
```python
import argparse
import logging
from multiprocessing import Event, Process
from pathlib import Path
import time
import derp.util
import derp.brain
import derp.camera
import derp.imu
import derp.joystick
import derp.servo
import derp.writer
def all_running(processes):
""" Returns whether all processes are currently alive """
for proc in processes:
proc.join(timeout=0)
if not proc.is_alive():
return False
return True
def loop(config, exit_event, func):
""" Makes running multiprocessing easier """
obj = func(config)
while not exit_event.is_set() and obj.run():
pass
del obj
def main():
""" Prepare arguments, configurations, variables and run the event loop. """
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("config", type=Path, help="Main config path, should include all hardeware")
args = parser.parse_args()
pid_path = '/tmp/derp_drive.pid'
if derp.util.is_already_running(pid_path):
return
derp.util.write_pid(pid_path)
config = derp.util.load_config(args.config)
recording_path = derp.util.make_recording_path()
derp.util.dump_config(config, recording_path / 'config.yaml')
config['recording_path'] = recording_path
logger = derp.util.init_logger('drive', config['recording_path'])
component_map = {
"brain": derp.brain.Clone,
"camera": derp.camera.Camera,
"imu": derp.imu.Imu,
"joystick": derp.joystick.Joystick,
"servo": derp.servo.Servo,
"writer": derp.writer.Writer,
}
processes = []
exit_event = Event()
for name in sorted(component_map):
if name not in config:
logger.info("skip %s", name)
continue
proc_args = (config, exit_event, component_map[name])
proc = Process(target=loop, name=name, args=proc_args)
proc.start()
processes.append(proc)
logger.info("start %s %i", name, proc.pid)
while all_running(processes):
time.sleep(0.1)
exit_event.set()
logger.info("exit")
if __name__ == "__main__":
main()
```
#### File: derpLernin/derp/camera.py
```python
import cv2
import time
from derp.part import Part
import derp.util
class Camera(Part):
"""The Camera manages the camera interface and sends camera messages."""
def __init__(self, config):
"""The Camera manages the camera interface and sends camera messages."""
super(Camera, self).__init__(config, "camera", [])
self._cap = None
self.size = (self._config["width"], self._config["height"])
self._frame = None
self.__connect()
def __del__(self):
super(Camera, self).__del__()
if self._cap is not None:
self._cap.release()
def __connect(self):
if self._cap is not None:
self._cap.release()
del self._cap
self._cap = None
time.sleep(1)
device = "device=/dev/video%i" % self._config["index"]
gst = None
if self._config["mode"] == "video":
gst = (
"v4l2src %s"
" ! video/x-raw,format=YUY2,width=%i,height=%i,framerate=%i/1 "
" ! videoconvert ! appsink"
% (device, self._config["width"], self._config["height"], self._config["fps"])
)
elif self._config["mode"] == "image":
gst = (
"v4l2src %s"
" ! image/jpeg,width=%i,height=%i,framerate=%i/1"
" ! jpegparse ! jpegdec ! videoconvert ! appsink"
% (device, self._config["width"], self._config["height"], self._config["fps"])
)
elif self._config["mode"] == "csi":
gst = (
"nvarguscamerasrc sensor-id=%i"
" ! video/x-raw(memory:NVMM),width=%i,height=%i,framerate=(fraction)%i/1,format=(string)NV12"
" ! nvtee ! nvvidconv flip-method=%i"
" ! video/x-raw,width=%i,height=%i,format=BGRx"
" ! videoconvert ! appsink"
% (
self._config["index"],
self._config["capture_width"],
self._config["capture_height"],
self._config["fps"],
self._config["flip_method"],
self._config["width"],
self._config["height"],
)
)
print(gst)
if gst is not None:
self._cap = cv2.VideoCapture(gst, cv2.CAP_GSTREAMER)
def read(self):
""" Read the camera image if possible, and if so update the timestamp we received data """
ret, self._frame = self._cap.read()
self._timestamp = derp.util.get_timestamp()
return ret
def run(self):
"""Get and publish the camera frame"""
if not self.read():
return False
self.publish(
"camera",
index=self._config["index"],
jpg=derp.util.encode_jpg(self._frame, self._config["quality"]),
)
return True
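# For reference, a hypothetical 'camera' config section covering the keys this
# class reads (values are illustrative, not copied from the project's yaml files):
#   camera:
#     mode: csi            # one of: video, image, csi
#     index: 0             # /dev/video0 or CSI sensor id
#     width: 640
#     height: 480
#     fps: 30
#     quality: 80          # jpeg quality passed to derp.util.encode_jpg
#     # csi mode additionally uses capture_width, capture_height and flip_method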
```
#### File: derpLernin/derp/fetcher.py
```python
import csv
import numpy as np
import PIL.Image
import torch.utils.data
import derp.util
class Fetcher(torch.utils.data.Dataset):
"""
Fetcher is an image-loader for use with training.
"""
def __init__(self, root, transforms, predict_config):
"""
Our data fetcher is responsible for handling data input to the model training.
It loads each image in the dataset along with the states for that image.
Since we use a feed dict, each state variable is stored as a mapping from its
string name to its value. It is then the responsibility of the data loader or
training script to properly convert to an array that can be optimized against.
"""
# Store constructor arguments
self.root = root.expanduser()
print(self.root)
self.transforms = transforms
self.predict_config = predict_config
        # Prepare variables to store each item
self.paths = []
self.status = []
self.predict = []
# Read in states and paths
# Each video has a certain fixed number of state variables which we will encode as a dict
for recording_name in sorted(self.root.glob("recording-*")):
recording_path = self.root / recording_name
status_path = recording_path / "status.csv"
assert status_path.exists()
predict_path = recording_path / "predict.csv"
assert predict_path.exists()
with open(str(status_path)) as status_fd, open(str(predict_path)) as predict_fd:
sp_reader, pp_reader = csv.reader(status_fd), csv.reader(predict_fd)
for status_row, predict_row in zip(sp_reader, pp_reader):
assert status_row[0] == predict_row[0]
image_path = recording_path / status_row[0]
status = np.array([float(x) for x in status_row[1:]], dtype=np.float32)
predict = np.array([float(x) for x in predict_row[1:]], dtype=np.float32)
                    if status.size == 0:
status = np.zeros(1, dtype=np.float32)
self.paths.append(image_path)
self.status.append(status)
self.predict.append(predict)
def __getitem__(self, index):
""" Return the specified index. Apply transforms as specified """
thumb = PIL.Image.fromarray(derp.util.load_image(self.paths[index]))
status = self.status[index]
predict = self.predict[index]
if self.transforms is not None:
thumb = self.transforms(thumb)
return thumb, status, predict
def __len__(self):
""" Return the number of items our fetcher is responsible for """
return len(self.paths)
```
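A minimal usage sketch for the Fetcher above; the dataset path and the bare ToTensor transform are assumptions for illustration, not values taken from the project.
```python
# Hypothetical usage; adjust the root path to wherever recording-* folders live.
from pathlib import Path
import torchvision.transforms as transforms
dataset = Fetcher(Path("~/derp/train"), transforms.Compose([transforms.ToTensor()]), predict_config={})
thumb, status, predict = dataset[0]  # transformed image, status vector, prediction targets
print(len(dataset))                  # number of (image, status, predict) rows loaded
```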
#### File: derpLernin/derp/joystick.py
```python
import fcntl
from io import FileIO
import os
from struct import Struct
import time
from evdev import InputDevice
from binascii import crc32
from pyudev import Context
from derp.part import Part
import derp.util
class DS4State:
left_analog_x = 128
left_analog_y = 128
right_analog_x = 128
right_analog_y = 128
up = 0
down = 0
left = 0
right = 0
button_square = 0
button_cross = 0
button_circle = 0
button_triangle = 0
button_l1 = 0
button_l2 = 0
button_l3 = 0
button_r1 = 0
button_r2 = 0
button_r3 = 0
button_share = 0
button_options = 0
button_trackpad = 0
button_ps = 0
timestamp = 0
left_trigger = 0
right_trigger = 0
accel_y = 0
accel_x = 0
accel_z = 0
orientation_roll = 0
orientation_yaw = 0
orientation_pitch = 0
trackpad_0_id = -1
trackpad_0_active = False
trackpad_0_x = 0
trackpad_0_y = 0
trackpad_1_id = -1
trackpad_2_active = False
trackpad_3_x = 0
trackpad_4_y = 0
battery_level = 0
usb = False
audio = False
mic = False
def __init__(self, recv_buffer=None):
if recv_buffer:
self.import_buffer(recv_buffer)
def import_buffer(self, recv_buffer):
short = Struct("<h")
dpad = recv_buffer[7] % 16
self.left_analog_x = recv_buffer[3]
self.left_analog_y = recv_buffer[4]
self.right_analog_x = recv_buffer[5]
self.right_analog_y = recv_buffer[6]
self.up = dpad in (0, 1, 7)
self.down = dpad in (3, 4, 5)
self.left = dpad in (5, 6, 7)
self.right = dpad in (1, 2, 3)
self.button_square = (recv_buffer[7] & 16) != 0
self.button_cross = (recv_buffer[7] & 32) != 0
self.button_circle = (recv_buffer[7] & 64) != 0
self.button_triangle = (recv_buffer[7] & 128) != 0
self.button_l1 = (recv_buffer[8] & 1) != 0
self.button_l2 = (recv_buffer[8] & 4) != 0
self.button_l3 = (recv_buffer[8] & 64) != 0
self.button_r1 = (recv_buffer[8] & 2) != 0
self.button_r2 = (recv_buffer[8] & 8) != 0
self.button_r3 = (recv_buffer[8] & 128) != 0
self.button_share = (recv_buffer[8] & 16) != 0
self.button_options = (recv_buffer[8] & 32) != 0
self.button_trackpad = (recv_buffer[9] & 2) != 0
self.button_ps = (recv_buffer[9] & 1) != 0
self.timestamp = recv_buffer[9] >> 2
self.left_trigger = recv_buffer[10]
self.right_trigger = recv_buffer[11]
self.accel_y = short.unpack_from(recv_buffer, 15)[0]
self.accel_x = short.unpack_from(recv_buffer, 17)[0]
self.accel_z = short.unpack_from(recv_buffer, 19)[0]
self.orientation_roll = -(short.unpack_from(recv_buffer, 21)[0])
self.orientation_yaw = short.unpack_from(recv_buffer, 23)[0]
self.orientation_pitch = short.unpack_from(recv_buffer, 25)[0]
self.trackpad_0_id = recv_buffer[37] & 0x7F
self.trackpad_0_active = (recv_buffer[37] >> 7) == 0
self.trackpad_0_x = ((recv_buffer[39] & 0x0F) << 8) | recv_buffer[38]
self.trackpad_0_y = recv_buffer[40] << 4 | ((recv_buffer[39] & 0xF0) >> 4)
self.trackpad_1_id = recv_buffer[41] & 0x7F
self.trackpad_2_active = (recv_buffer[41] >> 7) == 0
self.trackpad_3_x = ((recv_buffer[43] & 0x0F) << 8) | recv_buffer[42]
self.trackpad_4_y = recv_buffer[44] << 4 | ((recv_buffer[43] & 0xF0) >> 4)
self.battery_level = recv_buffer[32] % 16
self.usb = (recv_buffer[32] & 16) != 0
self.audio = (recv_buffer[32] & 32) != 0
self.mic = (recv_buffer[32] & 64) != 0
class Joystick(Part):
"""Joystick to drive the car around manually without keyboard."""
def __init__(self, config):
"""Joystick to drive the car around manually without keyboard."""
super(Joystick, self).__init__(config, "joystick", [])
# State/Controls
self.speed = 0
self.steer = 0
self.speed_offset = 0
self.steer_offset = 0
self.is_calibrated = True
self.is_autonomous = False
self.state = DS4State()
self.last_state = DS4State()
self.__fd = None
self.__input_device = None
self.__report_fd = None
self.__report_id = 0x11
self.__keep_running = True
self.__connect()
def __del__(self):
self.publish("action", isManual=True, speed=0, steer=0)
self.publish("controller", isAutonomous=False, speedOffset=0, steerOffset=0, exit=True)
super(Joystick, self).__del__()
try:
self.send(red=1, rumble_high=1)
time.sleep(0.5)
self.send(blue=0.1, green=0.1, red=0.5)
except:
pass
if self.__fd is not None:
self.__fd.close()
if self.__input_device is not None:
self.__input_device.ungrab()
def __find_device(self):
context = Context()
for hidraw_device in context.list_devices(subsystem="hidraw"):
hid_device = hidraw_device.parent
if hid_device.subsystem != "hid" or hid_device.get("HID_NAME") != "Wireless Controller":
continue
for child in hid_device.parent.children:
event_device = child.get("DEVNAME", "")
if event_device.startswith("/dev/input/event"):
break
else:
continue
device_addr = hid_device.get("HID_UNIQ", "").upper()
return device_addr, hidraw_device.device_node, event_device
return None, None, None
def __connect(self):
device_addr, hidraw_device, event_device = self.__find_device()
if device_addr is None:
return False
self.__report_fd = os.open(hidraw_device, os.O_RDWR | os.O_NONBLOCK)
self.__fd = FileIO(self.__report_fd, "rb+", closefd=False)
self.__input_device = InputDevice(event_device)
self.__input_device.grab()
buf = bytearray(38)
buf[0] = 0x02
try:
return bool(fcntl.ioctl(self.__fd, 3223734279, bytes(buf)))
except:
pass
if self.recv():
self.update_controller()
def __in_deadzone(self, value):
""" Deadzone checker for analog sticks """
return 128 - self._config["deadzone"] < value <= 128 + self._config["deadzone"]
def __normalize_stick(self, value, deadzone):
"""
        Normalize stick value from [0, 255] to approximately [-1, 1]
Ignore a 128-centered deadzone
"""
value -= 128
value = value - deadzone if value > 0 else value + deadzone
value /= 127 - deadzone
return value
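    # Worked example for the normalization above (not in the original source): with a
    # deadzone of 8, a raw value of 200 maps to (200 - 128 - 8) / (127 - 8) ≈ 0.54,
    # 255 maps to exactly 1.0 and 0 to roughly -1.01, so callers can treat the result
    # as an approximately [-1, 1] axis value.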
def recv(self, limit=1000, duration=0.001, report_size=78):
"""
Attempt to get a message from the device.
Args:
            limit (int): maximum number of device polls to attempt
            duration (float): seconds to sleep between polls
Returns:
Whether we have successfully updated the status of the program
"""
for i in range(limit):
time.sleep(duration)
recv_buffer = bytearray(report_size)
try:
ret = self.__fd.readinto(recv_buffer)
except IOError:
# print("joystick: IO Error")
continue
except AttributeError:
# print("joystick: Attribute Error")
continue
if ret is None:
# print("joystick: ret is none")
continue
if ret < report_size:
# print("joystick: ret too small (%i) expected (%i)" % (ret, report_size))
continue
if recv_buffer[0] != self.__report_id:
# print("joystick: Wrong report id (%i) expected (%i):"
# % (recv_buffer[0], self.__report_id))
continue
self._timestamp = derp.util.get_timestamp()
self.last_state = self.state
self.state = DS4State(recv_buffer)
self.process_state()
return True
return False
def update_controller(self):
"""Send the state of the system to the controller"""
green = 1.0 if self.is_autonomous else 0
red = 1.0 if self.is_calibrated else 0
blue = 1.0
light_on = 1.0
light_off = 0.0
self.send(red=red, green=green, blue=blue, light_on=light_on, light_off=light_off)
return True
def send(self, rumble_high=0, rumble_low=0, red=0, green=0, blue=0, light_on=0, light_off=0):
"""Actuate the controller by setting its rumble or light color/blink"""
packet = bytearray(79)
packet[:5] = [0xA2, 0x11, 0x80, 0x00, 0xFF]
packet[7] = int(rumble_high * 255 + 0.5)
packet[8] = int(rumble_low * 255 + 0.5)
packet[9] = int(red * 255 + 0.5)
packet[10] = int(green * 255 + 0.5)
packet[11] = int(blue * 255 + 0.5)
packet[12] = int(light_on * 255 + 0.5)
packet[13] = int(light_off * 255 + 0.5)
crc = crc32(packet[:-4])
packet[-4] = crc & 0x000000FF
packet[-3] = (crc & 0x0000FF00) >> 8
packet[-2] = (crc & 0x00FF0000) >> 16
packet[-1] = (crc & 0xFF000000) >> 24
hid = bytearray((self.__report_id,))
if self.__fd is not None:
self.__fd.write(hid + packet[2:])
return True
return False
def process_state(self):
"""
        Map the latest controller state onto speed/steer values and offsets,
        and record whether the action or controller outputs changed.
"""
self.controller_changed = False
self.action_changed = False
self.__keep_running = not self.state.button_trackpad
if not self.__in_deadzone(self.state.left_analog_x):
steer = self.__normalize_stick(self.state.left_analog_x, self._config["deadzone"]) * self._config['steer_normalizer']
if steer != self.steer:
self.steer = steer
self.action_changed = True
elif not self.__in_deadzone(self.last_state.left_analog_x):
self.steer = 0
self.action_changed = True
if self.state.left_trigger:
speed = -self.state.left_trigger / 255 * self._config['speed_normalizer']
if speed != self.speed:
self.speed = speed
self.action_changed = True
elif self.last_state.left_trigger:
self.speed = 0
self.action_changed = True
if self.state.right_trigger:
speed = self.state.right_trigger / 255 * self._config['speed_normalizer']
if speed != self.speed:
self.speed = speed
self.action_changed = True
elif self.last_state.right_trigger:
self.speed = 0
self.action_changed = True
if self.state.left and not self.last_state.left:
self.steer_offset -= 5 / 255
self.controller_changed = True
if self.state.right and not self.last_state.right:
self.steer_offset += 5 / 255
self.controller_changed = True
if self.state.up and not self.last_state.up:
self.speed_offset += 5 / 255
self.controller_changed = True
if self.state.down and not self.last_state.down:
self.speed_offset -= 5 / 255
self.controller_changed = True
if self.state.button_square and not self.last_state.button_square:
pass
if self.state.button_cross and not self.last_state.button_cross:
self.speed = 0
self.steer = 0
self.speed_offset = 0
self.is_autonomous = False
self.action_changed = True
self.controller_changed = True
if self.state.button_triangle and not self.last_state.button_triangle:
self.is_autonomous = True
self.controller_changed = True
if self.state.button_circle and not self.last_state.button_circle:
self.controller_changed = True
def run(self):
"""Query one set of inputs from the joystick and send it out."""
start_time = derp.util.get_timestamp()
if not self.recv():
print("joystick: timed out", start_time)
self.__connect()
return True
if self.controller_changed:
self.update_controller()
self.publish(
"controller",
isAutonomous=self.is_autonomous,
speedOffset=self.speed_offset,
steerOffset=self.steer_offset,
)
if self.action_changed:
self.publish("action", isManual=True, speed=self.speed, steer=self.steer)
return self.__keep_running
```
#### File: derpLernin/test/test_end2end.py
```python
import pytest
import numpy as np
import PIL.Image
import torch
import torchvision.transforms as transforms
import derp.util
import derp.model
@pytest.fixture
def frame():
return derp.util.load_image("test/100deg.jpg")
@pytest.fixture
def source_config():
return {'hfov': 50, 'vfov': 50, 'yaw': 0, 'pitch': 0, 'width': 100, 'height': 100,
'x': 0, 'y': 0, 'z': 1}
@pytest.fixture
def target_config():
return {'hfov': 32, 'vfov': 32, 'yaw': 0, 'pitch': -4, 'x': 0, 'y': 0, 'z': 1}
class Fetcher(torch.utils.data.Dataset):
def __init__(self, table):
self.table = table
self.transform = transforms.Compose([transforms.ToTensor()])
def __getitem__(self, index):
image = PIL.Image.fromarray(self.table[index][0])
return self.transform(image), self.table[index][1], self.table[index][2]
def __len__(self):
return len(self.table)
def test_perturb(frame, source_config, target_config):
""" verify that a zero perturb does nothing to the pixels """
bbox = derp.util.get_patch_bbox(target_config, source_config)
zero_frame = derp.util.perturb(frame.copy(), source_config)
assert (zero_frame - frame).sum() == 0
def test_perturb_learnability(frame, source_config, target_config):
bbox = derp.util.get_patch_bbox(target_config, source_config)
train_table, test_table = [], []
for shift in np.linspace(-0.4, 0.4, 51):
for rotate in np.linspace(-4, 4, 51):
p_frame = derp.util.perturb(frame.copy(), source_config, shift, rotate)
p_patch = derp.util.crop(p_frame, bbox)
table = test_table if shift == 0 or rotate == 0 else train_table
table.append([p_patch, torch.FloatTensor(), torch.FloatTensor([shift * 2.5,
rotate * 0.25])])
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
train_fetcher, test_fetcher = Fetcher(train_table), Fetcher(test_table)
train_loader = torch.utils.data.DataLoader(train_fetcher, 32, shuffle=True)
test_loader = torch.utils.data.DataLoader(test_fetcher, len(test_fetcher))
model = derp.model.Tiny(np.roll(train_table[0][0].shape, 1), 0, 2).to(device)
optimizer = torch.optim.AdamW(model.parameters(), 1E-3)
criterion = torch.nn.MSELoss().to(device)
test_losses = []
for epoch in range(5):
train_loss = derp.model.train_epoch(device, model, optimizer, criterion, train_loader)
test_loss = derp.model.test_epoch(device, model, criterion, test_loader)
test_losses.append(test_loss)
assert min(test_losses) < 2E-3
``` |
{
"source": "JohnElmasry/excel_validator",
"score": 4
} |
#### File: excel_validator/validator/LengthValidator.py
```python
from validator.BaseValidator import BaseValidator
class LengthValidator(BaseValidator):
min = None
max = None
minMessage = "Min length error"
maxMessage = "Max length error"
message = "This value has incorrect length"
def validate(self, value):
#possible null values
if value is None:
return True
value = super(LengthValidator, self).validate(value)
        if type(value) is not str:
            value = str(value)
        if self.min is not None and len(value) < self.min:
            if self.minMessage is not None:
                self.message = self.minMessage
            return False
        if self.max is not None and len(value) > self.max:
            if self.maxMessage is not None:
                self.message = self.maxMessage
            return False
        return True
def __init__(self, params):
super(LengthValidator, self).__init__(params)
if 'min' in params:
self.min = params.get('min')
if 'max' in params:
self.max = params.get('max')
if 'minMessage' in params:
self.minMessage = params.get('minMessage')
if 'maxMessage' in params:
self.maxMessage = params.get('maxMessage')
``` |
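A minimal usage sketch, assuming BaseValidator accepts the params dict and that its validate() returns the value unchanged.
```python
# Illustrative values only; keys follow the constructor above.
validator = LengthValidator({"min": 2, "max": 5, "minMessage": "too short"})
print(validator.validate("abc"))  # True - length is within [2, 5]
print(validator.validate("a"))    # False - validator.message is now "too short"
print(validator.validate(None))   # True - null values are allowed
```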
{
"source": "johnelutz/538_Baseball",
"score": 3
} |
#### File: johnelutz/538_Baseball/DiceAct.py
```python
from D6D6 import D6D6die
def ActionMap():
ddd = D6D6die()
if ddd == [1, 1]:
return("double")
elif ddd in ([1, 2], [1, 3], [1, 4]):
return("single")
elif ddd == [1, 5]:
return("base on error")
elif ddd == [1, 6]:
return("base on balls")
elif ddd in ([2, 2], [2, 3], [2, 4], [2, 5]):
return("strike")
elif ddd == [2, 6]:
return("foul out")
elif ddd in ([3, 3], [3, 4], [3, 5], [3, 6]):
return("out at first")
elif ddd in ([4, 4], [4, 5], [4, 6]):
return("fly out")
elif ddd == [5, 5]:
return("double play")
elif ddd == [5, 6]:
return("triple")
elif ddd == [6, 6]:
return("home run")
def main():
print(ActionMap())
if __name__ == '__main__':
main()
``` |
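The D6D6 module is not shown here. ActionMap's membership tests (e.g. [1, 2] but never [2, 1]) only cover every outcome if the helper returns the two dice in ascending order, so a compatible stand-in might look like the sketch below; this is an assumption, not the project's actual implementation.
```python
# Hypothetical stand-in for D6D6.D6D6die: a sorted pair of six-sided die rolls.
from random import randint
def D6D6die():
    return sorted([randint(1, 6), randint(1, 6)])
```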
{
"source": "JohnEmhoff/spavro",
"score": 3
} |
#### File: src/spavro/new_schema.py
```python
PRIMITIVE = (
u'null',
u'boolean',
u'string',
u'bytes',
u'int',
u'long',
u'float',
u'double',
)
class Schema(object):
def to_json(self):
        raise NotImplementedError()
def __str__(self):
return str(self.type)
def __repr__(self):
return "<{} type='{}'>".format(self.__class__.__name__, self)
class PrimitiveSchema(Schema):
def __init__(self, schema_name):
self.type = schema_name
class RecordField(object):
def __init__(self, fielddef):
self.name = fielddef['name']
self.type = parse_schema(fielddef['type'])
def __str__(self):
return str(self.type)
def __repr__(self):
return "<{} type='{}'>".format(self.__class__.__name__, self)
class RecordSchema(Schema):
def __init__(self, schema):
self.name = schema['name']
self.type = schema['type']
self.fields = [RecordField(field) for field in schema['fields']]
class UnionSchema(Schema):
def __init__(self, schemas, names=None):
self.type = 'union'
self.schemas = [parse_schema(schema, names) for schema in schemas]
class EnumSchema(Schema):
def __init__(self, schema):
self.type = 'enum'
self.symbols = schema['symbols']
self.name = schema.get('name', None)
class ArraySchema(Schema):
def __init__(self, schema):
        raise NotImplementedError()
class MapSchema(Schema):
def __init__(self, schema):
        raise NotImplementedError()
class FixedSchema(Schema):
def __init__(self, schema):
        raise NotImplementedError()
# all complex types are represented by dictionaries
complex_types = {
'record': RecordSchema,
'enum': EnumSchema,
'array': ArraySchema,
'map': MapSchema,
'fixed': FixedSchema
}
def parse_schema(schema, names=None):
if type(schema) is list:
return UnionSchema(schema)
elif type(schema) is dict:
if schema['type'] in complex_types:
return complex_types[schema['type']](schema)
elif schema['type'] in PRIMITIVE:
# could add if 'logicalType' in schema as a double guard
# this handles annotated schemas and logical types
# ignores everything else in the dictionary
return parse_schema(schema['type'])
elif schema in PRIMITIVE:
return PrimitiveSchema(schema)
raise Exception("Invalid schema: {}".format(schema))
```
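A quick sketch of how parse_schema above walks a definition; the record itself is illustrative and not taken from the spavro test suite.
```python
# A record with a primitive field and a union field.
schema = parse_schema({
    "type": "record",
    "name": "point",
    "fields": [{"name": "x", "type": "int"}, {"name": "y", "type": ["null", "long"]}],
})
print(schema)         # record
print(schema.fields)  # [<RecordField type='int'>, <RecordField type='union'>]
```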
#### File: spavro/test/test_schema_validation.py
```python
import unittest
from six import BytesIO
import spavro.schema
import spavro.io
from spavro.io import FastDatumWriter
# from spavro.io import SlowDatumWriter as FastDatumWriter
from spavro.exceptions import AvroTypeException
valid_data = (
("int_at_the_upper_boundary", 2147483647, '"int"'),
("int_at_the_lower_boundary", -2147483648, '"int"'),
("long_at_the_upper_boundary", 9223372036854775807, '"long"'),
("long_at_the_lower_boundary", -9223372036854775808, '"long"'),
("interger_data_float_schema", 123, '"float"'),
# booleans are considered an integer type? fascinating
("boolean_data_float_schema", True, '"float"'),
("boolean_data_integer_schema", True, '"int"'),
("optional_field", {"value": 100}, '{"fields": [{"type": ["null", "string"], "name": "id"}, {"type": "int", "name": "value"}], "type": "record", "name": "test_schema"}'),
("fixed", b'\x01\x01\x01\x01\x01\x01\x01\x01', '{"name": "testfix", "type": "fixed", "size": 8}'),
("make_sure_null_term_doesnt_break", b'\x01\x01\x00\x01\x01\x01\x01\x01', '{"name": "testfix", "type": "fixed", "size": 8}'),
)
invalid_data = (
("missing_required_field_1", {"value": 100}, '{"fields": [{"type": "string", "name": "id"}, {"type": "int", "name": "value"}], "type": "record", "name": "test_schema"}'),
("missing_required_field_2", {"id": "bork"}, '{"fields": [{"type": "string", "name": "id"}, {"type": "int", "name": "value"}], "type": "record", "name": "test_schema"}'),
("string_data_long_schema", u'boom!', '"long"'),
("string_data_boolean_schema", u"boom!", '"boolean"'),
("int_data_boolean_schema", 123, '"boolean"'),
("float_data_int_schema", 123.456, '"long"'),
("null_data_string_schema", None, '"string"'),
("null_data_int_schema", None, '"int"'),
("null_data_boolean_schema", None, '"boolean"'),
("mismatch_fixed_data_fixed_schema", b'\x97', '{"name": "testfix", "type": "fixed", "size": 8}'),
("int_too_big", 2147483648, '"int"'),
("int_too_small", -2147483649, '"int"'),
("long_too_big", 9223372036854775808, '"long"'),
("long_too_small", -9223372036854775809, '"long"'),
("wrong_data_in_array", [1, u'B'], '{"type": "array", "items": "int"}'),
)
class TestValidData(unittest.TestCase):
pass
def create_good_case(schema, datum):
write_schema = spavro.schema.parse(schema)
def test_write_good_data(self):
fastbuff = BytesIO()
fastencoder = spavro.io.FastBinaryEncoder(fastbuff)
fdw = FastDatumWriter(write_schema)
fdw.write(datum, fastencoder)
return test_write_good_data
def create_exception_case(schema, datum):
print(schema)
write_schema = spavro.schema.parse(schema)
def test_write_invalid_data(self):
with self.assertRaises(AvroTypeException) as context:
fastbuff = BytesIO()
fastencoder = spavro.io.FastBinaryEncoder(fastbuff)
fdw = FastDatumWriter(write_schema)
fdw.write(datum, fastencoder)
print(context.exception)
return test_write_invalid_data
def make_good_cases(cases):
for name, datum, schema in cases:
test_method = create_good_case(schema, datum)
test_method.__name__ = 'test_good_data_{}'.format(name)
setattr(TestValidData, test_method.__name__, test_method)
def make_exception_cases(cases):
for name, datum, schema in cases:
test_method = create_exception_case(schema, datum)
test_method.__name__ = 'test_invalid_data_{}'.format(name)
setattr(TestValidData, test_method.__name__, test_method)
make_good_cases(valid_data)
make_exception_cases(invalid_data)
``` |
{
"source": "johnerikhalse/esphomeyaml",
"score": 3
} |
#### File: components/binary_sensor/pn532.py
```python
import voluptuous as vol
import esphomeyaml.config_validation as cv
from esphomeyaml.components import binary_sensor
from esphomeyaml.components.pn532 import PN532Component
from esphomeyaml.const import CONF_NAME, CONF_UID
from esphomeyaml.core import HexInt
from esphomeyaml.helpers import ArrayInitializer, get_variable
DEPENDENCIES = ['pn532']
CONF_PN532_ID = 'pn532_id'
def validate_uid(value):
value = cv.string_strict(value)
for x in value.split('-'):
if len(x) != 2:
raise vol.Invalid("Each part (separated by '-') of the UID must be two characters "
"long.")
try:
x = int(x, 16)
except ValueError:
raise vol.Invalid("Valid characters for parts of a UID are 0123456789ABCDEF.")
if x < 0 or x > 255:
raise vol.Invalid("Valid values for UID parts (separated by '-') are 00 to FF")
return value
PLATFORM_SCHEMA = cv.nameable(binary_sensor.BINARY_SENSOR_PLATFORM_SCHEMA.extend({
vol.Required(CONF_UID): validate_uid,
cv.GenerateID(CONF_PN532_ID): cv.use_variable_id(PN532Component)
}))
def to_code(config):
hub = None
for hub in get_variable(config[CONF_PN532_ID]):
yield
addr = [HexInt(int(x, 16)) for x in config[CONF_UID].split('-')]
rhs = hub.make_tag(config[CONF_NAME], ArrayInitializer(*addr, multiline=False))
binary_sensor.register_binary_sensor(rhs, config)
```
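For illustration, validate_uid accepts hyphen-separated pairs of hex characters and rejects anything else (assuming cv.string_strict passes a plain string through unchanged).
```python
# Illustrative UID values only.
print(validate_uid("74-10-37-94"))  # '74-10-37-94'
# validate_uid("74-10-371") would raise vol.Invalid: each part must be two characters long
```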
#### File: components/binary_sensor/rdm6300.py
```python
import voluptuous as vol
import esphomeyaml.config_validation as cv
from esphomeyaml.components import binary_sensor, rdm6300
from esphomeyaml.const import CONF_NAME, CONF_UID
from esphomeyaml.helpers import get_variable
DEPENDENCIES = ['rdm6300']
CONF_RDM6300_ID = 'rdm6300_id'
PLATFORM_SCHEMA = cv.nameable(binary_sensor.BINARY_SENSOR_PLATFORM_SCHEMA.extend({
vol.Required(CONF_UID): cv.uint32_t,
cv.GenerateID(CONF_RDM6300_ID): cv.use_variable_id(rdm6300.RDM6300Component)
}))
def to_code(config):
hub = None
for hub in get_variable(config[CONF_RDM6300_ID]):
yield
rhs = hub.make_card(config[CONF_NAME], config[CONF_UID])
binary_sensor.register_binary_sensor(rhs, config)
```
#### File: components/cover/__init__.py
```python
import esphomeyaml.config_validation as cv
from esphomeyaml.const import CONF_ID, CONF_MQTT_ID, CONF_INTERNAL
from esphomeyaml.helpers import Pvariable, esphomelib_ns, setup_mqtt_component, add
PLATFORM_SCHEMA = cv.PLATFORM_SCHEMA.extend({
})
cover_ns = esphomelib_ns.namespace('cover')
Cover = cover_ns.Cover
MQTTCoverComponent = cover_ns.MQTTCoverComponent
CoverState = cover_ns.CoverState
COVER_OPEN = cover_ns.COVER_OPEN
COVER_CLOSED = cover_ns.COVER_CLOSED
OpenAction = cover_ns.OpenAction
CloseAction = cover_ns.CloseAction
StopAction = cover_ns.StopAction
COVER_SCHEMA = cv.MQTT_COMMAND_COMPONENT_SCHEMA.extend({
cv.GenerateID(): cv.declare_variable_id(Cover),
cv.GenerateID(CONF_MQTT_ID): cv.declare_variable_id(MQTTCoverComponent),
})
COVER_PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(COVER_SCHEMA.schema)
def setup_cover_core_(cover_var, mqtt_var, config):
if CONF_INTERNAL in config:
add(cover_var.set_internal(config[CONF_INTERNAL]))
setup_mqtt_component(mqtt_var, config)
def setup_cover(cover_obj, mqtt_obj, config):
cover_var = Pvariable(config[CONF_ID], cover_obj, has_side_effects=False)
mqtt_var = Pvariable(config[CONF_MQTT_ID], mqtt_obj, has_side_effects=False)
setup_cover_core_(cover_var, mqtt_var, config)
BUILD_FLAGS = '-DUSE_COVER'
```
#### File: esphomeyaml/components/esp32_touch.py
```python
import voluptuous as vol
from esphomeyaml import config_validation as cv
from esphomeyaml.components import binary_sensor
from esphomeyaml.const import CONF_ID, CONF_SETUP_MODE, CONF_IIR_FILTER, \
CONF_SLEEP_DURATION, CONF_MEASUREMENT_DURATION, CONF_LOW_VOLTAGE_REFERENCE, \
CONF_HIGH_VOLTAGE_REFERENCE, CONF_VOLTAGE_ATTENUATION, ESP_PLATFORM_ESP32
from esphomeyaml.core import TimePeriod
from esphomeyaml.helpers import App, Pvariable, add, global_ns
ESP_PLATFORMS = [ESP_PLATFORM_ESP32]
def validate_voltage(values):
def validator(value):
if isinstance(value, float) and value.is_integer():
value = int(value)
value = cv.string(value)
if not value.endswith('V'):
value += 'V'
return cv.one_of(*values)(value)
return validator
LOW_VOLTAGE_REFERENCE = {
'0.5V': global_ns.TOUCH_LVOLT_0V5,
'0.6V': global_ns.TOUCH_LVOLT_0V6,
'0.7V': global_ns.TOUCH_LVOLT_0V7,
'0.8V': global_ns.TOUCH_LVOLT_0V8,
}
HIGH_VOLTAGE_REFERENCE = {
'2.4V': global_ns.TOUCH_HVOLT_2V4,
'2.5V': global_ns.TOUCH_HVOLT_2V5,
'2.6V': global_ns.TOUCH_HVOLT_2V6,
'2.7V': global_ns.TOUCH_HVOLT_2V7,
}
VOLTAGE_ATTENUATION = {
'1.5V': global_ns.TOUCH_HVOLT_ATTEN_1V5,
'1V': global_ns.TOUCH_HVOLT_ATTEN_1V,
'0.5V': global_ns.TOUCH_HVOLT_ATTEN_0V5,
'0V': global_ns.TOUCH_HVOLT_ATTEN_0V,
}
ESP32TouchComponent = binary_sensor.binary_sensor_ns.ESP32TouchComponent
CONFIG_SCHEMA = vol.Schema({
cv.GenerateID(): cv.declare_variable_id(ESP32TouchComponent),
vol.Optional(CONF_SETUP_MODE): cv.boolean,
vol.Optional(CONF_IIR_FILTER): cv.positive_time_period_milliseconds,
vol.Optional(CONF_SLEEP_DURATION):
vol.All(cv.positive_time_period, vol.Range(max=TimePeriod(microseconds=436906))),
vol.Optional(CONF_MEASUREMENT_DURATION):
vol.All(cv.positive_time_period, vol.Range(max=TimePeriod(microseconds=8192))),
vol.Optional(CONF_LOW_VOLTAGE_REFERENCE): validate_voltage(LOW_VOLTAGE_REFERENCE),
vol.Optional(CONF_HIGH_VOLTAGE_REFERENCE): validate_voltage(HIGH_VOLTAGE_REFERENCE),
vol.Optional(CONF_VOLTAGE_ATTENUATION): validate_voltage(VOLTAGE_ATTENUATION),
})
def to_code(config):
rhs = App.make_esp32_touch_component()
touch = Pvariable(config[CONF_ID], rhs)
if CONF_SETUP_MODE in config:
add(touch.set_setup_mode(config[CONF_SETUP_MODE]))
if CONF_IIR_FILTER in config:
add(touch.set_iir_filter(config[CONF_IIR_FILTER]))
if CONF_SLEEP_DURATION in config:
sleep_duration = int(config[CONF_SLEEP_DURATION].total_microseconds * 0.15)
add(touch.set_sleep_duration(sleep_duration))
if CONF_MEASUREMENT_DURATION in config:
measurement_duration = int(config[CONF_MEASUREMENT_DURATION].total_microseconds * 0.125)
add(touch.set_measurement_duration(measurement_duration))
if CONF_LOW_VOLTAGE_REFERENCE in config:
value = LOW_VOLTAGE_REFERENCE[config[CONF_LOW_VOLTAGE_REFERENCE]]
add(touch.set_low_voltage_reference(value))
if CONF_HIGH_VOLTAGE_REFERENCE in config:
value = HIGH_VOLTAGE_REFERENCE[config[CONF_HIGH_VOLTAGE_REFERENCE]]
add(touch.set_high_voltage_reference(value))
if CONF_VOLTAGE_ATTENUATION in config:
value = VOLTAGE_ATTENUATION[config[CONF_VOLTAGE_ATTENUATION]]
add(touch.set_voltage_attenuation(value))
BUILD_FLAGS = '-DUSE_ESP32_TOUCH_BINARY_SENSOR'
```
#### File: components/fan/__init__.py
```python
import voluptuous as vol
import esphomeyaml.config_validation as cv
from esphomeyaml.const import CONF_ID, CONF_MQTT_ID, CONF_OSCILLATION_COMMAND_TOPIC, \
CONF_OSCILLATION_STATE_TOPIC, CONF_SPEED_COMMAND_TOPIC, CONF_SPEED_STATE_TOPIC, CONF_INTERNAL
from esphomeyaml.helpers import Application, Pvariable, add, esphomelib_ns, setup_mqtt_component
PLATFORM_SCHEMA = cv.PLATFORM_SCHEMA.extend({
})
fan_ns = esphomelib_ns.namespace('fan')
FanState = fan_ns.FanState
MQTTFanComponent = fan_ns.MQTTFanComponent
MakeFan = Application.MakeFan
TurnOnAction = fan_ns.TurnOnAction
TurnOffAction = fan_ns.TurnOffAction
ToggleAction = fan_ns.ToggleAction
FanSpeed = fan_ns.FanSpeed
FAN_SPEED_OFF = fan_ns.FAN_SPEED_OFF
FAN_SPEED_LOW = fan_ns.FAN_SPEED_LOW
FAN_SPEED_MEDIUM = fan_ns.FAN_SPEED_MEDIUM
FAN_SPEED_HIGH = fan_ns.FAN_SPEED_HIGH
FAN_SCHEMA = cv.MQTT_COMMAND_COMPONENT_SCHEMA.extend({
cv.GenerateID(): cv.declare_variable_id(FanState),
cv.GenerateID(CONF_MQTT_ID): cv.declare_variable_id(MQTTFanComponent),
vol.Optional(CONF_OSCILLATION_STATE_TOPIC): cv.publish_topic,
vol.Optional(CONF_OSCILLATION_COMMAND_TOPIC): cv.subscribe_topic,
})
FAN_PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(FAN_SCHEMA.schema)
FAN_SPEEDS = {
'OFF': FAN_SPEED_OFF,
'LOW': FAN_SPEED_LOW,
'MEDIUM': FAN_SPEED_MEDIUM,
'HIGH': FAN_SPEED_HIGH,
}
def validate_fan_speed(value):
return vol.All(vol.Upper, cv.one_of(*FAN_SPEEDS))(value)
def setup_fan_core_(fan_var, mqtt_var, config):
if CONF_INTERNAL in config:
add(fan_var.set_internal(config[CONF_INTERNAL]))
if CONF_OSCILLATION_STATE_TOPIC in config:
add(mqtt_var.set_custom_oscillation_state_topic(config[CONF_OSCILLATION_STATE_TOPIC]))
if CONF_OSCILLATION_COMMAND_TOPIC in config:
add(mqtt_var.set_custom_oscillation_command_topic(config[CONF_OSCILLATION_COMMAND_TOPIC]))
if CONF_SPEED_STATE_TOPIC in config:
add(mqtt_var.set_custom_speed_state_topic(config[CONF_SPEED_STATE_TOPIC]))
if CONF_SPEED_COMMAND_TOPIC in config:
add(mqtt_var.set_custom_speed_command_topic(config[CONF_SPEED_COMMAND_TOPIC]))
setup_mqtt_component(mqtt_var, config)
def setup_fan(fan_obj, mqtt_obj, config):
fan_var = Pvariable(config[CONF_ID], fan_obj, has_side_effects=False)
mqtt_var = Pvariable(config[CONF_MQTT_ID], mqtt_obj, has_side_effects=False)
setup_fan_core_(fan_var, mqtt_var, config)
BUILD_FLAGS = '-DUSE_FAN'
```
#### File: components/light/binary.py
```python
import voluptuous as vol
import esphomeyaml.config_validation as cv
from esphomeyaml.components import light
from esphomeyaml.const import CONF_MAKE_ID, CONF_NAME, CONF_OUTPUT, CONF_EFFECTS
from esphomeyaml.helpers import App, get_variable, variable
PLATFORM_SCHEMA = cv.nameable(light.LIGHT_PLATFORM_SCHEMA.extend({
cv.GenerateID(CONF_MAKE_ID): cv.declare_variable_id(light.MakeLight),
vol.Required(CONF_OUTPUT): cv.use_variable_id(None),
vol.Optional(CONF_EFFECTS): light.validate_effects(light.BINARY_EFFECTS),
}))
def to_code(config):
output = None
for output in get_variable(config[CONF_OUTPUT]):
yield
rhs = App.make_binary_light(config[CONF_NAME], output)
light_struct = variable(config[CONF_MAKE_ID], rhs)
light.setup_light(light_struct.Pstate, light_struct.Pmqtt, config)
```
#### File: components/sensor/htu21d.py
```python
import voluptuous as vol
import esphomeyaml.config_validation as cv
from esphomeyaml.components import sensor
from esphomeyaml.const import CONF_HUMIDITY, CONF_MAKE_ID, CONF_NAME, CONF_TEMPERATURE, \
CONF_UPDATE_INTERVAL
from esphomeyaml.helpers import App, Application, variable
DEPENDENCIES = ['i2c']
MakeHTU21DSensor = Application.MakeHTU21DSensor
PLATFORM_SCHEMA = sensor.PLATFORM_SCHEMA.extend({
cv.GenerateID(CONF_MAKE_ID): cv.declare_variable_id(MakeHTU21DSensor),
vol.Required(CONF_TEMPERATURE): cv.nameable(sensor.SENSOR_SCHEMA),
vol.Required(CONF_HUMIDITY): cv.nameable(sensor.SENSOR_SCHEMA),
vol.Optional(CONF_UPDATE_INTERVAL): cv.update_interval,
})
def to_code(config):
rhs = App.make_htu21d_sensor(config[CONF_TEMPERATURE][CONF_NAME],
config[CONF_HUMIDITY][CONF_NAME],
config.get(CONF_UPDATE_INTERVAL))
htu21d = variable(config[CONF_MAKE_ID], rhs)
sensor.setup_sensor(htu21d.Phtu21d.Pget_temperature_sensor(), htu21d.Pmqtt_temperature,
config[CONF_TEMPERATURE])
sensor.setup_sensor(htu21d.Phtu21d.Pget_humidity_sensor(), htu21d.Pmqtt_humidity,
config[CONF_HUMIDITY])
BUILD_FLAGS = '-DUSE_HTU21D_SENSOR'
```
#### File: components/sensor/max6675.py
```python
import voluptuous as vol
import esphomeyaml.config_validation as cv
from esphomeyaml import pins
from esphomeyaml.components import sensor
from esphomeyaml.components.spi import SPIComponent
from esphomeyaml.const import CONF_CS_PIN, CONF_MAKE_ID, CONF_NAME, CONF_SPI_ID, \
CONF_UPDATE_INTERVAL
from esphomeyaml.helpers import App, Application, get_variable, gpio_output_pin_expression, variable
MakeMAX6675Sensor = Application.MakeMAX6675Sensor
PLATFORM_SCHEMA = cv.nameable(sensor.SENSOR_PLATFORM_SCHEMA.extend({
cv.GenerateID(CONF_MAKE_ID): cv.declare_variable_id(MakeMAX6675Sensor),
cv.GenerateID(CONF_SPI_ID): cv.use_variable_id(SPIComponent),
vol.Required(CONF_CS_PIN): pins.gpio_output_pin_schema,
vol.Optional(CONF_UPDATE_INTERVAL): cv.update_interval,
}))
def to_code(config):
spi = None
for spi in get_variable(config[CONF_SPI_ID]):
yield
cs = None
for cs in gpio_output_pin_expression(config[CONF_CS_PIN]):
yield
rhs = App.make_max6675_sensor(config[CONF_NAME], spi, cs,
config.get(CONF_UPDATE_INTERVAL))
make = variable(config[CONF_MAKE_ID], rhs)
sensor.setup_sensor(make.Pmax6675, make.Pmqtt, config)
BUILD_FLAGS = '-DUSE_MAX6675_SENSOR'
```
#### File: components/sensor/tsl2561.py
```python
import voluptuous as vol
import esphomeyaml.config_validation as cv
from esphomeyaml.components import sensor
from esphomeyaml.const import CONF_ADDRESS, CONF_GAIN, CONF_INTEGRATION_TIME, CONF_MAKE_ID, \
CONF_NAME, CONF_UPDATE_INTERVAL
from esphomeyaml.helpers import App, Application, add, variable
DEPENDENCIES = ['i2c']
INTEGRATION_TIMES = {
14: sensor.sensor_ns.TSL2561_INTEGRATION_14MS,
101: sensor.sensor_ns.TSL2561_INTEGRATION_101MS,
402: sensor.sensor_ns.TSL2561_INTEGRATION_402MS,
}
GAINS = {
'1X': sensor.sensor_ns.TSL2561_GAIN_1X,
'16X': sensor.sensor_ns.TSL2561_GAIN_16X,
}
CONF_IS_CS_PACKAGE = 'is_cs_package'
def validate_integration_time(value):
value = cv.positive_time_period_milliseconds(value).total_milliseconds
if value not in INTEGRATION_TIMES:
raise vol.Invalid(u"Unsupported integration time {}.".format(value))
return value
MakeTSL2561Sensor = Application.MakeTSL2561Sensor
PLATFORM_SCHEMA = cv.nameable(sensor.SENSOR_PLATFORM_SCHEMA.extend({
cv.GenerateID(CONF_MAKE_ID): cv.declare_variable_id(MakeTSL2561Sensor),
vol.Optional(CONF_ADDRESS, default=0x39): cv.i2c_address,
vol.Optional(CONF_INTEGRATION_TIME): validate_integration_time,
vol.Optional(CONF_GAIN): vol.All(vol.Upper, cv.one_of(*GAINS)),
vol.Optional(CONF_IS_CS_PACKAGE): cv.boolean,
vol.Optional(CONF_UPDATE_INTERVAL): cv.update_interval,
}))
def to_code(config):
rhs = App.make_tsl2561_sensor(config[CONF_NAME], config[CONF_ADDRESS],
config.get(CONF_UPDATE_INTERVAL))
make_tsl = variable(config[CONF_MAKE_ID], rhs)
tsl2561 = make_tsl.Ptsl2561
if CONF_INTEGRATION_TIME in config:
add(tsl2561.set_integration_time(INTEGRATION_TIMES[config[CONF_INTEGRATION_TIME]]))
if CONF_GAIN in config:
add(tsl2561.set_gain(GAINS[config[CONF_GAIN]]))
if CONF_IS_CS_PACKAGE in config:
add(tsl2561.set_is_cs_package(config[CONF_IS_CS_PACKAGE]))
sensor.setup_sensor(tsl2561, make_tsl.Pmqtt, config)
BUILD_FLAGS = '-DUSE_TSL2561'
```
#### File: esphomeyaml/components/status_led.py
```python
import voluptuous as vol
from esphomeyaml import config_validation as cv, pins
from esphomeyaml.const import CONF_ID, CONF_PIN
from esphomeyaml.helpers import App, Pvariable, esphomelib_ns, gpio_output_pin_expression
StatusLEDComponent = esphomelib_ns.StatusLEDComponent
CONFIG_SCHEMA = vol.Schema({
cv.GenerateID(): cv.declare_variable_id(StatusLEDComponent),
vol.Optional(CONF_PIN): pins.gpio_output_pin_schema,
})
def to_code(config):
pin = None
for pin in gpio_output_pin_expression(config[CONF_PIN]):
yield
rhs = App.make_status_led(pin)
Pvariable(config[CONF_ID], rhs)
BUILD_FLAGS = '-DUSE_STATUS_LED'
```
#### File: components/switch/gpio.py
```python
import voluptuous as vol
import esphomeyaml.config_validation as cv
from esphomeyaml import pins
from esphomeyaml.components import switch
from esphomeyaml.const import CONF_MAKE_ID, CONF_NAME, CONF_PIN
from esphomeyaml.helpers import App, Application, gpio_output_pin_expression, variable
MakeGPIOSwitch = Application.MakeGPIOSwitch
PLATFORM_SCHEMA = cv.nameable(switch.SWITCH_PLATFORM_SCHEMA.extend({
cv.GenerateID(CONF_MAKE_ID): cv.declare_variable_id(MakeGPIOSwitch),
vol.Required(CONF_PIN): pins.gpio_output_pin_schema,
}))
def to_code(config):
pin = None
for pin in gpio_output_pin_expression(config[CONF_PIN]):
yield
rhs = App.make_gpio_switch(config[CONF_NAME], pin)
gpio = variable(config[CONF_MAKE_ID], rhs)
switch.setup_switch(gpio.Pswitch_, gpio.Pmqtt, config)
BUILD_FLAGS = '-DUSE_GPIO_SWITCH'
```
#### File: components/switch/output.py
```python
import voluptuous as vol
import esphomeyaml.config_validation as cv
from esphomeyaml.components import switch
from esphomeyaml.const import CONF_MAKE_ID, CONF_NAME, CONF_OUTPUT
from esphomeyaml.helpers import App, Application, get_variable, variable
MakeSimpleSwitch = Application.MakeSimpleSwitch
PLATFORM_SCHEMA = cv.nameable(switch.SWITCH_PLATFORM_SCHEMA.extend({
cv.GenerateID(CONF_MAKE_ID): cv.declare_variable_id(MakeSimpleSwitch),
vol.Required(CONF_OUTPUT): cv.use_variable_id(None),
}))
def to_code(config):
output = None
for output in get_variable(config[CONF_OUTPUT]):
yield
rhs = App.make_simple_switch(config[CONF_NAME], output)
gpio = variable(config[CONF_MAKE_ID], rhs)
switch.setup_switch(gpio.Pswitch_, gpio.Pmqtt, config)
BUILD_FLAGS = '-DUSE_SIMPLE_SWITCH'
```
#### File: esphomeyaml/components/web_server.py
```python
import logging
import voluptuous as vol
import esphomeyaml.config_validation as cv
from esphomeyaml import core
from esphomeyaml.const import CONF_PORT, CONF_JS_URL, CONF_CSS_URL, CONF_ID, ESP_PLATFORM_ESP32
from esphomeyaml.helpers import App, add, Pvariable, esphomelib_ns
_LOGGER = logging.getLogger(__name__)
WebServer = esphomelib_ns.WebServer
CONFIG_SCHEMA = vol.Schema({
cv.GenerateID(): cv.declare_variable_id(WebServer),
vol.Optional(CONF_PORT): cv.port,
vol.Optional(CONF_CSS_URL): cv.string,
vol.Optional(CONF_JS_URL): cv.string,
})
def to_code(config):
rhs = App.init_web_server(config.get(CONF_PORT))
web_server = Pvariable(config[CONF_ID], rhs)
if CONF_CSS_URL in config:
add(web_server.set_css_url(config[CONF_CSS_URL]))
if CONF_JS_URL in config:
add(web_server.set_js_url(config[CONF_JS_URL]))
BUILD_FLAGS = '-DUSE_WEB_SERVER'
def lib_deps(config):
if core.ESP_PLATFORM == ESP_PLATFORM_ESP32:
return 'FS'
return ''
``` |
{
"source": "johnernaut/syspro_rest",
"score": 3
} |
#### File: syspro_rest/syspro_rest/syspro_rest.py
```python
from urllib.parse import urlencode
import xmltodict
from . import requests
class APIError(Exception):
""" Custom error class """
pass
class SysproRest(object):
""" A python interface to the Syspro REST API's """
def __init__(self, **kwargs):
self.base_url = kwargs.get('url', '')
self.operator = kwargs.get('operator', '')
self.operator_pass = kwargs.get('operator_pass', '')
self.company = kwargs.get('company', '')
self.company_pass = kwargs.get('company_pass', '')
self.login()
def login(self):
""" logs into the Syspro REST service """
        params = {'Operator': self.operator,
                  'OperatorPassword': self.operator_pass,
                  'CompanyId': self.company,
                  'CompanyPassword': self.company_pass}
resp = self._make_request('/Logon', params)
if 'ERROR' in resp.text:
raise APIError(resp.text.strip())
self.user_token = resp.text.strip()
def system_information(self):
""" gets the Syspro system information """
resp = self._make_request('/SystemInformation')
return resp.text
def get_logon_profile(self):
""" gets the current users logon profile """
params = {'UserId': self.user_token}
resp = self._make_request('/GetLogonProfile', params)
return xmltodict.parse(resp.text)
def _make_request(self, url, parameters=None):
final_url = self._build_url(url, parameters)
return requests.get(final_url)
def _build_url(self, url, params=None):
new_url = self.base_url + url
if params and len(params) > 0:
return new_url + '?' + urlencode(dict((k, v) for k, v in params.items()
if v is not None))
return new_url
``` |
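A hypothetical usage sketch; the URL and credentials below are invented, and a reachable Syspro REST endpoint is assumed since login() runs in the constructor.
```python
# Placeholder values for illustration only.
rest = SysproRest(url="http://syspro.example.com:8080/SYSPROWCFService/Rest",
                  operator="ADMIN", operator_pass="secret",
                  company="0", company_pass="")
print(rest.system_information())    # raw text from /SystemInformation
profile = rest.get_logon_profile()  # XML response parsed into a dict by xmltodict
```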
{
"source": "johnernberg/nala",
"score": 2
} |
#### File: nala/nala/api.py
```python
import os
import re
from fnmatch import fnmatch
from .inspect import ForgivingDeclarationParser
from .generator import FileGenerator
from .generator import header_file
from .generator import does_generated_files_exist
RE_MOCKED_FUNC = re.compile(
r'(_mock|_mock_once|_mock_ignore_in|_mock_ignore_in_once|_mock_none'
r'|_mock_implementation)\s*\(')
RE_REAL_VARIADIC_FUNCTIONS = re.compile(
r'// NALA_REAL_VARIADIC_FUNCTION_BEGIN (.*?)\n'
r'(.*?)\n'
r'// NALA_REAL_VARIADIC_FUNCTION_END',
re.MULTILINE | re.DOTALL)
def find_mocked_function_name(expanded_source_code, index):
name = ''
while True:
index -= 1
char = expanded_source_code[index]
if char in ' \t\n\r':
break
name += char
if expanded_source_code[index - 4:index] == 'void':
return None
return name[::-1]
def find_mocked_functions(expanded_source_code):
functions = set()
for match in RE_MOCKED_FUNC.finditer(expanded_source_code):
function_name = find_mocked_function_name(expanded_source_code,
match.start())
if function_name is not None:
functions.add(function_name)
return functions
def find_cached_mocked_functions(nala_mocks_h):
functions = set()
with open(nala_mocks_h, 'r') as fin:
for line in fin:
if line.startswith('// NALA_DECLARATION'):
functions.add(line.split()[-1])
return functions
def has_implementation(function_name, implementation, no_implementation):
for pattern in implementation:
if fnmatch(function_name, pattern):
return True
for pattern in no_implementation:
if fnmatch(function_name, pattern):
return False
return None
def load_real_variadic_functions(filename):
with open(filename, 'r') as fin:
contents = fin.read()
return {
mo[0]: mo[1]
for mo in RE_REAL_VARIADIC_FUNCTIONS.findall(contents)
}
def generate_mocks(expanded_code,
output_directory,
rename_parameters_file,
real_variadic_functions_file,
cache,
implementation,
no_implementation):
"""Identify mocked functions and generate the source, header and
linker files.
"""
functions = find_mocked_functions(expanded_code)
if cache and does_generated_files_exist(output_directory):
cached_mocked_functions = find_cached_mocked_functions(
header_file(output_directory))
generate = (functions != cached_mocked_functions)
else:
generate = True
if not generate:
return
parser = ForgivingDeclarationParser(expanded_code,
functions,
rename_parameters_file)
if real_variadic_functions_file:
real_variadic_functions = load_real_variadic_functions(
real_variadic_functions_file)
else:
real_variadic_functions = {}
generator = FileGenerator(parser)
for function in parser.mocked_functions:
generator.add_mock(function,
has_implementation(function.name,
implementation,
no_implementation),
real_variadic_functions.get(function.name, ''))
generator.write_to_directory(output_directory)
``` |
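A small illustration (not part of nala itself) of how mocked functions are discovered in expanded source text by the helpers above.
```python
# RE_MOCKED_FUNC matches the _mock... call suffixes; the token just before the match is taken as the function name.
expanded = """
void test_add(void)
{
    foo_mock(42, 0);
    bar_mock_once("x", 1);
}
"""
print(sorted(find_mocked_functions(expanded)))  # ['bar', 'foo']
```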
{
"source": "JohnEskimSmith/export-elasticmq",
"score": 2
} |
#### File: export-elasticmq/lib/upload_settings.py
```python
__author__ = "SAI"
__license__ = "GPLv3"
__email__ = "<EMAIL>"
__status__ = "Dev"
__all__ = ["parse_args", "parse_settings", "AppConfig", "RecordOperation", "create_default_tags_for_routes_messages"]
from base64 import standard_b64encode
from dataclasses import dataclass
from random import shuffle
from aiohttp import ClientSession, TraceConfig
from .upload_utils import access_dot_path, return_dict_for_packed_record, return_value_from_dict_extended, check_iter
from ujson import load as ujson_load, dumps as ujson_dumps, loads as ujson_loads
import argparse
from typing import Callable, Optional, Dict, List, Any, Tuple, Union
import geoip2.database
from ipaddress import ip_address
from datetime import datetime
from yaml import (FullLoader as yaml_FullLoader,
load as yaml_load,
)
from urllib.parse import urlparse, urlunparse
from itertools import cycle
from aiohttp import BasicAuth
from pathlib import Path
import sys
from os import path as os_path, access as os_access, R_OK, sep, environ
import importlib
from asyncio import get_event_loop
from inspect import iscoroutinefunction as inspect_iscoroutinefunction
from .upload_sqs import return_queue_url_realtime
from contextlib import AsyncExitStack
from aiobotocore.session import AioSession
from aiobotocore.config import AioConfig
CONST_PATH_TO_MODULES = '/multimodules'
CONST_PATH_TO_CONVERTERS = 'converters'
class RecordOperation:
def __init__(self,
settings_converter: Dict,
settings_create_record: Dict,
settings_filter_record: Dict,
settings_custom_filter_record: Dict,
use_standart_filter: bool,
use_custom_filter: bool,
root_dir_to_modules: str,
root_dir_to_converters: str,
logger):
self.logger = logger
self.unfiltred = False
self.async_filter = False
if 'python' in [settings_converter.get('type'),
settings_create_record.get('type')] or use_custom_filter:
sys.path.append(root_dir_to_modules)
sys.path.append(root_dir_to_converters)
# self.converter_raw_function - function need for convert str line to Python Dict
        # self.converter_raw_function converts a raw input line into a Python dict
self.special_converter_add = ','
if settings_converter['type'] == 'json':
self.raw_record_format = 'json'
self.converter_raw_function: Callable = self.simple_json
elif settings_converter['type'] == 'csv':
self.raw_record_format = 'csv'
self.converter_raw_function: Callable = self.simple_csv
self.special_converter_add = settings_converter.get('special', ',')
if not self.special_converter_add:
self.special_converter_add = ','
elif settings_converter['type'] == 'python':
self.raw_record_format = 'python'
_list_args = [settings_converter.get('path'), settings_converter.get('module')]
if not any(_list_args):
self.logger.error('converter: where settings for module - converter?')
exit(1)
if settings_converter.get('module'):
path_to_module = f'{root_dir_to_converters}{sep}{settings_converter["module"]}'
elif settings_converter.get('path'):
path_to_module: str = settings_converter['path']
try:
name_function: str = settings_converter.get('function', 'main')
if not name_function:
name_function = 'main'
self.converter_raw_function: Callable = load_python_module(path_to_module, name_function)
if self.converter_raw_function:
self.logger.info(f'loaded module: {path_to_module}, function:{name_function}')
else:
self.logger.error(f'not found module: {path_to_module}')
exit(1)
except Exception as e:
logger.error(e)
exit(1)
self.create_records_function = None
        # self.create_records_function converts an app-level dict into a ready-to-send record dict
if settings_create_record['type'] == 'default':
self.create_records_function: Callable = self.save_default # input single record
elif settings_create_record['type'] == 'json':
self.create_records_function: Callable = self.save_json # input single record
elif settings_create_record['type'] == 'python':
_list_args = [settings_create_record.get('path'), settings_create_record.get('module')]
if not any(_list_args):
self.logger.error('converter: where settings for module - create records?')
exit(1)
if settings_create_record.get('module'):
path_to_module = f'{root_dir_to_modules}{sep}{settings_create_record.get("module")}'
elif settings_create_record.get('path'):
path_to_module: str = settings_create_record.get('path')
try:
name_function: str = settings_create_record.get('function', 'main')
if not name_function:
name_function = 'main'
self.create_records_function: Callable = load_python_module(path_to_module, name_function)
if self.create_records_function:
self.logger.info(f'loaded module: {path_to_module}, function:{name_function}')
else:
self.logger.error(f'not found module: {path_to_module}')
exit(1)
except Exception as e:
logger.error(e)
exit(1)
self.default_create_fields = []
if settings_create_record.get('default_fields'):
if isinstance(settings_create_record['default_fields'], list):
self.default_create_fields = settings_create_record['default_fields']
elif isinstance(settings_create_record['default_fields'], str):
self.default_create_fields = list(
set([c.strip() for c in settings_create_record['default_fields'].split(',')]))
if not all([self.converter_raw_function, self.create_records_function]):
logger.error('settings: raw converter or function of create records - not found')
exit(1)
self.use_standart_filter = use_standart_filter
if self.use_standart_filter:
self.standart_filter_value: str = settings_filter_record.get('value_success')
self.standart_filter_path: str = settings_filter_record.get('path')
self.standart_filter_function = self.filter_default
self.use_custom_filter = use_custom_filter
if self.use_custom_filter:
_list_args = [settings_custom_filter_record.get('path'), settings_custom_filter_record.get('module')]
if not any(_list_args):
self.logger.error('custom filters: where settings for module - custom filter records?')
exit(1)
if settings_custom_filter_record.get('module'):
path_to_module = f'{root_dir_to_modules}{sep}{settings_custom_filter_record.get("module")}'
elif settings_custom_filter_record.get('path'):
path_to_module: str = settings_custom_filter_record.get('path')
try:
name_function: str = settings_custom_filter_record.get('function', 'main')
if not name_function:
name_function = 'main'
self.custom_filter_function: Callable = load_python_module(path_to_module, name_function)
if self.custom_filter_function:
self.logger.info(f'loaded module: {path_to_module}, function:{name_function}')
self.async_filter = inspect_iscoroutinefunction(self.custom_filter_function) # async def or def
else:
self.logger.error(f'not found module: {path_to_module}')
exit(1)
except Exception as e:
logger.error(e)
exit(1)
if not self.use_standart_filter and not self.use_custom_filter:
self.unfiltred = True
def simple_csv(self, lines: List[str]) -> List[Dict]:
results = []
for line in lines:
try:
dict_line = {f'field_{i}': v for i, v in enumerate(line.split(self.special_converter_add))}
results.append(dict_line)
except:
pass
return results
@staticmethod
def simple_json(lines: List[str]) -> List[Dict]:
results = []
for line in lines:
try:
results.append(ujson_loads(line))
except:
pass
return results
def save_default(self, record: Dict, port: int) -> Optional[Dict]: # TODO rewrite with kwargs
# TODO: rethink
# region not good idea
# if self.default_create_fields:
# set_of_record_fields = set(record.keys())
# set_default_fields = set(self.default_create_fields)
# _check = set_of_record_fields & set_default_fields
# if _check != set(self.default_create_fields):
# return None
# endregion
record_fields = ["ipv4", "ip_v4_int", "port", "datetime", "data"]
ip_str = record.get('ip')
ipv4_int = 0
if ip_str:
try:
ipv4 = ip_address(ip_str)
ipv4_int = int(ipv4)
except Exception as exc:
self.logger.error(exc)
ipv4 = ip_str
else:
ipv4 = ip_str
port_for_record = int(port)
record_datetime = datetime.utcnow()
_data = [str(ipv4), ipv4_int, port_for_record, record_datetime]
result = dict(zip(record_fields, _data))
if self.raw_record_format == 'json':
result['data'] = record.get('data')
elif self.raw_record_format == 'csv':
result['data'] = record
elif self.raw_record_format == 'python':
pass
for k in list(result.keys()):
if not result[k]:
result.pop(k)
return result
@staticmethod
def save_json(record: Dict, port: int) -> Optional[Dict]: # TODO rewrite with kwargs
record_fields = ["port", "datetime"]
try:
port_for_record = int(port)
except:
port_for_record = None
record_datetime = datetime.utcnow()
_data = [port_for_record, record_datetime]
result = dict(zip(record_fields, _data))
result['data'] = record
for k in list(result.keys()):
if not result[k]:
result.pop(k)
return result
def filter_default(self, record: Dict) -> bool:
result = False
try:
value_record: Any = return_value_from_dict_extended(record, self.standart_filter_path)
value_filter: Any = self.standart_filter_value
if (value_filter and value_record) and (value_record == value_filter):
result = True
elif check_iter(value_record) and value_filter:
# result = any([value_filter == value for value in value_record])
result = value_filter in value_record
except Exception as exp:
self.logger.error(exp)
return result
@dataclass(frozen=True)
class AppConfig:
senders: int
number_lines: int
mode_read_input: str
port: int
queue_sleep: int
operations: RecordOperation
input_file: Union[str, list]
input_stdin: str
collectors: dict
sqs: dict
size_bulk_mb: int
try_retry_upload: int
app_module: str
packing_dict: dict
geoip: dict
source: dict
statistics_file: str
statistics: dict
timeout_filter: int
def load_config(path):
with Path(path).open(encoding='utf-8') as fp:
return yaml_load(fp.read(), Loader=yaml_FullLoader)
def parse_args():
parser = argparse.ArgumentParser(description='Upload data to ElasticMQ(SQS)')
parser.add_argument('-settings',
type=str,
help='path to file with settings (yaml)',
required=True)
parser.add_argument('-log',
type=str,
default='color',
help='log_format: color,json')
parser.add_argument('-mode',
type=str,
dest='mode',
default='memory',
help='mode, how to read input file: memory, asyncio, default: memory')
parser.add_argument('-statistics',
type=str,
dest='statistics',
help='save statistics to file')
parser.add_argument('-multimodules',
type=str,
dest='multimodules',
default=CONST_PATH_TO_MODULES,
help='directory path for custom Python modules')
parser.add_argument('-converters',
type=str,
dest='converters',
default=CONST_PATH_TO_CONVERTERS,
help='directory path for custom Python modules')
    # TODO: check that the settings file actually exists
return parser.parse_args()
async def parse_settings(args: argparse.Namespace, logger) -> AppConfig:
if args.settings:
settings = await parse_settings_file(args, logger)
return settings
# noinspection PyUnresolvedReferences
def load_function_from_pythonfile(py_module_path, function_name: str) -> Callable:
"""
Imports generator function from single .py file
"""
    # Path.stem removes the '.py' suffix safely (str.rstrip('.py') strips characters, not a suffix)
    module_name_like_filename_py = Path(py_module_path).stem
spec = importlib.util.spec_from_file_location(module_name_like_filename_py, py_module_path)
m = importlib.util.module_from_spec(spec)
spec.loader.exec_module(m)
return getattr(m, function_name)
def load_function_from_pythonmodule(module_name: str, function_name: str) -> Callable:
"""
Imports generator function from required module
"""
_mod = importlib.import_module(module_name)
return getattr(_mod, function_name)
# noinspection PyBroadException
def load_python_module(py_module_path: str, _name_function: str) -> Optional[Callable]:
"""
Imports generator from python file OR module
"""
if not py_module_path.endswith('.py'):
py_module_path += '.py'
_path_to_file = Path(py_module_path)
if _path_to_file.exists() and _path_to_file.is_file():
return load_function_from_pythonfile(_path_to_file, _name_function)
async def check_collector(collector: Dict,
logger) -> bool:
"""
simple check of connection to http Logcollector
:param collector:
:param logger:
:return:
"""
trace_request_ctx = {'logger': logger,
'name': collector['name'],
'check': True}
result = False
session = collector['session']
url = collector['url']
try:
async with session.head(url, trace_request_ctx=trace_request_ctx) as response:
http_status = response.status
if http_status == 200:
result = True
except:
pass
return result
async def create_sqs_client(session: AioSession, exit_stack: AsyncExitStack, auth_struct: Dict):
# Create client and add cleanup
client = await exit_stack.enter_async_context(session.create_client(**auth_struct))
return client
def create_default_tags_for_routes_messages(_sqs: Dict) -> Tuple[Dict, Dict]:
endpoint = _sqs['endpoint'].strip('/')
dest, database, space = endpoint.split('/')
_tag = {dest: {'database': database,
'space': space}
}
tags = ujson_dumps(_tag).encode('utf-8')
tags = standard_b64encode(tags).decode('utf-8')
return {'routes': tags}, {'routes': _tag}
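# Example of the transformation above (values invented): an endpoint of '/tnt1/mydb/myspace'
# yields the tag {'tnt1': {'database': 'mydb', 'space': 'myspace'}}, returned both
# JSON+base64-encoded under the 'routes' key and as the plain dict.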
async def parse_settings_file(args: argparse.Namespace, logger) -> AppConfig:
async def on_request_start(session, trace_config_ctx, params):
trace_config_ctx.start = get_event_loop().time()
async def on_request_end(session, trace_config_ctx, params):
elapsed = get_event_loop().time() - trace_config_ctx.start
if trace_config_ctx.trace_request_ctx:
if 'logger' in trace_config_ctx.trace_request_ctx and 'check' in trace_config_ctx.trace_request_ctx:
_name = trace_config_ctx.trace_request_ctx.get('name', '')
_logger = trace_config_ctx.trace_request_ctx['logger']
_logger.info("collector {} took request {}".format(_name, round(elapsed, 4)))
elif 'request' in trace_config_ctx.trace_request_ctx:
trace_config_ctx.trace_request_ctx['duration'] = round(elapsed, 4)
filename_settings: str = args.settings
try:
config: Dict = load_config(filename_settings)
except FileNotFoundError:
logger.error(f'not found: {filename_settings}')
exit(1)
senders = config.get('senders', 1024)
number_lines = config.get('number_lines', 1000)
queue_sleep = config.get('queue_sleep', 1)
try_retry_upload = config.get('try_retry_upload', 3)
size_bulk_mb = config.get('size_bulk_mb', 4)
if not isinstance(size_bulk_mb, int):
if isinstance(size_bulk_mb, str):
if size_bulk_mb.isdigit():
size_bulk_mb = int(size_bulk_mb)
if not isinstance(size_bulk_mb, int):
size_bulk_mb = 4
_collectors: Optional[Dict] = config.get('collectors')
_sqs: Optional[Dict] = config.get('sqs')
collectors = {}
sqs = {}
if not _collectors and not _sqs:
        logger.error('neither collectors settings nor sqs settings found, exit.')
exit(1)
if _collectors and _sqs:
        logger.error('conflicting settings: both queues (sqs) and collectors are configured.\n'
                     'Specify only one method for sending data.')
exit(1)
elif _collectors:
collectors_list = []
collectors = {}
_collectors: dict = config.get('collectors')
trace_config = TraceConfig()
trace_config.on_request_start.append(on_request_start)
trace_config.on_request_end.append(on_request_end)
for collector_name, value in _collectors.items():
use_gzip: bool = value.get('gzip', False)
headers = {'content-encoding': 'gzip'} if use_gzip else {}
_collector = {'name': collector_name,
'url': value['endpoint'],
'use_gzip': use_gzip,
'session': ClientSession(auth=BasicAuth(value['user'], value['password']),
headers=headers,
json_serialize=ujson_dumps,
trace_configs=[trace_config]
)
}
# TODO timeout
check_collector_status = await check_collector(_collector, logger)
if check_collector_status:
collectors_list.append(_collector)
else:
await _collector['session'].close()
if collectors_list:
shuffle(collectors_list) # random shuffle list of collectors
collectors = {'cycle': cycle(collectors_list), # like simple round-robin
'list': collectors_list}
logger.info(f'Count collectors: {len(collectors["list"])}')
else:
            logger.error('All collector hosts are down or unreachable')
exit(112)
if not collectors:
logger.error('errors with Collectors connections, exit.')
exit(1)
elif _sqs:
using_elasticmq = False
# 1.
# default tags:
tags = {}
if 'yandex' not in _sqs['endpoint_url']:
# endpoint = _sqs['endpoint'].strip('/')
# dest, database, space = endpoint.split('/')
# _tag = {dest: {'database': database,
# 'space': space}
# }
# _tag = ujson_dumps(_tag).encode('utf-8')
# _tag = standard_b64encode(_tag).decode('utf-8')
#
# tags = {'routes': _tag}
tags, _tags = create_default_tags_for_routes_messages(_sqs)
try:
_name_task = _sqs['name']
            except KeyError:
                # 'name' not set in the sqs settings: fall back to the 'space' part of the endpoint
                _name_task = next(iter(_tags['routes'].values()))['space']
currentuuid = _sqs['currentuuid']
sqsname_queue = f'Results_{_name_task}_cuuid_{currentuuid}'
if _sqs.get('region_name', '').lower() == 'elasticmq':
if 'https' in _sqs['endpoint_url'] and _sqs['use_ssl'] \
and not (_sqs.get('aws_ca_bundle', False) and
_sqs.get('client_crt', False) and
_sqs.get('client_key', False)):
                logger.error('using elasticmq with SSL, but some of client_crt, client_key or aws_ca_bundle are not set, exit')
exit(1)
else:
using_elasticmq = True
else:
if 'name_queue' not in _sqs:
sqsname_queue = 'TargetsDev'
else:
sqsname_queue = _sqs['name_queue']
keys = ['service_name', 'endpoint_url', 'region_name', 'aws_secret_access_key', 'aws_access_key_id', 'use_ssl']
init_keys = dict().fromkeys(keys)
for k in init_keys:
if k in _sqs:
init_keys[k] = _sqs[k]
if using_elasticmq:
client_crt = _sqs['client_crt']
client_key = _sqs['client_key']
config_elasticmq = AioConfig(client_cert=(client_crt, client_key))
init_keys['config'] = config_elasticmq
if not environ.get('AWS_CA_BUNDLE'):
environ['AWS_CA_BUNDLE'] = _sqs['aws_ca_bundle']
_session = AioSession()
exit_stack = AsyncExitStack()
client = await create_sqs_client(_session, exit_stack, auth_struct=init_keys)
logger.info(f'Trying to create a client for a queue: {sqsname_queue}')
queue_url = await return_queue_url_realtime(client, sqsname_queue, logger, tags=tags, auto_create=True)
if not queue_url:
logger.error(f"Uploader client can't create or access to queue: {sqsname_queue}")
exit(1)
else:
if using_elasticmq:
real_schema = urlparse(init_keys['endpoint_url'])
url_schema = urlparse(queue_url)
queue_url = url_schema._replace(scheme=real_schema.scheme,
netloc=real_schema.netloc)
queue_url = urlunparse(queue_url)
logger.info(f'Queue client created: {queue_url}')
sqs['url'] = queue_url
sqs['init_keys'] = init_keys
sqs['client'] = client
sqs['endpoint'] = _sqs['endpoint']
_input_files = []
_input_file = access_dot_path(config, 'input.file')
input_stdin = access_dot_path(config, 'input.stdin') # not implemented
if isinstance(_input_file, str):
_input_files = [_input_file]
elif isinstance(_input_file, list):
_input_files = _input_file
input_files = [f for f in _input_files]
for input_file in _input_files:
if input_file:
if not Path(input_file).exists():
logger.error(f'errors: "input.file" - file not found: {input_file}')
input_files.remove(input_file)
if len(input_files) == 0:
        logger.error('none of the input files exist')
exit(2)
try:
filename_packing_dict = config['app_module_schema']
packing_dict = return_dict_for_packed_record(filename_packing_dict, logger)
except Exception as exp:
logger.error(exp)
packing_dict = {}
    # region create Reader for GeoIP
status_geoip = config.get('geoip')
geoip = {}
if status_geoip:
# default paths for maxmind databases
asn_filename_path = 'geo/GeoLite2-ASN.mmdb'
city_filename_path = 'geo/GeoLite2-City.mmdb'
try:
if Path(asn_filename_path).exists() and Path(city_filename_path).exists() and status_geoip:
status_geoip = True
else:
status_geoip = False
        except Exception:
            logger.error('geoip: MaxMind DB files not found')
status_geoip = False
if status_geoip:
reader_asn = geoip2.database.Reader(asn_filename_path, mode=1)
reader_city = geoip2.database.Reader(city_filename_path, mode=1)
geoip = {'reader_asn': reader_asn,
'reader_city': reader_city}
app_module = config.get('app_module') # ??? need to rethink
source_file = config.get('source')
source_worker = {}
try:
if Path(source_file).exists():
with open(source_file, 'rt') as f:
source_worker = ujson_load(f)
except Exception as exp:
        logger.error(f'source file (dict describing the machine) does not exist or could not be read: {exp}')
mode_read_input = args.mode
root_dir_to_modules = ''
if os_path.isdir(args.multimodules):
if os_access(args.multimodules, R_OK):
root_dir_to_modules = args.multimodules
if not root_dir_to_modules:
logger.error(f'directory for modules not found or error with read: {args.multimodules}')
exit(2)
root_dir_to_converters = args.converters
if os_path.isdir(CONST_PATH_TO_CONVERTERS):
if os_access(CONST_PATH_TO_CONVERTERS, R_OK):
root_dir_to_converters = CONST_PATH_TO_CONVERTERS
if not root_dir_to_converters:
logger.error(f'directory for converters not found or error with read: {root_dir_to_converters}')
exit(2)
settings_converter = access_dot_path(config, 'converter')
settings_create_record = access_dot_path(config, 'create_record')
settings_filter_record = access_dot_path(config, 'standart_filter')
settings_custom_filter_record = access_dot_path(config, 'custom_filter')
use_standart_filter = access_dot_path(config, 'use_standart_filter', False)
use_custom_filter = access_dot_path(config, 'use_custom_filter', False)
settings_for_records = RecordOperation(settings_converter,
settings_create_record,
settings_filter_record,
settings_custom_filter_record,
use_standart_filter,
use_custom_filter,
root_dir_to_modules,
root_dir_to_converters,
logger
)
timeout_filter = config.get('timeout_filter', 7)
app_settings = AppConfig(**{
'senders': senders,
'number_lines': number_lines,
'mode_read_input': mode_read_input,
'queue_sleep': queue_sleep,
'operations': settings_for_records,
'input_file': '' if not input_files else input_files,
'input_stdin': '' if not input_stdin else input_stdin,
        'collectors': collectors if collectors else {},  # TODO: debatable, needs a rethink
        'sqs': sqs if sqs else {},  # TODO: debatable, needs a rethink
'size_bulk_mb': size_bulk_mb,
'port': int(config['port']),
'geoip': geoip,
'timeout_filter': timeout_filter,
'app_module': app_module,
'packing_dict': packing_dict,
'try_retry_upload': try_retry_upload,
'source': source_worker,
'statistics_file': '' if not args.statistics else args.statistics,
'statistics': {'all_records': 0,
'valid_records': 0,
'duration': 0,
'size': 0}
}
)
return app_settings
``` |
{
"source": "JohnEskimSmith/jarm",
"score": 3
} |
#### File: lib/core/stats.py
```python
from datetime import datetime
__all__ = ['Stats']
from typing import Optional
class Stats:
"""
Holds application counters and timestamps
"""
    def __init__(self, start_time: Optional[datetime] = None):
self.start_time = start_time or datetime.utcnow()
self.count_input = 0
self.count_good = 0
self.count_error = 0
def dict(self, stopped: Optional[datetime] = None) -> dict:
stopped = stopped or datetime.utcnow()
return {
'duration': (stopped - self.start_time).total_seconds(),
'valid targets': self.count_input,
'success': self.count_good,
'fails': self.count_error
}
```
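A minimal usage sketch for the `Stats` counters above (the import path is assumed from the file location; nothing here is part of the original module):
```python
from datetime import datetime, timedelta

from lib.core.stats import Stats  # assumed import path, derived from the file location

stats = Stats(start_time=datetime.utcnow() - timedelta(seconds=2))
stats.count_input += 10
stats.count_good += 9
stats.count_error += 1

# -> {'duration': ~2.0, 'valid targets': 10, 'success': 9, 'fails': 1}
print(stats.dict())
```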
#### File: lib/util/settings.py
```python
import argparse
from os import path
from sys import stderr
from typing import Tuple, List
from lib.core import AppConfig, TargetConfig
__all__ = ['parse_args', 'parse_settings']
def parse_args():
"""
parsing arguments
:return:
"""
parser = argparse.ArgumentParser(description='JARM is an active Transport Layer Security (TLS) server fingerprinting tool. (Asyncio version)',
formatter_class=argparse.MetavarTypeHelpFormatter)
# input_stdin: str
parser.add_argument('--stdin', dest='input_stdin', action='store_true', help='Read targets from stdin')
parser.add_argument('-t', '--targets', nargs='+', type=str, default='', dest='single_targets',
help='Single targets: ipv4, fqdn, ipv4:port, fqdn:port. Example: facebook.com google.com:443 172.16.31.10/24:8443')
parser.add_argument('-f', '--input-file', dest='input_file', type=str, help='path to file with targets.\n Targets: ipv4, fqdn, ipv4:port, fqdn:port')
parser.add_argument('-o', '--output-file', dest='output_file', type=str, help='path to file with results')
parser.add_argument('--json', dest='json', action='store_true', default=True, help='Output format of records, default json')
parser.add_argument('--csv', dest='csv', action='store_true', help='Output format of records: csv')
parser.add_argument('-s', '--senders', dest='senders', type=int, default=1024,
help='Number of send coroutines to use (default: 1024)')
parser.add_argument('--queue-sleep', dest='queue_sleep', type=int, default=1,
help='Sleep duration if the queue is full, default 1 sec. Queue size == senders')
parser.add_argument('-tconnect', '--timeout-connection', dest='conn_timeout', type=int, default=12,
help='Set connection timeout for open_connection(asyncio), seconds (default: 12)')
parser.add_argument('-tread', '--timeout-read', dest='read_timeout', type=int, default=12,
help='Set connection timeout for reader from connection, seconds (default: 12)')
parser.add_argument('-tresolver', '--resolver-timeout', dest='resolver_timeout', type=int, default=4,
help='Set DNS resolutions timeout, seconds (default: 4)')
parser.add_argument('-p', '--port', type=int, help='Specify port (default: 443)', default=443)
# region filters
parser.add_argument('--filter-jarm', dest='jarm', type=str,
help='trying to find a jarm in a response')
parser.add_argument('--filter-cipher-tls', dest='cipher_tls', type=str,
help='trying to find a cipher_tls(substring in jarm)')
parser.add_argument('--show-only-success', dest='show_only_success', action='store_true',
help='Show(save) only success records')
# endregion
parser.add_argument('--show-statistics', dest='statistics', action='store_true')
return parser.parse_args()
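# Illustrative invocations (the entry-point module name is an assumption; the flags come from parse_args above):
#     python -m jarm_scanner -t example.com:443 --show-statistics
#     python -m jarm_scanner -f targets.txt --csv -o results.csv --filter-cipher-tls c02b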
def parse_settings(args: argparse.Namespace) -> Tuple[TargetConfig, AppConfig]:
if not args.input_stdin and not args.input_file and not args.single_targets:
print("""errors, set input source:
--stdin read targets from stdin;
-t,--targets set targets, see -h;
-f,--input-file read from file with targets, see -h""")
exit(1)
input_file = None
if args.input_file:
input_file = args.input_file
if not path.isfile(input_file):
abort(f'ERROR: file not found: {input_file}')
if not args.output_file:
output_file, write_mode = '/dev/stdout', 'wb'
else:
output_file, write_mode = args.output_file, 'a'
payloads = return_structs_tls()
# endregion
if not args.csv:
output_format = 'json'
else:
output_format = 'csv'
filter_jarm = ''
if args.jarm:
filter_jarm = args.jarm
filter_cipher_tls = ''
if args.cipher_tls:
filter_cipher_tls = args.cipher_tls
target_settings = TargetConfig(**{
'port': args.port,
'conn_timeout': args.conn_timeout,
'read_timeout': args.read_timeout,
'resolver_timeout': args.resolver_timeout,
'list_payloads': payloads,
})
app_settings = AppConfig(**{
'output_format': output_format,
'input_stdin': args.input_stdin,
'senders': args.senders,
'queue_sleep': args.queue_sleep,
'statistics': args.statistics,
'single_targets': args.single_targets,
'input_file': input_file,
'output_file': output_file,
'write_mode': write_mode,
'filter_jarm': filter_jarm,
'filter_cipher_tls': filter_cipher_tls,
'show_only_success': args.show_only_success
})
return target_settings, app_settings
def return_structs_tls() -> List[List[str]]:
"""
function from jarm.py with changes
:return:
"""
    # Array format = [version, cipher_list, cipher_order, GREASE, APLN, SUPPORT, extension_order]
    # (destination host/port are handled elsewhere in this asyncio version)
tls1_2_forward = ["TLS_1.2", "ALL", "FORWARD", "NO_GREASE", "APLN",
"1.2_SUPPORT", "REVERSE"]
tls1_2_reverse = ["TLS_1.2", "ALL", "REVERSE", "NO_GREASE", "APLN",
"1.2_SUPPORT", "FORWARD"]
tls1_2_top_half = ["TLS_1.2", "ALL", "TOP_HALF", "NO_GREASE", "APLN",
"NO_SUPPORT", "FORWARD"]
tls1_2_bottom_half = ["TLS_1.2", "ALL", "BOTTOM_HALF", "NO_GREASE", "RARE_APLN",
"NO_SUPPORT", "FORWARD"]
tls1_2_middle_out = ["TLS_1.2", "ALL", "MIDDLE_OUT", "GREASE", "RARE_APLN",
"NO_SUPPORT", "REVERSE"]
tls1_1_middle_out = ["TLS_1.1", "ALL", "FORWARD", "NO_GREASE", "APLN",
"NO_SUPPORT", "FORWARD"]
tls1_3_forward = ["TLS_1.3", "ALL", "FORWARD", "NO_GREASE", "APLN",
"1.3_SUPPORT", "REVERSE"]
tls1_3_reverse = ["TLS_1.3", "ALL", "REVERSE", "NO_GREASE", "APLN",
"1.3_SUPPORT", "FORWARD"]
tls1_3_invalid = ["TLS_1.3", "NO1.3", "FORWARD", "NO_GREASE", "APLN",
"1.3_SUPPORT", "FORWARD"]
tls1_3_middle_out = ["TLS_1.3", "ALL", "MIDDLE_OUT", "GREASE", "APLN",
"1.3_SUPPORT", "REVERSE"]
# Possible versions: SSLv3, TLS_1, TLS_1.1, TLS_1.2, TLS_1.3
# Possible cipher lists: ALL, NO1.3
# GREASE: either NO_GREASE or GREASE
# APLN: either APLN or RARE_APLN
    # Supported Versions extension: 1.2_SUPPORT, NO_SUPPORT, or 1.3_SUPPORT
# Possible Extension order: FORWARD, REVERSE
queue_tls = [tls1_2_forward, tls1_2_reverse, tls1_2_top_half, tls1_2_bottom_half, tls1_2_middle_out,
tls1_1_middle_out, tls1_3_forward, tls1_3_reverse, tls1_3_invalid, tls1_3_middle_out]
return queue_tls
def abort(message: str, exc: Exception = None, exit_code: int = 1):
print(message, file=stderr)
if exc:
print(exc, file=stderr)
exit(exit_code)
``` |
{
"source": "Johnetordoff/aquavalet",
"score": 2
} |
#### File: providers/filesystem/provider.py
```python
import os
import shutil
import logging
from aquavalet import streams, provider, exceptions
from .metadata import FileSystemMetadata
logger = logging.getLogger(__name__)
class FileSystemProvider(provider.BaseProvider):
"""Provider using the local filesystem as a backend-store"""
name = "filesystem"
async def validate_item(self, path, **kwargs):
        if not os.path.exists(path) or (os.path.isdir(path) and not path.endswith("/")):
raise exceptions.NotFoundError(
f"Item at '{path}' could not be found, folders must end with '/'"
)
if path == "/":
return FileSystemMetadata.root()
return FileSystemMetadata(path=path)
async def intra_copy(self, src_path, dest_path, dest_provider=None):
try:
if src_path.kind == "file":
shutil.copy(src_path.path, dest_path.path)
else:
shutil.copytree(src_path.path, dest_path.child(src_path.path))
except FileNotFoundError as exc:
raise exceptions.NotFoundError(exc.filename)
async def intra_move(self, src_path, dest_path, dest_provider=None):
try:
shutil.move(src_path.path, dest_path.path)
except FileNotFoundError as exc:
raise exceptions.NotFoundError(exc.filename)
async def rename(self, item, new_name):
try:
os.rename(item.path, item.rename(new_name))
except FileNotFoundError as exc:
raise exceptions.NotFoundError(exc.filename)
async def download(self, item, session=None, version=None, range=None):
file_pointer = open(item.path, "rb")
if range is not None and range[1] is not None:
return streams.file.FileStreamReader(file_pointer, range=range)
return streams.file.FileStreamReader(file_pointer)
async def upload(self, item, stream=None, new_name=None, conflict="warn"):
if os.path.isfile(item.path + new_name):
return await self.handle_conflict(
item=item, conflict=conflict, new_name=new_name, stream=stream
)
with open(item.path + new_name, "wb") as file_pointer:
async for chunk in stream:
file_pointer.write(chunk)
    async def delete(self, item, confirm_delete=False):
if item.is_file:
try:
os.remove(item.path)
except FileNotFoundError:
raise exceptions.NotFoundError(item.path)
else:
if item.is_root:
raise Exception("That's the root!")
shutil.rmtree(item.path)
async def metadata(self, item, version=None):
return item
async def children(self, item):
children = os.listdir(item.path)
children = [os.path.join(item.path, child) for child in children]
children = [
child + "/" if os.path.isdir(child) else child for child in children
]
return [FileSystemMetadata(path=child) for child in children]
async def create_folder(self, item, new_name):
os.makedirs(item.child(new_name), exist_ok=True)
item.raw["path"] = item.child(new_name) # TODO: Do this better
return item
def can_intra_copy(self, dest_provider, item=None):
return type(self) == type(dest_provider)
def can_intra_move(self, dest_provider, item=None):
return type(self) == type(dest_provider)
```
#### File: providers/osfstorage/metadata.py
```python
from aquavalet.providers.osfstyle.metadata import BaseOsfStyleItemMetadata
class OsfMetadata(BaseOsfStyleItemMetadata):
@property
def provider(self):
return "osfstorage"
BASE_URL = "https://files.osf.io/v1/resources/"
```
#### File: providers/osfstorage/provider.py
```python
from aquavalet.providers.osfstyle.provider import OsfProvider
from aquavalet.providers.osfstorage.metadata import OsfMetadata
from aquavalet.settings import OSF_TOKEN
class OSFStorageProvider(OsfProvider):
BASE_URL = "https://files.osf.io/v1/resources/"
API_URL = "https://api.osf.io/v2/files/{path}"
Item = OsfMetadata
def __init__(self, auth):
self.token = OSF_TOKEN
@property
def name(self):
return "osfstorage"
```
#### File: providers/osfstyle/provider.py
```python
import json
import aiohttp
from aquavalet import streams, provider, exceptions
from aquavalet.providers.utils import require_group, require_match
message_no_internal_provider = "No internal provider in url, path must follow pattern ^\/(?P<internal_provider>(?:\w|\d)+)?\/(?P<resource>[a-zA-Z0-9]{5,})?(?P<path>\/.*)?"
message_no_resource = "No resource in url, path must follow pattern ^\/(?P<internal_provider>(?:\w|\d)+)?\/(?P<resource>[a-zA-Z0-9]{5,})?(?P<path>\/.*)?"
message_no_path = "No path in url, path must follow pattern ^\/(?P<internal_provider>(?:\w|\d)+)?\/(?P<resource>[a-zA-Z0-9]{5,})?(?P<path>\/.*)?"
class OsfProvider(provider.BaseProvider):
NAME = "OSF"
PATH_PATTERN = r"^\/(?P<internal_provider>osfstorage)\/(?P<resource>[a-zA-Z0-9]{5,})\/((?P<path>[a-zA-Z0-9]{,}))"
@property
def default_headers(self):
return {"Authorization": f"Bearer {self.token}"}
async def validate_item(self, path):
match = require_match(self.PATH_PATTERN, path, "match could not be found")
self.internal_provider = require_group(
match, "internal_provider", message_no_internal_provider
)
self.resource = require_group(match, "resource", message_no_resource)
if not match.groupdict().get("path"):
return self.Item.root(self.internal_provider, self.resource)
else:
path = require_group(match, "path", message_no_path)
if self.internal_provider == "osfstorage":
async with aiohttp.ClientSession() as session:
async with session.get(
url=self.API_URL.format(path=path),
headers=self.default_headers,
) as resp:
if resp.status == 200:
data = (await resp.json())["data"]
else:
raise await self.handle_response(resp, path=path)
return self.Item(data, self.internal_provider, self.resource)
async def download(self, item, session, version=None, range=None):
download_header = self.default_headers
if range:
download_header.update({"Range": str(self._build_range_header(range))})
path = f"{self.resource}/providers/{self.internal_provider}{item.id}"
if version:
path += f"?version={version}"
resp = await session.get(url=self.BASE_URL + path, headers=download_header)
return streams.http.ResponseStreamReader(resp, range)
async def upload(self, item, stream, new_name, conflict="warn"):
async with aiohttp.ClientSession() as session:
async with session.put(
data=stream,
url=self.BASE_URL
+ f"{self.resource}/providers/{self.internal_provider}{item.id}",
headers=self.default_headers,
params={"kind": "file", "name": new_name, "conflict": conflict},
) as resp:
if resp.status in (200, 201):
data = (await resp.json())["data"]
else:
return await self.handle_response(
resp=resp,
item=item,
new_name=new_name,
stream=stream,
conflict=conflict,
)
return self.Item(data, self.internal_provider, self.resource)
async def handle_conflict_new_version(
self, resp, item, path, stream, new_name, conflict
):
children = await self.children(item)
try:
item = next(item for item in children if item.name == new_name)
except StopIteration:
raise exceptions.Gone(f"Item at path '{item.name}' is gone.")
async with aiohttp.ClientSession() as session:
async with session.put(
data=stream,
url=self.BASE_URL
+ f"{self.resource}/providers/{self.internal_provider}{item.id}",
headers=self.default_headers,
) as resp:
if resp.status in (200, 201):
data = (await resp.json())["data"]
else:
return await self.handle_response(
resp=resp,
item=item,
new_name=new_name,
stream=stream,
conflict=conflict,
)
return self.Item(data, self.internal_provider, self.resource)
async def delete(self, item, confirm_delete=0):
async with aiohttp.ClientSession() as session:
async with session.delete(
url=self.BASE_URL
+ f"{self.resource}/providers/{self.internal_provider}{item.id}",
params={"confirm_delete": 0},
headers=self.default_headers,
) as resp:
if resp.status in (204,):
return None
else:
raise await self.handle_response(resp, item)
async def metadata(self, item, version=None):
return item
async def create_folder(self, item, new_name):
async with aiohttp.ClientSession() as session:
async with session.put(
url=self.BASE_URL
+ f"{self.resource}/providers/{self.internal_provider}{item.id}",
headers=self.default_headers,
params={"kind": "folder", "name": new_name},
) as resp:
if resp.status in (201,):
data = (await resp.json())["data"]
else:
raise await self.handle_response(resp, item, new_name=new_name)
return self.Item(data, self.internal_provider, self.resource)
async def rename(self, item, new_name):
async with aiohttp.ClientSession() as session:
async with session.post(
url=self.BASE_URL
+ f"{self.resource}/providers/{self.internal_provider}{item.id}",
data=json.dumps({"action": "rename", "rename": new_name}),
headers=self.default_headers,
) as resp:
if resp.status == 200:
data = (await resp.json())["data"]
else:
raise await self.handle_response(resp, item)
return self.Item(data, self.internal_provider, self.resource)
async def children(self, item):
async with aiohttp.ClientSession() as session:
async with session.get(
url=self.BASE_URL
+ f"{self.resource}/providers/{self.internal_provider}{item.id}",
headers=self.default_headers,
) as resp:
if resp.status == 200:
data = (await resp.json())["data"]
else:
raise await self.handle_response(resp, item)
return self.Item.list(item, data)
def can_intra_copy(self, dest_provider, item=None):
if type(self) == type(dest_provider):
return True
async def intra_copy(self, item, dest_item, dest_provider=None):
async with aiohttp.ClientSession() as session:
async with session.post(
url=self.BASE_URL
+ f"{self.resource}/providers/{self.internal_provider}{item.id}",
data=json.dumps(
{
"action": "copy",
"path": dest_item.path + "/",
"provider": "osfstorage",
"resource": dest_provider.resource,
}
),
headers=self.default_headers,
) as resp:
print(resp)
async def versions(self, item):
async with aiohttp.ClientSession() as session:
async with session.get(
url=self.BASE_URL
+ f"{self.resource}/providers/{self.internal_provider}{item.id}?versions=",
headers=self.default_headers,
) as resp:
if resp.status == 200:
data = (await resp.json())["data"]
else:
raise await self.handle_response(resp, item)
return self.Item.versions(item, data)
```
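`validate_item` above is driven entirely by `PATH_PATTERN`; a standalone check with a made-up five-character resource id shows what each named group yields (pure `re`, no provider needed):
```python
import re

PATH_PATTERN = r"^\/(?P<internal_provider>osfstorage)\/(?P<resource>[a-zA-Z0-9]{5,})\/((?P<path>[a-zA-Z0-9]{,}))"

# a file/folder id after the resource
match = re.match(PATH_PATTERN, "/osfstorage/abc12/defgh")
print(match.groupdict())
# {'internal_provider': 'osfstorage', 'resource': 'abc12', 'path': 'defgh'}

# no id after the resource: 'path' is empty, so validate_item returns the root item
print(re.match(PATH_PATTERN, "/osfstorage/abc12/").groupdict())
# {'internal_provider': 'osfstorage', 'resource': 'abc12', 'path': ''}
```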
#### File: tests/streams/test_request_stream.py
```python
import pytest
from tests.streams.fixtures import RequestStreamFactory
class TestRequestStream:
@pytest.mark.asyncio
async def test_request_stream_read(self):
stream = RequestStreamFactory()
assert await stream.read() == b"test data"
assert stream.at_eof()
@pytest.mark.asyncio
async def test_request_stream_read_exact(self):
stream = RequestStreamFactory()
assert await stream.read(4) == b"test"
assert not stream.at_eof()
@pytest.mark.asyncio
async def test_request_stream_read_chunked(self):
stream = RequestStreamFactory()
ind = 0
test_data = "test data"
stream.CHUNK_SIZE = 1
async for chunk in stream:
assert chunk == bytes(test_data[ind], "utf-8")
ind += 1
assert stream.at_eof()
@pytest.mark.asyncio
async def test_request_stream_size(self):
stream = RequestStreamFactory()
assert stream.size == 9
```
#### File: aquavalet/tests/utils.py
```python
import json
import aiohttp
from aiohttp.web import Response
def json_resp(json_data, status=200):
return Response(
body=json.dumps(json_data.copy()),
headers={"content-type": "application/json"},
status=status,
)
def data_resp(raw_data, status=200):
return Response(body=raw_data, status=status)
def empty_resp(status=200):
return Response(body=aiohttp.streams.EmptyStreamReader(), status=status)
``` |
{
"source": "Johnetordoff/osf-pigeon",
"score": 2
} |
#### File: Johnetordoff/osf-pigeon/conftest.py
```python
import mock
import pytest
import responses
from osf_pigeon import settings
@pytest.fixture
def mock_waterbutler(guid, zip_data):
with responses.RequestsMock(assert_all_requests_are_fired=True) as rsps:
rsps.add(
responses.GET,
f"{settings.OSF_FILES_URL}v1/resources/{guid}/providers/osfstorage/?zip=",
status=200,
body=zip_data,
)
yield rsps
@pytest.fixture
def mock_osf_api():
with responses.RequestsMock(assert_all_requests_are_fired=False) as rsps:
yield rsps
@pytest.fixture
def mock_datacite(guid, identifiers_json):
with mock.patch.object(settings, "DOI_FORMAT", "{prefix}/osf.io/{guid}"):
doi = settings.DOI_FORMAT.format(prefix=settings.DATACITE_PREFIX, guid=guid)
with responses.RequestsMock(assert_all_requests_are_fired=True) as rsps:
rsps.add(
responses.GET,
f"{settings.DATACITE_URL}metadata/{doi}",
status=200,
body=b"pretend this is XML.",
)
rsps.add(
responses.GET,
f"{settings.OSF_API_URL}v2/registrations/{guid}/identifiers/",
body=identifiers_json,
status=200,
)
yield rsps
@pytest.fixture
def mock_ia_client():
with mock.patch("osf_pigeon.pigeon.internetarchive.get_session") as mock_ia:
mock_session = mock.Mock()
mock_ia_item = mock.Mock()
mock_ia.return_value = mock_session
mock_session.get_item.return_value = mock_ia_item
# ⬇️ we only pass one mock into the test
mock_ia.session = mock_session
mock_ia.item = mock_ia_item
with mock.patch(
"osf_pigeon.pigeon.internetarchive.Item", return_value=mock_ia_item
):
yield mock_ia
```
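The fixtures above rely on the `responses` library to intercept outgoing `requests` calls; a minimal self-contained sketch of the same pattern (URL and payload are placeholders, not taken from the settings module):
```python
import requests
import responses

with responses.RequestsMock(assert_all_requests_are_fired=True) as rsps:
    rsps.add(
        responses.GET,
        "https://api.example.test/v2/registrations/abcde/",
        json={"data": {"id": "abcde"}},
        status=200,
    )
    resp = requests.get("https://api.example.test/v2/registrations/abcde/")
    assert resp.json()["data"]["id"] == "abcde"
```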
#### File: osf-pigeon/osf_pigeon/__main__.py
```python
import os
import argparse
import requests
from sanic import Sanic
from sanic.response import json
from osf_pigeon.pigeon import main, sync_metadata, get_id
from concurrent.futures import ThreadPoolExecutor
from sanic.log import logger
from asyncio import events
def run(main):
loop = events.new_event_loop()
try:
events.set_event_loop(loop)
return loop.run_until_complete(main)
finally:
try:
loop.run_until_complete(loop.shutdown_asyncgens())
finally:
events.set_event_loop(None)
loop.close()
app = Sanic("osf_pigeon")
pigeon_jobs = ThreadPoolExecutor(max_workers=10, thread_name_prefix="pigeon_jobs")
def task_done(future):
if future._exception:
exception = future._exception
exception = str(exception)
logger.debug(f"ERROR:{exception}")
if future._result:
guid, url = future._result
resp = requests.post(
f"{settings.OSF_API_URL}_/ia/{guid}/done/", json={"IA_url": url}
)
logger.debug(f"DONE:{future._result} Response:{resp}")
@app.route("/")
async def index(request):
return json({"🐦": "👍"})
@app.route("/archive/<guid>", methods=["GET", "POST"])
async def archive(request, guid):
future = pigeon_jobs.submit(run, main(guid))
future.add_done_callback(task_done)
return json({guid: future._state})
@app.route("/metadata/<guid>", methods=["POST"])
async def metadata(request, guid):
item_name = get_id(guid)
future = pigeon_jobs.submit(sync_metadata, item_name, request.json)
future.add_done_callback(task_done)
return json({guid: future._state})
parser = argparse.ArgumentParser(
description="Set the environment to run OSF pigeon in."
)
parser.add_argument(
"--env", dest="env", help="what environment are you running this for"
)
if __name__ == "__main__":
args = parser.parse_args()
if args.env:
os.environ["ENV"] = args.env
from osf_pigeon import settings
if args.env == "production":
app.run(host=settings.HOST, port=settings.PORT)
else:
app.run(host=settings.HOST, port=settings.PORT, auto_reload=True, debug=True)
``` |
{
"source": "Johnetordoff/waterbutler",
"score": 2
} |
#### File: waterbutler/core/provider.py
```python
import abc
import time
import asyncio
import logging
import weakref
import functools
import itertools
from urllib import parse
import furl
import aiohttp
from waterbutler import settings
from waterbutler.core import streams
from waterbutler.core import exceptions
from waterbutler.core.metrics import MetricsRecord
from waterbutler.core.utils import ZipStreamGenerator
from waterbutler.core.utils import RequestHandlerContext
logger = logging.getLogger(__name__)
_THROTTLES = weakref.WeakKeyDictionary()
def throttle(concurrency=10, interval=1):
def _throttle(func):
@functools.wraps(func)
async def wrapped(*args, **kwargs):
if asyncio.get_event_loop() not in _THROTTLES:
count, last_call, event = 0, time.time(), asyncio.Event()
_THROTTLES[asyncio.get_event_loop()] = (count, last_call, event)
event.set()
else:
count, last_call, event = _THROTTLES[asyncio.get_event_loop()]
await event.wait()
count += 1
if count > concurrency:
count = 0
if (time.time() - last_call) < interval:
event.clear()
await asyncio.sleep(interval - (time.time() - last_call))
event.set()
last_call = time.time()
return (await func(*args, **kwargs))
return wrapped
return _throttle
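# Illustrative use of the throttle decorator (not part of the original module): at most
# `concurrency` calls per `interval` seconds are let through per event loop; make_request()
# further down is decorated with the defaults (10 calls per second).
#
#     @throttle(concurrency=5, interval=1)
#     async def ping(url):
#         return await aiohttp.request('HEAD', url)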
def build_url(base, *segments, **query):
url = furl.furl(base)
# Filters return generators
# Cast to list to force "spin" it
url.path.segments = list(filter(
lambda segment: segment,
map(
# Furl requires everything to be quoted or not, no mixtures allowed
# prequote everything so %signs don't break everything
lambda segment: parse.quote(segment.strip('/')),
# Include any segments of the original url, effectively list+list but returns a generator
itertools.chain(url.path.segments, segments)
)
))
url.args = query
return url.url
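# Illustrative output (assuming furl's usual rendering; not executed here):
#     build_url('https://api.example.com/v1', 'foo bar', 'baz/', limit=10)
#     -> 'https://api.example.com/v1/foo%20bar/baz?limit=10'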
class BaseProvider(metaclass=abc.ABCMeta):
"""The base class for all providers. Every provider must, at the least, implement all abstract
methods in this class.
.. note::
When adding a new provider you must add it to setup.py's
`entry_points` under the `waterbutler.providers` key formatted
as: `<provider name> = waterbutler.providers.yourprovider:<FullProviderName>`
Keep in mind that `yourprovider` modules must export the provider class
"""
BASE_URL = None
def __init__(self, auth, credentials, settings, retry_on={408, 502, 503, 504}):
"""
:param dict auth: Information about the user this provider will act on the behalf of
:param dict credentials: The credentials used to authenticate with the provider,
            often an OAuth 2 token
:param dict settings: Configuration settings for this provider,
often folder or repo
"""
self._retry_on = retry_on
self.auth = auth
self.credentials = credentials
self.settings = settings
self.provider_metrics = MetricsRecord('provider')
self.provider_metrics.add('auth', auth)
self.metrics = self.provider_metrics.new_subrecord(self.NAME)
@abc.abstractproperty
def NAME(self):
raise NotImplementedError
def __eq__(self, other):
try:
return (
type(self) == type(other) and
self.credentials == other.credentials
)
except AttributeError:
return False
def serialized(self):
return {
'name': self.NAME,
'auth': self.auth,
'settings': self.settings,
'credentials': self.credentials,
}
def build_url(self, *segments, **query):
"""A nice wrapper around furl, builds urls based on self.BASE_URL
:param tuple \*segments: A tuple of strings joined into /foo/bar/..
:param dict \*\*query: A dictionary that will be turned into query parameters ?foo=bar
:rtype: str
"""
return build_url(self.BASE_URL, *segments, **query)
@property
def default_headers(self):
"""Headers to be included with every request
Commonly OAuth headers or Content-Type
"""
return {}
def build_headers(self, **kwargs):
headers = self.default_headers
headers.update(kwargs)
return {
key: value
for key, value in headers.items()
if value is not None
}
@throttle()
async def make_request(self, method, url, *args, **kwargs):
"""A wrapper around :func:`aiohttp.request`. Inserts default headers.
:param str method: The HTTP method
:param str url: The url to send the request to
:keyword range: An optional tuple (start, end) that is transformed into a Range header
:keyword expects: An optional tuple of HTTP status codes as integers raises an exception
if the returned status code is not in it.
:type expects: tuple of ints
:param Exception throws: The exception to be raised from expects
:param tuple \*args: args passed to :func:`aiohttp.request`
:param dict \*\*kwargs: kwargs passed to :func:`aiohttp.request`
:rtype: :class:`aiohttp.Response`
:raises ProviderError: Raised if expects is defined
"""
kwargs['headers'] = self.build_headers(**kwargs.get('headers', {}))
retry = _retry = kwargs.pop('retry', 2)
range = kwargs.pop('range', None)
expects = kwargs.pop('expects', None)
throws = kwargs.pop('throws', exceptions.ProviderError)
if range:
kwargs['headers']['Range'] = self._build_range_header(range)
if callable(url):
url = url()
while retry >= 0:
try:
self.provider_metrics.incr('requests.count')
self.provider_metrics.append('requests.urls', url)
response = await aiohttp.request(method, url, *args, **kwargs)
self.provider_metrics.append('requests.verbose', ['OK', response.status, url])
if expects and response.status not in expects:
raise (await exceptions.exception_from_response(response, error=throws, **kwargs))
return response
except throws as e:
self.provider_metrics.append('requests.verbose', ['NO', e.code, url])
if retry <= 0 or e.code not in self._retry_on:
raise
await asyncio.sleep((1 + _retry - retry) * 2)
retry -= 1
def request(self, *args, **kwargs):
return RequestHandlerContext(self.make_request(*args, **kwargs))
async def move(self, dest_provider, src_path, dest_path, rename=None, conflict='replace', handle_naming=True):
"""Moves a file or folder from the current provider to the specified one
Performs a copy and then a delete.
Calls :func:`BaseProvider.intra_move` if possible.
:param BaseProvider dest_provider: The provider to move to
:param dict source_options: A dict to be sent to either :func:`BaseProvider.intra_move`
or :func:`BaseProvider.copy` and :func:`BaseProvider.delete`
:param dict dest_options: A dict to be sent to either :func:`BaseProvider.intra_move`
or :func:`BaseProvider.copy`
"""
args = (dest_provider, src_path, dest_path)
kwargs = {'rename': rename, 'conflict': conflict}
self.provider_metrics.add('move', {
'got_handle_naming': handle_naming,
'conflict': conflict,
'got_rename': rename is not None,
})
if handle_naming:
dest_path = await dest_provider.handle_naming(
src_path,
dest_path,
rename=rename,
conflict=conflict,
)
args = (dest_provider, src_path, dest_path)
kwargs = {}
# files and folders shouldn't overwrite themselves
if (
self.shares_storage_root(dest_provider) and
src_path.materialized_path == dest_path.materialized_path
):
raise exceptions.OverwriteSelfError(src_path)
self.provider_metrics.add('move.can_intra_move', False)
if self.can_intra_move(dest_provider, src_path):
self.provider_metrics.add('move.can_intra_move', True)
return (await self.intra_move(*args))
if src_path.is_dir:
metadata, created = await self._folder_file_op(self.move, *args, **kwargs)
else:
metadata, created = await self.copy(*args, handle_naming=False, **kwargs)
await self.delete(src_path)
return metadata, created
async def copy(self, dest_provider, src_path, dest_path, rename=None, conflict='replace', handle_naming=True):
args = (dest_provider, src_path, dest_path)
kwargs = {'rename': rename, 'conflict': conflict, 'handle_naming': handle_naming}
self.provider_metrics.add('copy', {
'got_handle_naming': handle_naming,
'conflict': conflict,
'got_rename': rename is not None,
})
if handle_naming:
dest_path = await dest_provider.handle_naming(
src_path,
dest_path,
rename=rename,
conflict=conflict,
)
args = (dest_provider, src_path, dest_path)
kwargs = {}
# files and folders shouldn't overwrite themselves
if (
self.shares_storage_root(dest_provider) and
src_path.materialized_path == dest_path.materialized_path
):
raise exceptions.OverwriteSelfError(src_path)
self.provider_metrics.add('copy.can_intra_copy', False)
if self.can_intra_copy(dest_provider, src_path):
self.provider_metrics.add('copy.can_intra_copy', True)
return (await self.intra_copy(*args))
if src_path.is_dir:
return (await self._folder_file_op(self.copy, *args, **kwargs))
download_stream = await self.download(src_path)
if getattr(download_stream, 'name', None):
dest_path.rename(download_stream.name)
return (await dest_provider.upload(download_stream, dest_path))
async def _folder_file_op(self, func, dest_provider, src_path, dest_path, **kwargs):
"""Recursively apply func to src/dest path.
Called from: func: copy and move if src_path.is_dir.
Calls: func: dest_provider.delete and notes result for bool: created
func: dest_provider.create_folder
func: dest_provider.revalidate_path
func: self.metadata
:param coroutine func: to be applied to src/dest path
:param *Provider dest_provider: Destination provider
:param *ProviderPath src_path: Source path
:param *ProviderPath dest_path: Destination path
"""
assert src_path.is_dir, 'src_path must be a directory'
assert asyncio.iscoroutinefunction(func), 'func must be a coroutine'
try:
await dest_provider.delete(dest_path)
created = False
except exceptions.ProviderError as e:
if e.code != 404:
raise
created = True
folder = await dest_provider.create_folder(dest_path, folder_precheck=False)
dest_path = await dest_provider.revalidate_path(dest_path.parent, dest_path.name, folder=dest_path.is_dir)
folder.children = []
items = await self.metadata(src_path)
self.provider_metrics.append('_folder_file_ops.item_counts', len(items))
for i in range(0, len(items), settings.OP_CONCURRENCY):
futures = []
for item in items[i:i + settings.OP_CONCURRENCY]:
futures.append(asyncio.ensure_future(
func(
dest_provider,
# TODO figure out a way to cut down on all the requests made here
(await self.revalidate_path(src_path, item.name, folder=item.is_folder)),
(await dest_provider.revalidate_path(dest_path, item.name, folder=item.is_folder)),
handle_naming=False,
)
))
if item.is_folder:
await futures[-1]
if not futures:
continue
done, _ = await asyncio.wait(futures, return_when=asyncio.FIRST_EXCEPTION)
for fut in done:
folder.children.append(fut.result()[0])
return folder, created
async def handle_naming(self, src_path, dest_path, rename=None, conflict='replace'):
"""Given a WaterButlerPath and the desired name, handle any potential naming issues.
i.e.:
cp /file.txt /folder/ -> /folder/file.txt
cp /folder/ /folder/ -> /folder/folder/
cp /file.txt /folder/file.txt -> /folder/file.txt
cp /file.txt /folder/file.txt -> /folder/file (1).txt
cp /file.txt /folder/doc.txt -> /folder/doc.txt
:param WaterButlerPath src_path: The object that is being copied
:param WaterButlerPath dest_path: The path that is being copied to or into
:param str rename: The desired name of the resulting path, may be incremented
:param str conflict: The conflict resolution strategy, replace or keep
Returns: WaterButlerPath dest_path: The path of the desired result.
"""
if src_path.is_dir and dest_path.is_file:
# Cant copy a directory to a file
raise ValueError('Destination must be a directory if the source is')
if not dest_path.is_file:
# Directories always are going to be copied into
# cp /folder1/ /folder2/ -> /folder1/folder2/
dest_path = await self.revalidate_path(
dest_path,
rename or src_path.name,
folder=src_path.is_dir
)
dest_path, _ = await self.handle_name_conflict(dest_path, conflict=conflict)
return dest_path
def can_intra_copy(self, other, path=None):
"""Indicates if a quick copy can be performed between the current provider and `other`.
.. note::
Defaults to False
:param waterbutler.core.provider.BaseProvider other: The provider to check against
:rtype: bool
"""
return False
def can_intra_move(self, other, path=None):
"""Indicates if a quick move can be performed between the current provider and `other`.
.. note::
Defaults to False
:param waterbutler.core.provider.BaseProvider other: The provider to check against
:rtype: bool
"""
return False
def intra_copy(self, dest_provider, source_path, dest_path):
"""If the provider supports copying files and/or folders within itself by some means other
than download/upload, then ``can_intra_copy`` should return ``True``. This method will
implement the copy. It accepts the destination provider, a source path, and the
destination path. Returns the metadata for the newly created file and a boolean indicating
whether the copied entity is completely new (``True``) or overwrote a previously-existing
file (``False``).
:param BaseProvider dest_provider: a provider instance for the destination
:param WaterButlerPath source_path: the Path of the entity being copied
:param WaterButlerPath dest_path: the Path of the destination being copied to
:rtype: (:class:`waterbutler.core.metadata.BaseFileMetadata`, :class:`bool`)
"""
raise NotImplementedError
async def intra_move(self, dest_provider, src_path, dest_path):
"""If the provider supports moving files and/or folders within itself by some means other
than download/upload/delete, then ``can_intra_move`` should return ``True``. This method
will implement the move. It accepts the destination provider, a source path, and the
destination path. Returns the metadata for the newly created file and a boolean indicating
whether the moved entity is completely new (``True``) or overwrote a previously-existing
file (``False``).
:param BaseProvider dest_provider: a provider instance for the destination
:param WaterButlerPath source_path: the Path of the entity being moved
:param WaterButlerPath dest_path: the Path of the destination being moved to
:rtype: (:class:`waterbutler.core.metadata.BaseFileMetadata`, :class:`bool`)
"""
data, created = await self.intra_copy(dest_provider, src_path, dest_path)
await self.delete(src_path)
return data, created
async def exists(self, path, **kwargs):
"""Check for existence of WaterButlerPath
Attempt to retrieve provider metadata to determine existence of a WaterButlerPath. If
successful, will return the result of `self.metadata()` which may be `[]` for empty
folders.
:param WaterButlerPath path: path to check for
:rtype: (`self.metadata()` or False)
"""
try:
return (await self.metadata(path, **kwargs))
except exceptions.NotFoundError:
return False
except exceptions.MetadataError as e:
if e.code != 404:
raise
return False
async def handle_name_conflict(self, path, conflict='replace', **kwargs):
"""Check WaterButlerPath and resolve conflicts
Given a WaterButlerPath and a conflict resolution pattern determine
the correct file path to upload to and indicate if that file exists or not
:param WaterButlerPath path: Desired path to check for conflict
:param str conflict: replace, keep, warn
:rtype: (WaterButlerPath, provider.metadata() or False)
:raises: NamingConflict
"""
exists = await self.exists(path, **kwargs)
if (not exists and not exists == []) or conflict == 'replace':
return path, exists
if conflict == 'warn':
raise exceptions.NamingConflict(path)
while True:
path.increment_name()
test_path = await self.revalidate_path(
path.parent,
path.name,
folder=path.is_dir
)
exists = await self.exists(test_path, **kwargs)
if not (exists or exists == []):
break
return path, False
async def revalidate_path(self, base, path, folder=False):
"""Take a path and a base path and build a WaterButlerPath representing `/base/path`. For
id-based providers, this will need to lookup the id of the new child object.
:param WaterButlerPath base: The base folder to look under
:param str path: the path of a child of `base`, relative to `base`
:param bool folder: whether the returned WaterButlerPath should represent a folder
:rtype: WaterButlerPath
"""
return base.child(path, folder=folder)
async def zip(self, path, **kwargs):
"""Streams a Zip archive of the given folder
:param str path: The folder to compress
"""
metadata = await self.metadata(path)
if path.is_file:
metadata = [metadata]
path = path.parent
return streams.ZipStreamReader(ZipStreamGenerator(self, path, *metadata))
def shares_storage_root(self, other):
"""Returns True if ``self`` and ``other`` both point to the same storage root. Used to
detect when a file move/copy action might result in the file overwriting itself. Most
        providers have enough uniquely identifying information in the settings to detect this,
but some providers may need to override this to do further detection.
:param BaseProvider other: another provider instance to compare with
:returns bool: True if both providers use the same storage root.
"""
return self.NAME == other.NAME and self.settings == other.settings
@abc.abstractmethod
def can_duplicate_names(self):
"""Returns True if a file and a folder in the same directory can have identical names."""
raise NotImplementedError
@abc.abstractmethod
def download(self, **kwargs):
"""Download a file from this provider.
:param dict \*\*kwargs: Arguments to be parsed by child classes
:rtype: :class:`waterbutler.core.streams.ResponseStreamReader`
:raises: :class:`waterbutler.core.exceptions.DownloadError`
"""
raise NotImplementedError
@abc.abstractmethod
def upload(self, stream, **kwargs):
"""Uploads the given stream to the provider. Returns the metadata for the newly created
file and a boolean indicating whether the file is completely new (``True``) or overwrote
a previously-existing file (``False``)
:param dict \*\*kwargs: Arguments to be parsed by child classes
:rtype: (:class:`waterbutler.core.metadata.BaseFileMetadata`, :class:`bool`)
:raises: :class:`waterbutler.core.exceptions.DeleteError`
"""
raise NotImplementedError
@abc.abstractmethod
def delete(self, **kwargs):
"""
:param dict \*\*kwargs: Arguments to be parsed by child classes
:rtype: :class:`None`
:raises: :class:`waterbutler.core.exceptions.DeleteError`
"""
raise NotImplementedError
@abc.abstractmethod
def metadata(self, **kwargs):
"""Get metdata about the specified resource from this provider. Will be a :class:`list`
if the resource is a directory otherwise an instance of
:class:`waterbutler.core.metadata.BaseFileMetadata`
:param dict \*\*kwargs: Arguments to be parsed by child classes
:rtype: :class:`waterbutler.core.metadata.BaseMetadata`
:rtype: :class:`list` of :class:`waterbutler.core.metadata.BaseMetadata`
:raises: :class:`waterbutler.core.exceptions.MetadataError`
"""
raise NotImplementedError
@abc.abstractmethod
def validate_v1_path(self, path, **kwargs):
"""API v1 requires that requests against folder endpoints always end with a slash, and
requests against files never end with a slash. This method checks the provider's metadata
for the given id and throws a 404 Not Found if the implicit and explicit types don't
match. This method duplicates the logic in the provider's validate_path method, but
        validate_path must currently accommodate v0 AND v1 semantics. After v0's retirement, this
method can replace validate_path.
``path`` is the string in the url after the provider name and refers to the entity to be
acted on. For v1, this must *always exist*. If it does not, ``validate_v1_path`` should
return a 404. Creating a new file in v1 is done by making a PUT request against the parent
folder and specifying the file name as a query parameter. If a user attempts to create a
file by PUTting to its inferred path, validate_v1_path should reject this request with a 404.
:param str path: user-supplied path to validate
:rtype: :class:`waterbutler.core.path.WaterButlerPath`
:raises: :class:`waterbutler.core.exceptions.NotFoundError`
"""
raise NotImplementedError
@abc.abstractmethod
def validate_path(self, path, **kwargs):
"""Validates paths passed in via the v0 API. v0 paths are much less strict than v1 paths.
They may represent things that exist or something that should be created. As such, the goal
of ``validate_path`` is to split the path into its component parts and attempt to determine
the ID of each part on the external provider. For instance, if the ``googledrive`` provider
receives a path of ``/foo/bar/baz.txt``, it will split those into ``/``, ``foo/``, ``bar/``,
and ``baz.txt``, and query Google Drive for the ID of each. ``validate_path`` then builds a
WaterButlerPath object with an ID, name tuple for each path part. The last part is
permitted to not have an ID, since it may represent a file that has not yet been created.
All other parts should have an ID.
The WaterButler v0 API is deprecated and will be removed in a future release. At that time
this method will be obsolete and will be removed from all providers.
:param str path: user-supplied path to validate
:rtype: :class:`waterbutler.core.path.WaterButlerPath`
:raises: :class:`waterbutler.core.exceptions.NotFoundError`
"""
raise NotImplementedError
def path_from_metadata(self, parent_path, metadata):
return parent_path.child(metadata.name, _id=metadata.path.strip('/'), folder=metadata.is_folder)
def revisions(self, **kwargs):
return [] # TODO Raise 405 by default h/t @rliebz
def create_folder(self, path, **kwargs):
"""Create a folder in the current provider at `path`. Returns a `BaseFolderMetadata` object
if successful. May throw a 409 Conflict if a directory with the same name already exists.
:param str path: user-supplied path to create. must be a directory.
:param boolean precheck_folder: flag to check for folder before attempting create
:rtype: :class:`waterbutler.core.metadata.BaseFolderMetadata`
:raises: :class:`waterbutler.core.exceptions.FolderCreationError`
"""
raise exceptions.ProviderError({'message': 'Folder creation not supported.'}, code=405)
def _build_range_header(self, slice_tup):
start, end = slice_tup
return 'bytes={}-{}'.format(
'' if start is None else start,
'' if end is None else end
)
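    # Illustrative values (derived directly from the formatting above):
    #     _build_range_header((0, 1023))   -> 'bytes=0-1023'
    #     _build_range_header((None, 500)) -> 'bytes=-500'   (last 500 bytes)
    #     _build_range_header((500, None)) -> 'bytes=500-'   (from offset 500 to EOF)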
def __repr__(self):
# Note: credentials are not included on purpose.
return ('<{}({}, {})>'.format(self.__class__.__name__, self.auth, self.settings))
```
#### File: providers/dropbox/exceptions.py
```python
from waterbutler.core.exceptions import ProviderError
class DropboxUnhandledConflictError(ProviderError):
def __init__(self, error_data):
        message = ('Dropbox returns many distinct error messages with code 409 (Conflict); this one was not specifically handled in the provider: {}'.format(error_data))
super().__init__(message, code=409)
class DropboxNamingConflictError(ProviderError):
def __init__(self, error_data):
message = ('Cannot complete action: file or folder already exists in this location')
super().__init__(message, code=409)
```
#### File: providers/dropbox/provider.py
```python
import json
import http
from waterbutler.core import streams
from waterbutler.core import provider
from waterbutler.core import exceptions
from waterbutler.core.path import WaterButlerPath
from waterbutler.providers.dropbox import settings
from waterbutler.providers.dropbox.metadata import DropboxRevision
from waterbutler.providers.dropbox.metadata import DropboxFileMetadata
from waterbutler.providers.dropbox.metadata import DropboxFolderMetadata
from waterbutler.providers.dropbox.exceptions import DropboxNamingConflictError
from waterbutler.providers.dropbox.exceptions import DropboxUnhandledConflictError
class DropboxProvider(provider.BaseProvider):
"""Provider for the Dropbox.com cloud storage service.
This provider uses the v2 Dropbox API. The v2 API assigns IDs to files and folders, but not all
endpoints currently support IDs. Dropbox WaterButlerPath objects will continue to use string
paths until they do. As of Nov. 3, 2016, endpoint ID support is classified as follows.
Can use ID as path::
/files/get_metadata
/files/copy_reference/get
/files/download
/files/list_revisions
Cannot use ID as path::
/files/copy
/files/copy_reference/save
/files/move
/files/upload
/files/delete
/files/list_folder
/files/create_folder
Does not use path::
/files/list_folder/continue
API docs: https://www.dropbox.com/developers/documentation/http/documentation
Quirks:
* Dropbox paths are case-insensitive.
"""
NAME = 'dropbox'
BASE_URL = settings.BASE_URL
def __init__(self, auth, credentials, settings):
super().__init__(auth, credentials, settings)
self.token = self.credentials['token']
self.folder = self.settings['folder']
self.metrics.add('folder_is_root', self.folder == '/')
async def dropbox_request(self, url, body, expects=(200, 409,), *args, **kwargs):
"""Convenience wrapper around ``BaseProvider.request`` for simple Dropbox API calls. Sets
the method to ``POST``, jsonifies the ``body`` param, and provides default error handling
for Dropbox's standard 409 error response structure.
:param str url: the url of the endpoint to POST to
:param dict body: the data to send in the request body, will be jsonified
:param tuple expects: expected error codes, defaults to 200 (success) and 409 (error)
:param tuple \*args: passed through to BaseProvider.request()
:param dict \*\*kwargs: passed through to BaseProvider.request()
"""
async with self.request(
'POST',
url,
data=json.dumps(body),
expects=expects,
*args,
**kwargs,
) as resp:
data = await resp.json()
if resp.status == 409:
self.dropbox_conflict_error_handler(data, body.get('path', ''))
return data
def dropbox_conflict_error_handler(self, data, error_path=''):
"""Takes a standard Dropbox error response and an optional path and tries to throw a
meaningful error based on it.
:param dict data: the error received from Dropbox
:param str error_path: the path where the error occurred. Base folder will be stripped.
"""
if error_path.startswith(self.folder):
error_path = error_path[len(self.folder):]
if not error_path.startswith('/'):
error_path = '/{}'.format(error_path)
if 'error' in data:
error_class = data['error']['.tag']
if error_class in data['error']:
error_type = data['error'][error_class]
if error_type['.tag'] == 'not_found':
raise exceptions.NotFoundError(error_path)
if 'conflict' in error_type:
raise DropboxNamingConflictError(data['error_summary'])
if data['error'].get('reason', False) and 'conflict' in data['error']['reason']['.tag']:
raise DropboxNamingConflictError('{} for path {}'.format(data['error_summary'],
error_path))
raise DropboxUnhandledConflictError(str(data))
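    # Illustrative 409 bodies and how the handler above maps them (shapes inferred from
    # the checks above; field values are examples, not captured responses):
    #     {"error_summary": "path/not_found/..",
    #      "error": {".tag": "path", "path": {".tag": "not_found"}}}        -> NotFoundError
    #     {"error_summary": "path/conflict/folder/..",
    #      "error": {".tag": "path", "path": {".tag": "conflict",
    #                                         "conflict": {".tag": "folder"}}}}
    #                                                                       -> DropboxNamingConflictError
    #     anything else                                                     -> DropboxUnhandledConflictError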
async def validate_v1_path(self, path, **kwargs):
if path == '/':
return WaterButlerPath(path, prepend=self.folder)
implicit_folder = path.endswith('/')
data = await self.dropbox_request(
self.build_url('files', 'get_metadata'),
{'path': self.folder.rstrip('/') + path.rstrip('/')},
throws=exceptions.MetadataError,
)
explicit_folder = data['.tag'] == 'folder'
if explicit_folder != implicit_folder:
raise exceptions.NotFoundError(str(path))
return WaterButlerPath(path, prepend=self.folder)
async def validate_path(self, path, **kwargs):
return WaterButlerPath(path, prepend=self.folder)
def can_duplicate_names(self):
return False
def shares_storage_root(self, other):
"""Dropbox settings only include the root folder. If a cross-resource move occurs
between two dropbox providers that are on different accounts but have the same folder
base name, the parent method could incorrectly think the action is a self-overwrite.
Comparing credentials means that this is unique per connected account."""
return super().shares_storage_root(other) and self.credentials == other.credentials
@property
def default_headers(self):
return {'Authorization': 'Bearer {}'.format(self.token),
'Content-Type': 'application/json'}
async def intra_copy(self, dest_provider, src_path, dest_path):
dest_folder = dest_provider.folder
try:
if self == dest_provider:
data = await self.dropbox_request(
self.build_url('files', 'copy'),
{
'from_path': src_path.full_path.rstrip('/'),
'to_path': dest_path.full_path.rstrip('/'),
},
expects=(200, 201, 409),
throws=exceptions.IntraCopyError,
)
else:
from_ref_data = await self.dropbox_request(
self.build_url('files', 'copy_reference', 'get'),
{'path': src_path.full_path.rstrip('/')},
throws=exceptions.IntraCopyError,
)
from_ref = from_ref_data['copy_reference']
data = await dest_provider.dropbox_request(
self.build_url('files', 'copy_reference', 'save'),
{'copy_reference': from_ref, 'path': dest_path.full_path.rstrip('/')},
expects=(200, 201, 409),
throws=exceptions.IntraCopyError,
)
data = data['metadata']
except DropboxNamingConflictError:
await dest_provider.delete(dest_path)
resp, _ = await self.intra_copy(dest_provider, src_path, dest_path)
return resp, False
if data['.tag'] == 'file':
return DropboxFileMetadata(data, dest_folder), True
folder = DropboxFolderMetadata(data, dest_folder)
folder.children = [item for item in await dest_provider.metadata(dest_path)]
return folder, True
async def intra_move(self, dest_provider, src_path, dest_path):
if dest_path.full_path.lower() == src_path.full_path.lower():
# Dropbox does not support changing the casing in a file name
raise exceptions.InvalidPathError('Dropbox does not support changing only the case of a file name. Add or remove other characters to rename.')
try:
data = await self.dropbox_request(
self.build_url('files', 'move'),
{
'from_path': src_path.full_path.rstrip('/'),
'to_path': dest_path.full_path.rstrip('/'),
},
expects=(200, 201, 409),
throws=exceptions.IntraMoveError,
)
except DropboxNamingConflictError:
await dest_provider.delete(dest_path)
resp, _ = await self.intra_move(dest_provider, src_path, dest_path)
return resp, False
dest_folder = dest_provider.folder
if data['.tag'] == 'file':
return DropboxFileMetadata(data, dest_folder), True
folder = DropboxFolderMetadata(data, dest_folder)
folder.children = [item for item in await dest_provider.metadata(dest_path)]
return folder, True
async def download(self, path, revision=None, range=None, **kwargs):
path_arg = {"path": ("rev:" + revision if revision else path.full_path)}
resp = await self.make_request(
'POST',
self._build_content_url('files', 'download'),
headers={'Dropbox-API-Arg': json.dumps(path_arg), 'Content-Type': ''},
range=range,
expects=(200, 206, 409,),
throws=exceptions.DownloadError,
)
if resp.status == 409:
data = await resp.json()
self.dropbox_conflict_error_handler(data)
if 'Content-Length' not in resp.headers:
size = json.loads(resp.headers['dropbox-api-result'])['size']
else:
size = None
return streams.ResponseStreamReader(resp, size=size)
async def upload(self, stream, path, conflict='replace', **kwargs):
path, exists = await self.handle_name_conflict(path, conflict=conflict)
path_arg = {"path": path.full_path}
if conflict == 'replace':
path_arg['mode'] = 'overwrite'
resp = await self.make_request(
'POST',
self._build_content_url('files', 'upload'),
headers={
'Content-Type': 'application/octet-stream',
'Dropbox-API-Arg': json.dumps(path_arg),
'Content-Length': str(stream.size),
},
data=stream,
expects=(200, 409,),
throws=exceptions.UploadError,
)
data = await resp.json()
if resp.status == 409:
self.dropbox_conflict_error_handler(data, path.path)
return DropboxFileMetadata(data, self.folder), not exists
async def delete(self, path, confirm_delete=0, **kwargs):
"""Delete file, folder, or provider root contents
:param WaterButlerPath path: WaterButlerPath path object for folder
:param int confirm_delete: Must be 1 to confirm root folder delete
"""
if path.is_root:
if confirm_delete == 1:
return await self._delete_folder_contents(path)
else:
raise exceptions.DeleteError(
'confirm_delete=1 is required for deleting root provider folder',
code=400
)
await self.dropbox_request(
self.build_url('files', 'delete'),
{'path': self.folder.rstrip('/') + '/' + path.path.rstrip('/')},
throws=exceptions.DeleteError,
)
async def metadata(self, path, revision=None, **kwargs):
full_path = path.full_path.rstrip('/')
url = self.build_url('files', 'get_metadata')
body = {'path': full_path}
if revision:
body = {'path': 'rev:' + revision}
elif path.is_folder:
url = self.build_url('files', 'list_folder')
if path.is_folder:
ret = []
has_more = True
page_count = 0
while has_more:
page_count += 1
data = await self.dropbox_request(url, body, throws=exceptions.MetadataError)
for entry in data['entries']:
if entry['.tag'] == 'folder':
ret.append(DropboxFolderMetadata(entry, self.folder))
else:
ret.append(DropboxFileMetadata(entry, self.folder))
if not data['has_more']:
has_more = False
else:
url = self.build_url('files', 'list_folder', 'continue')
body = {'cursor': data['cursor']}
self.metrics.add('metadata.folder.pages', page_count)
return ret
data = await self.dropbox_request(url, body, throws=exceptions.MetadataError)
# Dropbox v2 API will not indicate file/folder if path "deleted"
if data['.tag'] == 'deleted':
raise exceptions.MetadataError(
"Could not retrieve '{}'".format(path),
code=http.client.NOT_FOUND,
)
# Dropbox will match a file or folder by name within the requested path
if path.is_file and data['.tag'] == 'folder':
raise exceptions.MetadataError(
"Could not retrieve file '{}'".format(path),
code=http.client.NOT_FOUND,
)
return DropboxFileMetadata(data, self.folder)
async def revisions(self, path, **kwargs):
# Dropbox v2 API limits the number of revisions returned to a maximum
# of 100, default 10. Previously we had set the limit to 250.
data = await self.dropbox_request(
self.build_url('files', 'list_revisions'),
{'path': path.full_path.rstrip('/'), 'limit': 100},
throws=exceptions.RevisionsError,
)
if data['is_deleted'] is True:
raise exceptions.RevisionsError(
"Could not retrieve '{}'".format(path),
code=http.client.NOT_FOUND,
)
if data['is_deleted']:
return []
return [DropboxRevision(item) for item in data['entries']]
async def create_folder(self, path, **kwargs):
"""
:param str path: The path to create a folder at
"""
WaterButlerPath.validate_folder(path)
data = await self.dropbox_request(
self.build_url('files', 'create_folder'),
{'path': path.full_path.rstrip('/')},
throws=exceptions.CreateFolderError,
)
return DropboxFolderMetadata(data, self.folder)
def can_intra_copy(self, dest_provider, path=None):
return type(self) == type(dest_provider)
def can_intra_move(self, dest_provider, path=None):
return self == dest_provider
def _build_content_url(self, *segments, **query):
return provider.build_url(settings.BASE_CONTENT_URL, *segments, **query)
async def _delete_folder_contents(self, path, **kwargs):
"""Delete the contents of a folder. For use against provider root.
:param WaterButlerPath path: WaterButlerPath path object for folder
"""
meta = (await self.metadata(path))
for child in meta:
dropbox_path = await self.validate_path(child.path)
await self.delete(dropbox_path)
```
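The 409 branch in `dropbox_request` above hands Dropbox's standard error payload to `dropbox_conflict_error_handler`. The following standalone sketch (not part of the provider) walks a sample payload through the same decision logic; the payload is illustrative, shaped after the Dropbox v2 docs, and the returned labels stand in for the WaterButler exceptions.
```python
# Standalone sketch of the branch order in dropbox_conflict_error_handler above.
# `sample_409` is an illustrative payload shaped after Dropbox's v2 error format.
sample_409 = {
    'error_summary': 'path/not_found/..',
    'error': {
        '.tag': 'path',
        'path': {'.tag': 'not_found'},
    },
}

def classify(data):
    """Return the name of the exception the handler above would raise."""
    error_class = data['error']['.tag']
    if error_class in data['error']:
        error_type = data['error'][error_class]
        if error_type['.tag'] == 'not_found':
            return 'NotFoundError'
        if 'conflict' in error_type:
            return 'DropboxNamingConflictError'
    if data['error'].get('reason', False) and 'conflict' in data['error']['reason']['.tag']:
        return 'DropboxNamingConflictError'
    return 'DropboxUnhandledConflictError'

print(classify(sample_409))  # -> NotFoundError
```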
#### File: providers/figshare/provider.py
```python
import http
import json
import asyncio
import aiohttp
from waterbutler.core import streams
from waterbutler.core import provider
from waterbutler.core import exceptions
from waterbutler.providers.figshare.path import FigsharePath
from waterbutler.providers.figshare import metadata, settings
class FigshareProvider:
"""Provider for Figshare repositories.
**On paths:** Figshare does not have any notion of paths and has a very flat structure. Top
level objects are one of the following.
A 'project' that contains 'articles'. The project can be either public or private.
A 'collection' that points to private and/or public 'articles' and can itself be either public
or private.
An 'article' that contains 0 or more 'files' and may or may not be associated with a project.
Articles may be either public or private.
'Articles' may contain 'files'.
'Articles' are one of (currently) ten types. All but one of these 'defined_types' may contain no
more than one file. The exception is the 'fileset' 'defined_type' which may contain more than
one 'file'.
The FigshareProvider allows for the possibility of a private 'article', a private 'project', or
a private 'collection' as the root of a waterbutler provider instance. The FigshareProvider's
default configuration treats 'articles' with a 'defined_type' of 'fileset' as a folder, and all
other 'defined_type's as a file.
In practice, this means that when returning the metadata for the root(/) folder, only 'articles'
of 'defined_type' 'fileset' will be returned as a folder. All other 'articles' will be returned
as a file if they contain a file and ignored if they do not contain a file.
If an article is configured as the provider root, it will contain 0 or more files.
Valid FigsharePaths for root project/collection::
/
/<article_id for type fileset>/ (default configuration)
/<article_id of any type>/<file_id>
Valid FigsharePaths for root article::
/
/<file_id>
Invalid FigsharePaths for root project/collection examples::
/<article_id of any type>
/<article_id of any type other than fileset>/ (default configuration)
/<article_id of any type>/<file_id>/
path of any depth greater than 2
Invalid FigsharePaths for root article examples::
/<any_id>/
/<any_id other than a file_id>
path of any depth greater than 1
API docs: https://docs.figshare.com/
"""
def __new__(cls, auth, credentials, settings):
if settings['container_type'] == 'project':
return FigshareProjectProvider(
auth, credentials,
dict(settings, container_id=settings['container_id'])
)
if settings['container_type'] in ('article', 'fileset'):
return FigshareArticleProvider(
auth, credentials, dict(settings, container_id=settings['container_id'])
)
raise exceptions.ProviderError(
'Invalid "container_type" {0}'.format(settings['container_type'])
)
class BaseFigshareProvider(provider.BaseProvider):
NAME = 'figshare'
BASE_URL = settings.BASE_URL
VIEW_URL = settings.VIEW_URL
DOWNLOAD_URL = settings.DOWNLOAD_URL
VALID_CONTAINER_TYPES = settings.VALID_CONTAINER_TYPES
def __init__(self, auth, credentials, settings):
super().__init__(auth, credentials, settings)
self.token = self.credentials['token']
self.container_type = self.settings['container_type']
if self.container_type not in self.VALID_CONTAINER_TYPES:
raise exceptions.ProviderError('{} is not a valid container type.'.format(self.container_type))
if self.container_type == 'fileset':
self.container_type = 'article'
self.container_id = self.settings['container_id']
self.metrics.add('container', {
'given_type': self.settings['container_type'],
'actual_type': self.container_type,
})
@property
def root_path_parts(self):
return (self.container_type + 's', self.container_id)
@property
def default_headers(self):
return {
'Authorization': 'token {}'.format(self.token),
}
def build_url(self, is_public: bool, *segments, **query):
"""A nice wrapper around furl, builds urls based on self.BASE_URL
:param bool is_public: ``True`` if addressing public resource
:param tuple \*segments: A tuple of strings joined into ``/foo/bar/``
:param dict \*\*query: A dictionary that will be turned into query parameters ``?foo=bar``
:rtype: str
Subclassed to include handling of ``is_public`` argument. ``collection`` containers may
contain public articles which are accessed through a URN with a different prefix.
"""
if not is_public:
segments = ('account', (*segments))
return (super().build_url(*segments, **query))
async def make_request(self, method, url, *args, **kwargs):
"""JSONifies ``data`` kwarg, if present and a ``dict``.
:param str method: HTTP method
:param str url: URL
:param tuple \*args:
:param dict \*\*kwargs:
"""
if isinstance(kwargs.get('data'), dict):
kwargs['data'] = json.dumps(kwargs['data'])
return await super().make_request(method, url, *args, **kwargs)
def can_duplicate_names(self):
"""Figshare allows articles to have duplicate titles and files to have duplicate names, but
does not allow the creation of duplicate files and folders.
"""
return False
async def _get_url_super(self, url):
# Use super to avoid is_public logic
# Allows for taking advantage of asyncio.gather
response = await super().make_request('GET', url, expects=(200, ))
return await response.json()
def _path_split(self, path):
"""Strip trailing slash from path string, then split on remaining slashes.
:param str path: url path string to be split.
"""
return path.rstrip('/').split('/')
async def download(self, path, **kwargs):
"""Download the file identified by ``path`` from this project.
:param FigsharePath path: FigsharePath to file you want to download
:rtype ResponseStreamReader:
"""
if not path.is_file:
raise exceptions.NotFoundError(str(path))
file_metadata = await self.metadata(path)
download_url = file_metadata.extra['downloadUrl']
if download_url is None:
raise exceptions.DownloadError('Download not available', code=http.client.FORBIDDEN)
params = {} if file_metadata.is_public else {'token': self.token}
resp = await aiohttp.request('GET', download_url, params=params)
if resp.status == 404:
await resp.release()
raise exceptions.DownloadError('Download not available', code=http.client.FORBIDDEN)
return streams.ResponseStreamReader(resp)
def path_from_metadata(self, parent_path, metadata):
"""Build FigsharePath for child entity given child's metadata and parent's path object.
:param FigsharePath parent_path: path obj for child's parent
:param metadata: Figshare*Metadata object for child
"""
return parent_path.child(metadata.name, _id=str(metadata.id),
folder=(metadata.kind == 'folder'))
async def revisions(self, path, **kwargs):
# Public articles have revisions, but projects, collections, and private articles do not.
# For now, return a single Revision labeled "latest".
return [metadata.FigshareFileRevisionMetadata()]
async def _upload_file(self, article_id, name, stream):
"""Uploads a file to Figshare and returns the file id.
:param str article_id: the id of the parent article
:param str name: the name of the file
:param stream: the file stream to upload
:rtype: `str`
:return: id of new file
"""
# Process for creating a file:
# 1. Get file ID
file_id = await self._make_file_placeholder(article_id, name, stream.size)
# 2. Get upload url and file parts info
# Added sleep() as the file was not available right away after getting a 201 back.
# polling with HEADs is another possible solution
await asyncio.sleep(settings.FILE_CREATE_WAIT)
upload_url, parts = await self._get_file_upload_url(article_id, file_id)
# 3. Upload parts
self.metrics.add('upload.parts.count', len(parts))
await self._upload_file_parts(stream, upload_url, parts)
# 4. Mark upload complete
await self._mark_upload_complete(article_id, file_id)
return file_id
async def _make_file_placeholder(self, article_id, name, size):
"""Create a placeholder for a file to be uploaded later. Takes the id of the parent
article, a name for the file, and the size. Returns the id set aside for the file.
:param str article_id: the id of the parent article
:param str name: the name of the file
:param int size: the size of the file
:returns str: the id of the file placeholder
"""
file_resp = await self.make_request(
'POST',
self.build_url(False, 'articles', article_id, 'files'),
data=json.dumps({'name': name, 'size': size}),
expects=(201, ),
)
file_json = await file_resp.json()
return file_json['location'].rsplit('/', 1)[1]
async def _get_file_upload_url(self, article_id, file_id):
"""Request an upload url and partitioning spec from Figshare.
See: https://docs.figshare.com/api/file_uploader/
:param str article_id: the id of the parent article
:param str file_id: the id of the file
:returns (str, list): the upload url and the parts specification
"""
# TODO: retry with backoff
resp = await self.make_request(
'GET',
self.build_url(False, 'articles', article_id, 'files', file_id),
expects=(200, 404),
)
if resp.status == 404:
raise exceptions.ProviderError(
'Could not get upload_url. File creation may have taken more '
'than {} seconds to finish.'.format(str(settings.FILE_CREATE_WAIT)))
upload_json = await resp.json()
upload_url = upload_json['upload_url']
parts_resp = await self.make_request('GET', upload_url, expects=(200, ),)
parts_json = await parts_resp.json()
return upload_url, parts_json['parts'] # str, list
async def _upload_file_parts(self, stream, upload_url, parts):
"""Takes a stream, the upload url, and a list of parts to upload, and send the chunks
dictated by ``parts`` to figshare.
See: https://docs.figshare.com/api/file_uploader/
:param stream: the file stream to upload
:param str upload_url: the base url to upload to
:param list parts: a structure describing the expected partitioning of the file
"""
for part in parts:
size = part['endOffset'] - part['startOffset'] + 1
part_number = part['partNo']
upload_response = await self.make_request(
'PUT',
upload_url + '/' + str(part_number),
data=stream.readexactly(size),
expects=(200, ),
)
await upload_response.release()
async def _mark_upload_complete(self, article_id, file_id):
"""Signal to Figshare that all of the parts of the file have been uploaded successfully.
See: https://docs.figshare.com/api/file_uploader/
:param str article_id: the id of the parent article
:param str file_id: the id of the file
"""
resp = await self.make_request(
'POST',
self.build_url(False, 'articles', article_id, 'files', file_id),
expects=(202, ),
)
await resp.release()
class FigshareProjectProvider(BaseFigshareProvider):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
async def validate_v1_path(self, path, **kwargs):
"""Take a string path from the url and attempt to map it to an entity within this project.
If the entity is found, returns a FigsharePath object with the entity identifiers included.
Otherwise throws a 404 Not Found. Will also assert that the entity type inferred from the
path matches the type of the entity at that url.
:param str path: entity path from the v1 API
:rtype FigsharePath:
"""
if path == '/':
return FigsharePath('/', _ids=('', ), folder=True, is_public=False)
path_parts = self._path_split(path)
if len(path_parts) not in (2, 3):
raise exceptions.InvalidPathError('{} is not a valid Figshare path.'.format(path))
article_id = path_parts[1]
file_id = path_parts[2] if len(path_parts) == 3 else None
articles = await self._get_all_articles()
# TODO: need better way to get public/private
# This call's return value is currently busted at figshare for collections. Figshare always
# returns private-looking urls.
is_public = False
for item in articles:
if '/articles/' + article_id in item['url']:
article_name = item['title']
if settings.PRIVATE_IDENTIFIER not in item['url']:
is_public = True
article_segments = (*self.root_path_parts, 'articles', article_id)
if file_id:
file_response = await self.make_request(
'GET',
self.build_url(is_public, *article_segments, 'files', file_id),
expects=(200, ),
)
file_json = await file_response.json()
file_name = file_json['name']
if path[-1] == '/':
raise exceptions.NotFoundError('File paths must not end with "/". '
'{} not found.'.format(path))
return FigsharePath('/' + article_name + '/' + file_name,
_ids=(self.container_id, article_id, file_id),
folder=False,
is_public=is_public)
article_response = await self.make_request(
'GET',
self.build_url(is_public, *article_segments),
expects=(200, ),
)
article_json = await article_response.json()
if article_json['defined_type'] in settings.FOLDER_TYPES:
if not path[-1] == '/':
raise exceptions.NotFoundError('Folder paths must end with "/". {} not found.'.format(path))
return FigsharePath('/' + article_name + '/', _ids=(self.container_id, article_id),
folder=True, is_public=is_public)
raise exceptions.NotFoundError('This article is not configured as a folder defined_type. '
'{} not found.'.format(path))
async def validate_path(self, path, **kwargs):
"""Take a string path from the url and attempt to map it to an entity within this project.
If the entity is found, returns a FigsharePath object with the entity identifiers included.
Otherwise returns a FigsharePath with empty identifiers.
:param str path: identifier_path URN as passed through the v0 API
:rtype FigsharePath:
Quirks:
* v0 may pass an identifier_path whose last part is a name and not an identifier, in the
case of file/folder creation calls.
* validate_path validates parent and returns a FigsharePath as accurately as possible.
"""
if path == '/':
return FigsharePath('/', _ids=('', ), folder=True, is_public=False)
path_parts = self._path_split(path)
if len(path_parts) not in (2, 3):
raise exceptions.InvalidPathError('{} is not a valid Figshare path.'.format(path))
article_id = path_parts[1]
file_id = path_parts[2] if len(path_parts) == 3 else None
articles = await self._get_all_articles()
# TODO: need better way to get public/private
# This call's return value is currently busted at figshare for collections. Figshare always
# returns private-looking urls.
is_public = False
for item in articles:
if '/articles/' + article_id in item['url']:
article_name = item['title']
if settings.PRIVATE_IDENTIFIER not in item['url']:
is_public = True
article_segments = (*self.root_path_parts, 'articles', article_id)
if file_id:
file_response = await self.make_request(
'GET',
self.build_url(is_public, *article_segments, 'files', file_id),
expects=(200, 404, ),
)
if file_response.status == 200:
file_response_json = await file_response.json()
file_name = file_response_json['name']
return FigsharePath('/' + article_name + '/' + file_name,
_ids=(self.container_id, article_id, file_id),
folder=False,
is_public=is_public)
await file_response.release()
article_response = await self.make_request(
'GET',
self.build_url(is_public, *article_segments),
expects=(200, 404, ),
)
if article_response.status == 200:
article_json = await article_response.json()
if article_json['defined_type'] in settings.FOLDER_TYPES:
# Case of v0 file creation
if file_id:
ids = ('', article_id, '')
folder = False
path_urn = '/' + article_name + '/' + file_id
else:
ids = ('', article_id)
folder = True
path_urn = '/' + article_name + '/'
return FigsharePath(path_urn, _ids=ids, folder=folder, is_public=is_public)
else:
await article_response.release()
if file_id:
# Catch for if neither file nor article exist
raise exceptions.NotFoundError(path)
# Return for v0 folder creation
return FigsharePath(path, _ids=('', ''), folder=True, is_public=False)
async def revalidate_path(self, parent_path, child_name, folder):
"""Look for file or folder named ``child_name`` under ``parent_path``. If it finds a match,
it returns a FigsharePath object with the appropriate ids set. Otherwise, it returns a
FigsharePath where the ids are set to ``None``.
Because figshare allows duplicate titles/names for articles/files,
revalidate_path cannot be relied on to always return the correct id of an
existing child_name. It will return the first id that matches the folder and
child_name arguments, or ``None`` if there is no match.
:param FigsharePath parent_path: Path of parent
:param str child_name: Name of child
:param bool folder: ``True`` if child is folder
:rtype: ``FigsharePath``
:return: a FigsharePath object, with ids set if a match was found
"""
parent_is_folder = False
urn_parts = (*self.root_path_parts, 'articles')
child_id = None
if not parent_path.is_root: # parent is fileset or article
if not folder: # child is article/file
list_children_response = await self.make_request(
'GET',
self.build_url(False, *urn_parts, parent_path.identifier),
expects=(200, ),
)
article_json = await list_children_response.json()
for file in article_json['files']:
if file['name'] == child_name:
child_id = str(file['id'])
break
return parent_path.child(child_name, _id=child_id, folder=folder,
parent_is_folder=parent_is_folder)
# parent is root
children = await self._get_all_articles()
articles = await asyncio.gather(*[
self._get_url_super(article_json['url'])
for article_json in children
])
for article in articles:
is_folder = article['defined_type'] in settings.FOLDER_TYPES
article_id = str(article['id'])
article_name = str(article['title'])
if folder != is_folder:
continue
elif folder:
if article_name == child_name:
child_id = article_id
break
else:
parent_is_folder = False
for file in article['files']:
if file['name'] == child_name:
parent_path = parent_path.child(article_name, _id=article_id, folder=False)
child_id = str(file['id'])
break
return parent_path.child(child_name, _id=child_id, folder=folder,
parent_is_folder=parent_is_folder)
async def upload(self, stream, path, conflict='replace', **kwargs):
"""Upload a file to provider root or to an article whose defined_type is
configured to represent a folder.
:param asyncio.StreamReader stream: stream to upload
:param FigsharePath path: FigsharePath to upload the file to.
:param dict \*\*kwargs: Will be passed to returned metadata object
"""
if path.identifier and conflict == 'replace':
raise exceptions.UnsupportedOperationError('Files in Figshare cannot be updated')
path, exists = await self.handle_name_conflict(path, conflict=conflict)
if not path.parent.is_root:
parent_resp = await self.make_request(
'GET',
self.build_url(False, *self.root_path_parts, 'articles', path.parent.identifier),
expects=(200, ),
)
parent_json = await parent_resp.json()
if not parent_json['defined_type'] in settings.FOLDER_TYPES:
del path._parts[1]
# Create article or retrieve article_id from existing article
if not path.parent.is_root:
article_id = path.parent.identifier
else:
article_name = json.dumps({'title': path.name})
if self.container_type == 'project':
article_id = await self._create_article(article_name)
elif self.container_type == 'collection':
# TODO don't think this is correct. Probably should POST to /accounts/articles
article_id = await self._create_article(article_name)
article_list = json.dumps({'articles': [article_id]})
await self.make_request(
'POST',
self.build_url(False, *self.root_path_parts, 'articles'),
data=article_list,
expects=(201, ),
)
file_id = await self._upload_file(article_id, path.name, stream)
# Build new file path and return metadata
path = FigsharePath('/' + article_id + '/' + file_id,
_ids=(self.container_id, article_id, file_id),
folder=False,
is_public=False)
return (await self.metadata(path, **kwargs)), True
async def create_folder(self, path, **kwargs):
"""Create a folder at ``path``. Returns a `FigshareFolderMetadata` object if successful.
:param FigsharePath path: FigsharePath representing the folder to create
:rtype: :class:`waterbutler.core.metadata.FigshareFolderMetadata`
:raises: :class:`waterbutler.core.exceptions.CreateFolderError`
"""
if (len(path.parts) == 2) and path.is_folder:
article_name = path.parts[-1].value
else:
raise exceptions.CreateFolderError(
'Only projects and collections may contain folders. Unable to create '
'"{}/"'.format(path.name),
code=400,
)
article_data = json.dumps({'title': article_name, 'defined_type': 'fileset'})
article_id = await self._create_article(article_data)
get_article_response = await self.make_request(
'GET',
self.build_url(False, *self.root_path_parts, 'articles', article_id),
expects=(200, ),
throws=exceptions.NotFoundError,
)
article_json = await get_article_response.json()
return metadata.FigshareFolderMetadata(article_json)
async def delete(self, path, confirm_delete=0, **kwargs):
"""Delete the entity at ``path``.
:param FigsharePath path: Path to be deleted
:param int confirm_delete: Must be 1 to confirm root folder delete
:rtype: None
:raises: :class:`waterbutler.core.exceptions.NotFoundError`
:raises: :class:`waterbutler.core.exceptions.DeleteError`
Quirks:
* If the FigsharePath given is for the provider root path, then the contents of the
provider root path will be deleted, but not the provider root itself.
"""
if not path.identifier:
raise exceptions.NotFoundError(str(path))
if path.is_root:
if confirm_delete == 1:
return await self._delete_container_contents()
raise exceptions.DeleteError(
'confirm_delete=1 is required for deleting root provider folder',
code=400
)
if len(path.parts) == 2:
if not path.is_folder:
raise exceptions.NotFoundError(str(path))
delete_path = (*self.root_path_parts, 'articles', path.parts[1]._id)
elif len(path.parts) == 3:
if path.is_folder:
raise exceptions.NotFoundError(str(path))
article_response = await self.make_request(
'GET',
self.build_url(False, *self.root_path_parts, 'articles', path.parts[1]._id),
expects=(200, ),
)
article_json = await article_response.json()
if article_json['defined_type'] in settings.FOLDER_TYPES:
delete_path = ('articles', path.parts[1]._id, 'files', path.parts[2]._id)
else:
delete_path = (*self.root_path_parts, 'articles', path.parts[1]._id)
delete_article_response = await self.make_request(
'DELETE',
self.build_url(False, *delete_path),
expects=(204, ),
)
await delete_article_response.release()
async def metadata(self, path, **kwargs):
"""Return metadata for entity identified by ``path`` under the parent project.
:param FigsharePath path: entity whose metadata will be returned
:rtype: FigshareFileMetadata obj or list of Metadata objs
"""
if path.is_root:
path.is_public = False
contents = await asyncio.gather(*[
# TODO: collections may need to use each['url'] for correct URN
# Use _get_url_super ? figshare API needs to get fixed first.
self._get_article_metadata(str(each['id']), path.is_public)
for each in await self._get_all_articles()
])
return [each for each in contents if each]
if not path.parts[-1].identifier:
raise exceptions.NotFoundError(str(path))
if len(path.parts) > 3:
raise exceptions.NotFoundError(str(path))
article_response = await self.make_request(
'GET',
self.build_url(path.is_public, *self.root_path_parts,
'articles', path.parts[1].identifier),
expects=(200, 404),
)
if article_response.status == 404:
raise exceptions.NotFoundError(str(path))
article_json = await article_response.json()
if len(path.parts) == 2:
if article_json['defined_type'] not in settings.FOLDER_TYPES:
raise exceptions.NotFoundError(str(path))
contents = []
for file in article_json['files']:
contents.append(metadata.FigshareFileMetadata(article_json, raw_file=file))
return contents
elif len(path.parts) == 3:
for file in article_json['files']:
if file['id'] == int(path.parts[2].identifier):
return metadata.FigshareFileMetadata(article_json, raw_file=file)
raise exceptions.NotFoundError(path.path)
else:
raise exceptions.NotFoundError('{} is not a valid path.'.format(path))
async def _get_article_metadata(self, article_id, is_public: bool):
"""Return Figshare*Metadata object for given article_id. Returns a FolderMetadata object
for filesets, a FileMetadat object for other article types, and ``None`` if the article
is not a fileset and has no files attached.
Defined separately to allow for taking advantage of ``asyncio.gather``.
:param str article_id: id of article whose metadata is requested
:param bool is_public: ``True`` if article is accessed through public URN
"""
response = await self.make_request(
'GET',
self.build_url(is_public, *self.root_path_parts, 'articles', article_id),
expects=(200, ),
)
article_json = await response.json()
if article_json['defined_type'] in settings.FOLDER_TYPES:
return metadata.FigshareFolderMetadata(article_json)
elif article_json['files']:
return metadata.FigshareFileMetadata(article_json)
return None # article without attached file
async def _delete_container_contents(self):
"""Delete all articles within this Project or Collection."""
# TODO: Needs logic for skipping public articles in collections
articles = await self._get_all_articles()
for article in articles:
delete_article_response = await self.make_request(
'DELETE',
self.build_url(False, *self.root_path_parts, 'articles', str(article['id'])),
expects=(204, ),
)
await delete_article_response.release()
async def _get_all_articles(self):
"""Get all articles under a project or collection. This endpoint is paginated and does not
provide limit metadata, so we keep querying until we receive an empty array response.
See https://docs.figshare.com/api/#searching-filtering-and-pagination for details.
:return: list of article json objects
:rtype: `list`
"""
all_articles, keep_going, page = [], True, 1
while keep_going:
resp = await self.make_request(
'GET',
self.build_url(False, *self.root_path_parts, 'articles'),
params={'page': str(page), 'page_size': str(settings.MAX_PAGE_SIZE)},
expects=(200, ),
)
articles = await resp.json()
all_articles.extend(articles)
page += 1
keep_going = len(articles) > 0
return all_articles
async def _create_article(self, data):
"""Create an article placeholder with the properties given in ``data``. Returns the id of
the new article. See https://docs.figshare.com/api/articles/#create-a-new-article for
valid properties.
:param dict data: properties to set for new article
:return: the id of the newly created article
:rtype: `str`
"""
resp = await self.make_request(
'POST',
self.build_url(False, *self.root_path_parts, 'articles'),
data=data,
expects=(201, ),
throws=exceptions.CreateFolderError,
)
articles_json = await resp.json()
article_id = articles_json['location'].rsplit('/', 1)[1]
return article_id
class FigshareArticleProvider(BaseFigshareProvider):
def __init__(self, auth, credentials, settings, child=False):
super().__init__(auth, credentials, settings)
async def validate_v1_path(self, path, **kwargs):
"""Take a string path from the url and attempt to map it to an entity within this article.
If the entity is found, returns a FigsharePath object with the entity identifiers included.
Otherwise throws a 404 Not Found. Will also assert that the entity type inferred from the
path matches the type of the entity at that url.
:param str path: entity path from the v1 API
:rtype FigsharePath:
"""
if path == '/':
return FigsharePath('/', _ids=('', ), folder=True, is_public=False)
path_parts = self._path_split(path)
if len(path_parts) != 2:
raise exceptions.InvalidPathError('{} is not a valid Figshare path.'.format(path))
file_id = path_parts[1]
resp = await self.make_request(
'GET',
self.build_url(False, *self.root_path_parts, 'files', file_id),
expects=(200, ),
)
file_json = await resp.json()
return FigsharePath('/' + file_json['name'], _ids=('', file_id), folder=False,
is_public=False)
async def validate_path(self, path, **kwargs):
"""Take a string path from the url and attempt to map it to an entity within this article.
If the entity is found, returns a FigsharePath object with the entity identifiers included.
Otherwise returns a FigsharePath with empty identifiers.
:param str path: identifier path URN as passed through the v0 API
:rtype FigsharePath:
Quirks:
* v0 may pass an identifier_path whose last part is a name and not an identifier, in the
case of file/folder creation calls.
* validate_path validates parent and returns a FigsharePath as accurately as possible.
"""
if path == '/':
return FigsharePath('/', _ids=('', ), folder=True, is_public=False)
path_parts = self._path_split(path)
if len(path_parts) != 2:
raise exceptions.InvalidPathError('{} is not a valid Figshare path.'.format(path))
file_id = path_parts[1]
resp = await self.make_request(
'GET',
self.build_url(False, *self.root_path_parts, 'files', file_id),
expects=(200, 404, ),
)
if resp.status == 200:
file_json = await resp.json()
file_name = file_json['name']
return FigsharePath('/' + file_name, _ids=('', file_id), folder=False, is_public=False)
# catch for create file in article root
await resp.release()
return FigsharePath('/' + file_id, _ids=('', ''), folder=False, is_public=False)
async def revalidate_path(self, parent_path, child_name, folder: bool):
"""Attempt to get child's id and return FigsharePath of child.
``revalidate_path`` is used to check for the existence of a child_name/folder
within the parent. It returns a FigsharePath for the child; the child will have an _id
if a conflicting child_name/folder exists, otherwise its _id will be ''.
:param FigsharePath parent_path: Path of parent
:param str child_name: Name of child
:param bool folder: ``True`` if child is folder
Code notes:
Because figshare allows duplicate titles/names for articles/files,
revalidate_path cannot be relied on to always return the correct id of an
existing child_name. It will return the first id that matches the folder and
child_name arguments, or '' if there is no match.
"""
parent_is_folder = False
urn_parts = self.root_path_parts
if not parent_path.is_root:
if folder:
raise exceptions.NotFoundError(
'{} is not a valid parent path of folder={}. Folders can only exist at the '
'root level.'.format(parent_path.identifier_path, str(folder)))
else:
urn_parts = (*urn_parts, (parent_path.identifier))
list_children_response = await self.make_request(
'GET',
self.build_url(False, *urn_parts),
expects=(200, ),
)
child_id = ''
article_json = await list_children_response.json()
for file in article_json['files']:
if file['name'] == child_name:
child_id = str(file['id'])
break
return parent_path.child(child_name, _id=child_id, folder=folder,
parent_is_folder=parent_is_folder)
async def upload(self, stream, path, conflict='replace', **kwargs):
"""Upload a file to provider root or to an article whose defined_type is
configured to represent a folder.
:param asyncio.StreamReader stream: stream to upload
:param FigsharePath path: FigsharePath to upload the file to.
:param dict \*\*kwargs: Will be passed to returned metadata object
"""
path, exists = await self.handle_name_conflict(path, conflict=conflict)
if not path.parent.is_root:
parent_resp = await self.make_request(
'GET',
self.build_url(False, *self.root_path_parts, 'articles', path.parent.identifier),
expects=(200, ),
)
parent_json = await parent_resp.json()
if not parent_json['defined_type'] in settings.FOLDER_TYPES:
del path._parts[1]
file_id = await self._upload_file(self.container_id, path.name, stream)
# Build new file path and return metadata
path = FigsharePath('/' + file_id, _ids=('', file_id), folder=False, is_public=False)
return (await self.metadata(path, **kwargs)), True
async def create_folder(self, path, **kwargs):
raise exceptions.CreateFolderError('Cannot create folders within articles.', code=400)
async def delete(self, path, confirm_delete=0, **kwargs):
"""Delete the file at ``path``. If ``path`` is ``/`` and ``confirm_delete`` is ``1``, then
delete all of the files within the article, but not the article itself.
:param FigsharePath path: Path to be deleted
:param int confirm_delete: Must be 1 to confirm root folder delete
:rtype: None
:raises: :class:`waterbutler.core.exceptions.NotFoundError`
:raises: :class:`waterbutler.core.exceptions.DeleteError`
Quirks:
* If the FigsharePath given is for the provider root path, then the contents of the
provider root path will be deleted, but not the provider root itself.
"""
if path.is_root:
if confirm_delete == 1:
return await self._delete_container_contents()
raise exceptions.DeleteError(
'confirm_delete=1 is required for deleting root provider folder',
code=400
)
await self._delete_file(path.parts[-1]._id)
async def metadata(self, path, **kwargs):
"""Return metadata for entity identified by ``path``. May be the containing article or
a file in a fileset article.
:param FigsharePath path: entity whose metadata will be returned
:rtype FigshareFileMetadata obj or list of Metadata objs:
"""
article = await self._get_article(not path.is_public)
if path.is_root: # list files in article
contents = []
for file in article['files']:
contents.append(metadata.FigshareFileMetadata(article, raw_file=file))
return contents
elif len(path.parts) == 2: # metadata for a particular file
for file in article['files']:
if str(file['id']) == path.parts[1].identifier:
return metadata.FigshareFileMetadata(article, raw_file=file)
# Invalid path, e.g. /422313/67709/1234
raise exceptions.NotFoundError(str(path))
async def _delete_container_contents(self):
"""Delete files within the containing article."""
article = await self._get_article()
for file in article['files']:
await self._delete_file(str(file['id']))
async def _get_article(self, is_owned=True):
"""Get the metadata for the container article. If the article is a public article not owned
by the credentialed user, the request must be sent to a different endpoint.
:param bool is_owned: Is this article owned by the credentialed user? Default: ``True``
"""
resp = await self.make_request(
'GET',
self.build_url(not is_owned, *self.root_path_parts),
expects=(200, ),
)
return await resp.json()
async def _delete_file(self, file_id):
"""Delete a file from the root article. Docs:
https://docs.figshare.com/api/articles/#delete-file-from-article
:param str file_id: the id of the file to delete
"""
resp = await self.make_request(
'DELETE',
self.build_url(False, *self.root_path_parts, 'files', file_id),
expects=(204, ),
)
await resp.release()
``` |
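The `_get_all_articles` helper above keeps requesting pages until figshare returns an empty list, since the endpoint exposes no total count. Below is a self-contained sketch of that loop, with `fetch_page` standing in for the authenticated figshare call.
```python
# Pagination pattern mirrored from _get_all_articles above; fetch_page is a
# stand-in for the authenticated figshare request.
def fetch_page(page, page_size=3, total=7):
    start = (page - 1) * page_size
    return list(range(start, min(start + page_size, total)))

def get_all(page_size=3):
    items, keep_going, page = [], True, 1
    while keep_going:
        batch = fetch_page(page, page_size)
        items.extend(batch)
        page += 1
        keep_going = len(batch) > 0
    return items

print(get_all())  # -> [0, 1, 2, 3, 4, 5, 6]
```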
{
"source": "JohnFBartlett/blind-story-writer",
"score": 3
} |
#### File: JohnFBartlett/blind-story-writer/add_stories.py
```python
import os, sys, json
NOUN_INTERVAL = 3
ADJ_INTERVAL = 2
VERB_INTERVAL = 4
ADV_INTERVAL = 3
# As long as INTERJ interval is 1, it's grouped with entity class
# INTERJ_INTERVAL = 1
def make_story_format(story):
title = []
game_text = []
last_pos = ''
blank = False
with open('pos_interface_mappings.json', 'r') as mp:
pos_map = json.load(mp)
for i, (word, pos) in enumerate(story):
# check POS of word
if pos in {'PERSON', 'ORGANIZATION', 'LOCATION', 'UH'}:
# If there are two of the same entity tag in a row,
# it's probably the same entity
if last_pos == pos:
blank = True
continue
# if title isn't finished and we get to a blank, start it over
if len(title) < 3:
title = []
game_text.append('[' + pos_map.get(pos) + ']')
blank = True
last_pos = pos
elif pos in {'NN', 'NNS'}:
if i % NOUN_INTERVAL == 0:
# if title isn't finished and we get to a blank, start it over
if len(title) < 3:
title = []
game_text.append('[' + pos_map.get(pos) + ']')
blank = True
last_pos = pos
elif pos in {'JJ', 'JJR', 'JJS'}:
if i % ADJ_INTERVAL == 0:
# if title isn't finished and we get to a blank, start it over
if len(title) < 3:
title = []
game_text.append('[' + pos_map.get(pos) + ']')
blank = True
last_pos = pos
elif pos in {'RB', 'RBR', 'RBS'}:
if i % ADV_INTERVAL == 0:
# if title isn't finished and we get to a blank, start it over
if len(title) < 3:
title = []
game_text.append('[' + pos_map.get(pos) + ']')
blank = True
last_pos = pos
elif pos in {'VBP', 'VBD', 'VBG'}:
if i % VERB_INTERVAL == 0:
# if title isn't finished and we get to a blank, start it over
if len(title) < 3:
title = []
game_text.append('[' + pos_map.get(pos) + ']')
blank = True
last_pos = pos
else:
last_pos = pos
# If the actual word is being used
if not blank:
if len(title) < 3:
title.append(word)
game_text.append(word)
blank = False
return ' '.join(title) + '\t' + ' '.join(game_text)
def add_stories(story_dir, game_file):
for filename in os.listdir(story_dir):
if not filename.startswith('.'):
print(story_dir+filename)
with open(story_dir+filename, 'r') as f:
story_text = [tuple(line.rstrip().split('\t')) for line in f]
formatted_story = make_story_format(story_text)
with open(game_file, 'a') as out_f:
out_f.write('\n' + formatted_story)
if __name__ == '__main__':
if len(sys.argv) > 1:
story_dir = sys.argv[1]
else:
story_dir = './story_files/'
if len(sys.argv) > 2:
game_file = sys.argv[2]
else:
game_file = './games.txt'
add_stories(story_dir, game_file)
``` |
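`make_story_format` above blanks out words by part of speech at fixed intervals, using labels from `pos_interface_mappings.json`. Here is a miniature, self-contained version of that rule with a made-up two-entry mapping standing in for the real mapping file.
```python
# Miniature version of the blanking rule in make_story_format above.
# pos_map here is a made-up stand-in for pos_interface_mappings.json.
pos_map = {'NN': 'noun', 'JJ': 'adjective'}
NOUN_INTERVAL, ADJ_INTERVAL = 3, 2

story = [('the', 'DT'), ('quick', 'JJ'), ('brown', 'JJ'), ('fox', 'NN'),
         ('jumps', 'VBZ'), ('over', 'IN'), ('the', 'DT'), ('lazy', 'JJ'), ('dog', 'NN')]

game_text = []
for i, (word, pos) in enumerate(story):
    if pos == 'NN' and i % NOUN_INTERVAL == 0:
        game_text.append('[' + pos_map[pos] + ']')
    elif pos == 'JJ' and i % ADJ_INTERVAL == 0:
        game_text.append('[' + pos_map[pos] + ']')
    else:
        game_text.append(word)

print(' '.join(game_text))  # the quick [adjective] [noun] jumps over the lazy dog
```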
{
"source": "johnfercher/b2w",
"score": 3
} |
#### File: application/filters/response_data_validator.py
```python
from functools import wraps
from flask import Response
def has_valid_data_in_response(f):
@wraps(f)
def decorated_function(*args, **kwargs):
data = f(*args, **kwargs)
if data is None:
return Response(status=404)
if type(data) is list:
jsons = [element.to_json() for element in data]
json = ','.join(jsons)
json = "[{0}]".format(json)
return Response(json, status=200, mimetype='application/json')
else:
return Response(data.to_json(), status=200, mimetype='application/json')
return decorated_function
```
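A hedged usage sketch for the decorator above. The import path and the `DemoPlanet` object are assumptions for illustration; the only contract the decorator relies on is that returned objects expose `to_json()`.
```python
# Hypothetical usage of has_valid_data_in_response; the import path assumes the
# b2w layout shown in this record, and DemoPlanet is an illustrative stand-in
# exposing the to_json() method the decorator expects.
from flask import Flask
from application.filters.response_data_validator import has_valid_data_in_response

app = Flask(__name__)

class DemoPlanet:
    def __init__(self, name):
        self.name = name

    def to_json(self):
        return '{{"name": "{0}"}}'.format(self.name)

@app.route('/planets/<name>')
@has_valid_data_in_response
def get_planet(name):
    # Returning None yields a 404; an object or a list is serialized to JSON.
    return DemoPlanet(name) if name != 'missing' else None
```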
#### File: src/repositories/planet_repository.py
```python
from bson import ObjectId
from src.domain.entities.planet import Planet
from src.repositories.mappers.planet_mapper import PlanetMapper
from src.repositories.planet_collection import PlanetCollection
class PlanetRepository(object):
def insert_list(self, planets: list):
collection = PlanetCollection.get_collection()
for planet in planets:
self.insert(planet, collection)
def insert(self, planet: Planet, collection: PlanetCollection = None):
if collection is None:
collection = PlanetCollection.get_collection()
data = PlanetMapper.domain_to_data(planet)
data["_id"] = collection.insert_one(data).inserted_id
return PlanetMapper.data_to_domain(data)
def list(self):
collection = PlanetCollection.get_collection()
datas = collection.find()
return [PlanetMapper.data_to_domain(data) for data in datas]
def get_by_id(self, id):
collection = PlanetCollection.get_collection()
data = collection.find_one({'_id': ObjectId(id)})
return PlanetMapper.data_to_domain(data)
def get_by_name(self, name):
collection = PlanetCollection.get_collection()
data = collection.find_one({'name': name})
return PlanetMapper.data_to_domain(data)
def delete_by_id(self, id):
collection = PlanetCollection.get_collection()
data = collection.find_one({'_id': ObjectId(id)})
collection.delete_one({'_id': ObjectId(id)})
return PlanetMapper.data_to_domain(data)
```
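A hedged usage sketch of `PlanetRepository`: it assumes the `src` package is importable and a MongoDB instance is reachable through `PlanetCollection`, and only calls methods defined above. The `to_json()` call is assumed from the Flask response filter earlier in this record.
```python
# Hypothetical usage; assumes the b2w package layout above and a running MongoDB
# reachable by PlanetCollection.get_collection().
from src.repositories.planet_repository import PlanetRepository

repository = PlanetRepository()
for planet in repository.list():
    print(planet.to_json())  # to_json() assumed, as used by the response filter

print(repository.get_by_name('Tatooine'))  # look a planet up by name
```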
#### File: tests/infrastructure/json_parser_test.py
```python
from src.infrastructure.json_parser import JsonParser
def test_try_get_parameter_when_exists_should_not_return_none():
# arrange
json = { "key": "value"}
# act
result = JsonParser.try_get_parameter(json, "key")
# assert
assert result is not None
assert result == "value"
def test_try_get_parameter_when_doesnt_exists_should_return_none():
# arrange
json = { "key": "value"}
# act
result = JsonParser.try_get_parameter(json, "key2")
# assert
assert result is None
``` |
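The two tests above fully pin down the behavior they exercise: `try_get_parameter` returns the value when the key is present and `None` otherwise. A minimal implementation consistent with them (the repository's actual `src/infrastructure/json_parser.py` may differ) would be:
```python
# Minimal sketch consistent with the tests above; not necessarily the repo's code.
class JsonParser(object):

    @staticmethod
    def try_get_parameter(json, parameter):
        # dict.get already returns None for a missing key.
        return json.get(parameter)
```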
{
"source": "johnfercher/sequence-diagram-gen",
"score": 2
} |
#### File: src/csharp/cs_module_interpreter.py
```python
from src.domain.interfaces.Imodule_interpreter import IModuleInterpreter
from src.csharp.cs_syntax_interpreter import CsSyntaxInterpreter
from src.domain.models.module import Module
class CsModuleInterpreter(IModuleInterpreter):
def __init__(self):
self.syntax_interpreter = CsSyntaxInterpreter()
def get_module(self, file):
print("Interpreting file " + file.name + " to a module.")
class_definition = self.syntax_interpreter.get_class_definition(file.content)
if (class_definition == None):
print("File " + file.name + " is not a module.")
return None
methods_definitions = self.syntax_interpreter.get_methods_definitions(file.content)
properties = self.syntax_interpreter.get_properties(file.content)
has_entry_points = self.syntax_interpreter.has_entry_points(file.content)
print("Builded module " + str(class_definition.name) + ".")
module = Module(file.name, class_definition, properties, methods_definitions, has_entry_points)
#print("Module File: " + module.file_name)
#print("Module ClassName: " + str(module.class_definition.name))
#print("Module Interface: " + str(module.class_definition.interface))
#print("Module Has Entry Points: " + str(module.has_entry_points))
#print("Module Properties")
#print(module.properties)
#print("Module Methods")
#print(module.methods)
return module
```
#### File: domain/interfaces/Imodule_interpreter.py
```python
class IModuleInterpreter(object):
def get_module(self, file):
raise NotImplementedError()
```
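`IModuleInterpreter` is the seam that lets other languages plug into the pipeline the same way `CsModuleInterpreter` does for C#. An illustrative do-nothing implementation only has to honor the single method; the import path below follows the repo layout used above.
```python
# Illustrative stub only: the smallest object that satisfies IModuleInterpreter.
from src.domain.interfaces.Imodule_interpreter import IModuleInterpreter

class NullModuleInterpreter(IModuleInterpreter):
    def get_module(self, file):
        # Treat every file as "not a module".
        return None
```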
#### File: domain/interfaces/Isyntax_interpreter.py
```python
class ISyntaxInterpreter(object):
def get_class_definition(self, string_content):
raise NotImplementedError()
def has_entry_points(self, string_content):
raise NotImplementedError()
def get_methods_definitions(self, string_content):
raise NotImplementedError()
def get_properties(self, string_content):
raise NotImplementedError()
``` |
{
"source": "johnfhartwig/ChemBO",
"score": 2
} |
#### File: johnfhartwig/ChemBO/chembo.py
```python
import sys
import os
import pickle as pkl
from PyQt5 import QtCore, QtGui, QtWidgets
from gui.layouts.welcome import Welcome
from gui.layouts.utils import *
def main():
try:
with open('.ChemBO_config.pkl', 'rb') as f:
config = pkl.load(f)
assert(isinstance(config, ChemBOConfig))
except (FileNotFoundError, pkl.UnpicklingError, AssertionError):  # a bare `or` here would only catch FileNotFoundError
config = ChemBOConfig()
print(f'Creating new ChemBO config file in {os.getcwd()}')
config.write()
set_config(config)
app = QtWidgets.QApplication(sys.argv)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap("gui/assets/tray.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
app.setWindowIcon(icon)
mainWindow = QtWidgets.QMainWindow()
ui = Welcome(config)
ui.setupUi(mainWindow)
mainWindow.show()
sys.exit(app.exec_())
if __name__ == '__main__':
main()
```
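`main()` above only relies on `ChemBOConfig` being picklable, passing an `isinstance` check, and exposing `write()`. A hedged sketch of that minimal interface follows; the real class in `gui/layouts/utils.py` certainly carries more state.
```python
# Hedged sketch of the minimal interface main() depends on; the real ChemBOConfig
# in gui/layouts/utils.py stores more than this.
import pickle as pkl

class ChemBOConfigSketch:
    CONFIG_PATH = '.ChemBO_config.pkl'

    def __init__(self):
        self.experiments = {}  # hypothetical field

    def write(self):
        # Persist the whole object, matching how main() reloads it with pkl.load.
        with open(self.CONFIG_PATH, 'wb') as f:
            pkl.dump(self, f)
```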
#### File: COMBO/acquisition/acquisition_marginalization.py
```python
import torch
from COMBO.graphGP.kernels.diffusionkernel import DiffusionKernel
from COMBO.graphGP.models.gp_regression import GPRegression
from COMBO.graphGP.inference.inference import Inference
from COMBO.graphGP.sampler.tool_partition import group_input
def acquisition_expectation(x, inference_samples, partition_samples, n_vertices,
acquisition_func, reference=None):
if x.dim() == 1:
x = x.unsqueeze(0)
acquisition_sample_list = []
for s in range(len(inference_samples)):
hyper = inference_samples[s].model.param_to_vec()
grouped_x = group_input(x, sorted_partition=partition_samples[s], n_vertices=n_vertices)
pred_dist = inference_samples[s].predict(grouped_x, hyper=hyper, verbose=False)
pred_mean_sample = pred_dist[0].detach()
pred_var_sample = pred_dist[1].detach()
acquisition_sample_list.append(acquisition_func(pred_mean_sample[:, 0], pred_var_sample[:, 0],
reference=reference))
return torch.stack(acquisition_sample_list, 1).sum(1, keepdim=True)
def inference_sampling(input_data, output_data, n_vertices, hyper_samples, log_beta_samples,
partition_samples, freq_samples, basis_samples):
inference_samples = []
for s in range(len(hyper_samples)):
grouped_log_beta = torch.stack([torch.sum(log_beta_samples[s][subset]) for subset in partition_samples[s]])
kernel = DiffusionKernel(grouped_log_beta=grouped_log_beta,
fourier_freq_list=freq_samples[s], fourier_basis_list=basis_samples[s])
model = GPRegression(kernel=kernel)
model.vec_to_param(hyper_samples[s])
grouped_input_data = group_input(input_data=input_data, sorted_partition=partition_samples[s],
n_vertices=n_vertices)
inference = Inference((grouped_input_data, output_data), model=model)
inference_samples.append(inference)
return inference_samples
def prediction_statistic(x, inference_samples, partition_samples, n_vertices):
if x.dim() == 1:
x = x.unsqueeze(0)
mean_sample_list = []
std_sample_list = []
var_sample_list = []
for s in range(len(inference_samples)):
grouped_x = group_input(input_data=x, sorted_partition=partition_samples[s], n_vertices=n_vertices)
pred_dist = inference_samples[s].predict(grouped_x)
pred_mean_sample = pred_dist[0]
pred_var_sample = pred_dist[1]
pred_std_sample = pred_var_sample ** 0.5
mean_sample_list.append(pred_mean_sample.data)
std_sample_list.append(pred_std_sample.data)
var_sample_list.append(pred_var_sample.data)
return torch.cat(mean_sample_list, 1).mean(1, keepdim=True),\
torch.cat(std_sample_list, 1).mean(1, keepdim=True),\
torch.cat(var_sample_list, 1).mean(1, keepdim=True)
```
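`acquisition_expectation` above approximates the acquisition value marginalized over posterior samples of the surrogate (hyperparameters and partitions) by summing per-sample acquisitions. Below is a toy, self-contained sketch of that averaging with plain tensors; `expected_improvement` here is a stand-in, not COMBO's acquisition function.
```python
# Toy sketch of marginalizing an acquisition value over posterior samples.
# expected_improvement is a stand-in acquisition (EI for minimization).
import torch

def expected_improvement(mean, var, reference):
    std = var.clamp(min=1e-9).sqrt()
    gamma = (reference - mean) / std
    normal = torch.distributions.Normal(torch.zeros_like(mean), torch.ones_like(mean))
    return std * (gamma * normal.cdf(gamma) + normal.log_prob(gamma).exp())

# Pretend these predictive moments came from 3 posterior samples of the surrogate.
pred_means = [torch.tensor([0.2]), torch.tensor([0.1]), torch.tensor([0.3])]
pred_vars = [torch.tensor([0.05]), torch.tensor([0.04]), torch.tensor([0.06])]

acq = torch.stack([expected_improvement(m, v, reference=0.0)
                   for m, v in zip(pred_means, pred_vars)], 1).sum(1, keepdim=True)
print(acq)  # one marginalized acquisition value per candidate point
```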
#### File: graphGP/sampler/priors.py
```python
import numpy as np
from scipy.special import gammaln
from COMBO.graphGP.sampler.tool_partition import compute_group_size
# For numerical stability in exponential
LOG_LOWER_BND = -12.0
LOG_UPPER_BND = 20.0
# For sampling stability
STABLE_MEAN_RNG = 1.0
# Hyperparameter for graph factorization
GRAPH_SIZE_LIMIT = 1024 + 2
def log_prior_constmean(constmean, output_min, output_max):
"""
:param constmean: numeric(float)
:param output_min: numeric(float)
:param output_max: numeric(float)
:return:
"""
output_mid = (output_min + output_max) / 2.0
output_rad = (output_max - output_min) * STABLE_MEAN_RNG / 2.0
# Unstable parameter in sampling
if constmean < output_mid - output_rad or output_mid + output_rad < constmean:
return -float('inf')
# Uniform prior
# return 0
# Truncated Gaussian
stable_dev = output_rad / 2.0
return -np.log(stable_dev) - 0.5 * (constmean - output_mid) ** 2 / stable_dev ** 2
def log_prior_noisevar(log_noise_var):
if log_noise_var < LOG_LOWER_BND or min(LOG_UPPER_BND, np.log(10000.0)) < log_noise_var:
return -float('inf')
return np.log(np.log(1.0 + (0.1 / np.exp(log_noise_var)) ** 2))
def log_prior_kernelamp(log_amp, output_var, kernel_min, kernel_max):
"""
:param log_amp:
:param output_var: numeric(float)
:param kernel_min: numeric(float)
:param kernel_max: numeric(float)
:return:
"""
if log_amp < LOG_LOWER_BND or min(LOG_UPPER_BND, np.log(10000.0)) < log_amp:
return -float('inf')
log_amp_lower = np.log(output_var) - np.log(kernel_max)
log_amp_upper = np.log(output_var) - np.log(max(kernel_min, 1e-100))
log_amp_mid = 0.5 * (log_amp_upper + log_amp_lower)
log_amp_rad = 0.5 * (log_amp_upper - log_amp_lower)
log_amp_std = log_amp_rad / 2.0
return -np.log(log_amp_std) - 0.5 * (log_amp - log_amp_mid) ** 2 / log_amp_std ** 2
# return
# Uniform
# return 0 if kernel_min < output_var / amp < kernel_max else -float('inf')
# Gamma
# shape = output_var
# rate = 1.0
# return shape * np.log(rate) - gammaln(shape) + (shape - 1.0) * log_amp - rate * np.exp(log_amp)
def log_prior_edgeweight(log_beta_i):
"""
:param log_beta_i: numeric(float), ind-th element of 'log_beta'
:param dim:
:return:
"""
if log_beta_i < LOG_LOWER_BND or min(LOG_UPPER_BND, np.log(100.0)) < log_beta_i:
return -float('inf')
## Gamma prior: for sparsity-inducing behavior, the shape should be 1.
## The higher the rate, the more sparsity is induced.
# shape = 1.0
# rate = 3.0
# return shape * np.log(rate) - gammaln(shape) + (shape - 1.0) * log_beta_i - rate * np.exp(log_beta_i)
## Horseshoe prior
tau = 5.0
return np.log(np.log(1.0 + 2.0 / (np.exp(log_beta_i) / tau) ** 2))
## Laplace prior
# scale = 0.5
# return -np.exp(log_beta_i) / scale
def log_prior_partition(sorted_partition, n_vertices):
"""
Log of the unnormalized density of a given partition.
This prior prefers a well-spread partition, which is quantified by the induced entropy.
The density is proportional to the entropy of an unnormalized probability vector consisting of [log(n_vertices in subgraph_i)]_i=1...N
:param sorted_partition:
:param n_vertices: 1D np.array
:return:
"""
if len(sorted_partition) == 1 or compute_group_size(sorted_partition=sorted_partition, n_vertices=n_vertices) > GRAPH_SIZE_LIMIT:
return -float('inf')
else:
prob_mass = np.array([np.sum(np.log(n_vertices[subset])) for subset in sorted_partition])
prob_mass /= np.sum(prob_mass)
entropy_mass = -np.sum(prob_mass * np.log(prob_mass))
max_log = np.sum(np.log(n_vertices))
thr_log = np.log(GRAPH_SIZE_LIMIT)
n_chunk = int(np.floor(max_log / thr_log))
prob_base = np.array([np.log(GRAPH_SIZE_LIMIT) for _ in range(n_chunk)] + [max_log - n_chunk * thr_log])
prob_base /= np.sum(prob_base)
entropy_base = -np.sum(prob_base * np.log(prob_base))
return np.log(entropy_mass - entropy_base) * 5
# return np.log(entropy_mass) * 5
if __name__ == '__main__':
n_variables_ = 60
n_vertices_ = np.ones((n_variables_, )) * 2
sorted_partition_ = [[m_] for m_ in range(n_variables_)]
print(sorted_partition_)
print(np.exp(log_prior_partition(sorted_partition_, n_vertices_)))
for _ in range(10):
cnt_ = 0
sorted_partition_ = []
while cnt_ < n_variables_:
prev_cnt_ = cnt_
curr_cnt_ = cnt_ + np.random.randint(1, 3)
sorted_partition_.append(list(range(prev_cnt_, min(curr_cnt_, n_variables_))))
cnt_ = curr_cnt_
print(sorted_partition_)
print(np.exp(log_prior_partition(sorted_partition_, n_vertices_)))
```
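The edge-weight prior above uses a horseshoe-style density, which concentrates mass on small beta (sparse edge weights). A quick standalone check of that shape, re-evaluating the same expression rather than importing the module:
```python
# Standalone check of the horseshoe-style expression used in log_prior_edgeweight:
# smaller beta values get a higher unnormalized log density.
import numpy as np

tau = 5.0
for beta in [0.01, 0.1, 1.0, 10.0, 100.0]:
    log_p = np.log(np.log(1.0 + 2.0 / (beta / tau) ** 2))
    print('beta = {:7.2f}   log prior = {:8.3f}'.format(beta, log_p))
```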
#### File: gui/layouts/var_editor.py
```python
from collections import OrderedDict
import re
import numpy as np
from PyQt5 import QtCore, QtGui, QtWidgets
from .utils import *
PC_RE = re.compile('(.+) \(\d+\)')
class VarEditor(object):
def __init__(self, var=None):
if var is None:
self.variable = None
self.type = 0
else:
self.variable = var
if isinstance(var, FCVariable):
self.type = 0
elif isinstance(var, PCVariable):
self.type = 1
elif isinstance(var, CCVariable):
self.type = 2
def setupUi(self, Dialog):
font = QtGui.QFont()
font.setFamily("Verdana")
font.setPointSize(16)
self.dialog = Dialog
Dialog.setObjectName("Dialog")
Dialog.resize(600, 480)
Dialog.setMinimumSize(QtCore.QSize(600, 480))
Dialog.setMaximumSize(QtCore.QSize(600, 480))
Dialog.setSizeIncrement(QtCore.QSize(0, 0))
Dialog.setWindowTitle("New variable...")
self.centralwidget = QtWidgets.QWidget(Dialog)
self.centralwidget.setObjectName("centralwidget")
self.nameLabel = QtWidgets.QLabel(self.centralwidget)
self.nameLabel.setGeometry(QtCore.QRect(10, 15, 125, 20))
self.nameLabel.setFont(font)
self.nameLabel.setObjectName("nameLabel")
self.nameLabel.setText("Variable Name:")
self.nameEditor = QtWidgets.QLineEdit(self.centralwidget)
self.nameEditor.setGeometry(QtCore.QRect(140, 10, 450, 30))
self.nameEditor.setFont(font)
self.nameEditor.setObjectName("nameEditor")
self.doneBtn = QtWidgets.QPushButton(self.centralwidget)
self.doneBtn.setGeometry(QtCore.QRect(470, 450, 120, 30))
self.doneBtn.setFont(font)
self.doneBtn.setObjectName("doneBtn")
self.doneBtn.setText("Done!")
self.doneBtn.clicked.connect(lambda: self.submit())
self.cancelBtn = QtWidgets.QPushButton(self.centralwidget)
self.cancelBtn.setGeometry(QtCore.QRect(360, 450, 120, 30))
self.cancelBtn.setFont(font)
self.cancelBtn.setObjectName("cancelBtn")
self.cancelBtn.setText("Cancel")
self.cancelBtn.clicked.connect(lambda: self.cancel())
self.tabWidget = QtWidgets.QTabWidget(self.centralwidget)
self.tabWidget.setGeometry(QtCore.QRect(0, 50, 600, 400))
self.tabWidget.setObjectName("tabWidget")
self.fc_tab = QtWidgets.QWidget()
self.fc_tab.setObjectName("fc_tab")
self.fc_list = QtWidgets.QListWidget(self.fc_tab)
self.fc_list.setGeometry(QtCore.QRect(25, 10, 550, 300))
self.fc_list.setFont(font)
self.fc_list.setDragEnabled(True)
self.fc_list.setDragDropMode(QtWidgets.QAbstractItemView.InternalMove)
self.fc_list.setMovement(QtWidgets.QListView.Snap)
self.fc_list.setEditTriggers(QtWidgets.QAbstractItemView.DoubleClicked | QtWidgets.QAbstractItemView.AnyKeyPressed)
self.fc_list.setObjectName("fc_list")
self.fc_new = QtWidgets.QPushButton(self.fc_tab)
self.fc_new.setGeometry(QtCore.QRect(20, 310, 150, 35))
self.fc_new.setFont(font)
self.fc_new.setObjectName("fc_new")
self.fc_new.setText("New value")
self.fc_new.clicked.connect(lambda: self.fcNewValue())
self.fc_del = QtWidgets.QPushButton(self.fc_tab)
self.fc_del.setGeometry(QtCore.QRect(20, 335, 150, 35))
self.fc_del.setFont(font)
self.fc_del.setObjectName("fc_del")
self.fc_del.setText("Remove value")
self.fc_del.setEnabled(False)
self.fc_del.clicked.connect(lambda: self.fcDelValue())
self.fc_list.currentItemChanged.connect(lambda curr, _: self.fcSelection(curr))
self.tabWidget.addTab(self.fc_tab, "")
self.pc_tab = QtWidgets.QWidget()
self.pc_tab.setObjectName("pc_tab")
self.pc_tree = QtWidgets.QTreeWidget(self.pc_tab)
self.pc_tree.setGeometry(QtCore.QRect(25, 10, 550, 300))
self.pc_tree.setFont(font)
self.pc_tree.setEditTriggers(QtWidgets.QAbstractItemView.DoubleClicked)
self.pc_tree.setUniformRowHeights(True)
self.pc_tree.setAnimated(True)
self.pc_tree.setHeaderHidden(True)
self.pc_tree.setObjectName("pc_tree")
self.pc_tree.headerItem().setText(0, "Name")
self.pc_tree.currentItemChanged.connect(lambda curr, prev: self.pcRepaint(curr, prev))
self.pc_newgrp = QtWidgets.QPushButton(self.pc_tab)
self.pc_newgrp.setGeometry(QtCore.QRect(20, 310, 150, 35))
self.pc_newgrp.setFont(font)
self.pc_newgrp.setObjectName("pc_newgrp")
self.pc_newgrp.setText("New group")
self.pc_newgrp.clicked.connect(lambda: self.pcNewGroup())
self.pc_delgrp = QtWidgets.QPushButton(self.pc_tab)
self.pc_delgrp.setGeometry(QtCore.QRect(20, 335, 150, 35))
self.pc_delgrp.setFont(font)
self.pc_delgrp.setObjectName("pc_delgrp")
self.pc_delgrp.setText("Remove group")
self.pc_delgrp.setEnabled(False)
self.pc_delgrp.clicked.connect(lambda: self.pcDelGroup())
self.pc_newval = QtWidgets.QPushButton(self.pc_tab)
self.pc_newval.setGeometry(QtCore.QRect(175, 310, 150, 35))
self.pc_newval.setFont(font)
self.pc_newval.setObjectName("pc_newval")
self.pc_newval.setText("New value")
self.pc_newval.setEnabled(False)
self.pc_newval.clicked.connect(lambda: self.pcNewValue())
self.pc_delval = QtWidgets.QPushButton(self.pc_tab)
self.pc_delval.setGeometry(QtCore.QRect(175, 335, 150, 35))
self.pc_delval.setFont(font)
self.pc_delval.setObjectName("pc_delval")
self.pc_delval.setText("Remove value")
self.pc_delval.setEnabled(False)
self.pc_delval.clicked.connect(lambda: self.pcDelValue())
self.tabWidget.addTab(self.pc_tab, "")
self.cc_tab = QtWidgets.QWidget()
self.cc_tab.setObjectName("cc_tab")
self.cc_adjLabel = QtWidgets.QLabel(self.cc_tab)
self.cc_adjLabel.setGeometry(QtCore.QRect(10, 10, 200, 30))
self.cc_adjLabel.setFont(font)
self.cc_adjLabel.setObjectName("cc_adjLabel")
self.cc_adjLabel.setText("Adjacency matrix path:")
self.cc_browse = QtWidgets.QPushButton(self.cc_tab)
self.cc_browse.setGeometry(QtCore.QRect(200, 10, 100, 35))
self.cc_browse.setFont(font)
self.cc_browse.setObjectName("cc_browse")
self.cc_browse.setText("Browse...")
self.cc_browse.clicked.connect(lambda: self.ccBrowse())
self.cc_fileLabel = QtWidgets.QLabel(self.cc_tab)
self.cc_fileLabel.setGeometry(QtCore.QRect(300, 10, 300, 30))
self.cc_fileLabel.setFont(font)
self.cc_fileLabel.setObjectName("cc_fileLabel")
self.cc_namesLabel = QtWidgets.QLabel(self.cc_tab)
self.cc_namesLabel.setGeometry(QtCore.QRect(10, 40, 200, 30))
self.cc_namesLabel.setFont(font)
self.cc_namesLabel.setObjectName("cc_namesLabel")
self.cc_namesLabel.setText("Names:")
self.cc_nameList = QtWidgets.QListWidget(self.cc_tab)
self.cc_nameList.setGeometry(QtCore.QRect(25, 70, 550, 290))
self.cc_nameList.setFont(font)
self.cc_nameList.setEditTriggers(QtWidgets.QAbstractItemView.DoubleClicked)
self.cc_nameList.setDragEnabled(True)
self.cc_nameList.setDragDropMode(QtWidgets.QAbstractItemView.InternalMove)
self.cc_nameList.setMovement(QtWidgets.QListView.Snap)
self.cc_nameList.setObjectName("cc_nameList")
self.adj_matrix = None
self.path = None
self.tabWidget.addTab(self.cc_tab, "")
self.tabWidget.setTabText(self.tabWidget.indexOf(self.fc_tab), "Fully Connected")
self.tabWidget.setTabText(self.tabWidget.indexOf(self.pc_tab), "Partially Connected")
self.tabWidget.setTabText(self.tabWidget.indexOf(self.cc_tab), "Custom Connected")
if self.variable is not None:
self.loadVariable(self.variable)
self.tabWidget.setCurrentIndex(self.type)
QtCore.QMetaObject.connectSlotsByName(Dialog)
self.variable = None
def fcSelection(self, curr):
if curr is None:
self.fc_del.setEnabled(False)
else:
self.fc_del.setEnabled(True)
self.fc_new.setEnabled(True)
self.fc_new.repaint()
self.fc_del.repaint()
def fcNewValue(self):
item = QtWidgets.QListWidgetItem("new value (double-click to edit)")
item.setFlags(item.flags() | QtCore.Qt.ItemIsEditable)
self.fc_list.addItem(item)
self.fc_list.clearSelection()
item.setSelected(True)
if self.fc_list.state() == self.fc_list.State.EditingState:
self.fc_list.setState(self.fc_list.State.NoState)
self.fc_list.editItem(item)
self.fc_list.repaint()
def fcDelValue(self):
if len(self.fc_list.selectedIndexes()) == 0:
return
idx = self.fc_list.selectedIndexes()[0]
self.fc_list.takeItem(idx.row())
def pcRepaint(self, curr=None, prev=None):
# Repaint the tree itself
self.pc_tree.clearSelection()
if curr is not None:
curr.setSelected(True)
self.pc_tree.repaint()
        # Check prev to see if the user clicked away from editing a group name.
if prev is not None and prev.parent() is None:
match = PC_RE.findall(prev.text(0))
if len(match) == 0:
prev.setText(0, f"{prev.text(0)} ({prev.childCount()})")
# Change buttons based on curr
if self.pc_tree.state() == self.pc_tree.State.EditingState:
self.pc_newgrp.setEnabled(False)
self.pc_delgrp.setEnabled(False)
self.pc_newval.setEnabled(False)
self.pc_delval.setEnabled(False)
elif curr is None: # No current selection
self.pc_newgrp.setEnabled(True)
self.pc_delgrp.setEnabled(False)
self.pc_newval.setEnabled(False)
self.pc_delval.setEnabled(False)
elif curr.parent() is None: # Current selection is a group
match = PC_RE.findall(curr.text(0))
if len(match) == 0:
curr.setText(0, f"{curr.text(0)} ({curr.childCount()})")
self.pc_newgrp.setEnabled(True)
self.pc_delgrp.setEnabled(True)
self.pc_newval.setEnabled(True)
self.pc_delval.setEnabled(False)
else: # Current selection is a value
self.pc_newgrp.setEnabled(True)
self.pc_delgrp.setEnabled(False)
self.pc_newval.setEnabled(True)
self.pc_delval.setEnabled(curr.parent().childCount() > 1)
self.pc_newgrp.repaint()
self.pc_delgrp.repaint()
self.pc_newval.repaint()
self.pc_delval.repaint()
def pcNewGroup(self):
group = QtWidgets.QTreeWidgetItem(self.pc_tree)
group.setText(0, "new group (double-click to edit) (1)")
group.setFlags(group.flags() | QtCore.Qt.ItemIsEditable)
group.setExpanded(True)
if self.pc_tree.state() == self.pc_tree.State.EditingState:
self.pc_tree.setState(self.pc_tree.State.NoState)
self.pc_tree.editItem(group)
item = QtWidgets.QTreeWidgetItem(group)
item.setText(0, "new value (double-click to edit)")
item.setFlags(item.flags() | QtCore.Qt.ItemIsEditable)
self.pcRepaint(group)
def pcDelGroup(self):
idx = self.pc_tree.selectedIndexes()[0]
group = self.pc_tree.itemFromIndex(idx)
if group.parent() is not None:
return
self.pc_tree.takeTopLevelItem(idx.row())
self.pcRepaint()
def pcNewValue(self):
group = self.pc_tree.selectedItems()[0]
if group.parent() is not None:
group = group.parent()
group_name = group.text(0)
match = PC_RE.findall(group_name)
if match:
group_name = match[0]
item = QtWidgets.QTreeWidgetItem(group)
group.setText(0, f"{group_name} ({group.childCount()})")
item.setText(0, "new value (double-click to edit)")
item.setFlags(item.flags() | QtCore.Qt.ItemIsEditable)
self.pcRepaint(item, None)
if self.pc_tree.state() == self.pc_tree.State.EditingState:
self.pc_tree.setState(self.pc_tree.State.NoState)
self.pc_tree.editItem(item)
def pcDelValue(self):
item = self.pc_tree.selectedItems()[0]
group = item.parent()
if group is None:
return
idx = group.indexOfChild(item)
group.takeChild(idx)
group_name = PC_RE.findall(group.text(0))[0]
group.setText(0, f"{group_name} ({group.childCount()})")
self.pcRepaint()
def ccBrowse(self):
fname = QtWidgets.QFileDialog.getOpenFileName(None, 'Open file', '~', "Numpy array files (*.npy)")[0]
if not fname:
return
try:
arr = np.load(fname, allow_pickle=True)
        except (ValueError, OSError):
err = QtWidgets.QErrorMessage(self.dialog)
err.showMessage(f'Could not read {fname}. Please check that it is a Numpy array saved in the npy file format.')
return
if len(arr.shape) != 2 or arr.shape[0] != arr.shape[1] or (arr != arr.T).any():
err = QtWidgets.QErrorMessage(self.dialog)
err.showMessage(f'{fname} must contain a 2-D symmetric array.')
return
self.path = fname
if len(fname.split('/')) > 1:
self.cc_fileLabel.setText(fname.split('/')[-1])
else:
self.cc_fileLabel.setText(fname.split('\\')[-1])
for i in range(arr.shape[0]):
item = QtWidgets.QListWidgetItem(f"Item {i} (double-click to edit)")
item.setFlags(item.flags() | QtCore.Qt.ItemIsEditable)
self.cc_nameList.addItem(item)
self.adj_matrix = arr
def loadVariable(self, var):
self.nameEditor.setText(var.name)
if isinstance(var, FCVariable):
for name in var.values:
item = QtWidgets.QListWidgetItem(name)
item.setFlags(item.flags() | QtCore.Qt.ItemIsEditable)
self.fc_list.addItem(item)
elif isinstance(var, PCVariable):
for group, names in var.groups.items():
group_item = QtWidgets.QTreeWidgetItem(self.pc_tree)
group_item.setText(0, f"{group} ({len(names)})")
group_item.setFlags(group_item.flags() | QtCore.Qt.ItemIsEditable)
group_item.setExpanded(True)
for name in names:
item = QtWidgets.QTreeWidgetItem(group_item)
item.setText(0, name)
item.setFlags(item.flags() | QtCore.Qt.ItemIsEditable)
elif isinstance(var, CCVariable):
if var.path is not None:
self.cc_fileLabel.setText(var.path)
for name in var.labels:
item = QtWidgets.QListWidgetItem(name)
item.setFlags(item.flags() | QtCore.Qt.ItemIsEditable)
self.cc_nameList.addItem(item)
self.adj_matrix = var.adj_matrix
def cancel(self):
box = QtWidgets.QMessageBox(QtWidgets.QMessageBox.Warning, "Cancel",
"Are you sure you want to cancel? You will lose all of your changes!",
QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No)
reply = box.exec()
if reply == QtWidgets.QMessageBox.Yes:
self.dialog.reject()
else:
return
def submit(self):
name = self.nameEditor.text()
if not name:
err = QtWidgets.QErrorMessage(self.dialog)
err.showMessage('The variable must have a name.')
self.nameEditor.setFocus()
return
tab_idx = self.tabWidget.currentIndex()
if tab_idx == self.tabWidget.indexOf(self.fc_tab):
count = self.fc_list.count()
if count == 0:
err = QtWidgets.QErrorMessage(self.dialog)
err.showMessage('The fully connected variable must contain values.')
return
values = [self.fc_list.item(i).text() for i in range(count)]
self.variable = FCVariable(name, values)
self.dialog.accept()
elif tab_idx == self.tabWidget.indexOf(self.pc_tab):
group_count = self.pc_tree.topLevelItemCount()
if group_count == 0:
err = QtWidgets.QErrorMessage(self.dialog)
err.showMessage('The partially connected variable must contain groups.')
return
groups = OrderedDict()
for group_idx in range(group_count):
group = self.pc_tree.topLevelItem(group_idx)
value_count = group.childCount()
if value_count == 0:
err = QtWidgets.QErrorMessage(self.dialog)
err.showMessage('Every group must contain at least one item.')
return
group_name = group.text(0)
match = PC_RE.findall(group_name)
if match:
group_name = match[0]
groups[group_name] = []
for i in range(value_count):
groups[group_name].append(group.child(i).text(0))
self.variable = PCVariable(name, groups)
self.dialog.accept()
elif tab_idx == self.tabWidget.indexOf(self.cc_tab):
if self.adj_matrix is None:
err = QtWidgets.QErrorMessage(self.dialog)
err.showMessage('The custom connected variable must include an adjacency matrix.')
return
labels = [self.cc_nameList.item(i).text() for i in range(self.cc_nameList.count())]
path = self.cc_fileLabel.text()
path = path if path else None
self.variable = CCVariable(name, labels, self.adj_matrix, path)
self.dialog.accept()
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
Dialog = QtWidgets.QDialog()
ui = VarEditor()
ui.setupUi(Dialog)
Dialog.show()
sys.exit(app.exec_())
``` |
{
"source": "johnfisk548/BotGabut",
"score": 2
} |
#### File: userbot/modules/neonime.py
```python
import requests
from bs4 import BeautifulSoup as bs
import re
from userbot import CMD_HELP
from userbot.events import register
@register(outgoing=True, pattern=r"^\.neo ?(.*)")
async def _neonime(event):
await event.edit('`please wait...`')
url = 'https://neonime.site/episode/'
ht_ = requests.get(url).text
_bs = bs(ht_, "html.parser")
bd_ = _bs.findAll('td', class_='bb')
out = "<b>➲ Neonime > New Episode:</b>\n═════════════════\n"
for kntl_ in bd_:
_lucu = kntl_.find('a')
if not _lucu:
_lucu = 'none'
        else:
tt_ = _lucu.get_text()
_tt = re.sub(r'\s+Subtitle\s+Indonesia\s+Season.\d+', '', tt_)
link = _lucu['href']
out += f"➣ <a href='{link}'>{_tt}</a>\n"
if len(out) > 1000:
break
await event.edit(out, parse_mode="html")
@register(outgoing=True, pattern=r"^\.ks ?(.*)")
async def _kusonime(event):
await event.edit('`please wait...`')
url = 'https://kusonime.com/'
ht_ = requests.get(url).text
_bs = bs(ht_, "html.parser")
bd_ = _bs.findAll('h2', class_='episodeye')
out = "<b>➲ Kusonime > New Batch:</b>\n═════════════════\n"
for kntl_ in bd_:
_lucu = kntl_.find('a')
if not _lucu:
_lucu = 'none'
        else:
tt_ = _lucu.get_text()
_tt = re.sub(r'\s+Subtitle\s+Indonesia\s+Season.\d+', '', tt_)
link = _lucu['href']
out += f"➣ <a href='{link}'>{_tt}</a>\n"
if len(out) > 1000:
break
await event.edit(out, parse_mode="html")
@register(outgoing=True, pattern=r"^\.wb ?(.*)")
async def _wibudesu(event):
await event.edit('`please wait...`')
url = 'https://wibudesu.com/'
ht_ = requests.get(url).text
_bs = bs(ht_, "html.parser")
bd_ = _bs.findAll('h2', class_='episodeye')
out = "<b>➲ Wibudesu > New Batch:</b>\n═════════════════\n"
for kntl_ in bd_:
_lucu = kntl_.find('a')
if not _lucu:
_lucu = 'none'
        else:
tt_ = _lucu.get_text()
_tt = re.sub(r'\s+Subtitle\s+Indonesia\s+Season.\d+', '', tt_)
link = _lucu['href']
out += f"➣ <a href='{link}'>{_tt}</a>\n"
if len(out) > 1000:
break
await event.edit(out, parse_mode="html")
@register(outgoing=True, pattern=r"^\.nk ?(.*)")
async def _nekonime(event):
await event.edit('`please wait...`')
url = 'https://nekonime.vip/'
ht_ = requests.get(url).text
_bs = bs(ht_, "html.parser")
bd_ = _bs.findAll('div', class_='col-md-4 col-sm-4')
out = "<b>➲ Nekonime > New Episode:</b>\n═════════════════\n"
for kntl_ in bd_:
_lucu = kntl_.find('a')
if not _lucu:
_lucu = 'none'
        else:
tt_ = _lucu.get_text()
_tt = re.sub(r'\s+Subtitle\s+Indonesia\s+Season.\d+', '', tt_)
link = _lucu['href']
out += f"➣ <a href='{link}'>{_tt}</a>\n"
await event.edit(out, parse_mode="html")
@register(outgoing=True, pattern=r"^\.sm ?(.*)")
async def _samehadaku(event):
await event.edit('`please wait...`')
url = 'https://samehadaku.vip/'
ht_ = requests.get(url).text
_bs = bs(ht_, "html.parser")
bd_ = _bs.findAll('div', class_='animposx')
out = "<b>➲ Samehadaku > New Episode:</b>\n═════════════════\n"
for kntl_ in bd_:
_lucu = kntl_.find('a')
if not _lucu:
_lucu = 'none'
        else:
tt_ = _lucu.get_text()
_tt = re.sub(r'\s+TV\s+Ongoing\s+Season.\d+', '', tt_)
link = _lucu['href']
out += f"➣ <a href='{link}'>{_tt}</a>\n"
if len(out) > 1000:
break
await event.edit(out, parse_mode="html")
@register(outgoing=True, pattern=r"^\.mal ?(.*)")
async def _myanimelist(event):
await event.edit('`please wait...`')
url = 'https://myanimelist.net/news'
ht_ = requests.get(url).text
_bs = bs(ht_, "html.parser")
bd_ = _bs.findAll('div', class_='news-list mt16 mr8')
out = "<b>➲ Jurnal Otaku > New Information:</b>\n═════════════════\n"
for kntl_ in bd_:
_lucu = kntl_.find('a')
if not _lucu:
_lucu = 'none'
        else:
tt_ = _lucu.get_text()
_tt = re.sub(r'\s+TV\s+Ongoing\s+Season.\d+', '', tt_)
link = _lucu['href']
            judul = _lucu.get('alt')  # may be missing on some links; currently unused
out += f"➣ <a href='{link}'>{_tt}</a>\n"
if len(out) > 1000:
break
await event.edit(out, parse_mode="html")
def get_html(url):
tag_li = []
req = requests.get(url)
res = bs(req.text, "html.parser")
box = res.find("div", class_="sbox").parent.find_all("li")
if len(box) != 0:
for clear in box:
if clear.get_text() == 'MP4':
box.remove(clear)
elif clear.get_text() == 'MKV':
box.remove(clear)
else:
pass
for box_ in box:
tag_li.append(box_)
return {
"html": tag_li
}
def link_download(query, url):
tag_label = []
tag_href = []
r = get_html(url)["html"]
for k, v in enumerate(r[query].find_all("a")):
tag_href.append({"server": v.get_text(strip=True), "link": v["href"]})
for p, o in enumerate(r[query].find_all("label")):
tag_label.append(o.get_text())
return {
"label": tag_label,
"url": tag_href
}
@register(outgoing=True, pattern=r"^\.nl ?(.*)")
async def _(event):
url = event.pattern_match.group(1)
if not url:
await event.edit("Enter your episode url, see .help neonime")
elif 'https://' not in url:
await event.edit('Enter url')
return
else:
await event.edit("`please wait..`")
msg = "<b>➲ Link Download:</b>\n═════════════════\n"
        # Walk through the numbered download sections on the page in order.
        for query in range(1, 9):
            p = link_download(query, url)
            prefix = "" if query == 1 else "\n"
            for label_name in p["label"]:
                msg += f"{prefix}<b>↛ {label_name} ↚</b>\n"
            for server_link in p["url"]:
                server_name = server_link["server"]
                server_url = server_link["link"]
                msg += f"➣ <a href='{server_url}'>{server_name}</a>\n"
await event.edit(msg, parse_mode="html")
CMD_HELP.update({"neonime": "**neonime**"
"\n >`.neo`"
"\n Usage: See Last Update."
"\n >`.nl` <`url episode`>"
"\n Usage: Cari link download, Copy url episode dari `.neo` "
"\n Others : .ks | .nk | .sm | .mal | .wb"
})
``` |
{
"source": "john-fitz/Address_Book",
"score": 3
} |
#### File: Address_Book/directions/mixins.py
```python
import os
import requests
import json
# pulled from the github repo: https://github.com/bobby-didcoding/did_django_google_maps_api
# from following YouTube video: https://www.youtube.com/watch?app=desktop&v=wCn8WND-JpU
'''
Handles directions from Google
'''
def Directions(*args, **kwargs):
lat_a = kwargs.get("lat_a")
long_a = kwargs.get("long_a")
lat_b = kwargs.get("lat_b")
long_b = kwargs.get("long_b")
origin = f'{lat_a},{long_a}'
destination = f'{lat_b},{long_b}'
result = requests.get(
'https://maps.googleapis.com/maps/api/directions/json?',
params={
'origin': origin,
'destination': destination,
"key": os.environ.get("GOOGLE_MAPS_API_KEY")
})
directions = result.json()
if directions["status"] == "OK":
route = directions["routes"][0]["legs"][0]
origin = route["start_address"]
destination = route["end_address"]
distance = route["distance"]["text"]
duration = route["duration"]["text"]
steps = [
[
s["distance"]["text"],
s["duration"]["text"],
s["html_instructions"],
]
for s in route["steps"]]
return {
"origin": origin,
"destination": destination,
"distance": distance,
"duration": duration,
"steps": steps
}
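# Minimal usage sketch (coordinates are illustrative; requires GOOGLE_MAPS_API_KEY in the
# environment). Note that the function implicitly returns None when the API status is not "OK".
#   route = Directions(lat_a=40.7128, long_a=-74.0060, lat_b=42.3601, long_b=-71.0589)
#   if route:
#       print(route["distance"], route["duration"])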
``` |
{
"source": "john-fitz/pairs_trading",
"score": 4
} |
#### File: john-fitz/pairs_trading/visualizations.py
```python
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from datetime import datetime
import pairs_helpers
from typing import Optional
def plot_profit(current_log: pd.DataFrame) -> None:
"""Takes the trade log and plots the returns in dollar amounts"""
profit = []
times = []
sum_profit = 0
for _, row in current_log.iterrows():
sum_profit += row['profit']
if row['exit_time'] not in times:
profit.append(sum_profit)
times.append(row['exit_time'])
    dates = ms_to_dates(times)
plt.figure(figsize=(15,10))
plt.plot(dates, profit)
plt.xlabel('Date')
plt.ylabel('Profit')
plt.xticks(dates[::40], rotation=40)
plt.show();
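# Illustrative call (hypothetical DataFrame name): plot_profit(trade_log), where trade_log
# carries 'profit' and 'exit_time' (epoch-millisecond) columns, as iterated above.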
def plot_coin_crossings(coin1: str, coin2: str, df: pd.DataFrame, log: pd.DataFrame, start_time: Optional[float]=None, end_time: Optional[float]=None) -> None:
""" plots the difference in log price between the two coins and adds markers for when they were bought into/sold
start_time : float, optional
start of the range of values to plot in ms (default is None)
end_time : float, optional
end of the range of values to plot in ms (default is None)
"""
if start_time:
df = df[(df['close_time'] > start_time) & (df['close_time'] <= end_time)]
_, _, diff = pairs_helpers.two_coin_pricing(coin1, coin2, df)
coin_logs = log[(log['coin1'] == coin1) & (log['coin2'] == coin2)]
buy_times = coin_logs['entry_time']
sell_times = coin_logs['exit_time']
diff_buy = diff[diff.index.isin(buy_times)]
diff_sell = diff[diff.index.isin(sell_times)]
diff_std = diff.rolling(336).std()
diff_mean = diff['mean']
s = np.empty(len(diff))
s[:] = 0
s = pd.Series(s)
s.index = diff.index
diff_mean = s + diff_mean
diff_mean = diff_mean.fillna(method='ffill')
diff_mean.index = diff.index
diff_plus = diff_mean + diff_std
diff_minus = diff_mean - diff_std
plt.figure(figsize=(15,10))
plt.plot(ms_to_dates(diff.index), diff)
plt.plot(ms_to_dates(diff_buy.index), diff_buy, '^', markersize=6, color='g')
plt.plot(ms_to_dates(diff_sell.index), diff_sell, 'v', markersize=6, color='r')
plt.plot(ms_to_dates(diff_mean.index), diff_mean, 'k-')
plt.plot(ms_to_dates(diff_plus.index), diff_plus, 'r--', linewidth=1)
plt.plot(ms_to_dates(diff_minus.index), diff_minus, 'b--', linewidth=1)
plt.xticks(ms_to_dates(diff.index)[::200], rotation=40)
plt.show();
def ms_to_dates(times: list) -> list:
"""takes a list of times in ms and converts to a list of dates"""
dates = []
for time in times:
s = time / 1000
try:
converted = datetime.fromtimestamp(s).strftime('%Y-%m-%d')
        except (OSError, OverflowError, ValueError):
            converted = ''  # unparseable timestamp; keep output aligned with input
        dates.append(converted)
return dates
def date_to_ms(date: str) -> int:
    """takes a date string in '%Y-%m-%d' format and converts it to a time in ms"""
dt_obj = datetime.strptime(date, '%Y-%m-%d')
millisec = int(dt_obj.timestamp() * 1000)
return millisec
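# Illustrative round trip (exact values depend on the local timezone, since
# datetime.timestamp()/fromtimestamp() are naive here):
#   date_to_ms('2021-06-01')      -> e.g. 1622505600000
#   ms_to_dates([1622505600000])  -> ['2021-06-01']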
``` |
{
"source": "johnfmaddox/kilojoule",
"score": 2
} |
#### File: kilojoule/kilojoule/chemical.py
```python
from .common import preferred_units_from_type, preferred_units_from_symbol, invert_dict
from .units import units, Quantity
from .plotting import PropertyPlot
import numpy as np
from thermo.chemical import Chemical as _Chemical
# Default thermo.py units for symbols
thermo_units_to_symb = {
"K": "T Tm Tb Tc Tt Tflash Tautoignition Stockmayer conductivityT".split(),
"Pa": "P Pc Pt Psat_298 Psat".split(),
"kg/m^3": "rhoc rho rhog rhol rhos".split()],
"mol/m^3": "rhocm Bvirial rhom rhogm rholm rhosm".split(),
"m^3/mol": "Vc Vml_Tb Vml_Tm Vml_STP Vmg_STP Van_der_Waals_volume Vm Vmg Vml Vms".split(),
"J/kg/K": "Cp Cpg Cpl Cps Cvg Cvg R".split(),
"J/mol/K": "Cpm Cpgm Cplm Cpsm Cvgm".split(),
"g/mol": "M MW m mw mm".split(),
"J/kg": "Hfus Hsub Hvap".split(),
"J/mol": "Hfusm Hsubm Hf Hc Hvap_Tbm Hvap_Tb Hvapm".split(),
"m^2/s": "alpha alphag alphal nu nug nul".split(),
"angstrom": "molecular_diameter".split(),
"S/m": "conductivity".split(),
"degree": "API".split().split(),
"1/K": "isentropic_expansion isentropic_expansion_g isentropic_expansion_l".split(),
"K/Pa": "JT JTg JTl".split(),
"W/m/K": "k kg kl".split(),
"Pa*s": "mu mug mul".split(),
"N/m": "sigma".split(),
"m^2/mol": "Van_der_Waals_area".split(),
" ": "Zc omega StielPolar LFL UFL GWP ODP logP RI RIT isentropic_exponent permittivity Pr Prg Prl SG SGg SGl SGs Z Zg Zl Zs".split(),
}
thermo_symb_to_units = invert_dict(thermo_units_to_symb)
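# After inversion each symbol maps back to its thermo.py unit string,
# e.g. thermo_symb_to_units["T"] == "K" and thermo_symb_to_units["mu"] == "Pa*s"
# (assuming invert_dict expands the space-separated symbol lists above).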
class Properties:
""" """
def __init__(self, ID, unit_system="SI_K", verbose=False):
self.verbose = verbose
self.unit_system = unit_system
self.ID = ID
self._chemical = _Chemical(ID)
# legacy definitions/aliases
self.Cp = self.cp
self.Cv = self.cv
self.mw = self.mm
self.e = self.u
self.gamma = self.k
def _update_kwargs(self, args, kwargs):
for arg in args:
try:
arg_symb = arg.property_symbol
# print(arg_symb)
arg_dict = {arg_symb:arg}
kwargs = dict(**arg_dict, **kwargs)
except:
try:
arg.to('K')
kwargs = dict(T=arg, **kwargs)
except:
try:
arg.to('kPa')
kwargs = dict(p=arg, **kwargs)
except:
try:
arg.to('m^3/kg')
kwargs = dict(v=arg, **kwargs)
except:
try:
arg.to('kJ/kg/K')
kwargs = dict(s=arg, **kwargs)
except:
try:
arg.to('kg/m^3')
kwargs = dict(d=arg, **kwargs)
except:
                                    print(f'Unable to determine property type for {arg} based on units')
for k,v in kwargs.items():
if not isinstance(v,Quantity):
arg_units = preferred_units_from_symbol(k, unit_system=self.unit_system)
kwargs[k] = Quantity(v, arg_units)
return kwargs
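    # Illustrative call (hypothetical instance name): props.T(Quantity(100, 'kPa'), s=s1) —
    # the bare pressure argument above is recognized by its units and mapped to the 'p' keyword.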
def _to_quantity(self, lib_symb, lib_result, lib_result_type):
"""
        Processes the result from a thermo.py call to strip any array wrapping
        and convert it to the preferred unit type
        :param lib_symb: string of symbol using thermo.py nomenclature
        :param lib_result: value returned from thermo.py
        :param lib_result_type: type of quantity - used to determine the final (preferred) units
:returns: a dimensional quantity in the preferred units
"""
try:
lib_result = lib_result[0]
except Exception as e:
if self.verbose:
print(e)
preferred_units = preferred_units_from_type(lib_result_type, self.unit_system)
        result_units = thermo_symb_to_units[lib_symb]
return Quantity(lib_result, result_units).to(preferred_units)
    def _update_state(self, *args, T=None, p=None, d=None, rho=None, v=None, u=None, h=None, s=None, x=None, **kwargs):
"""
Update the current state given the
:param T: temperature as a dimensional quantity (Default value = None)
:param p: pressure as a dimensional quantity (Default value = None)
:param d: density as a dimensional quantity (Default value = None)
:param rho: density as a dimensional quantity(Default value = None)
:param v: specific volume as a dimensional quantity (Default value = None)
:param u: specific internal energy as a dimensional quantity (Default value = None)
:param h: specific enthalpy as a dimensional quantity (Default value = None)
:param s: specific entropy as a dimensional quantity (Default value = None)
:param x: vapor quality as a dimensional quantity(Default value = None)
:param **kwargs:
:returns: pressure as a float in the default PYroMat units
"""
kwargs = self._update_kwargs(args,kwargs)
if "T" in kwargs.keys() and "p" in kwargs.keys():
self._current_state = kwargs
            self._chemical.calculate(T=kwargs["T"], P=kwargs["p"])
if p is not None:
return p.to("bar").magnitude
elif (d or v) and T:
if v:
d = 1 / v.to("m^3/kg")
try:
p = self._pm.p_d(T=T.to("K").magnitude, d=d.to("kg/m^3").magnitude)[0]
except Exception as e:
if self.verbose:
print(e)
p = (
(R_u_si / self._pm.mw())
* T.to("K").magnitude
* d.to("kg/m^3").magnitude
) * 0.01
elif s and T:
try:
p = self._pm.p_s(T=T.to("K").magnitude, s=s.to("kJ/kg/K").magnitude)[0]
except Exception as e:
if self.verbose:
print(e)
p = self._p_s(T=T.to("K").magnitude, s=s.to("kJ/kg/K").magnitude)[0]
elif (d or v) and s:
if v:
d = 1 / v.to("m^3/kg")
T, p = self._invTp_sd(
s=s.to("kJ/kg/K").magnitude, d=d.to("kg/m^3").magnitude
)
return p
def _get_T_from_others(
self, T=None, p=None, d=None, v=None, u=None, h=None, s=None, **kwargs
):
"""
Determines the temperature based on two independent, intensive properties
:param T: temperature as a dimensional quanity (Default value = None)
:param p: pressure as a dimensional quantity (Default value = None)
:param d: density as a dimensional quantity (Default value = None)
:param v: specific volume as a dimensional quantity (Default value = None)
:param u: specific internal energy as a dimensional quantity (Default value = None)
:param h: specific enthalpy as a dimensional quantity (Default value = None)
:param s: specific entropy as a dimensional quantity (Default value = None)
:param **kwargs:
:returns: temperature as a float in the default PYroMat units
"""
if T is not None:
return T.to("K").magnitude
elif h is not None:
T = self._pm.T_h(h=h.to("kJ/kg").magnitude)
try:
T = T[0]
except Exception as e:
pass
elif (d or v) and p:
if v:
d = 1 / v.to("m^3/kg")
try:
T = self._pm.T_d(p=p.to("bar").magnitude, d=d.to("kg/m^3").magnitude)[0]
except Exception as e:
if self.verbose:
print(e)
T = p.to("kPa").magnitude / (
(R_u_si / self._pm.mw()) * d.to("kg/m^3").magnitude
)
elif s and p:
T_tmp = self._pm.T_s(p=p.to("bar").magnitude, s=s.to("kJ/kg/K").magnitude)
try:
T = T_tmp[0]
except IndexError as e:
if self.verbose:
print(e)
T = T_tmp
elif (d or v) and s:
if v:
d = 1 / v.to("m^3/kg")
T, p = self._invTp_sd(
s=s.to("kJ/kg/K").magnitude, d=d.to("kg/m^3").magnitude
)
return T
def _invTp_sd(self, s, d):
"""Inverse solution for temperature from entropy and density
:param s: specific entropy as a float in default PYroMat units
:param d: density as a float in default PYroMat units
:returns:
"""
# Generic iteration parameters
N = 100 # Maximum iterations
small = 1e-8 # A "small" number
epsilon = 1e-6 # Iteration precision
scale_factor = 0.01 * d * R_u_si / self._pm.mw()
def p_from_T(T):
"""use the ideal gas law to get the pressure from temperature (known density)
:param T:
:returns: pressure as a float in default PYroMat units
"""
return scale_factor * T
Tmin, Tmax = self._pm.Tlim()
it = np.nditer(
(None, s),
op_flags=[["readwrite", "allocate"], ["readonly", "copy"]],
op_dtypes="float",
)
for T_, s_ in it:
# Use Tk as the iteration parameter. We will write to T_ later.
# Initialize it to be in the center of the species' legal range.
Tk = 0.5 * (Tmin + Tmax)
Tk1 = Tk
# Initialize dT - the change in T
dT = 0.0
# Calculate an error threshold
thresh = max(small, abs(epsilon * s_))
# Initialize absek1 - the absolute error from the last iteration
# Using +infty will force the error reduction criterion to be met
abs_ek1 = float("inf")
fail = True
for count in range(N):
## CALL THE PROPERTY FUNCTION ##
p_ = p_from_T(Tk)
sk = self._pm.s(T=Tk, p=p_)
# Calculate error
ek = sk - s_
abs_ek = abs(ek)
# Test for convergence
# print(f'T: {Tk}, p: {p_}, s: {sk}, abs(error): {abs_ek}')
if abs_ek < thresh:
T_[...] = Tk
fail = False
break
# If the error did not reduce from the last iteration
elif abs_ek > abs_ek1:
dT /= 2.0
Tk = Tk1 + dT
# Continue normal iteration
else:
# Shift out the old values
abs_ek1 = abs_ek
Tk1 = Tk
## ESTIMATE THE DERIVATIVE ##
dT = max(small, epsilon * Tk) # Make a small perturbation
dsdx = (self._pm.s(T=Tk + dT, p=p_from_T(Tk + dT)) - sk) / dT
# Calculate the next step size
dT = -ek / dsdx
# Produce a tentative next value
Tk = Tk1 + dT
# Test the guess for containment in the temperature limits
# Shrink the increment until Tk is contained
while Tk < Tmin or Tk > Tmax:
dT /= 2.0
Tk = Tk1 + dT
if fail:
raise pyro.utility.PMAnalysisError("_invT() failed to converge!")
return Tk[0], p_[0]
def _p_s(self, s, T):
"""Pressure as a function of entropy:
overload of the PYroMat implementation to enable this functionality for mixtures
:param s: specific entropy as a float in PYroMat units
:param T: temperature as a float in PYroMat units
:returns: pressure as a float in PYroMat units
"""
def_p = pm.config["def_p"]
s0 = self._pm.s(T=T, p=def_p)
return def_p * np.exp((s0 - s) / self.R.to("kJ/kg/K").magnitude)
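    # Ideal-gas entropy relation used above: s(T, p) - s(T, p_ref) = -R*ln(p/p_ref),
    # which rearranges to p = p_ref * exp((s0 - s)/R).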
def T(self, *args, verbose=False, **kwargs):
"""
Temperature from one or two independent, intensive properties
example:
>>> air.T(v=v1, p=p1)
>>> air.T(h=h1)
>>> air.T(u=u1)
>>> air.T(d=d1, s=s1)
:param **kwargs: one or two dimensional quantities of p,d,v,u,h,s
:returns: Temperature as a dimensional quantity
"""
kwargs = self._update_kwargs(args,kwargs)
if verbose: print(kwargs)
pm_result = self._get_T_from_others(**kwargs)
return self._to_quantity("T", pm_result, "temperature")
def p(self, *args, **kwargs):
"""
pressure from two independent, intensive properties
example:
>>> air.p(v=v1, T=T1)
>>> air.p(v=v1, h=h1)
>>> air.p(d=d1, s=s1)
:param **kwargs: two dimensional quantities of T,d,v,u,h,s
:returns: pressure as a dimensional quantity
"""
kwargs = self._update_kwargs(args,kwargs)
pm_result = self._get_p_from_others(**kwargs)
return self._to_quantity("p", pm_result, "pressure")
def cp(self, *args, **kwargs):
"""
constant pressure specific heat from one or two independent, intensive properties
example:
>>> air.cp(T=T1)
>>> air.cp(h=h1)
>>> air.cp(d=d1, s=s1)
:param T: temperature as a dimensional quantity (Default value = None)
:param **kwargs: zero, one, or two dimensional quantities of d,v,u,h,s
:returns: constant pressure specific as a dimensional quantity
"""
kwargs = self._update_kwargs(args,kwargs)
if 'T' not in kwargs.keys():
T = self._get_T_from_others(**kwargs)
else:
T = kwargs['T']
T = T.to("K").magnitude
pm_result = self._pm.cp(T)[0]
return self._to_quantity("Cp", pm_result, "specific heat")
def cv(self, *args, **kwargs):
"""
constant volume specific heat from one or two independent, intensive properties
example:
>>> air.cv(T=T1)
>>> air.cv(h=h1)
>>> air.cv(d=d1, s=s1)
:param T: temperature as a dimensional quantity (Default value = None)
:param **kwargs: zero, one, or two dimensional quantities of d,v,u,h,s
:returns: constant pressure specific as a dimensional quantity
"""
kwargs = self._update_kwargs(args,kwargs)
if 'T' not in kwargs.keys():
T_pm = self._get_T_from_others(**kwargs)
else:
T = kwargs['T']
T_pm = T.to("K").magnitude
pm_result = self._pm.cv(T=T_pm)[0]
return self._to_quantity("Cv", pm_result, "specific heat")
def k(self, *args, **kwargs):
"""
specific heat ratio from one or two independent, intensive properties
{also accessibe as .gamma()}
example:
>>> air.k(T=T1)
>>> air.k(h=h1)
>>> air.k(d=d1, s=s1)
:param T: temperature as a dimensional quantity (Default value = None)
:param **kwargs: zero, one, or two dimensional quantities of d,v,u,h,s
:returns: constant pressure specific as a dimensional quantity
"""
kwargs = self._update_kwargs(args,kwargs)
if 'T' not in kwargs.keys():
T_pm = self._get_T_from_others(**kwargs)
else:
T = kwargs['T']
T_pm = T.to("K").magnitude
pm_result = self._pm.gam(T=T_pm)
return self._to_quantity("k", pm_result, "dimensionless")
def d(self, *args, **kwargs):
"""
density from two independent, intensive properties
example:
>>> air.d(T=T1, p=p1)
>>> air.d(v=v1)
>>> air.d(h=h1, s=s1)
:param T: temperature as a dimensional quantity (Default value = None)
:param p: pressure as a dimensional quantity (Default value = None)
:param **kwargs: zero, one, or two dimensional quantities of d,v,u,h,s
:returns: density as a dimensional quantity
"""
kwargs = self._update_kwargs(args,kwargs)
if 'T' not in kwargs.keys():
T_pm = self._get_T_from_others(**kwargs)
kwargw = dict(T=T_pm, **kwargs)
else:
T = kwargs['T']
T_pm = T.to("K").magnitude
if 'p' not in kwargs.keys():
p_pm = self._get_p_from_others(**kwargs)
else:
p_pm = kwargs['p'].to("bar").magnitude
pm_result = self._pm.d(T=T_pm, p=p_pm)[0]
return self._to_quantity("d", pm_result, "density")
def v(self, *args, **kwargs):
"""
specific volume from two independent, intensive properties
example:
>>> air.v(T=T1, p=p1)
>>> air.v(d=d1)
>>> air.v(h=h1, s=s1)
:param T: temperature as a dimensional quantity (Default value = None)
:param p: pressure as a dimensional quantity (Default value = None)
:param **kwargs: zero, one, or two dimensional quantities of d,v,u,h,s
:returns: specific volume as a dimensional quantity
"""
kwargs = self._update_kwargs(args,kwargs)
d = self.d(**kwargs)
return 1 / d
def u(self, *args, **kwargs):
"""
specific internal energy from one or two independent, intensive properties
{also accessible as .e()}
example:
>>> air.u(T=T1)
>>> air.u(h=h1)
>>> air.u(d=d1, s=s1)
:param T: temperature as a dimensional quantity (Default value = None)
:param **kwargs: zero, one, or two dimensional quantities of p,d,v,h,s
:returns: specific internal energy as a dimensional quantity
"""
kwargs = self._update_kwargs(args,kwargs)
if 'T' not in kwargs.keys():
T_pm = self._get_T_from_others(**kwargs)
else:
T = kwargs['T']
T_pm = T.to("K").magnitude
pm_result = self._pm.e(T=T_pm)[0]
return self._to_quantity("e", pm_result, "specific energy")
def h(self, *args, **kwargs):
"""
specific enthalpy from one or two independent, intensive properties
example:
>>> air.h(T=T1)
>>> air.h(h=h1)
>>> air.h(d=d1, s=s1)
:param T: temperature as a dimensional quantity (Default value = None)
:param **kwargs: zero, one, or two dimensional quantities of p,d,v,u,s
:returns: specific enthalpy as a dimensional quantity
"""
kwargs = self._update_kwargs(args,kwargs)
if 'T' not in kwargs.keys():
T_pm = self._get_T_from_others(**kwargs)
else:
T = kwargs['T']
T_pm = T.to("K").magnitude
pm_result = self._pm.h(T=T_pm)[0]
return self._to_quantity("h", pm_result, "specific energy")
def s(self, *args, **kwargs):
"""
specific entropy from two independent, intensive properties
example:
>>> T1 = Quantity(300,'K')
>>> p1 = Quantity(100,'kPa')
>>> air.s(T=T1, p=p1)
6.7077 kJ/K/kg
>>> air.s(d=d1, u=u1)
...
>>> air.s(h=h1, p=p1)
...
:param T: temperature as a dimensional quantity (Default value = None)
:param p: pressure as a dimensional quantity (Default value = None)
:param **kwargs: zero, one, or two dimensional quantities of d,v,u,h,s
:returns: specific entropy as a dimensional quantity
"""
kwargs = self._update_kwargs(args,kwargs)
if 'T' not in kwargs.keys():
T_pm = self._get_T_from_others(**kwargs)
else:
T = kwargs['T']
T_pm = T.to("K").magnitude
if 'p' not in kwargs.keys():
p_pm = self._get_p_from_others(**kwargs)
else:
p = kwargs['p']
p_pm = p.to("bar").magnitude
pm_result = self._pm.s(T=T_pm, p=p_pm)[0]
return self._to_quantity("s", pm_result, "specific entropy")
@property
def R(self, *args, **kwargs):
"""
specific gas constant (independent of state)
example:
>>> air.R
0.28705 kJ/K/kg
"""
try:
pm_result = self._pm.R
except Exception as e:
if self.verbose:
print(e)
print("Calculation from universal gas constant and molecular weight")
pm_result = R_u_si / self._pm.mw()
return self._to_quantity("R", pm_result, "specific heat")
@property
def mm(self, *args, **kwargs):
"""molar mass (independent of state)
{also accessible as: .mw}
example:
>>> air.mm
28.965 kg/kmol
"""
pm_result = self._pm.mw()
return self._to_quantity("mw", pm_result, "molar mass")
@property
def X(self, *args, **kwargs):
"""mixture composition as mass fractions(independent of state)
example:
>>> air.X
{'ig.Ar': 0.009350187003740077,
'ig.CO2': 0.00031400628012560254,
'ig.N2': 0.7808556171123423,
'ig.O2': 0.2094801896037921}
"""
try:
pm_result = self._pm.X()
except AttributeError as e:
if self.verbose:
print(e)
pm_result = {self._pm.data["id"]: 1.0}
return pm_result
@property
def Y(self, *args, **kwargs):
"""mixture composition as mole fractions(independent of state)
example:
>>> air.Y
{'ig.Ar': 0.012895634840195168,
'ig.CO2': 0.0004771062632750561,
'ig.N2': 0.7552055804206431,
'ig.O2': 0.23142167847588652}
"""
try:
pm_result = self._pm.Y()
except AttributeError as e:
if self.verbose:
print(e)
pm_result = {self._pm.data["id"]: 1.0}
return pm_result
def property_diagram(
self,
x=None,
y=None,
x_units=None,
y_units=None,
saturation=False,
unit_system=None,
**kwargs,
):
unit_system = unit_system or self.unit_system
return PropertyPlot(
x=x,
y=y,
x_units=x_units,
y_units=y_units,
property_table=self,
saturation=False,
unit_system=unit_system,
**kwargs,
)
def Ts_diagram(self, unit_system=None, **kwargs):
unit_system = unit_system or self.unit_system
return self.property_diagram(x="s", y="T", unit_system=unit_system, **kwargs)
def pv_diagram(self, unit_system=None, **kwargs):
unit_system = unit_system or self.unit_system
return self.property_diagram(x="v", y="p", unit_system=unit_system, **kwargs)
def Tv_diagram(self, unit_system=None, **kwargs):
unit_system = unit_system or self.unit_system
return self.property_diagram(x="v", y="T", unit_system=unit_system, **kwargs)
def hs_diagram(self, unit_system=None, **kwargs):
unit_system = unit_system or self.unit_system
return self.property_diagram(x="s", y="h", unit_system=unit_system, **kwargs)
def ph_diagram(self, unit_system=None, **kwargs):
unit_system = unit_system or self.unit_system
return self.property_diagram(x="h", y="p", unit_system=unit_system, **kwargs)
def pT_diagram(self, unit_system=None, **kwargs):
unit_system = unit_system or self.unit_system
return self.property_diagram(x="T", y="p", unit_system=unit_system, **kwargs)
def LegacyPropertyPlot(
x=None,
y=None,
x_units=None,
y_units=None,
plot_type=None,
fluid=None,
unit_system="SI_C",
**kwargs,
):
props = Properties(fluid=fluid, unit_system=unit_system, **kwargs)
return PropertyPlot(
x=x,
y=y,
x_units=x_units,
y_units=y_units,
property_table=props,
saturation=False,
unit_system=unit_system,
**kwargs,
)
```
#### File: kilojoule/kilojoule/display.py
```python
from string import ascii_lowercase
from IPython.display import display, HTML, Math, Latex, Markdown
from sympy import sympify, latex
# import re
import regex as re
import functools
import inspect
import logging
from .organization import QuantityTable
from .common import get_caller_namespace
import ast
import astor
from .units import units, Quantity
multiplication_symbol = " \cdot "
pre_sympy_latex_substitutions = {
"Delta_(?!_)": "Delta*",
"delta_(?!_)": "delta*",
"Delta__": "Delta_",
"delta__": "delta_",
"math.log": "log",
"np.pi": "pi",
"math.pi": "pi",
"Nu": "Nuplchldr",
"_bar": "bar",
"_ddot": "ddot",
"_dot": "dot",
"_ppprime|_tripleprime": "_tripprmplchldr",
"_pprime|_doubleprime": "_doubprmplchldr",
"_prime": "_prmplchldr",
}
post_sympy_latex_substitutions = {
" to ": r"\\to{}",
r"\\Delta ": r"\\Delta{}",
r"\\delta ": r"\\delta{}",
r"(?<!\(|\\cdot|,|\\to) (?!\\right|\\cdot|,|\\to)": r",",
r"Nuplchldr": r"Nu",
r"\\hbar": r"\\bar{h}",
r"\\bar{": r"\\overline{",
r"(infty|infinity)": r"\\infty",
r"inf(,|})": r"\\infty\1",
r"^inf$": r"\\infty",
r"_\{tripprmplchldr\}|,tripprmplchldr": r"'''",
r"_\{tripprmplchldr,": r"'''_\{",
r"_\{doubprmplchldr\}|,doubprmplchldr": r"''",
r"_\{doubprmplchldr,": r"''_{",
r"_\{prmplchldr\}|,prmplchldr": r"'",
r"_\{prmplchldr,": r"'_\{",
r",to,": r"\\to{}",
r"dimensionless": "",
}
__variable_latex_subs__ = {
"np.log": r"\ln ",
"math.log": r"\ln ",
"log": r"\ln ",
}
def set_latex(sub_dict):
for key, value in sub_dict.items():
__variable_latex_subs__[key] = value
def _ast_to_string(ast_node, line_indent=""):
next_line_indent = line_indent + " "
if isinstance(ast_node, ast.AST):
return (
ast_node.__class__.__name__
+ "("
+ ",".join(
"\n"
+ next_line_indent
+ field_name
+ " = "
+ _ast_to_string(child_node, next_line_indent)
for field_name, child_node in ast.iter_fields(ast_node)
)
+ ")"
)
elif isinstance(ast_node, list):
return (
"["
+ ",".join(
"\n" + next_line_indent + _ast_to_string(child_node, next_line_indent)
for child_node in ast_node
)
+ "]"
)
else:
return repr(ast_node)
def to_numeric(code, namespace=None, verbose=False):
    namespace = namespace or get_caller_namespace()
if isinstance(code, str):
try:
numeric = eval(code, namespace)
numeric = numeric_to_string(numeric)
except Exception as e:
if verbose:
print(e)
numeric = "??"
else:
numeric = numeric_to_string(code)
return numeric
def numeric_to_string(numeric):
if isinstance(numeric, units.Quantity):
try:
numeric = f"{numeric:.5~L}"
except:
numeric = f"{numeric:~L}"
numeric = re.sub(r"\\\s*$", "", numeric)
else:
try:
numeric = f" {numeric:.5} "
except:
numeric = f" {numeric} "
return numeric
def to_latex(code):
if "[" in code:
return index_to_latex(code)
if code in __variable_latex_subs__.keys():
return __variable_latex_subs__[code]
else:
for k, v in pre_sympy_latex_substitutions.items():
code = re.sub(k, v, code)
code = latex(sympify(code))
for key, value in post_sympy_latex_substitutions.items():
code = re.sub(key, value, code)
return code
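# Illustrative behaviour (not exhaustive): to_latex("T_1") passes through sympy and the
# substitution tables above and would typically yield "T_{1}".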
def index_to_latex(code):
var, slc = code.split("[", 1)
var_sym = to_latex(var)
slc = slc[:-1]
try:
slc_sym = to_latex(slc)
    except Exception as e:
slc_sym = slc
symbolic = f"{{ {var_sym} }}_{{ {slc_sym} }}"
return symbolic
class FormatCalculation:
"""Format an assignment statement as a equation progression"""
def __init__(
self,
input_node=None,
namespace=None,
progression=None,
verbose=False,
execute=False,
**kwargs,
):
self.namespace = namespace or get_caller_namespace()
self.input_node = input_node
self.progression = progression
self.verbose = verbose
self.iscomplex = False
self.kwargs = kwargs
if execute:
exec(self.input_string, self.namespace)
self._process_line()
def display(self):
display(Latex(self.output_string))
def _process_line(self):
line = self.input_node
LHS = self._process_node(line.targets[0], self.namespace, self.verbose)
LHS_Symbolic = LHS["symbolic"]
LHS_Numeric = LHS["numeric"]
MID_Symbolic = ""
if len(line.targets) > 1:
for target in line.targets[1:]:
targ = self._process_node(target)
MID_Symbolic += targ["symbolic"] + " = "
RHS_Symbolic = ""
RHS = self._process_node(line.value, self.namespace, self.verbose)
RHS_Symbolic = RHS["symbolic"]
RHS_Numeric = RHS["numeric"]
if self.verbose:
print(
f"LHS_Symbolic: {LHS_Symbolic}\nRHS_Symbolic: {RHS_Symbolic}\nRHS_Numeric: {RHS_Numeric}\nLHS_Numeric: {LHS_Numeric}"
)
result = f"\\begin{{align}}\n {LHS_Symbolic} &= {MID_Symbolic} {RHS_Symbolic} "
RSymComp = RHS_Symbolic.replace(" ", "")
RNumComp = RHS_Numeric.replace(" ", "")
LNumComp = LHS_Numeric.replace(" ", "")
if self.progression:
if RSymComp != RNumComp != LNumComp:
if self.iscomplex:
result += f"\\\\\n &= {RHS_Numeric}\\\\\n &= {LHS_Numeric}"
else:
result += f" = {RHS_Numeric} = {LHS_Numeric}"
elif RSymComp != RNumComp:
result += f" = {RHS_Numeric} "
elif RNumComp != LNumComp:
result += f" = {LHS_Numeric} "
else:
result += f" = {LHS_Numeric}"
result += "\n\end{align}\n"
self.output_string = result
def _process_node(self, node, namespace=None, verbose=False, **kwargs):
# namespace = namespace or get_caller_namespace()
namespace = namespace or self.namespace
symbolic = ""
numeric = ""
code = ""
lst = []
dct = {}
if verbose:
print(_ast_to_string(node))
# Number or String
if isinstance(node, ast.Constant):
symbolic = f"{node.value}"
numeric = symbolic
if isinstance(node.value, str):
code = f'"{node.value}"'
else:
code = symbolic
# Simple variable
elif isinstance(node, ast.Name):
code = node.id
symbolic = to_latex(code)
numeric = to_numeric(code, namespace)
# Subscript
elif isinstance(node, ast.Subscript):
val = self._process_node(node.value)
slc = self._process_node(node.slice)
code = f"{val['code']}[{slc['code']}]"
symbolic = f"{{{val['symbolic']}}}_{{ {slc['symbolic']} }}"
numeric = to_numeric(code, namespace)
# Index
elif isinstance(node, ast.Index):
result = self._process_node(node.value)
code = result["code"]
symbolic = result["symbolic"]
numeric = to_numeric(code, namespace)
# Simple Math Operation
elif isinstance(node, ast.BinOp):
self.iscomplex = True
left = self._process_node(node.left)
right = self._process_node(node.right)
# Addition
if isinstance(node.op, ast.Add):
code = f"{left['code']} + {right['code']}"
symbolic = f"{left['symbolic']} + {right['symbolic']}"
numeric = f"{left['numeric']} + {right['numeric']}"
# Subtraction
elif isinstance(node.op, ast.Sub):
code = f"{left['code']} - ({right['code']})"
if isinstance(node.right, ast.BinOp):
if isinstance(node.right.op, ast.Add) or isinstance(
node.right.op, ast.Sub
):
right["symbolic"] = f" \\left( {right['symbolic']} \\right)"
right["numeric"] = f"\\left( {right['numeric']} \\right)"
if right["numeric"].startswith("-"):
right["numeric"] = f"\\left( {right['numeric']} \\right)"
symbolic = f" {left['symbolic']} - {right['symbolic']} "
numeric = f" {left['numeric']} - {right['numeric']} "
# Multiplication
elif isinstance(node.op, ast.Mult):
code = f"({left['code']})*({right['code']})"
if isinstance(node.left, ast.BinOp):
if isinstance(node.left.op, ast.Add) or isinstance(
node.left.op, ast.Sub
):
left["symbolic"] = f"\\left( {left['symbolic']} \\right)"
left["numeric"] = f"\\left( {left['numeric']} \\right)"
if isinstance(node.right, ast.BinOp):
if isinstance(node.right.op, ast.Add) or isinstance(
node.right.op, ast.Sub
):
right["symbolic"] = f"\\left( {right['symbolic']} \\right)"
right["numeric"] = f"\\left( {right['numeric']} \\right)"
symbolic = (
f" {left['symbolic']} {multiplication_symbol} {right['symbolic']} "
)
numeric = (
f" {left['numeric']} {multiplication_symbol} {right['numeric']} "
)
# Division
elif isinstance(node.op, ast.Div):
code = f"({left['code']})/({right['code']})"
symbolic = f"\\frac{{ {left['symbolic']} }}{{ {right['symbolic']} }}"
numeric = f"\\frac{{ {left['numeric']} }}{{ {right['numeric']} }}"
# Exponent
elif isinstance(node.op, ast.Pow):
code = f"({left['code']})**({right['code']})"
if isinstance(node.left, ast.BinOp):
left["symbolic"] = f"\\left({left['symbolic']}\\right)"
left["numeric"] = f"\\left({left['numeric']}\\right)"
elif "\ " in left["numeric"]:
left["numeric"] = f"\\left({left['numeric']} \\right)"
if isinstance(node.right, ast.BinOp):
if not isinstance(node.right.op, ast.Div):
right["symbolic"] = f"\\left({right['symbolic']}\\right)"
right["numeric"] = f"\\left({right['numeric']}\\right)"
symbolic = f"{left['symbolic']}^{right['symbolic']}"
numeric = f"{left['numeric']}^{right['numeric']}"
else:
print(f"BinOp not implemented for {node.op.__class__.__name__}")
_ast_to_string(node)
# Unary Operation
elif isinstance(node, ast.UnaryOp):
if isinstance(node.op, ast.USub):
operand = self._process_node(node.operand)
symbolic = f"-{operand['symbolic']}"
numeric = f"-\\left( {operand['numeric']} \\right)"
else:
print(f"UnaryOp not implemented for {node.op.__class__.__name__}")
_ast_to_string(node)
# Function call
elif isinstance(node, ast.Call):
if isinstance(node.func, ast.Attribute):
attr = self._process_node(node.func, in_fn_call=True)
fn_name_sym = attr["symbolic"]
fn_name_code = attr["code"]
else:
fn_name_sym = fn_name_code = node.func.id
fn_base_name = fn_name_code.split(".")[-1]
# absolute value
if fn_base_name == "abs":
symbolic = numeric = " \\left| "
symbolic_close = numeric_close = " \\right|"
# square root
elif fn_base_name == "sqrt":
symbolic = numeric = "\\sqrt{"
symbolic_close = numeric_close = "}"
else:
symbolic = numeric = f"\\mathrm{{ {fn_name_sym} }}\\left( "
symbolic_close = numeric_close = " \\right)"
code = f"{fn_name_code}("
arg_idx = 0
for arg in node.args:
if arg_idx > 0:
code += ", "
symbolic += ", "
numeric += ", "
parg = self._process_node(arg)
code += parg["code"]
symbolic += parg["symbolic"]
numeric += parg["numeric"]
arg_idx += 1
for kw in node.keywords:
val = self._process_node(kw.value)
if arg_idx > 0:
code += ", "
symbolic += ", "
numeric += ", "
code += f"{kw.arg} = {val['code']}"
symbolic += f"\\mathrm{{ {kw.arg} }} = {val['symbolic']}"
numeric += f"\\mathrm{{ {kw.arg} }} = {val['numeric']}"
arg_idx += 1
code += ")"
symbolic += symbolic_close
            numeric += numeric_close
# Quantity
if fn_base_name == "Quantity":
symbolic = to_numeric(code)
numeric = symbolic
# .to()
elif fn_base_name == "to":
val = self._process_node(node.func.value)
symbolic = val["symbolic"]
code = f'{val["code"]}.to("{node.args[0].value}")'
numeric = to_numeric(code)
# sum()
if fn_base_name == "sum":
symbolic = numeric = ""
if isinstance(node.args[0], ast.ListComp):
listcomp = self._process_node(
node.args[0], join_symb="+", list_delim=["", ""]
)
elt = self._process_node(node.args[0].elt)
for comprehension in node.args[0].generators:
symbolic += r"\sum"
# numeric += r"\sum"
target = self._process_node(comprehension.target)
comp_iter = self._process_node(comprehension.iter)
symbolic += f"_{{{target['symbolic']}={comp_iter['symbolic']}}}"
# numeric += f"_{{{target['numeric']}}}"
symbolic += f"{{ {elt['symbolic']} }}"
numeric += f"{{ {listcomp['numeric']} }}"
# Attribute
elif isinstance(node, ast.Attribute):
val = self._process_node(node.value, nested_attr=True)
code = f"{val['code']}.{node.attr}"
symbolic = code
numeric = symbolic
if "nested_attr" not in kwargs:
*paren, attr = code.split(".")
symbolic = f"\\underset{{ {'.'.join(paren)} }}{{ {attr} }}"
if "in_fn_call" in kwargs:
numeric = symbolic
else:
numeric = to_numeric(code)
# List
elif isinstance(node, ast.List):
lst = []
for i in node.elts:
if self.verbose:
print(i)
lst.append(self._process_node(i))
if self.verbose:
print(lst[-1])
if self.verbose:
print(lst)
code = "[" + ",".join([i["code"] for i in lst]) + "]"
if len(lst) <= 3:
symbolic = "[" + ",".join([i["symbolic"] for i in lst]) + "]"
numeric = "[" + ",".join([i["numeric"] for i in lst]) + "]"
else:
symbolic = f"[{lst[0]['symbolic']}, \ldots, {lst[-1]['symbolic']}]"
numeric = f"[{lst[0]['numeric']}, \ldots, {lst[-1]['numeric']}]"
# List Comprehension
elif isinstance(node, ast.ListComp):
if "join_symb" in kwargs:
join_symb = kwargs["join_symb"]
else:
join_symb = ", "
if "list_delim" in kwargs:
list_delim = kwargs["list_delim"]
else:
list_delim = ["\\left[", "\\right]"]
# lst = ast.unparse(node) # available in python 3.9
lst = eval(astor.to_source(node), self.namespace)
elt = self._process_node(node.elt)
symbolic = f"{{\\left[ {elt['symbolic']} \\right]}}"
for comprehension in node.generators:
target = self._process_node(comprehension.target)
comp_iter = self._process_node(comprehension.iter)
symbolic += f"_{{{target['symbolic']}={comp_iter['symbolic']}}}"
if len(lst) <= 3:
numeric = (
list_delim[0]
+ join_symb.join(
[to_numeric(i, self.namespace, self.verbose) for i in lst]
)
+ list_delim[1]
)
else:
numeric = f"[{to_numeric(lst[0],self.namespace)}{join_symb}\ldots{join_symb}{to_numeric(lst[-1],self.namespace)}]"
# Not Implemented
else:
if self.verbose:
print(f"not implemented for {node.__class__.__name__}")
_ast_to_string(node)
code = astor.to_source(node)
symbolic = code
numeric = f"{eval(code, self.namespace)}"
output = dict(symbolic=symbolic, numeric=numeric, code=code, list=lst, dict=dct)
return output
class Calculations:
"""Display the calculations in the current cell"""
def __init__(
self,
namespace=None,
input_string=None,
comments=True,
progression=True,
return_latex=False,
verbose=False,
execute=False,
**kwargs,
):
self.namespace = namespace or get_caller_namespace()
self.cell_string = input_string or self.namespace["_ih"][-1]
self.output = ""
self.progression = progression
self.comments = comments
self.verbose = verbose
self.kwargs = kwargs
if execute:
exec(self.cell_string, self.namespace)
self.input = self.filter_string(self.cell_string)
self.process_input_string(self.input)
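    # Example (hypothetical usage; assumes this is the last statement of a
    # Jupyter cell where `Quantity` is available in the namespace):
    #     L = Quantity(2, 'm')
    #     W = 3 * L
    #     Calculations()   # parses the cell source and renders each assignment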
def process_code(self, string):
output = ""
self.parsed_tree = ast.parse(string)
for line in self.parsed_tree.body:
if isinstance(line, ast.Assign):
formatted_calc = FormatCalculation(
line,
namespace=self.namespace,
progression=self.progression,
verbose=self.verbose,
**self.kwargs,
)
formatted_calc.display()
output += formatted_calc.output_string
def process_input_string(self, string):
if self.comments:
lines = string.split("\n")
code_block = ""
for line in lines:
if line.startswith("#"):
if code_block != "":
self.process_code(code_block)
code_block = ""
processed_string = re.sub("^#", "", line)
self.output += re.sub("#", "", line) + r"<br/>" # + '\n'
display(Markdown(processed_string))
else:
code_block += line + "\n"
if code_block != "":
self.process_code(code_block)
code_block = ""
else:
self.process_code(string)
def filter_string(self, string):
result = ""
for line in string.split("\n"):
if (not line.startswith("#")) and ("#" in line):
code, comment = line.split("#", 1)
if not any(i in comment for i in "hide noshow suppress".split()):
result += line + "\n"
else:
result += line + "\n"
return result
class QuantityTables:
"""Display all StatesTables in namespace"""
def __init__(self, namespace=None, **kwargs):
self.namespace = namespace or get_caller_namespace()
for k, v in sorted(self.namespace.items()):
if not k.startswith("_"):
if isinstance(v, QuantityTable):
v.display()
class Quantities:
"""Display Quantities in namespace
If a list of variables is provided, display the specified
variables. Otherwise display all variables with units.
"""
def __init__(self, variables=None, n_col=3, style=None, namespace=None, **kwargs):
self.namespace = namespace or get_caller_namespace()
self.style = style
self.n = 1
self.n_col = n_col
self.latex_string = r"\begin{align}{ "
if variables is not None:
for variable in variables:
self.add_variable(variable, **kwargs)
else:
for k, v in sorted(self.namespace.items()):
if not k.startswith("_"):
if isinstance(v, units.Quantity):
self.add_variable(k, **kwargs)
self.latex_string += r" }\end{align}"
self.latex = self.latex_string
display(Latex(self.latex_string))
def add_variable(self, variable, **kwargs):
"""Add a variable to the display list
Args:
variable:
**kwargs:
Returns:
"""
symbol = to_latex(variable)
value = to_numeric(variable, self.namespace)
boxed_styles = ["box", "boxed", "sol", "solution"]
if self.style in boxed_styles:
self.latex_string += r"\Aboxed{ "
self.latex_string += symbol + r" }&={ " + value
if self.style in boxed_styles:
self.latex_string += r" }"
if self.n < self.n_col:
self.latex_string += r" }&{ "
self.n += 1
else:
self.latex_string += r" }\\{ "
self.n = 1
class Summary:
"""Display all quantities and StatesTables in namespace
    If a list of variables is provided, display only those variables,
otherwise display all quantities defined in the namespace.
"""
def __init__(
self, variables=None, n_col=None, namespace=None, style=None, **kwargs
):
self.namespace = namespace or get_caller_namespace()
if variables is not None:
if n_col is None:
n_col = 1
Quantities(variables, n_col=n_col, namespace=self.namespace, style=style)
else:
if n_col is None:
n_col = 3
self.quantities = Quantities(
namespace=self.namespace, n_col=n_col, **kwargs
)
self.state_tables = QuantityTables(namespace=self.namespace, **kwargs)
```
#### File: kilojoule/kilojoule/organization.py
```python
from .units import units, Quantity
from .common import get_caller_namespace
import pandas as pd
from IPython.display import display, HTML, Math, Latex, Markdown
import re
default_property_dict = {
'T':'degC', # Temperature: unit options ('K','degC','degF','degR')
'p':'kPa', # pressure: unit options ('kPa','bar','psi','atm',etc.)
'v':'m^3/kg', # specific volume
'u':'kJ/kg', # specific internal energy
'h':'kJ/kg', # specific enthalpy
's':'kJ/kg/K', # specific entropy
'x':'', # quality: dimensionless units enter as an empty string
'phi':'kJ/kg', # specific exergy
'psi':'kJ/kg', # specific exergy
'm':'kg', # mass
'mdot':'kg/s', # mass flow rate
'V':'m^3', # volume
'Vdot':'m^3/s', # volumetric flow rate
'X':'kJ', # exergy
'Xdot':'kW', # exergy rate
}
class PropertyDict:
""" """
def __init__(self, property_symbol=None, units=None, unit_system="SI_C"):
self.dict = {}
self.property_symbol = property_symbol
self.unit_system = unit_system
self.set_units(units)
def set_units(self, units=None):
"""
Args:
units: (Default value = None)
Returns:
"""
if units is None:
try:
result = preferred_units_from_symbol(
self.property_symbol, self.unit_system
)
self.units = result
except:
self.units = units
else:
self.units = units
self._update_units()
def _update_units(self):
""" """
if self.units is not None:
for k, v in self.dict.items():
self.dict[k] = v.to(self.units)
def __repr__(self):
return f"<kilojoule.PropertyDict for {self.property_symbol}>"
def __getitem__(self, item):
return self.dict[str(item)]
def __setitem__(self, item, value):
if value is not None:
if self.units is not None:
if isinstance(value, units.Quantity):
result = value.to(self.units)
else:
result = Quantity(value, self.units)
else:
result = value
result.property_symbol = self.property_symbol
self.dict[str(item)] = result
def __delitem__(self, item):
del self.dict[item]
class QuantityTable:
"""Table for storing quantities"""
def __init__(
self,
properties=default_property_dict,
property_source = None,
unit_system="kSI_C",
add_to_namespace=None,
):
self.properties = []
self.dict = {}
        self.unit_system = None
        self.property_source = property_source
        self.parent_namespace = None
        if add_to_namespace is not None:
            self.parent_namespace = get_caller_namespace()
if isinstance(properties, (list, tuple)):
self.unit_system = unit_system
for prop in properties:
self.add_property(prop, add_to_namespace=self.parent_namespace)
elif isinstance(properties, dict):
for prop, unit in properties.items():
self.add_property(prop, units=unit, add_to_namespace=self.parent_namespace)
else:
raise ValueError("Expected properties to be a list, tuple, or dict")
def add_property(self, property, units=None, unit_system=None, add_to_namespace=None):
"""
Args:
property (str): property symbols
units (str): property units (Default value = None)
unit_system (str): unit system to infer units if not defined with the
units keyword (Default value = None)
property_type (str): property type, i.e. temperature, density, etc (Default value = None)
Returns:
"""
property = str(property)
self.properties.append(property)
if units is not None:
self.dict[property] = PropertyDict(property, units=units)
elif unit_system is not None:
self.dict[property] = PropertyDict(property, unit_system=unit_system)
else:
self.dict[property] = PropertyDict(property, unit_system=self.unit_system)
if add_to_namespace is not None:
if add_to_namespace is True:
namespace=get_caller_namespace()
else:
namespace=add_to_namespace
namespace[property] = self.dict[property]
return self.dict[property]
def remove_property(self,property):
property = str(property)
try:
self.properties.remove(property)
except:
pass
def _list_like(self, value):
"""Try to detect a list-like structure excluding strings
Args:
value:
Returns:
"""
return not hasattr(value, "strip") and (
hasattr(value, "__getitem__") or hasattr(value, "__iter__")
)
def display(self, *args, dropna=True, **kwargs):
"""
Args:
*args:
dropna: (Default value = True)
**kwargs:
Returns:
"""
df = self.to_pandas(*args, dropna=dropna, **kwargs)
display(HTML(df.to_html(**kwargs)))
def to_dict(self):
""" """
return {i: self.dict[i].dict for i in self.properties}
def _atoi(self, text):
return int(text) if text.isdigit() else text
def _natural_keys(self, text):
        return [ self._atoi(c) for c in re.split(r'(\d+)', text) ]
def to_pandas(self, *args, dropna=True, **kwargs):
# pint_pandas.PintType.ureg.default_format = "~P"
def formatter_func(units):
try:
formatter = "{:" + units._REGISTRY.default_format + "}"
return formatter.format(units)
except:
formatter = "{:~L}"
return formatter.format(units)
def firstQuantity(lst):
for item in lst:
if isinstance(item,Quantity):
return item
df = pd.DataFrame(self.to_dict())
df_columns = df.columns.to_frame()
units_col = []
for col in df.columns:
try:
units_col.append(formatter_func(firstQuantity(df[col].values).units))
except AttributeError:
units_col.append('')
df_columns["units"] = units_col
from collections import OrderedDict
data_for_df = OrderedDict()
for i, col in enumerate(df.columns):
data_for_df[tuple(df_columns.iloc[i])] = df[col].values.data
df_new = pd.DataFrame(data_for_df, columns=data_for_df.keys())
df_new.columns.names = df.columns.names + ["unit"]
df_new.index = df.index
df = df_new
for prop in df.keys():
df[prop] = df[prop].apply(lambda x: x.magnitude if isinstance(x,Quantity) else x)
if dropna:
df.dropna(axis="columns", how="all", inplace=True)
df.fillna("-", inplace=True)
df.index = df.index.map(str)
def atoi(text):
return int(text) if text.isdigit() else text
def natural_keys(text):
            return [ atoi(c) for c in re.split(r'(\d+)', text) ]
a = df.index.tolist()
a.sort(key=self._natural_keys)
df = df.reindex(a)
return df
def fix(self, state=None, property_source=None, verbose=False):
"""Fix a state based on known properties
Use the known properties at a state to evaluate all unknown
properties at that state using the property tables store in
property_source. If a default property source has been
defined for the table, it will be used for property
evaluation. If a default property source has not been set, or
if the table contains multiple fluids, the property table that
should be used to fix the state needs to be provided as an
argument. There must already be enough independent properties
defined for the state to evaluate the unknown properties (1,
2, or 3 depending on the fluid).
Args:
state (str): state to fix
property_source (property_table): table to use when evaluating properties
(Default = None)
**kwargs:
"""
property_source = property_source or self.property_source
known_props = self[state].keys()
unknown_props = [i for i in self.properties if i not in known_props and hasattr(property_source,i) ]
indep_props_comb = [[i,j] for i in known_props for j in known_props if i != j]
depri_comb = [['T','p'],['p','T'],['T','h'],['h','T'],['T','u'],['u','T']]
for comb in depri_comb:
try: indep_props_comb.append(indep_props_comb.pop(indep_props_comb.index(comb)))
except: pass
if verbose:
print(f'property_source: {property_source}')
print(f'known_props: {known_props}')
print(f'unknown_props: {unknown_props}')
for up in unknown_props:
if verbose: print(f'trying to fix {up}')
for ipc in indep_props_comb:
if 'ID' not in ipc:
if verbose: print(ipc)
try:
indep_dict = { ipc[0]:self[state][ipc[0]], ipc[1]:self[state][ipc[1]] }
if verbose: print(f'using: {indep_dict}')
value = getattr(property_source,up)(**indep_dict)
# if 'unknown' in value:
# raise
self.__setitem__([state,up],value)
if verbose: print(f'{up} for {state}: {value}')
break
except Exception as e:
if verbose: print(e)
else:
if verbose: print(f'unable to fix {up} for state {state}')
@property
def states(self):
sts = []
for prop,prop_dict in self.dict.items():
for state in prop_dict.dict.keys():
sts.append(state)
sts = list(set(sts))
sts.sort(key=self._natural_keys)
return sts
def __getitem__(self, key, include_all=None):
if isinstance(key, slice):
states = self.states
len_states = len(states)
try:
start = states.index(str(key.start))
except:
if key.start is None:
start = 0
elif key.start < 0:
start = len_states + key.start + 1
try:
stop = states.index(str(key.stop))
except:
if key.stop is None:
stop = len_states
elif key.stop < 0:
stop = len_states + key.stop + 1
if include_all:
return [self[states[i]] for i in range(start, stop)]
else:
strt,stp,step = key.indices(len_states)
return [self[i] for i in range(start, stop, step)]
if self._list_like(key):
            len_var = len(key)
if len_var == 0:
raise IndexError("Received empty index.")
elif len_var == 1:
                key = str(key[0])
state_dict = {
i: self.dict[i][key]
for i in self.properties
if key in self.dict[i].dict.keys()
}
state_dict["ID"] = key
return state_dict
elif len_var == 2:
                state = str(key[1])
                property = str(key[0])
                return self.dict[property][state]
else:
raise IndexError("Received too long index.")
else:
key = str(key)
state_dict = {
i: self.dict[i][key]
for i in self.properties
if key in self.dict[i].dict.keys()
}
if "ID" not in state_dict.keys():
state_dict["ID"] = key
return state_dict
def __setitem__(self, index, value):
if self._list_like(index):
len_var = len(index)
if len_var == 0:
raise IndexError("Received empty index.")
elif len_var == 1:
# self.dict[index[0]] = value
raise IndexError(
"Recieved index of level 1: Assigned values at this level not implemented yet"
)
elif len_var == 2:
state = str(index[0])
property = str(index[1])
if property not in self.properties:
self.add_property(property)
self.dict[property][state] = value
else:
raise IndexError("Received too long index.")
else:
raise IndexError("Recieved index of level 1: Not implemented yet")
def __iter__(self):
        return iter(self.dict)
def __delitem__(self, item):
pass
def __str__(self, *args, **kwargs):
        return self.to_pandas(*args, **kwargs).to_string()
```
#### File: kilojoule/kilojoule/plotting.py
```python
from .common import preferred_units_from_type, preferred_units_from_symbol, invert_dict
from .units import units, Quantity
import matplotlib.pyplot as plt
from IPython.display import display as mpldisplay
from IPython.display import clear_output
import numpy as np
# Set matplotlib figure size defaults
plt.rcParams["figure.figsize"] = [6 * 2, 4 * 2]
plt.rcParams["figure.dpi"] = 100 # 200 e.g. is really fine, but slower
n_points_default = 100
labelprops_default = dict(
rotation_mode='anchor',
horizontalalignment='center',
verticalalignment='bottom',
size='9',
)
gridlineprops_default = dict(
linewidth=0.25,
color='gray',
linestyle = (0,(5,10))
)
arrowprops_default = dict(
arrowstyle='fancy',
)
pointprops_default = dict(
markersize=4
)
class PropertyPlot:
""" """
def __init__(
self,
x=None,
y=None,
x_units=None,
y_units=None,
property_table=None,
saturation=False,
unit_system=None,
fig=None,
subplot=None,
log_x=False,
log_y=False,
**kwargs,
):
self.props = property_table
self.fluid = self.props.fluid
self.unit_system = unit_system or self.props.unit_system
self.props.unit_system = self.unit_system
self.x_symb = x
self.y_symb = y
self.x_units = x_units or preferred_units_from_symbol(
self.x_symb, self.unit_system
)
self.y_units = y_units or preferred_units_from_symbol(
self.y_symb, self.unit_system
)
if x=="T" and y=="omega":
self.psychrometric=True
else:
self.psychrometric=False
# Set up matplotlib
units.setup_matplotlib()
if fig is None:
self.fig = plt.figure()
else:
self.fig = fig
if subplot is None:
self.ax = self.fig.add_subplot(1, 1, 1)
else:
self.ax = self.fig.add_subplot(*subplot)
self.ax.set_ylabel(f"${self.y_symb}$ [$\mathrm{{{Quantity(1,self.y_units).units:~L}}}$]")
self.ax.set_xlabel(f"${self.x_symb}$ [{Quantity(1,self.x_units).units:~P}]")
if log_x:
self.ax.set_xscale("log")
if log_y:
self.ax.set_yscale("log")
self.ax.spines["right"].set_visible(False)
self.ax.spines["top"].set_visible(False)
# if the fluid is a real-fluid, define triple point and critical point
if hasattr(self.props, "T_triple"):
self._real_fluid_config()
# plot saturation lines if specified
if saturation:
self.plot_saturation_lines()
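    # Example (hypothetical usage; assumes `water` is a kilojoule property
    # table and `states` is a QuantityTable of fixed states):
    #     plot = PropertyPlot(x='s', y='T', property_table=water, saturation=True)
    #     plot.plot_state(states['1'])
    #     plot.plot_process(states['1'], states['2'], path='isobaric')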
def _real_fluid_config(self):
self.T_triple = self.props.T_triple
self.p_triple = self.props.p_triple
self.T_critical = self.props.T_critical
self.p_critical = self.props.p_critical
def _merge_line2D_list(self, line_list):
if isinstance(line_list,list):
xdata = np.array([])
ydata = np.array([])
for l in line_list:
xdata = np.append(xdata,l.get_xdata())
ydata = np.append(ydata,l.get_ydata())
line = line_list[0]
line.set_xdata(xdata)
line.set_ydata(ydata)
return line
else:
return line_list
def _trim_line2D_data(self, line, axis_lim,extend=True):
line = self._merge_line2D_list(line)
xdata = line.get_xdata()
ydata = line.get_ydata()
for i,val in enumerate(axis_lim):
if isinstance(val,Quantity): axis_lim[i]=val.magnitude
if isinstance(xdata,Quantity): xdata=xdata.magnitude
if isinstance(ydata,Quantity): ydata=ydata.magnitude
ind = np.where(np.logical_and(np.logical_and(np.logical_and(
xdata>=axis_lim[0],
xdata<=axis_lim[1]),
ydata>=axis_lim[2]),
ydata<=axis_lim[3])
)
if extend:
maxind = len(xdata)-2
ind2 = np.array([])
for i in ind[0]:
if i>0: ind2 = np.append(ind2,i-1)
ind2 = np.append(ind2,i)
if i<maxind: ind2 = np.append(ind2,i+1)
ind = np.unique(ind2.astype(int))
line.set_xdata(xdata[ind])
line.set_ydata(ydata[ind])
return line
def _line_pos(self, line, pos=None, xcoor=None, ycoor=None, **kwargs):
line = self._merge_line2D_list(line)
if pos is None: pos = 0.5
ax = line.axes
xdata = line.get_xdata()
if isinstance(xdata,Quantity): xdata = xdata.magnitude
xA,xB = xdata[0], xdata[-1]
Delta_x = xB-xA
xlim = ax.get_xlim()
Delta_xlim = xlim[-1]-xlim[0]
Delta_x_ax = abs(Delta_x/Delta_xlim)
ydata = line.get_ydata()
if isinstance(ydata,Quantity): ydata = ydata.magnitude
yA,yB = ydata[0], ydata[-1]
Delta_y = yB-yA
ylim = ax.get_ylim()
Delta_ylim = ylim[-1]-ylim[0]
Delta_y_ax = abs(Delta_y/Delta_ylim)
xlog = ax.get_xscale() == "log"
        ylog = ax.get_yscale() == "log"
if len(xdata)==2:
if xlog or ylog:
if Delta_x_ax > Delta_y_ax:
xdata = np.geomspace(xA, xB, 100)
                    ydata = yA + (yB-yA)/(xB-xA)*(xdata-xA)
else:
ydata = np.geomspace(yA, yB, 100)
                    xdata = xA + (xB-xA)/(yB-yA)*(ydata-yA)
else:
xdata = np.linspace(xA, xB, 100)
ydata = np.linspace(yA, yB, 100)
start_ind = int(np.ceil(pos*len(xdata)))
elif xcoor is not None:
start_ind = np.argmin(np.absolute(xdata-xcoor))
elif ycoor is not None:
start_ind = np.argmin(np.absolute(ydata-ycoor))
elif Delta_x_ax>Delta_y_ax:
if xlog or ylog:
xcoor = np.geomspace(xA, xB, 101)[int(pos*100)]
else:
xcoor = xdata[0] + pos*Delta_x
start_ind = np.argmin(np.absolute(xdata-xcoor))
else:
if xlog or ylog:
ycoor = np.geomspace(yA, yB, 101)[int(pos*100)]
else:
ycoor = ydata[0] + pos*Delta_y
start_ind = np.argmin(np.absolute(ydata-ycoor))
end_ind = start_ind+1
if start_ind >= len(xdata) or end_ind >= len(xdata):
start_ind=-2
end_ind=-1
x1 = xdata[start_ind]
y1 = ydata[start_ind]
x2 = xdata[end_ind]
y2 = ydata[end_ind]
return ax,x1,y1,x2,y2
def _plot_straight_line(self,**kwargs):
"""
:param **kwargs:
"""
x1 = kwargs.pop('x1')
x2 = kwargs.pop('x2')
y1 = kwargs.pop('y1')
y2 = kwargs.pop('y2')
return self.ax.plot(
[x1.to(self.x_units).magnitude, x2.to(self.x_units).magnitude],
[y1.to(self.y_units).magnitude, y2.to(self.y_units).magnitude],
**kwargs,
)
def text(self,x,y,s,axcoor=False,**kwargs):
if axcoor:
trans = self.ax.transAxes
else:
trans = self.ax.transData
return self.ax.text(x,y,s,transform=trans,**kwargs)
def plot(self,*args,**kwargs):
return self.ax.plot(*args,**kwargs)
def annotate(self,*args,**kwargs):
return self.ax.annotate(*args,**kwargs)
@property
def xlim(self):
return self.ax.get_xlim()
@property
def ylim(self):
return self.ax.get_ylim()
def plot_point(
self,
x,
y,
*args,
marker="o",
color="black",
label=None,
label_loc="north",
offset=5,
pointprops={},
labelprops={},
gridlines=False,
xgridline=False,
ygridline=False,
gridlineprops={},
**kwargs,
):
"""
:param x:
:param y:
:param *args:
:param marker: (Default value = "o")
:param color: (Default value = "black")
:param label: (Default value = None)
:param label_loc: (Default value = "north")
        :param offset: (Default value = 5)
:param **kwargs:
"""
pointprops = {**pointprops_default, **pointprops}
labelprops = {**labelprops_default, **labelprops}
gridlineprops = {**gridlineprops_default, **gridlineprops}
x = x.to(self.x_units).magnitude
y = y.to(self.y_units).magnitude
self.ax.plot(x, y, *args, marker=marker, color=color, **kwargs)
if label is not None:
ha = "center"
va = "center"
xytext = [0, 0]
if "north" in label_loc:
xytext[1] = offset
va = "bottom"
elif "south" in label_loc:
xytext[1] = -offset
va = "top"
if "east" in label_loc:
xytext[0] = offset
ha = "left"
elif "west" in label_loc:
xytext[0] = -offset
ha = "right"
ha = labelprops.pop('ha', ha)
va = labelprops.pop('va', va)
point = self.ax.annotate(
label, # this is the text
(x, y), # this is the point to label
**labelprops,
textcoords="offset points", # how to position the text
xytext=xytext, # distance from text to points (x,y)
ha=ha, # horizontal alignment can be left, right or center
va=va, # vertical alignment can be top, bottom, or middle
)
if gridlines: xgridline=ygridline=True
if xgridline:
self.ax.plot([x, x],[y,self.ax.get_ylim()[0]],**gridlineprops)
if ygridline:
self.ax.plot([x, self.ax.get_xlim()[0]],[y,y],**gridlineprops)
return point
def plot_state(self, state_dict, *args, pointprops={}, **kwargs):
"""
:param state_dict:
:param *args:
:param **kwargs:
"""
        pointprops = {**pointprops_default, **pointprops}
x = state_dict[self.x_symb]
y = state_dict[self.y_symb]
if "label" not in kwargs.keys():
kwargs["label"] = state_dict["ID"]
self.plot_point(x, y, *args, **kwargs, **pointprops)
def plot_states(self, key, *args, **kwargs):
if isinstance(key, slice):
for i in states(key):
self.plot_state(i, *args, **kwargs)
else:
for i in key:
                self.plot_state(i, *args, **kwargs)
def plot_iso_line(
self,
iso_symb=None,
iso_value=None,
x_range=None,
y_range=None,
alt_symb=None,
alt_range=None,
n_points=n_points_default,
verbose=False,
pos=None,
xcoor=None,
ycoor=None,
arrow=False,
arrowprops={},
label=None,
labelprops={},
**kwargs,
):
"""
:param iso_symb: (Default value = None)
:param iso_value: (Default value = None)
:param x_range: (Default value = None)
:param y_range: (Default value = None)
:param alt_symb: (Default value = None)
:param alt_range: (Default value = None)
:param n_points: (Default value = 100)
:param **kwargs:
"""
if x_range is not None:
if len(x_range) == 2:
x1 = x_range[0].to(self.x_units).magnitude
x2 = x_range[1].to(self.x_units).magnitude
if self.ax.get_xscale() == "log":
x_try = np.geomspace(x1, x2, num=n_points) * units(self.x_units)
else:
x_try = np.linspace(x1, x2, n_points) * units(self.x_units)
x = np.array([])
y = np.array([])
for i in x_try:
try:
prop_lookup_dict = {iso_symb: iso_value, self.x_symb: i}
y = np.append(
y,
getattr(self.props, self.y_symb)(**prop_lookup_dict)
.to(self.y_units)
.magnitude,
)
x = np.append(x, i)
except Exception as e:
if verbose:
print(f"Failed to plot {prop_lookup_dict}")
print(f"Exception: {e}")
else:
print("Expected a list with two values for x_range")
elif y_range is not None:
if len(y_range) == 2:
y1 = y_range[0].to(self.y_units).magnitude
y2 = y_range[1].to(self.y_units).magnitude
if self.ax.get_yscale() == "log":
y_try = np.geomspace(y1, y2, num=n_points) * units(self.y_units)
else:
y_try = np.linspace(y1, y2, n_points) * units(self.y_units)
x = np.array([])
y = np.array([])
for i in y_try:
try:
prop_lookup_dict = {iso_symb: iso_value, self.y_symb: i}
x = np.append(
x,
getattr(self.props, self.x_symb)(**prop_lookup_dict)
.to(self.x_units)
.magnitude,
)
y = np.append(y, i)
except Exception as e:
if verbose:
print(f"Failed to plot: {prop_lookup_dict}")
print(f"Exception: {e}")
else:
print("Expected a list with two values for y_range")
elif alt_range is not None:
if len(alt_range) == 2:
alt_units = alt_range[0].units
alt1 = alt_range[0].to(alt_units).magnitude
alt2 = alt_range[1].to(alt_units).magnitude
alt = np.linspace(alt1, alt2, n_points) * alt_units
x = np.array([])
y = np.array([])
for i in alt:
prop_lookup_dict = {iso_symb: iso_value, alt_symb: i}
x = np.append(
x,
getattr(self.props, self.x_symb)(**prop_lookup_dict)
.to(self.x_units)
.magnitude,
)
y = np.append(
y,
getattr(self.props, self.y_symb)(**prop_lookup_dict)
.to(self.y_units)
.magnitude,
)
else:
print("Expected a list with two values for alt_range")
isoline = self.ax.plot(x, y, **kwargs)
if arrow:
self.add_arrow(line=isoline, pos=pos, xcoor=xcoor, ycoor=ycoor, arrowprops=arrowprops, **kwargs)
if label is not None:
self.label_line(line=isoline, label=label, pos=pos, xcoor=xcoor, ycoor=ycoor, labelprops=labelprops, **kwargs)
return isoline
def plot_isentropic_efficiency(
self,
begin_state=None,
end_state=None,
color="black",
n_points=n_points_default,
show_reference=True,
verbose=False,
**kwargs,
):
x1 = begin_state[self.x_symb].to(self.x_units).magnitude
x2 = end_state[self.x_symb].to(self.x_units).magnitude
y1 = begin_state[self.y_symb].to(self.y_units).magnitude
y2 = end_state[self.y_symb].to(self.y_units).magnitude
si = begin_state["s"]
pi = begin_state["p"]
hi = begin_state["h"]
ho = end_state["h"]
po = end_state["p"]
hs = getattr(self.props, "h")(p=po, s=si)
wact = hi - ho
if verbose:
print(po)
print(si)
ws = hi - hs
eta_s = wact / ws
h_p = lambda p: hi - eta_s * (hi - self.props.h(p=p, s=si))
p_array = np.linspace(pi, po, n_points)
x = np.array([])
y = np.array([])
for p in p_array:
h = h_p(p)
prop_lookup_dict = {"h": h, "p": p}
x = np.append(
x,
getattr(self.props, self.x_symb)(**prop_lookup_dict)
.to(self.x_units)
.magnitude,
)
y = np.append(
y,
getattr(self.props, self.y_symb)(**prop_lookup_dict)
.to(self.y_units)
.magnitude,
)
processline = self.ax.plot(x, y, color=color, **kwargs)
return processline
def plot_process(
self,
begin_state=None,
end_state=None,
path=None,
iso_symb=None,
color="black",
pos=None,
xcoor=None,
ycoor=None,
arrow=True,
arrowprops={},
label=None,
labelprops={},
**kwargs,
):
"""
:param begin_state: (Default value = None)
:param end_state: (Default value = None)
:param path: (Default value = None)
:param iso_symb: (Default value = None)
:param color: (Default value = "black")
:param arrow: (Default value = False)
:param **kwargs:
"""
x1 = begin_state[self.x_symb]
x2 = end_state[self.x_symb]
y1 = begin_state[self.y_symb]
y2 = end_state[self.y_symb]
if iso_symb is None:
if path is None:
property_keys = [
"T",
"p",
"v",
"d",
"u",
"h",
"x",
"rho",
"u_molar",
"h_molar",
"s_molar",
"d_molar",
]
iso_dict = {}
for k in property_keys:
if k in begin_state and k in end_state:
if begin_state[k] == end_state[k]:
iso_dict[k] = begin_state[k]
if self.x_symb in iso_dict.keys() or self.y_symb in iso_dict.keys():
path = "straight"
elif not iso_dict:
path = "unknown"
else:
path = "iso_symb"
iso_symb = list(iso_dict.keys())[0]
else:
path = "iso_symb"
if path.lower() == "unknown":
process_line = self._plot_straight_line(x1=x1,x2=x2,y1=y1,y2=y2,
color=color, **kwargs, linestyle="--"
            ) # if none of the parameters matched between the states, draw a straight dashed line between the points
elif path.lower() == "straight":
process_line = self._plot_straight_line(x1=x1,x2=x2,y1=y1,y2=y2,
color=color, **kwargs
            ) # if one of the primary variables is constant, just draw a straight line between the points
elif path.lower() == "iso_symb":
# process_line = self.plot_iso_line(iso_symb, iso_value=begin_state[iso_symb], x_range=[x1,x2], **kwargs)
process_line = self.plot_iso_line(
iso_symb,
iso_value=begin_state[iso_symb],
alt_symb="p",
alt_range=[begin_state["p"], end_state["p"]],
color=color,
**kwargs,
)
elif path.lower() in ["isotherm", "isothermal", "constant temperature"]:
if self.x_symb == "T" or self.y_symb == "T":
process_line = self._plot_straight_line(x1=x1,x2=x2,y1=y1,y2=y2,color=color, **kwargs)
else:
process_line = self.plot_iso_line(
"T", begin_state["T"], color=color, x_range=[x1, x2], **kwargs
)
elif path.lower() in ["isobar", "isobaric", "constant pressure"]:
if self.x_symb == "p" or self.y_symb == "p":
process_line = self._plot_straight_line(x1=x1,x2=x2,y1=y1,y2=y2,color=color, **kwargs)
else:
process_line = self.plot_iso_line(
"p", begin_state["p"], color=color, x_range=[x1, x2], **kwargs
)
elif path.lower() in [
"isochor",
"isochoric",
"isomet",
"isometric",
"constant volume",
]:
if self.x_symb == "v" or self.y_symb == "v":
process_line = self._plot_straight_line(x1=x1,x2=x2,y1=y1,y2=y2,color=color, **kwargs)
else:
process_line = self.plot_iso_line(
"v", begin_state["v"], color=color, x_range=[x1, x2], **kwargs
)
elif path.lower() in ["isenthalp", "isenthalpic", "constant enthalpy"]:
if self.x_symb == "h" or self.y_symb == "h":
process_line = self._plot_straight_line(x1=x1,x2=x2,y1=y1,y2=y2,color=color, **kwargs)
else:
process_line = self.plot_iso_line(
"h", begin_state["h"], color=color, x_range=[x1, x2], **kwargs
)
elif path.lower() in ["isentropic", "isentrop", "constant entropy"]:
if self.x_symb == "s" or self.y_symb == "s":
process_line = self._plot_straight_line(x1=x1,x2=x2,y1=y1,y2=y2,color=color, **kwargs)
else:
process_line = self.plot_iso_line(
"s", begin_state["s"], color=color, x_range=[x1, x2], **kwargs
)
elif path.lower() in [
"isentropic efficiency",
"nonideal",
"non-ideal",
"isen-eff",
]:
process_line = self.plot_isentropic_efficiency(
begin_state, end_state, **kwargs
)
elif path.lower() in [
"simple",
"heating",
"cooling",
"simple heating",
"simple cooling",
"constant w",
"constant humidity",
"constant omega",
]:
if self.psychrometric:
xsat = max(self.props.T(w=y1,rel_hum=1).to(self.x_units).magnitude,self.ax.get_xlim()[0])
xsat = Quantity(xsat,self.x_units)
if xsat<=x2.to(self.x_units):
process_line = self._plot_straight_line(x1=x1,x2=x2,y1=y1,y2=y2,color=color,**kwargs)
else:
L1 = self._plot_straight_line(x1=x1.to(self.x_units),x2=xsat.to(self.x_units),y1=y1,y2=y1,color=color,**kwargs)
L2 = self.plot_iso_line("rel_hum",1,x_range=[xsat.to(self.x_units),x2.to(self.x_units)],color=color,**kwargs)
process_line = L1 if x1-xsat>xsat-x2 else L2
else:
process_line = self.plot_process(begin_state,end_state,"isobaric",iso_symb,color,pos,xcoor,
ycoor,arrow,arrowprops,label,labelprops,**kwargs)
else:
process_line = self._plot_straight_line(x1=x1,x2=x2,y1=y1,y2=y2,color=color, linestyle="--", **kwargs)
if arrow:
self.add_arrow(line=process_line, pos=pos, xcoor=xcoor, ycoor=ycoor, arrowprops=arrowprops, **kwargs)
if label is not None:
self.label_line(line=process_line, label=label, pos=pos, xcoor=xcoor, ycoor=ycoor, labelprops=labelprops, **kwargs)
return process_line
def add_arrow(self, line, pos=None, xcoor=None, ycoor=None, arrowprops={},**kwargs):
pos=pos or 0.5
arrowprops = {**arrowprops_default, **arrowprops}
if 'pos' in arrowprops.keys():
pos=arrowprops['pos']
del arrowprops['pos']
ax,x1,y1,x2,y2 = self._line_pos(line=line, pos=pos, xcoor=xcoor, ycoor=ycoor)
arrow = ax.annotate('',
xytext=(x1, y1),
xy=(x2,y2),
arrowprops=arrowprops,
)
return arrow
def label_line(self, line, label, pos=None, xcoor=None, ycoor=None, offset=5, rotate=True, labelprops={}, verbose=False, **kwargs):
"""Add a label to a line, optional rotated to be tangent.
Arguments
---------
line : matplotlib.lines.Line2D object,
label : str
        pos : float
percentage distance along longest x/y dimension to center the text
rotate : bool
whether to align the text to the local slope of the line
size : float
"""
if 'pos' in labelprops.keys():
pos = labelprops['pos']
del labelprops['pos']
if pos is None: pos = 0.5
labelprops = {**labelprops_default, **labelprops}
if 'rotate' in labelprops.keys():
rotate = labelprops['rotate']
del labelprops['rotate']
ax,x1,y1,x2,y2 = self._line_pos(line, pos=pos, xcoor=xcoor, ycoor=ycoor)
if isinstance(x1,Quantity): x1=x1.magnitude
if isinstance(y1,Quantity): y1=y1.magnitude
if isinstance(x2,Quantity): x2=x2.magnitude
if isinstance(y2,Quantity): y2=y2.magnitude
Delta_x = x2-x1
Delta_y = y2-y1
if x1>x2:
            x1,y1,x2,y2 = x2,y2,x1,y1
swap=True
else: swap=False
slp1 = ax.transData.transform_point((x1,y1))
slp2 = ax.transData.transform_point((x2,y2))
rise = (slp2[1]-slp1[1])
if isinstance(rise, Quantity): rise = rise.magnitude
run = (slp2[0]-slp1[0])
if swap: rise=-rise
if isinstance(run, Quantity): run = run.magnitude
slope_degrees = np.degrees(np.arctan2(rise,run))
if 'offset' in labelprops.keys():
offset = labelprops['offset']
del labelprops['offset']
xytext = [0,0]
if 'va' in labelprops.keys():
labelprops['verticalalignment'] = labelprops['va']
if 'ha' in labelprops.keys():
labelprops['horizontalalignment'] = labelprops['ha']
va = labelprops['verticalalignment']
ha = labelprops['horizontalalignment']
if va=='top':
offset_angle = slope_degrees - 90
elif va=='bottom':
offset_angle = slope_degrees + 90
elif ha=='right':
offset_angle = slope_degrees + 180
else:
offset_angle = slope_degrees
xytext[0] = offset*np.cos(np.deg2rad(offset_angle))
xytext[1] = offset*np.sin(np.deg2rad(offset_angle))
if verbose:
            print(f'label: {label}\n coord: ({x1},{y1}),({x2},{y2})\n angle: {slope_degrees}\n offset angle: {offset_angle}\n offset={xytext}')
if not rotate: slope_degrees=0
text = ax.annotate(label,
xy=(x1,y1),
textcoords="offset points",
xytext=xytext,
rotation=slope_degrees,
**labelprops
)
return text
def plot_saturation_lines(
self,
color=[0.4, 0.4, 0.4, 0.4],
linewidth=0.5,
n_points=500,
verbose=False,
**kwargs,
):
if self.y_symb in ["p", "P"]:
# saturated liquid p y-axis
self.plot_iso_line(
"x",
0,
y_range=[self.p_critical, self.p_triple],
n_points=n_points,
color=color,
linewidth=linewidth,
verbose=verbose,
)
# saturated vapor p y-axis
self.plot_iso_line(
"x",
1,
y_range=[self.p_critical, self.p_triple],
n_points=n_points,
color=color,
linewidth=linewidth,
verbose=verbose,
)
elif self.y_symb == "T":
# saturated liquid for T y-axis
self.plot_iso_line(
"x",
0,
y_range=[self.T_critical, self.T_triple],
n_points=n_points,
color=color,
linewidth=linewidth,
verbose=verbose,
)
# saturated vapor for T y-axis
self.plot_iso_line(
"x",
1,
y_range=[self.T_critical, self.T_triple],
n_points=n_points,
color=color,
linewidth=linewidth,
verbose=verbose,
)
else:
# saturated liquid for y-axis not T or p
self.plot_iso_line(
"x",
0,
alt_symb="T",
alt_range=[self.T_triple.to("K"), self.T_critical.to("K")],
n_points=n_points,
color=color,
linewidth=linewidth,
verbose=verbose,
)
# saturated vapor for y-axis not T or p
self.plot_iso_line(
"x",
1,
alt_symb="T",
alt_range=[self.T_critical.to("K"), self.T_triple.to("K")],
n_points=n_points,
color=color,
linewidth=linewidth,
verbose=verbose,
)
# Set x-axis to log scale if it is specific volume
if self.x_symb in ["V", "v"]:
self.ax.set_xscale("log")
def _plot_iso_wrapper(
self,
iso_symb=None,
iso_value=None,
x_range=None,
y_range=None,
preserve_limits=True,
n_points=n_points_default,
linewidth=0.5,
linestyle=(0, (5,5)),
color='gray',
verbose=False,
pos=0.9,
xcoor=None,
ycoor=None,
arrow=False,
arrowprops={},
label=None,
labelprops={},
**kwargs,
):
        verbose = kwargs.pop('verbose', verbose)
        if label is None:
            try:
                label = f'${iso_value:~L}$'
            except Exception:
                label = f'${iso_value}$'
kwargs = dict(linestyle=linestyle, linewidth=linewidth, color=color, **kwargs)
orig_xlim = self.ax.get_xlim()
orig_ylim = self.ax.get_ylim()
xmin = Quantity(orig_xlim[0], self.x_units)
xmax = Quantity(orig_xlim[1], self.x_units)
ymin = Quantity(orig_ylim[0], self.y_units)
ymax = Quantity(orig_ylim[1], self.y_units)
if self.x_symb == iso_symb:
isoline = self._plot_straight_line(
x1=iso_value,
x2=iso_value,
y1=ymin,
y2=ymax,
**kwargs,
)
elif self.y_symb == iso_symb:
isoline = self._plot_straight_line(
y1=iso_value,
y2=iso_value,
x1=xmin,
x2=xmax,
**kwargs,
)
else:
try:
if verbose: print('Checking for phase change along iso line')
prop_dict = {iso_symb:iso_value}
x_f = getattr(self.props, self.x_symb)(**prop_dict, x=0).to(self.x_units)
x_g = getattr(self.props, self.x_symb)(**prop_dict, x=1).to(self.x_units)
if x_f > xmin:
isoline = []
isoline.append(self.plot_iso_line(
iso_symb,
iso_value,
x_range=[xmin, x_f],
**kwargs,
)[0])
if x_g > xmin:
isoline.append(self.plot_iso_line(
iso_symb,
iso_value,
x_range=[x_f,x_g],
**kwargs,
)[0])
if x_g < xmax:
isoline.append(self.plot_iso_line(
iso_symb,
iso_value,
x_range=[x_g,xmax],
**kwargs
)[0])
except Exception as e:
if verbose:
                    print(f'Error: {e}')
try:
if verbose: print('Attempting to plot across x-axis')
isoline=self.plot_iso_line(
iso_symb,
iso_value,
x_range = [Quantity(i,self.x_units) for i in orig_xlim],
**kwargs,
)
except Exception as e:
if verbose:
                        print(f'Error: {e}')
                        print('Attempting to plot across y-axis')
isoline=self.plot_iso_line(
iso_symb,
iso_value,
y_range = [Quantity(i,self.y_units) for i in orig_ylim],
**kwargs,
)
if preserve_limits:
self.ax.set_xlim(orig_xlim)
self.ax.set_ylim(orig_ylim)
isoline = self._trim_line2D_data(isoline,[xmin,xmax,ymin,ymax])
if arrow:
self.add_arrow(isoline,pos=pos,xcoor=xcoor,ycoor=ycoor,arrowprops=arrowprops,**kwargs)
if label:
self.label_line(isoline,label=label,pos=pos,xcoor=xcoor,ycoor=ycoor,labelprops=labelprops,**kwargs)
return isoline
def plot_isobar(self,p=None,**kwargs):
return self._plot_iso_wrapper(iso_symb="p",iso_value=p,**kwargs)
def plot_isotherm(self,T=None,**kwargs):
return self._plot_iso_wrapper(iso_symb="T",iso_value=T,**kwargs)
def plot_isochor(self,v=None,**kwargs):
return self._plot_iso_wrapper(iso_symb="v",iso_value=v,**kwargs)
def plot_isenthalp(self,h=None,**kwargs):
return self._plot_iso_wrapper(iso_symb="h",iso_value=h,**kwargs)
def plot_isentrop(self,s=None,**kwargs):
return self._plot_iso_wrapper(iso_symb="s",iso_value=s,**kwargs)
def plot_triple_point(self, label="TP", label_loc="east", **kwargs):
if self.x_symb == "T":
x = self.T_triple
elif self.x_symb == "p":
x = self.p_triple
else:
x = getattr(self.props, self.x_symb)(T=self.T_triple, x=0)
if self.y_symb == "T":
y = self.T_triple
elif self.y_symb == "p":
y = self.p_triple
else:
y = getattr(self.props, self.y_symb)(T=self.T_triple, x=0)
self.plot_point(x, y, label=label, label_loc=label_loc, **kwargs)
def plot_critical_point(self, label="CP", label_loc="northwest", **kwargs):
if self.x_symb == "T":
x = self.T_critical
elif self.x_symb == "p":
x = self.p_critical
else:
x = getattr(self.props, self.x_symb)(T=self.T_critical, x=0)
if self.y_symb == "T":
y = self.T_critical
elif self.y_symb == "p":
y = self.p_critical
else:
y = getattr(self.props, self.y_symb)(T=self.T_critical, x=0)
self.plot_point(x, y, label=label, label_loc=label_loc, **kwargs)
def show(self):
clear_output()
mpldisplay(self.fig)
``` |
{
"source": "JohnFodero/Udacity_SDC_AdvancedLaneLines_P2",
"score": 3
} |
#### File: JohnFodero/Udacity_SDC_AdvancedLaneLines_P2/Camera.py
```python
import numpy as np
import cv2
import glob
import matplotlib.pyplot as plt
from Line import Line
class Camera():
def __init__(self):
self.M = None
self.M_inv = None
self.img_size = None
self.ret = None
self.mtx = None
self.dist = None
self.rvecs = None
self.tvecs = None
self.M = None
self.Minv = None
def calibrate(self, images, chessboard=(9,6)):
img = cv2.imread(images[0])
self.img_size = (img.shape[1], img.shape[0])
objp = np.zeros((chessboard[1]*chessboard[0],3), np.float32)
objp[:,:2] = np.mgrid[0:chessboard[0], 0:chessboard[1]].T.reshape(-1,2)
objpoints = []
imgpoints = []
out_imgs = []
for fname in images:
img = cv2.imread(fname)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, corners = cv2.findChessboardCorners(gray, chessboard, None)
if ret == True:
objpoints.append(objp)
imgpoints.append(corners)
# Draw and display the corners
#cv2.drawChessboardCorners(img, chessboard, corners, ret)
out_imgs.append(img)
self.ret, self.mtx, self.dist, self.rvecs, self.tvecs = cv2.calibrateCamera(objpoints, imgpoints, self.img_size,None,None)
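    # Example (hypothetical usage; assumes chessboard calibration images are
    # stored under ./camera_cal):
    #     cam = Camera()
    #     cam.calibrate(glob.glob('camera_cal/calibration*.jpg'))
    #     undistorted = cam.undistort_image(cv2.imread('test_images/test1.jpg'))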
def calibrate_perspective(self, img):
horizon_px = 470
mid = int(self.img_size[0]/2)
tl = [mid - 80, horizon_px]
tr = [mid + 80, horizon_px]
bl = [200, self.img_size[1]-25]
br = [1100, self.img_size[1]-25]
src = np.float32([tl, tr, bl, br])
color = (0, 255, 0)
thickness = 9
oimg = np.array(img, copy=True)
oimg = self.undistort_image(oimg)
cv2.line(oimg, tuple(tl), tuple(tr), color, thickness)
cv2.line(oimg, tuple(tr), tuple(br), color, thickness)
cv2.line(oimg, tuple(br), tuple(bl), color, thickness)
cv2.line(oimg, tuple(bl), tuple(tl), color, thickness)
dst = np.float32([[200,0],[self.img_size[0] - 200,0],[200,self.img_size[1]],[self.img_size[0] - 200,self.img_size[1]]])
self.M = cv2.getPerspectiveTransform(src, dst)
self.Minv = cv2.getPerspectiveTransform(dst, src)
warped = self.transform_image(img)
        # cv2.line expects integer pixel coordinates
        dst_int = dst.astype(int)
        cv2.line(warped, tuple(dst_int[0]), tuple(dst_int[1]), color, thickness)
        cv2.line(warped, tuple(dst_int[1]), tuple(dst_int[3]), color, thickness)
        cv2.line(warped, tuple(dst_int[3]), tuple(dst_int[2]), color, thickness)
        cv2.line(warped, tuple(dst_int[2]), tuple(dst_int[0]), color, thickness)
return oimg, warped
def dir_threshold(self, img, sobel_kernel=3, thresh=(0, np.pi/2)):
# Apply the following steps to img
# 1) Convert to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
# 2) Take the gradient in x and y separately
sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
# 3) Take the absolute value of the x and y gradients
abs_sobelx = np.absolute(sobelx)
abs_sobely = np.absolute(sobely)
# 4) Use np.arctan2(abs_sobely, abs_sobelx) to calculate the direction of the gradient
grad_dir = np.arctan2(abs_sobely, abs_sobelx)
# 5) Create a binary mask where direction thresholds are met
binary = np.zeros_like(grad_dir)
binary[(grad_dir >= thresh[0]) & (grad_dir <= thresh[1])] = 1
# 6) Return this mask as your binary_output image
return binary
def mag_thresh(self, img, sobel_kernel=3, mag_thresh=(0, 255)):
# Apply the following steps to img
# 1) Convert to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
# 2) Take the gradient in x and y separately
sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
# 3) Calculate the magnitude
sobelxy = np.sqrt(np.square(sobelx) + np.square(sobely))
# 4) Scale to 8-bit (0 - 255) and convert to type = np.uint8
scaled_sobel = np.uint8(255*sobelxy/np.max(sobelxy))
# 5) Create a binary mask where mag thresholds are met
binary = np.zeros_like(scaled_sobel)
binary[(scaled_sobel >= mag_thresh[0]) & (scaled_sobel <= mag_thresh[1])] = 1
# 6) Return this mask as your binary_output image
return binary
def hls_select(self, img, thresh=(0, 255)):
# 1) Convert to HLS color space
        hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
H = hls[:,:,0]
L = hls[:,:,1]
S = hls[:,:,2]
# 2) Apply a threshold to the S channel
binary = np.zeros_like(S)
binary[(S > thresh[0]) & (S <= thresh[1])] = 1
# 3) Return a binary image of threshold result
return binary
def pipeline(self, img, s_thresh=(90, 120), sx_thresh=(20, 100)):
img = np.copy(img)
# Convert to HLS color space and separate the V channel
hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
h_channel = hls[:,:,0]
l_channel = hls[:,:,1]
s_channel = hls[:,:,2]
# Sobel x
sobelx = cv2.Sobel(l_channel, cv2.CV_64F, 1, 0) # Take the derivative in x
abs_sobelx = np.absolute(sobelx) # Absolute x derivative to accentuate lines away from horizontal
scaled_sobel = np.uint8(255*abs_sobelx/np.max(abs_sobelx))
# Threshold x gradient
sxbinary = np.zeros_like(scaled_sobel)
sxbinary[(scaled_sobel >= sx_thresh[0]) & (scaled_sobel <= sx_thresh[1])] = 1
# Threshold color channel
#s_channel = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
#s_binary = np.zeros_like(s_channel)
#s_binary[(s_channel >= s_thresh[0]) & (s_channel <= s_thresh[1])] = 1
# Stack each channel
s_binary = self.pipeline2(img)
color_binary = np.dstack(( np.zeros_like(sxbinary), sxbinary, s_binary)) * 255
return color_binary.astype('uint8'), s_binary, sxbinary
def thresh(self, img, thresh_min, thresh_max):
ret = np.zeros_like(img)
ret[(img >= thresh_min) & (img <= thresh_max)] = 1
return ret
def pipeline2(self, img):
b = np.zeros((img.shape[0],img.shape[1]))
hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
H = hsv[:,:,0]
S = hsv[:,:,1]
V = hsv[:,:,2]
R = img[:,:,0]
G = img[:,:,1]
B = img[:,:,2]
t_yellow_H = self.thresh(H,10,30)
t_yellow_S = self.thresh(S,50,255)
t_yellow_V = self.thresh(V,150,255)
t_white_R = self.thresh(R,225,255)
t_white_V = self.thresh(V,230,255)
#b[(t_yellow_H==1) & (t_yellow_S==1) & (t_yellow_V==1)] = 1
b[(t_white_R==1)|(t_white_V==1)] = 1
return b
def undistort_image(self, img):
return cv2.undistort(img, self.mtx, self.dist, None, self.mtx)
def transform_image(self, img):
return cv2.warpPerspective(img, self.M, self.img_size, flags=cv2.INTER_LINEAR)
def get_top_down(self, img):
return self.transform_image(self.undistort_image(img))
def display_lane(self, original_img, left_lane, right_lane):
binary_img = cv2.cvtColor(original_img, cv2.COLOR_RGB2GRAY)
# Create an image to draw the lines on
warp_zero = np.zeros_like(binary_img).astype(np.uint8)
color_warp = np.dstack((warp_zero, warp_zero, warp_zero))
# Recast the x and y points into usable format for cv2.fillPoly()
img_shape = original_img.shape
ploty = np.linspace(0, img_shape[0]-1, img_shape[0])
pts_left = np.array([np.transpose(np.vstack([left_lane.bestx, ploty]))])
pts_right = np.array([np.flipud(np.transpose(np.vstack([right_lane.bestx, ploty])))])
pts = np.hstack((pts_left, pts_right))
# Draw the lane onto the warped blank image
cv2.fillPoly(color_warp, np.int_([pts]), (0,255, 0))
# Warp the blank back to original image space using inverse perspective matrix (Minv)
newwarp = cv2.warpPerspective(color_warp, self.Minv, (original_img.shape[1], original_img.shape[0]))
# Combine the result with the original image
result = cv2.addWeighted(original_img, 1, newwarp, 0.3, 0)
return result
def show_lane_data(self, img, left_curvature, right_curvature, position):
font = cv2.FONT_HERSHEY_SIMPLEX
spacing = 60
start = 60
scale = 2
oimg = np.array(img, copy=True)
cv2.putText(oimg,'Left Curvature = ' + str(left_curvature),(50,start), font, scale,(255,255,255),2)
cv2.putText(oimg,'Right Curvature = ' + str(right_curvature),(50,start+spacing), font, scale,(255,255,255),2)
cv2.putText(oimg,'Lane Center Offset = ' + str(round(position, 3)),(50,start+(2*spacing)), font, scale,(255,255,255),2)
return oimg
```
#### File: JohnFodero/Udacity_SDC_AdvancedLaneLines_P2/LaneFinder.py
```python
import numpy as np
import cv2
import glob
import matplotlib.pyplot as plt
from Line import Line
from Camera import Camera
class LaneFinder():
def __init__(self, cal_files=[]):
self.camera = Camera()
self.camera.calibrate(cal_files)
temp = cv2.imread(cal_files[0])
_, _ = self.camera.calibrate_perspective(temp)
self.img_shape = temp.shape
self.left_lane = Line(self.img_shape)
self.right_lane = Line(self.img_shape)
def start_lane_find(self, binary_warped):
# Take a histogram of the bottom half of the image
histogram = np.sum(binary_warped[binary_warped.shape[0]//2:,:], axis=0)
# Create an output image to draw on and visualize the result
out_img = np.dstack((binary_warped, binary_warped, binary_warped))
# Find the peak of the left and right halves of the histogram
# These will be the starting point for the left and right lines
        midpoint = int(histogram.shape[0]//2)
leftx_base = np.argmax(histogram[:midpoint])
rightx_base = np.argmax(histogram[midpoint:]) + midpoint
# HYPERPARAMETERS
# Choose the number of sliding windows
nwindows = 9
# Set the width of the windows +/- margin
margin = 100
# Set minimum number of pixels found to recenter window
minpix = 50
# Set height of windows - based on nwindows above and image shape
        window_height = int(binary_warped.shape[0]//nwindows)
# Identify the x and y positions of all nonzero pixels in the image
nonzero = binary_warped.nonzero()
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
# Current positions to be updated later for each window in nwindows
leftx_current = leftx_base
rightx_current = rightx_base
# Create empty lists to receive left and right lane pixel indices
left_lane_inds = []
right_lane_inds = []
# Step through the windows one by one
for window in range(nwindows):
# Identify window boundaries in x and y (and right and left)
win_y_low = binary_warped.shape[0] - (window+1)*window_height
win_y_high = binary_warped.shape[0] - window*window_height
### TO-DO: Find the four below boundaries of the window ###
win_xleft_low = leftx_current - margin
win_xleft_high = leftx_current + margin
win_xright_low = rightx_current - margin
win_xright_high = rightx_current + margin
# Draw the windows on the visualization image
cv2.rectangle(out_img,(win_xleft_low,win_y_low),
(win_xleft_high,win_y_high),(0,255,0), 2)
cv2.rectangle(out_img,(win_xright_low,win_y_low),
(win_xright_high,win_y_high),(0,255,0), 2)
### TO-DO: Identify the nonzero pixels in x and y within the window ###
good_left_inds = ((nonzerox < win_xleft_high) & (nonzerox > win_xleft_low)
& (nonzeroy < win_y_high) & (nonzeroy > win_y_low)).nonzero()[0]
good_right_inds = ((nonzerox < win_xright_high) & (nonzerox > win_xright_low)
& (nonzeroy < win_y_high) & (nonzeroy > win_y_low)).nonzero()[0]
# Append these indices to the lists
left_lane_inds.append(good_left_inds)
right_lane_inds.append(good_right_inds)
### TO-DO: If you found > minpix pixels, recenter next window ###
### (`right` or `leftx_current`) on their mean position ###
if good_left_inds.shape[0] > minpix:
histogram = np.sum(binary_warped[(nwindows-(window+1))*window_height:(nwindows-window)*window_height, win_xleft_low:win_xleft_high], axis=0)
try:
leftx_current = np.argmax(histogram) + win_xleft_low
except ValueError as e:
print(e)
self.left_lane.detected = False
return binary_warped
if good_right_inds.shape[0] > minpix:
histogram = np.sum(binary_warped[(nwindows-(window+1))*window_height:(nwindows-window)*window_height, win_xright_low:win_xright_high], axis=0)
try:
rightx_current = np.argmax(histogram) + win_xright_low
except ValueError as e:
print(e)
self.right_lane.detected = False
return
# Concatenate the arrays of indices (previously was a list of lists of pixels)
try:
left_lane_inds = np.concatenate(left_lane_inds)
right_lane_inds = np.concatenate(right_lane_inds)
except ValueError as e:
# Avoids an error if the above is not implemented fully
print(e)
# Extract left and right line pixel positions
leftx = nonzerox[left_lane_inds]
lefty = nonzeroy[left_lane_inds]
rightx = nonzerox[right_lane_inds]
righty = nonzeroy[right_lane_inds]
left_fitx, right_fitx, ploty = self.fit_poly(binary_warped.shape, leftx, lefty, rightx, righty)
# Color detected pixels
out_img[lefty, leftx] = [255, 0, 0]
out_img[righty, rightx] = [0, 0, 255]
margin = 5
left_line_window1 = np.array([np.transpose(np.vstack([left_fitx-margin, ploty]))])
left_line_window2 = np.array([np.flipud(np.transpose(np.vstack([left_fitx+margin,
ploty])))])
left_line_pts = np.hstack((left_line_window1, left_line_window2))
right_line_window1 = np.array([np.transpose(np.vstack([right_fitx-margin, ploty]))])
right_line_window2 = np.array([np.flipud(np.transpose(np.vstack([right_fitx+margin,
ploty])))])
right_line_pts = np.hstack((right_line_window1, right_line_window2))
# Draw the lane onto the warped blank image
cv2.fillPoly(out_img, np.int_([left_line_pts]), (255,255, 0))
cv2.fillPoly(out_img, np.int_([right_line_pts]), (255,255, 0))
return out_img
def search_around_poly(self, binary_warped):
margin = 100
# Grab activated pixels
nonzero = binary_warped.nonzero()
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
# USING PREV fit not AVG fit for search
left_fit = self.left_lane.current_fit
right_fit = self.right_lane.current_fit
left_fit_old = left_fit[0]*nonzeroy**2 + left_fit[1]*nonzeroy + left_fit[2]
right_fit_old = right_fit[0]*nonzeroy**2 + right_fit[1]*nonzeroy + right_fit[2]
left_lane_inds = ((nonzerox < left_fit_old+margin) & (nonzerox > left_fit_old-margin)).nonzero()[0]
right_lane_inds = ((nonzerox < right_fit_old+margin) & (nonzerox > right_fit_old-margin)).nonzero()[0]
# Again, extract left and right line pixel positions
leftx = nonzerox[left_lane_inds]
lefty = nonzeroy[left_lane_inds]
rightx = nonzerox[right_lane_inds]
righty = nonzeroy[right_lane_inds]
# Fit new polynomials
left_fitx, right_fitx, ploty = self.fit_poly(binary_warped.shape, leftx, lefty, rightx, righty)
## Visualization ##
# Create an image to draw on and an image to show the selection window
out_img = np.dstack((binary_warped, binary_warped, binary_warped))*255
window_img = np.zeros_like(out_img)
# Color in left and right line pixels
out_img[nonzeroy[left_lane_inds], nonzerox[left_lane_inds]] = [255, 0, 0]
out_img[nonzeroy[right_lane_inds], nonzerox[right_lane_inds]] = [0, 0, 255]
# Generate a polygon to illustrate the search window area
# And recast the x and y points into usable format for cv2.fillPoly()
left_line_window1 = np.array([np.transpose(np.vstack([left_fitx-margin, ploty]))])
left_line_window2 = np.array([np.flipud(np.transpose(np.vstack([left_fitx+margin,
ploty])))])
left_line_pts = np.hstack((left_line_window1, left_line_window2))
right_line_window1 = np.array([np.transpose(np.vstack([right_fitx-margin, ploty]))])
right_line_window2 = np.array([np.flipud(np.transpose(np.vstack([right_fitx+margin,
ploty])))])
right_line_pts = np.hstack((right_line_window1, right_line_window2))
# Draw the lane onto the warped blank image
cv2.fillPoly(window_img, np.int_([left_line_pts]), (255,255, 0))
cv2.fillPoly(window_img, np.int_([right_line_pts]), (255,255, 0))
result = cv2.addWeighted(out_img, 1, window_img, 0.3, 0)
# Plot the polynomial lines onto the image
#plt.plot(left_fitx, ploty, color='yellow')
#plt.plot(right_fitx, ploty, color='yellow')
## End visualization steps ##
return result
def fit_poly(self, img_shape, leftx, lefty, rightx, righty):
### Fit a second order polynomial to each with np.polyfit() ###
left_fit = np.polyfit(lefty, leftx, deg=2)
right_fit = np.polyfit(righty, rightx, deg=2)
# Generate x and y values for plotting
ploty = np.linspace(0, img_shape[0]-1, img_shape[0])
### Calc both polynomials using ploty, left_fit and right_fit ###
try:
left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]
right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]
except TypeError:
            # Avoids an error if `left_fit` and `right_fit` are still none or incorrect
print('The function failed to fit a line!')
left_fitx = 1*ploty**2 + 1*ploty
right_fitx = 1*ploty**2 + 1*ploty
self.left_lane.detected = False
self.right_lane.detected = False
else:
self.left_lane.detected = True
self.right_lane.detected = True
self.left_lane.update_fit(left_fitx, left_fit)
self.right_lane.update_fit(right_fitx, right_fit)
return left_fitx, right_fitx, ploty
def get_lines(self, img):
binary_warped = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
# Find our lane pixels first
if not self.left_lane.detected or not self.right_lane.detected:
print('starting lane find')
out_img = self.start_lane_find(binary_warped)
else:
out_img = self.search_around_poly(binary_warped)
return out_img
def get_vehicle_lane_position(self, px_to_m=3.7/700):
center = self.left_lane.bestx[-1] + ((self.right_lane.bestx[-1] - self.left_lane.bestx[-1])/2)
veh_pos = self.img_shape[1]/2
return px_to_m*(veh_pos - center)
def reset_lines(self):
self.left_lane = Line(self.img_shape)
self.right_lane = Line(self.img_shape)
def process_image(self, image):
self.top_down = self.camera.get_top_down(image)
self.binary, self.s, self.sx = self.camera.pipeline(self.top_down)
self.binary_lines = self.get_lines(self.binary)
left_curv = self.left_lane.update_radius()
right_curv = self.right_lane.update_radius()
position = self.get_vehicle_lane_position()
self.annotated = self.camera.show_lane_data(self.camera.display_lane(image, self.left_lane, self.right_lane), left_curv, right_curv, position)
return self.annotated
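    # Example (hypothetical usage; assumes the same calibration images and a
    # road image on disk):
    #     finder = LaneFinder(cal_files=glob.glob('camera_cal/calibration*.jpg'))
    #     annotated = finder.process_image(plt.imread('test_images/test1.jpg'))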
``` |
{
"source": "johnfoo/cowrie",
"score": 2
} |
#### File: cowrie/commands/ls.py
```python
from __future__ import division, absolute_import
import stat
import getopt
import time
from cowrie.core.honeypot import HoneyPotCommand
from cowrie.core.fs import *
from cowrie.core.pwd import Passwd, Group
commands = {}
class command_ls(HoneyPotCommand):
"""
"""
def uid2name(self, uid):
"""
"""
try:
return Passwd(self.protocol.cfg).getpwuid(uid)["pw_name"]
except:
return str(uid)
def gid2name(self, gid):
"""
"""
try:
return Group(self.protocol.cfg).getgrgid(gid)["gr_name"]
except:
return str(gid)
def call(self):
"""
"""
path = self.protocol.cwd
paths = []
self.showHidden = False
self.showDirectories = False
func = self.do_ls_normal
# Parse options or display no files
try:
opts, args = getopt.gnu_getopt(self.args, '1@ABCFGHLOPRSTUWabcdefghiklmnopqrstuvwx', ['help', 'version', 'param'])
except getopt.GetoptError as err:
self.write("ls: {}\n".format(err))
self.write("Try 'ls --help' for more information.\n")
return
for x, a in opts:
            if x == '-l':
                func = self.do_ls_l
            if x == '-a':
                self.showHidden = True
            if x == '-d':
                self.showDirectories = True
for arg in args:
paths.append(self.protocol.fs.resolve_path(arg, self.protocol.cwd))
if not paths:
func(path)
else:
for path in paths:
func(path)
def do_ls_normal(self, path):
"""
"""
try:
if self.protocol.fs.isdir(path) and self.showDirectories == False:
files = self.protocol.fs.get_path(path)[:]
if self.showHidden:
dot = self.protocol.fs.getfile(path)[:]
dot[A_NAME] = '.'
files.append(dot)
# FIXME: should grab dotdot off the parent instead
dotdot = self.protocol.fs.getfile(path)[:]
dotdot[A_NAME] = '..'
files.append(dotdot)
else:
files = [x for x in files if not x[A_NAME].startswith('.')]
files.sort()
else:
files = (self.protocol.fs.getfile(path)[:],)
except:
self.write(
'ls: cannot access %s: No such file or directory\n' % (path,))
return
l = [x[A_NAME] for x in files]
if not l:
return
count = 0
maxlen = max([len(x) for x in l])
try:
wincols = self.protocol.user.windowSize[1]
except AttributeError:
wincols = 80
perline = int(wincols / (maxlen + 1))
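        # e.g. an 80-column terminal and a longest entry of 15 characters give
        # int(80 / 16) == 5 names per output line.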
for f in l:
if count == perline:
count = 0
self.write('\n')
self.write(f.ljust(maxlen + 1))
count += 1
self.write('\n')
def do_ls_l(self, path):
"""
"""
try:
if self.protocol.fs.isdir(path) and self.showDirectories == False:
files = self.protocol.fs.get_path(path)[:]
if self.showHidden:
dot = self.protocol.fs.getfile(path)[:]
dot[A_NAME] = '.'
files.append(dot)
# FIXME: should grab dotdot off the parent instead
dotdot = self.protocol.fs.getfile(path)[:]
dotdot[A_NAME] = '..'
files.append(dotdot)
else:
files = [x for x in files if not x[A_NAME].startswith('.')]
files.sort()
else:
files = (self.protocol.fs.getfile(path)[:],)
except:
self.write(
'ls: cannot access %s: No such file or directory\n' % (path,))
return
largest = 0
if len(files):
largest = max([x[A_SIZE] for x in files])
for file in files:
if file[A_NAME].startswith('.') and not self.showHidden:
continue
perms = ['-'] * 10
if file[A_MODE] & stat.S_IRUSR: perms[1] = 'r'
if file[A_MODE] & stat.S_IWUSR: perms[2] = 'w'
if file[A_MODE] & stat.S_IXUSR: perms[3] = 'x'
if file[A_MODE] & stat.S_ISUID: perms[3] = 'S'
if file[A_MODE] & stat.S_IXUSR and file[A_MODE] & stat.S_ISUID: perms[3] = 's'
if file[A_MODE] & stat.S_IRGRP: perms[4] = 'r'
if file[A_MODE] & stat.S_IWGRP: perms[5] = 'w'
if file[A_MODE] & stat.S_IXGRP: perms[6] = 'x'
if file[A_MODE] & stat.S_ISGID: perms[6] = 'S'
if file[A_MODE] & stat.S_IXGRP and file[A_MODE] & stat.S_ISGID: perms[6] = 's'
if file[A_MODE] & stat.S_IROTH: perms[7] = 'r'
if file[A_MODE] & stat.S_IWOTH: perms[8] = 'w'
if file[A_MODE] & stat.S_IXOTH: perms[9] = 'x'
if file[A_MODE] & stat.S_ISVTX: perms[9] = 'T'
if file[A_MODE] & stat.S_IXOTH and file[A_MODE] & stat.S_ISVTX: perms[9] = 't'
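            # e.g. a regular file with mode 0o755 renders as -rwxr-xr-x; the
            # setuid/setgid/sticky bits map to s/s/t (or S/S/T when the
            # corresponding execute bit is absent).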
linktarget = ''
if file[A_TYPE] == T_DIR:
perms[0] = 'd'
elif file[A_TYPE] == T_LINK:
perms[0] = 'l'
linktarget = ' -> %s' % (file[A_TARGET],)
perms = ''.join(perms)
ctime = time.localtime(file[A_CTIME])
l = '%s 1 %s %s %s %s %s%s' % \
(perms,
self.uid2name(file[A_UID]),
self.gid2name(file[A_GID]),
str(file[A_SIZE]).rjust(len(str(largest))),
time.strftime('%Y-%m-%d %H:%M', ctime),
file[A_NAME],
linktarget)
self.write(l+'\n')
commands['/bin/ls'] = command_ls
commands['/bin/dir'] = command_ls
```
#### File: cowrie/commands/wget.py
```python
from __future__ import division, absolute_import
import time
import re
import os
import getopt
import hashlib
from OpenSSL import SSL
from twisted.web import client
from twisted.internet import reactor, ssl
from twisted.python import log, compat
from cowrie.core.honeypot import HoneyPotCommand
from cowrie.core.fs import *
"""
"""
commands = {}
def tdiff(seconds):
"""
"""
t = seconds
days = int(t / (24 * 60 * 60))
t -= (days * 24 * 60 * 60)
hours = int(t / (60 * 60))
t -= (hours * 60 * 60)
minutes = int(t / 60)
t -= (minutes * 60)
s = '%ds' % (int(t),)
if minutes >= 1: s = '%dm %s' % (minutes, s)
if hours >= 1: s = '%dh %s' % (hours, s)
if days >= 1: s = '%dd %s' % (days, s)
return s
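# Example (sketch): tdiff(90061) -> '1d 1h 1m 1s'
# (90061 s = 1 day + 1 hour + 1 minute + 1 second)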
def sizeof_fmt(num):
"""
"""
for x in ['bytes','K','M','G','T']:
if num < 1024.0:
return "%d%s" % (num, x)
num /= 1024.0
# <NAME> @ http://code.activestate.com/recipes/498181/
def splitthousands( s, sep=','):
"""
"""
if len(s) <= 3: return s
return splitthousands(s[:-3], sep) + sep + s[-3:]
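# Example (sketch): splitthousands('1234567') -> '1,234,567'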
class command_wget(HoneyPotCommand):
"""
"""
def start(self):
"""
"""
try:
optlist, args = getopt.getopt(self.args, 'cqO:P:', 'header=')
except getopt.GetoptError as err:
self.write('Unrecognized option\n')
self.exit()
return
if len(args):
url = args[0].strip()
else:
self.write('wget: missing URL\n')
self.write('Usage: wget [OPTION]... [URL]...\n\n')
self.write('Try `wget --help\' for more options.\n')
self.exit()
return
outfile = None
self.quiet = False
for opt in optlist:
if opt[0] == '-O':
outfile = opt[1]
if opt[0] == '-q':
self.quiet = True
if '://' not in url:
url = 'http://%s' % url
urldata = compat.urllib_parse.urlparse(url)
url = bytes(url)
if outfile is None:
outfile = urldata.path.split('/')[-1]
if not len(outfile.strip()) or not urldata.path.count('/'):
outfile = 'index.html'
outfile = self.fs.resolve_path(outfile, self.protocol.cwd)
path = os.path.dirname(outfile)
if not path or \
not self.fs.exists(path) or \
not self.fs.isdir(path):
self.write('wget: %s: Cannot open: No such file or directory\n' % \
outfile)
self.exit()
return
self.url = url
self.limit_size = 0
cfg = self.protocol.cfg
if cfg.has_option('honeypot', 'download_limit_size'):
self.limit_size = int(cfg.get('honeypot', 'download_limit_size'))
self.download_path = cfg.get('honeypot', 'download_path')
if not hasattr(self, 'safeoutfile'):
tmp_fname = '%s_%s_%s_%s' % \
(time.strftime('%Y%m%d%H%M%S'),
self.protocol.getProtoTransport().transportId,
self.protocol.terminal.transport.session.id,
re.sub('[^A-Za-z0-9]', '_', url))
self.safeoutfile = os.path.join(self.download_path, tmp_fname)
self.deferred = self.download(url, outfile, self.safeoutfile)
if self.deferred:
self.deferred.addCallback(self.success, outfile)
self.deferred.addErrback(self.error, url)
def download(self, url, fakeoutfile, outputfile, *args, **kwargs):
"""
"""
try:
parsed = compat.urllib_parse.urlparse(url)
scheme = parsed.scheme
host = parsed.hostname
port = parsed.port or (443 if scheme == 'https' else 80)
path = parsed.path or '/'
if scheme != 'http' and scheme != 'https':
raise NotImplementedError
if not host:
self.exit()
return None
except:
self.write('%s: Unsupported scheme.\n' % (url,))
self.exit()
return None
if self.quiet == False:
self.write('--%s-- %s\n' % (time.strftime('%Y-%m-%d %H:%M:%S'), url))
self.write('Connecting to %s:%d... connected.\n' % (host, port))
self.write('HTTP request sent, awaiting response... ')
factory = HTTPProgressDownloader(
self, fakeoutfile, url, outputfile, *args, **kwargs)
out_addr = None
if self.protocol.cfg.has_option('honeypot', 'out_addr'):
out_addr = (self.protocol.cfg.get('honeypot', 'out_addr'), 0)
if scheme == 'https':
contextFactory = ssl.ClientContextFactory()
contextFactory.method = SSL.SSLv23_METHOD
self.connection = reactor.connectSSL(
host, port, factory, contextFactory, bindAddress=out_addr)
elif scheme == 'http':
self.connection = reactor.connectTCP(
host, port, factory, bindAddress=out_addr)
else:
raise NotImplementedError
return factory.deferred
def handle_CTRL_C(self):
self.write('^C\n')
self.connection.transport.loseConnection()
def success(self, data, outfile):
"""
"""
if not os.path.isfile(self.safeoutfile):
log.msg("there's no file " + self.safeoutfile)
self.exit()
with open(self.safeoutfile, 'rb') as f:
shasum = hashlib.sha256(f.read()).hexdigest()
hash_path = os.path.join(self.download_path, shasum)
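            # Note: downloads are stored content-addressed under
            # download_path/<sha256 of the contents>, so repeated downloads of
            # identical content are kept on disk only once.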
# If we have content already, delete temp file
if not os.path.exists(hash_path):
os.rename(self.safeoutfile, hash_path)
else:
os.remove(self.safeoutfile)
log.msg("Not storing duplicate content " + shasum)
self.protocol.logDispatch(eventid='cowrie.session.file_download',
format='Downloaded URL (%(url)s) with SHA-256 %(shasum)s to %(outfile)s',
url=self.url,
outfile=hash_path,
shasum=shasum)
# Link friendly name to hash
# os.symlink(shasum, self.safeoutfile)
self.safeoutfile = None
# Update the honeyfs to point to downloaded file
self.fs.update_realfile(self.fs.getfile(outfile), hash_path)
self.fs.chown(outfile, self.protocol.user.uid, self.protocol.user.gid)
self.exit()
    def error(self, error, url):
        if hasattr(error, 'getErrorMessage'):  # exceptions
            errorMessage = error.getErrorMessage()
            self.write(errorMessage + '\n')
# Real wget also adds this:
if hasattr(error, 'webStatus') and hasattr(error,'webMessage'): # exceptions
dateWithError = '{} ERROR '.format(time.strftime('%Y-%m-%d %T'))
self.write(dateWithError + str(error.webStatus) + ': ' + error.webMessage + '\n')
else:
self.write('{} ERROR 404: Not Found.\n'.format(time.strftime('%Y-%m-%d %T')))
self.exit()
commands['/usr/bin/wget'] = command_wget
commands['/usr/bin/dget'] = command_wget
# From http://code.activestate.com/recipes/525493/
class HTTPProgressDownloader(client.HTTPDownloader):
def __init__(self, wget, fakeoutfile, url, outfile, headers=None):
client.HTTPDownloader.__init__(self, url, outfile, headers=headers,
agent=b'Wget/1.11.4')
self.status = None
self.wget = wget
self.fakeoutfile = fakeoutfile
self.lastupdate = 0
self.started = time.time()
self.proglen = 0
self.nomore = False
self.quiet = self.wget.quiet
def noPage(self, reason): # Called for non-200 responses
"""
"""
if self.status == '304':
client.HTTPDownloader.page(self, '')
else:
if hasattr(self, 'status'):
reason.webStatus = self.status
if hasattr(self, 'message'):
reason.webMessage = self.message
client.HTTPDownloader.noPage(self, reason)
def gotHeaders(self, headers):
"""
"""
if self.status == '200':
if self.quiet == False:
self.wget.write('200 OK\n')
if 'content-length' in headers:
self.totallength = int(headers['content-length'][0])
else:
self.totallength = 0
if 'content-type' in headers:
self.contenttype = headers['content-type'][0]
else:
self.contenttype = 'text/whatever'
self.currentlength = 0.0
if self.totallength > 0:
if self.quiet == False:
self.wget.write('Length: %d (%s) [%s]\n' % \
(self.totallength,
sizeof_fmt(self.totallength),
self.contenttype))
else:
if self.quiet == False:
self.wget.write('Length: unspecified [{}]\n'.format(self.contenttype))
if self.wget.limit_size > 0 and \
self.totallength > self.wget.limit_size:
log.msg( 'Not saving URL ({}) due to file size limit'.format(self.wget.url))
self.fileName = os.path.devnull
self.nomore = True
if self.quiet == False:
self.wget.write('Saving to: `{}\'\n\n'.format(self.fakeoutfile))
return client.HTTPDownloader.gotHeaders(self, headers)
def pagePart(self, data):
"""
"""
if self.status == '200':
self.currentlength += len(data)
# If downloading files of unspecified size, this could happen:
if not self.nomore and self.wget.limit_size > 0 and \
self.currentlength > self.wget.limit_size:
log.msg( 'File limit reached, not saving any more data!' )
self.nomore = True
self.file.close()
self.fileName = os.path.devnull
self.file = self.openFile(data)
if (time.time() - self.lastupdate) < 0.5:
return client.HTTPDownloader.pagePart(self, data)
if self.totallength:
percent = int(self.currentlength/self.totallength*100)
spercent = "{}%".format(percent)
else:
spercent = '%dK' % (self.currentlength/1000)
percent = 0
self.speed = self.currentlength / (time.time() - self.started)
eta = (self.totallength - self.currentlength) / self.speed
s = '\r%s [%s] %s %dK/s eta %s' % \
(spercent.rjust(3),
('%s>' % (int(39.0 / 100.0 * percent) * '=')).ljust(39),
splitthousands(str(int(self.currentlength))).ljust(12),
self.speed / 1000,
tdiff(eta))
if self.quiet == False:
self.wget.write(s.ljust(self.proglen))
self.proglen = len(s)
self.lastupdate = time.time()
return client.HTTPDownloader.pagePart(self, data)
def pageEnd(self):
"""
"""
if self.totallength != 0 and self.currentlength != self.totallength:
return client.HTTPDownloader.pageEnd(self)
if self.quiet == False:
self.wget.write('\r100%%[%s] %s %dK/s' % \
('%s>' % (38 * '='),
splitthousands(str(int(self.totallength))).ljust(12),
self.speed / 1000))
self.wget.write('\n\n')
self.wget.write(
'%s (%d KB/s) - `%s\' saved [%d/%d]\n\n' % \
(time.strftime('%Y-%m-%d %H:%M:%S'),
self.speed / 1000,
self.fakeoutfile, self.currentlength, self.totallength))
self.wget.fs.mkfile(self.fakeoutfile, 0, 0, self.totallength, 33188)
self.wget.fs.update_realfile(
self.wget.fs.getfile(self.fakeoutfile),
self.wget.safeoutfile)
self.wget.fileName = self.fileName
return client.HTTPDownloader.pageEnd(self)
``` |
{
"source": "johnfrancisgit/GoogleFiber_speedtest_logger",
"score": 2
} |
#### File: GoogleFiber_speedtest_logger/gfiberspeedtest/__init__.py
```python
from .speedtest_cli import (init_virtual_display, init_driver, scrape)
def run():
display = init_virtual_display()
driver = init_driver()
results = scrape(driver)
driver.quit()
display.stop()
return results
def cli():
print("Running speedtest")
result = run()
result_str = "Download speed: {}Mbps \nUpload speed: {}Mbps \nPing: {}ms"
print(result_str.format(result["download"], result["upload"],
result["ping"]))
``` |
{
"source": "johnfraney/django-bootstrap-customizer",
"score": 2
} |
#### File: django-bootstrap-customizer/bootstrap_customizer/middleware.py
```python
from django.utils.deprecation import MiddlewareMixin
from bootstrap_customizer.models import BootstrapTheme
class BootstrapThemeMiddleware(MiddlewareMixin):
"""
    Middleware that sets the `bootstrap_theme` attribute on the request object.
"""
def process_request(self, request):
theme = BootstrapTheme.objects.filter(sitebootstraptheme__site=request.site).first()
if theme:
request.bootstrap_theme = theme
``` |
{
"source": "johnfraney/django-ner-trainer",
"score": 2
} |
#### File: django-ner-trainer/tests/test_conf.py
```python
from django.test import TestCase
from ner_trainer.conf import settings, DEFAULTS
class SettingsTests(TestCase):
def test_default_settings(self):
for setting_name, default_value in DEFAULTS.items():
self.assertEqual(getattr(settings, setting_name), default_value)
    def test_nonexistent_setting(self):
with self.assertRaises(AttributeError):
settings.BANANA
``` |
{
"source": "johnfraney/flake8-markdown",
"score": 2
} |
#### File: flake8-markdown/flake8_markdown/__init__.py
```python
import argparse
import glob
import re
import subprocess
import sys
from concurrent.futures import ThreadPoolExecutor
from flake8_markdown.constants import SUBPROCESS_ARGS
__version__ = '0.2.0'
def non_matching_lookahead(pattern):
return r'(?={})'.format(pattern)
def matching_group(pattern):
return r'({})'.format(pattern)
def non_matching_group(pattern):
return r'(?:{})'.format(pattern)
def strip_repl_characters(code):
"""Removes the first four characters from each REPL-style line.
>>> strip_repl_characters('>>> "banana"') == '"banana"'
True
>>> strip_repl_characters('... banana') == 'banana'
True
"""
stripped_lines = []
for line in code.splitlines():
if line.startswith('>>> ') or line.startswith('... '):
stripped_lines.append(line[4:])
else:
stripped_lines.append(line)
return '\n'.join(stripped_lines)
ONE_OR_MORE_LINES_NOT_GREEDY = r'(?:.*\n)+?'
regex_rule = ''.join([
# Use non-matching group instead of a lookbehind because the code
# block may have line highlighting hints. See:
# https://python-markdown.github.io/extensions/fenced_code_blocks/#emphasized-lines
non_matching_group('^```(python|pycon|py).*$'),
matching_group(ONE_OR_MORE_LINES_NOT_GREEDY),
non_matching_lookahead('^```')
])
regex = re.compile(regex_rule, re.MULTILINE)
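# Rough example of what the pattern captures (assuming a Markdown file with one
# fenced block opened by three backticks plus "python" and containing the single
# line print('hello')): regex.findall(content) == [('python', "\nprint('hello')\n")].
# Each match is a (language, body) tuple; the leading newline belongs to the end
# of the opening fence line.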
def lint_markdown_file(markdown_file_path):
linting_errors = []
    with open(markdown_file_path, 'r') as markdown_file:
        markdown_content = markdown_file.read()
code_block_start_lines = []
for line_no, line in enumerate(markdown_content.splitlines(), start=1):
# Match python and pycon
if line.startswith('```py'):
code_block_start_lines.append(line_no)
code_block_matches = regex.findall(markdown_content)
for match_number, code_block_match in enumerate(code_block_matches):
code_block_type = code_block_match[0]
match_text = code_block_match[1]
# pycon lines start with ">>> " or "... ", so strip those characters
if code_block_type == 'pycon':
match_text = strip_repl_characters(match_text)
match_text = match_text.lstrip()
flake8_process = subprocess.run(
['flake8', '-'],
input=match_text,
**SUBPROCESS_ARGS,
)
flake8_output = flake8_process.stdout
flake8_output = flake8_output.strip()
# Skip empty lines
if not flake8_output:
continue
flake8_output_split = flake8_output.split(':')
line_number = int(flake8_output_split[1])
column_number = int(flake8_output_split[2])
markdown_line_number = (
line_number + code_block_start_lines[match_number]
)
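        # e.g. an error flake8 reports on stdin line 2 of a block whose opening
        # fence sits on Markdown line 40 is attributed to Markdown line 42.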
if code_block_type == 'pycon':
match_lines = match_text.splitlines()
line = match_lines[line_number - 1]
if any([
line.startswith('>>> '),
line.startswith('... '),
]):
flake8_output_split[2] = column_number + 4
# Replace reference to stdin line number with file line number
flake8_output = re.sub(
r'stdin:[0-9]+',
'{}:{}'.format(markdown_file_path, markdown_line_number),
flake8_output
)
linting_errors.append(flake8_output)
if linting_errors:
linting_error_output = '\n'.join(linting_errors)
print(linting_error_output)
return False
return True
def lint_markdown_glob(markdown_glob):
files = glob.iglob(markdown_glob, recursive=True)
passing = True
with ThreadPoolExecutor() as executor:
results = executor.map(lint_markdown_file, files)
for result in results:
if result is False:
passing = False
return passing
def main(argv=None):
parser = argparse.ArgumentParser(description='Markdown globs')
parser.add_argument(
'globs',
metavar='glob',
type=str,
nargs='+',
help='a glob of Markdown files to lint',
)
args = parser.parse_args(argv)
markdown_globs = args.globs
passing = True
with ThreadPoolExecutor() as executor:
results = executor.map(lint_markdown_glob, markdown_globs)
for result in results:
if result is False:
passing = False
if not passing:
sys.exit(1)
sys.exit(0)
``` |
{
"source": "johnfredcee/FbxPipeline",
"score": 2
} |
#### File: generated/apemodefb/MaterialPropFb.py
```python
import flatbuffers
class MaterialPropFb(object):
__slots__ = ['_tab']
# MaterialPropFb
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# MaterialPropFb
def NameId(self): return self._tab.Get(flatbuffers.number_types.Uint32Flags, self._tab.Pos + flatbuffers.number_types.UOffsetTFlags.py_type(0))
# MaterialPropFb
def ValueId(self): return self._tab.Get(flatbuffers.number_types.Uint32Flags, self._tab.Pos + flatbuffers.number_types.UOffsetTFlags.py_type(4))
def CreateMaterialPropFb(builder, nameId, valueId):
builder.Prep(4, 8)
builder.PrependUint32(valueId)
builder.PrependUint32(nameId)
return builder.Offset()
```
#### File: generated/apemodefb/PackedSkinnedVertexFb.py
```python
import flatbuffers
class PackedSkinnedVertexFb(object):
__slots__ = ['_tab']
# PackedSkinnedVertexFb
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# PackedSkinnedVertexFb
def Position(self): return self._tab.Get(flatbuffers.number_types.Uint32Flags, self._tab.Pos + flatbuffers.number_types.UOffsetTFlags.py_type(0))
# PackedSkinnedVertexFb
def Normal(self): return self._tab.Get(flatbuffers.number_types.Uint32Flags, self._tab.Pos + flatbuffers.number_types.UOffsetTFlags.py_type(4))
# PackedSkinnedVertexFb
def Tangent(self): return self._tab.Get(flatbuffers.number_types.Uint32Flags, self._tab.Pos + flatbuffers.number_types.UOffsetTFlags.py_type(8))
# PackedSkinnedVertexFb
def Uv(self): return self._tab.Get(flatbuffers.number_types.Uint32Flags, self._tab.Pos + flatbuffers.number_types.UOffsetTFlags.py_type(12))
# PackedSkinnedVertexFb
def Weights(self): return self._tab.Get(flatbuffers.number_types.Uint32Flags, self._tab.Pos + flatbuffers.number_types.UOffsetTFlags.py_type(16))
# PackedSkinnedVertexFb
def Indices(self): return self._tab.Get(flatbuffers.number_types.Uint32Flags, self._tab.Pos + flatbuffers.number_types.UOffsetTFlags.py_type(20))
def CreatePackedSkinnedVertexFb(builder, position, normal, tangent, uv, weights, indices):
builder.Prep(4, 24)
builder.PrependUint32(indices)
builder.PrependUint32(weights)
builder.PrependUint32(uv)
builder.PrependUint32(tangent)
builder.PrependUint32(normal)
builder.PrependUint32(position)
return builder.Offset()
```
#### File: generated/apemodefb/SceneFb.py
```python
import flatbuffers
class SceneFb(object):
__slots__ = ['_tab']
@classmethod
def GetRootAsSceneFb(cls, buf, offset):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = SceneFb()
x.Init(buf, n + offset)
return x
# SceneFb
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# SceneFb
def Version(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Uint8Flags, o + self._tab.Pos)
return 0
# SceneFb
def Transforms(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
x = self._tab.Vector(o)
x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 144
from .TransformFb import TransformFb
obj = TransformFb()
obj.Init(self._tab.Bytes, x)
return obj
return None
# SceneFb
def TransformsLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
return self._tab.VectorLen(o)
return 0
# SceneFb
def Nodes(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
if o != 0:
x = self._tab.Vector(o)
x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
x = self._tab.Indirect(x)
from .NodeFb import NodeFb
obj = NodeFb()
obj.Init(self._tab.Bytes, x)
return obj
return None
# SceneFb
def NodesLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
if o != 0:
return self._tab.VectorLen(o)
return 0
# SceneFb
def Meshes(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
if o != 0:
x = self._tab.Vector(o)
x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
x = self._tab.Indirect(x)
from .MeshFb import MeshFb
obj = MeshFb()
obj.Init(self._tab.Bytes, x)
return obj
return None
# SceneFb
def MeshesLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
if o != 0:
return self._tab.VectorLen(o)
return 0
# SceneFb
def AnimStacks(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
if o != 0:
x = self._tab.Vector(o)
x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 8
from .AnimStackFb import AnimStackFb
obj = AnimStackFb()
obj.Init(self._tab.Bytes, x)
return obj
return None
# SceneFb
def AnimStacksLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
if o != 0:
return self._tab.VectorLen(o)
return 0
# SceneFb
def AnimLayers(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
if o != 0:
x = self._tab.Vector(o)
x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 12
from .AnimLayerFb import AnimLayerFb
obj = AnimLayerFb()
obj.Init(self._tab.Bytes, x)
return obj
return None
# SceneFb
def AnimLayersLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
if o != 0:
return self._tab.VectorLen(o)
return 0
# SceneFb
def AnimCurves(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16))
if o != 0:
x = self._tab.Vector(o)
x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
x = self._tab.Indirect(x)
from .AnimCurveFb import AnimCurveFb
obj = AnimCurveFb()
obj.Init(self._tab.Bytes, x)
return obj
return None
# SceneFb
def AnimCurvesLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16))
if o != 0:
return self._tab.VectorLen(o)
return 0
# SceneFb
def Materials(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18))
if o != 0:
x = self._tab.Vector(o)
x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
x = self._tab.Indirect(x)
from .MaterialFb import MaterialFb
obj = MaterialFb()
obj.Init(self._tab.Bytes, x)
return obj
return None
# SceneFb
def MaterialsLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18))
if o != 0:
return self._tab.VectorLen(o)
return 0
# SceneFb
def Textures(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(20))
if o != 0:
x = self._tab.Vector(o)
x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 72
from .TextureFb import TextureFb
obj = TextureFb()
obj.Init(self._tab.Bytes, x)
return obj
return None
# SceneFb
def TexturesLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(20))
if o != 0:
return self._tab.VectorLen(o)
return 0
# SceneFb
def Cameras(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(22))
if o != 0:
x = self._tab.Vector(o)
x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 28
from .CameraFb import CameraFb
obj = CameraFb()
obj.Init(self._tab.Bytes, x)
return obj
return None
# SceneFb
def CamerasLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(22))
if o != 0:
return self._tab.VectorLen(o)
return 0
# SceneFb
def Lights(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(24))
if o != 0:
x = self._tab.Vector(o)
x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 64
from .LightFb import LightFb
obj = LightFb()
obj.Init(self._tab.Bytes, x)
return obj
return None
# SceneFb
def LightsLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(24))
if o != 0:
return self._tab.VectorLen(o)
return 0
# SceneFb
def Skins(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(26))
if o != 0:
x = self._tab.Vector(o)
x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
x = self._tab.Indirect(x)
from .SkinFb import SkinFb
obj = SkinFb()
obj.Init(self._tab.Bytes, x)
return obj
return None
# SceneFb
def SkinsLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(26))
if o != 0:
return self._tab.VectorLen(o)
return 0
# SceneFb
def Files(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(28))
if o != 0:
x = self._tab.Vector(o)
x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
x = self._tab.Indirect(x)
from .FileFb import FileFb
obj = FileFb()
obj.Init(self._tab.Bytes, x)
return obj
return None
# SceneFb
def FilesLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(28))
if o != 0:
return self._tab.VectorLen(o)
return 0
# SceneFb
def BoolValues(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(30))
if o != 0:
a = self._tab.Vector(o)
return self._tab.Get(flatbuffers.number_types.BoolFlags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 1))
return 0
# SceneFb
def BoolValuesAsNumpy(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(30))
if o != 0:
return self._tab.GetVectorAsNumpy(flatbuffers.number_types.BoolFlags, o)
return 0
# SceneFb
def BoolValuesLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(30))
if o != 0:
return self._tab.VectorLen(o)
return 0
# SceneFb
def IntValues(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(32))
if o != 0:
a = self._tab.Vector(o)
return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
return 0
# SceneFb
def IntValuesAsNumpy(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(32))
if o != 0:
return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o)
return 0
# SceneFb
def IntValuesLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(32))
if o != 0:
return self._tab.VectorLen(o)
return 0
# SceneFb
def FloatValues(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(34))
if o != 0:
a = self._tab.Vector(o)
return self._tab.Get(flatbuffers.number_types.Float32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
return 0
# SceneFb
def FloatValuesAsNumpy(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(34))
if o != 0:
return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Float32Flags, o)
return 0
# SceneFb
def FloatValuesLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(34))
if o != 0:
return self._tab.VectorLen(o)
return 0
# SceneFb
def StringValues(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(36))
if o != 0:
a = self._tab.Vector(o)
return self._tab.String(a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
return ""
# SceneFb
def StringValuesLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(36))
if o != 0:
return self._tab.VectorLen(o)
return 0
def SceneFbStart(builder): builder.StartObject(17)
def SceneFbAddVersion(builder, version): builder.PrependUint8Slot(0, version, 0)
def SceneFbAddTransforms(builder, transforms): builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(transforms), 0)
def SceneFbStartTransformsVector(builder, numElems): return builder.StartVector(144, numElems, 4)
def SceneFbAddNodes(builder, nodes): builder.PrependUOffsetTRelativeSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(nodes), 0)
def SceneFbStartNodesVector(builder, numElems): return builder.StartVector(4, numElems, 4)
def SceneFbAddMeshes(builder, meshes): builder.PrependUOffsetTRelativeSlot(3, flatbuffers.number_types.UOffsetTFlags.py_type(meshes), 0)
def SceneFbStartMeshesVector(builder, numElems): return builder.StartVector(4, numElems, 4)
def SceneFbAddAnimStacks(builder, animStacks): builder.PrependUOffsetTRelativeSlot(4, flatbuffers.number_types.UOffsetTFlags.py_type(animStacks), 0)
def SceneFbStartAnimStacksVector(builder, numElems): return builder.StartVector(8, numElems, 4)
def SceneFbAddAnimLayers(builder, animLayers): builder.PrependUOffsetTRelativeSlot(5, flatbuffers.number_types.UOffsetTFlags.py_type(animLayers), 0)
def SceneFbStartAnimLayersVector(builder, numElems): return builder.StartVector(12, numElems, 4)
def SceneFbAddAnimCurves(builder, animCurves): builder.PrependUOffsetTRelativeSlot(6, flatbuffers.number_types.UOffsetTFlags.py_type(animCurves), 0)
def SceneFbStartAnimCurvesVector(builder, numElems): return builder.StartVector(4, numElems, 4)
def SceneFbAddMaterials(builder, materials): builder.PrependUOffsetTRelativeSlot(7, flatbuffers.number_types.UOffsetTFlags.py_type(materials), 0)
def SceneFbStartMaterialsVector(builder, numElems): return builder.StartVector(4, numElems, 4)
def SceneFbAddTextures(builder, textures): builder.PrependUOffsetTRelativeSlot(8, flatbuffers.number_types.UOffsetTFlags.py_type(textures), 0)
def SceneFbStartTexturesVector(builder, numElems): return builder.StartVector(72, numElems, 4)
def SceneFbAddCameras(builder, cameras): builder.PrependUOffsetTRelativeSlot(9, flatbuffers.number_types.UOffsetTFlags.py_type(cameras), 0)
def SceneFbStartCamerasVector(builder, numElems): return builder.StartVector(28, numElems, 4)
def SceneFbAddLights(builder, lights): builder.PrependUOffsetTRelativeSlot(10, flatbuffers.number_types.UOffsetTFlags.py_type(lights), 0)
def SceneFbStartLightsVector(builder, numElems): return builder.StartVector(64, numElems, 4)
def SceneFbAddSkins(builder, skins): builder.PrependUOffsetTRelativeSlot(11, flatbuffers.number_types.UOffsetTFlags.py_type(skins), 0)
def SceneFbStartSkinsVector(builder, numElems): return builder.StartVector(4, numElems, 4)
def SceneFbAddFiles(builder, files): builder.PrependUOffsetTRelativeSlot(12, flatbuffers.number_types.UOffsetTFlags.py_type(files), 0)
def SceneFbStartFilesVector(builder, numElems): return builder.StartVector(4, numElems, 4)
def SceneFbAddBoolValues(builder, boolValues): builder.PrependUOffsetTRelativeSlot(13, flatbuffers.number_types.UOffsetTFlags.py_type(boolValues), 0)
def SceneFbStartBoolValuesVector(builder, numElems): return builder.StartVector(1, numElems, 1)
def SceneFbAddIntValues(builder, intValues): builder.PrependUOffsetTRelativeSlot(14, flatbuffers.number_types.UOffsetTFlags.py_type(intValues), 0)
def SceneFbStartIntValuesVector(builder, numElems): return builder.StartVector(4, numElems, 4)
def SceneFbAddFloatValues(builder, floatValues): builder.PrependUOffsetTRelativeSlot(15, flatbuffers.number_types.UOffsetTFlags.py_type(floatValues), 0)
def SceneFbStartFloatValuesVector(builder, numElems): return builder.StartVector(4, numElems, 4)
def SceneFbAddStringValues(builder, stringValues): builder.PrependUOffsetTRelativeSlot(16, flatbuffers.number_types.UOffsetTFlags.py_type(stringValues), 0)
def SceneFbStartStringValuesVector(builder, numElems): return builder.StartVector(4, numElems, 4)
def SceneFbEnd(builder): return builder.EndObject()
```
#### File: generated/apemodefb/SubsetFb.py
```python
import flatbuffers
class SubsetFb(object):
__slots__ = ['_tab']
# SubsetFb
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# SubsetFb
def MaterialId(self): return self._tab.Get(flatbuffers.number_types.Uint32Flags, self._tab.Pos + flatbuffers.number_types.UOffsetTFlags.py_type(0))
# SubsetFb
def BaseIndex(self): return self._tab.Get(flatbuffers.number_types.Uint32Flags, self._tab.Pos + flatbuffers.number_types.UOffsetTFlags.py_type(4))
# SubsetFb
def IndexCount(self): return self._tab.Get(flatbuffers.number_types.Uint32Flags, self._tab.Pos + flatbuffers.number_types.UOffsetTFlags.py_type(8))
def CreateSubsetFb(builder, materialId, baseIndex, indexCount):
builder.Prep(4, 12)
builder.PrependUint32(indexCount)
builder.PrependUint32(baseIndex)
builder.PrependUint32(materialId)
return builder.Offset()
```
#### File: generated/apemodefb/TransformFb.py
```python
import flatbuffers
class TransformFb(object):
__slots__ = ['_tab']
# TransformFb
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# TransformFb
def Translation(self, obj):
obj.Init(self._tab.Bytes, self._tab.Pos + 0)
return obj
# TransformFb
def RotationOffset(self, obj):
obj.Init(self._tab.Bytes, self._tab.Pos + 12)
return obj
# TransformFb
def RotationPivot(self, obj):
obj.Init(self._tab.Bytes, self._tab.Pos + 24)
return obj
# TransformFb
def PreRotation(self, obj):
obj.Init(self._tab.Bytes, self._tab.Pos + 36)
return obj
# TransformFb
def PostRotation(self, obj):
obj.Init(self._tab.Bytes, self._tab.Pos + 48)
return obj
# TransformFb
def Rotation(self, obj):
obj.Init(self._tab.Bytes, self._tab.Pos + 60)
return obj
# TransformFb
def ScalingOffset(self, obj):
obj.Init(self._tab.Bytes, self._tab.Pos + 72)
return obj
# TransformFb
def ScalingPivot(self, obj):
obj.Init(self._tab.Bytes, self._tab.Pos + 84)
return obj
# TransformFb
def Scaling(self, obj):
obj.Init(self._tab.Bytes, self._tab.Pos + 96)
return obj
# TransformFb
def GeometricTranslation(self, obj):
obj.Init(self._tab.Bytes, self._tab.Pos + 108)
return obj
# TransformFb
def GeometricRotation(self, obj):
obj.Init(self._tab.Bytes, self._tab.Pos + 120)
return obj
# TransformFb
def GeometricScaling(self, obj):
obj.Init(self._tab.Bytes, self._tab.Pos + 132)
return obj
def CreateTransformFb(builder, translation_x, translation_y, translation_z, rotation_offset_x, rotation_offset_y, rotation_offset_z, rotation_pivot_x, rotation_pivot_y, rotation_pivot_z, pre_rotation_x, pre_rotation_y, pre_rotation_z, post_rotation_x, post_rotation_y, post_rotation_z, rotation_x, rotation_y, rotation_z, scaling_offset_x, scaling_offset_y, scaling_offset_z, scaling_pivot_x, scaling_pivot_y, scaling_pivot_z, scaling_x, scaling_y, scaling_z, geometric_translation_x, geometric_translation_y, geometric_translation_z, geometric_rotation_x, geometric_rotation_y, geometric_rotation_z, geometric_scaling_x, geometric_scaling_y, geometric_scaling_z):
builder.Prep(4, 144)
builder.Prep(4, 12)
builder.PrependFloat32(geometric_scaling_z)
builder.PrependFloat32(geometric_scaling_y)
builder.PrependFloat32(geometric_scaling_x)
builder.Prep(4, 12)
builder.PrependFloat32(geometric_rotation_z)
builder.PrependFloat32(geometric_rotation_y)
builder.PrependFloat32(geometric_rotation_x)
builder.Prep(4, 12)
builder.PrependFloat32(geometric_translation_z)
builder.PrependFloat32(geometric_translation_y)
builder.PrependFloat32(geometric_translation_x)
builder.Prep(4, 12)
builder.PrependFloat32(scaling_z)
builder.PrependFloat32(scaling_y)
builder.PrependFloat32(scaling_x)
builder.Prep(4, 12)
builder.PrependFloat32(scaling_pivot_z)
builder.PrependFloat32(scaling_pivot_y)
builder.PrependFloat32(scaling_pivot_x)
builder.Prep(4, 12)
builder.PrependFloat32(scaling_offset_z)
builder.PrependFloat32(scaling_offset_y)
builder.PrependFloat32(scaling_offset_x)
builder.Prep(4, 12)
builder.PrependFloat32(rotation_z)
builder.PrependFloat32(rotation_y)
builder.PrependFloat32(rotation_x)
builder.Prep(4, 12)
builder.PrependFloat32(post_rotation_z)
builder.PrependFloat32(post_rotation_y)
builder.PrependFloat32(post_rotation_x)
builder.Prep(4, 12)
builder.PrependFloat32(pre_rotation_z)
builder.PrependFloat32(pre_rotation_y)
builder.PrependFloat32(pre_rotation_x)
builder.Prep(4, 12)
builder.PrependFloat32(rotation_pivot_z)
builder.PrependFloat32(rotation_pivot_y)
builder.PrependFloat32(rotation_pivot_x)
builder.Prep(4, 12)
builder.PrependFloat32(rotation_offset_z)
builder.PrependFloat32(rotation_offset_y)
builder.PrependFloat32(rotation_offset_x)
builder.Prep(4, 12)
builder.PrependFloat32(translation_z)
builder.PrependFloat32(translation_y)
builder.PrependFloat32(translation_x)
return builder.Offset()
``` |
{
"source": "johnfredcee/tagbank",
"score": 3
} |
#### File: johnfredcee/tagbank/tagdb.py
```python
import sys
import getopt
import os
import os.path
import subprocess
import configparser
import cmd
import json
import ctags
import sqlite3
ConfigFile = "tagbank.conf"
def is_one_of(file_name, filemasks):
return "*"+os.path.splitext(file_name)[1] in filemasks
def create_index(tablename, projectdirs, filetypes):
conn = sqlite3.connect("projects.db")
cursor = conn.cursor()
# remove the table if it already exists - we are going to regenerate it
drop_cmd = "DROP TABLE IF EXISTS " + tablename + ";"
cursor.execute(drop_cmd)
# recreate table
create_cmd = "CREATE TABLE " + tablename + "(path TEXT, pathtype INTEGER);"
cursor.execute(create_cmd)
for d in projectdirs:
fullname = d
insert_cmd = "INSERT INTO " + tablename + "(path, pathtype) VALUES(\"%s\", 1);" % fullname
cursor.execute(insert_cmd)
for root, dirs, files in os.walk(d):
# directories
for name in dirs:
                fullname = os.path.join(root, name)
                insert_cmd = "INSERT INTO " + tablename + "(path, pathtype) VALUES(?, 1);"
                cursor.execute(insert_cmd, (fullname,))
# files
for name in files:
fullname = os.path.join(root, name)
if is_one_of(fullname, filetypes):
insert_cmd = "INSERT INTO " + tablename + "(path, pathtype) VALUES(\"%s\", 0);" % fullname
cursor.execute(insert_cmd)
conn.commit()
cursor.close()
def parse_tag_line(line):
if line[0] == '!':
return None # not a tag line
fields = line.split('\t')
tag = fields[0]
path = fields[1]
address = fields[2]
fields = fields[3:]
tagfields = {}
for field in fields:
if ':' in field:
(name, value) = field.split(":")
tagfields[name] = value
else:
tagfields["kind"] = field
return ( tag, path, address, tagfields )
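# Illustrative input (assumed extended-format ctags line, tab-separated):
#   main<TAB>src/main.c<TAB>/^int main(void)$/;"<TAB>f<TAB>line:12
# parse_tag_line would return:
#   ('main', 'src/main.c', '/^int main(void)$/;"', {'kind': 'f', 'line': '12'})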
def memberof(tagfields):
    fields = [ "class", "struct", "union", "enum", "function" ]
    for field in fields:
        if field in tagfields:
            return tagfields[field]
    return None
# # todo needs to run in the root directory of the project
# def invoke_ctags(prog, flags, project):
# kinds = [ "c", "d", "e", "f", "F", "g", "m", "p", "s", "t", "u", "v" ]
# conn = sqlite3.connect("projects")
# cur = conn.cursor()
# select_cmd = "SELECT path FROM " + project + " WHERE pathtype=0;"
# cur.execute(select_cmd)
# conn.commit()
# taggedfiles = cur.fetchall()
# cur.close()
# conn = sqlite3.connect("%s_TAGS" % project)
# create_cmd = "CREATE TABLE TAGS (tag TEXT, address TEXT, path TEXT, kind INTEGER, memberof TEXT);"
# flags = "--fields=+knStmi --extra=+q --c++-kinds=cdefgmnpstuvx --c-kinds=cdefglmnpstuvx --extra=+q --filter=yes --format=2"
# args = [ prog ] + flags
# taglines = []
# for taggedfile in taggedfiles:
# try:
# ctags = subprocess.Popen(args, shell=True, stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
# (ctagsout, ctaggserr) = ctags.communicate(input = taggedfile)
# for line in ctagsout:
# taginfo = parse_tag_line(line)
# if taginfo:
# insert_cmd =
# except OSError:
# print "Command %s failed." % args
# for line in ctags.stdout:
# taglines.append(line)
# for taggedfile in taggedfiles:
# try:
# tags = subprocess.check_output(args)
# except subprocess.CalledProcessError:
# tags = None
#flags=--verbose;-e;--options=u4tags.conf;--c++-kinds=cfnstunedm;--c-kinds=cfnstunedm;--extra=+q
projects = {}
class TagDBCmd(cmd.Cmd):
def do_add(self, arg):
args = arg.split()
if len(args) >= 2:
if args[0] == "project":
projectname = args[1]
if not projectname in projects:
project = {"directories" : [], "filetypes" : [], "flags" : ["--verbose", "-e", "--extra=+q"]}
for arg in args[2:]:
if arg[0] == '*':
project["filetypes"].append(arg)
elif arg[0] == '-':
project["flags"].append(arg)
else:
project["directories"].append(arg)
projects[projectname] = project
print("Added project %s." % projectname)
else:
print("%s already exists as project." % projectname)
else:
print("Not enough arguments")
return
def do_rm(self, arg):
args = arg.split()
if len(args) >= 2:
if args[0] == "project":
projectname = args[1]
if projectname in projects:
del projects[projectname]
print("%s removed from projects." % projectname)
else:
print("%s unrecognized as project." % projectname)
else:
print("Not enough arguments")
return
def do_show(self, arg):
args = arg.split()
if len(args) >= 2:
if args[0] == "project":
projectname = args[1]
if projectname in projects:
project = projects[projectname]
print("Directories")
print("===========")
for pdir in project["directories"]:
print(pdir)
print("Filetypes:",end = '')
for ftype in project["filetypes"]:
print(ftype, end =':')
print(' ')
print("Flags:", end='')
for pflag in project["flags"]:
print(pflag, end =':')
print(" ")
else:
print("%s unrecognized as project." % projectname)
return
def do_save(self, line):
confp = open("tagdb.conf", "w")
json.dump(projects, confp)
confp.close()
def do_EOF(self,line):
confp = open("tagdb.conf", "w")
json.dump(projects, confp)
confp.close()
return True
# todo -- need an update mode for a single project
if __name__ == "__main__":
confp = open("tagdb.conf")
projects = json.load(confp)
confp.close()
processor = TagDBCmd()
config = configparser.ConfigParser()
config.read(ConfigFile)
ctags_exe = config.get("Global", "ctags")
processor.cmdloop()
# for section in config.sections():
# if (section == "Global"):
# continue
# tagpaths = config.get(section, "tagpaths").split(';')
# wildcards = config.get(section, "wildcards").split(';')
# flags = config.get(section, "flags").split(';')
# create_index(section, tagpaths, wildcards)
# #invoke_ctags(ctags_exe, flags + [ "-o" ] + [ section + ".TAGS" ] + [ "-L"] + [ index_filename(section) ])
# sys.exit(0)
# C
# c classes
# d macro definitions
# e enumerators (values inside an enumeration)
# f function definitions
# g enumeration names
# l local variables [off]
# m class, struct, and union members
# n namespaces
# p function prototypes [off]
# s structure names
# t typedefs
# u union names
# v variable definitions
# x external and forward variable declarations [off]
# C++
# c classes
# d macro definitions
# e enumerators (values inside an enumeration)
# f function definitions
# g enumeration names
# l local variables [off]
# m class, struct, and union members
# n namespaces
# p function prototypes [off]
# s structure names
# t typedefs
# u union names
# v variable definitions
# x external and forward variable declarations [off]
```
#### File: johnfredcee/tagbank/tagwatch.py
```python
import sys
import ctags
import getopt
import os
import os.path
import subprocess
import configparser
import sqlite3
import time
import logging
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
ConfigFile = "tagbank.conf"
class TagHandler(FileSystemEventHandler):
def on_modified(self, event):
print("Got it!")
print("Event type %s path %s." % ( event.event_type, event.src_path))
def watch_section(conn, cursor, section):
# fetch directories
query = "SELECT * FROM " + section + " WHERE pathtype=1;"
cursor.execute(query)
dirnames = cursor.fetchall()
# fetch files
query = "SELECT * FROM " + section + " WHERE pathtype=0;"
cursor.execute(query)
filenames = cursor.fetchall()
    observer = Observer()
    for dirname, _ in dirnames:
        event_handler = TagHandler()
        observer.schedule(event_handler, dirname, recursive=False)
    return observer
if __name__ == "__main__":
conn = sqlite3.connect("projects.db")
cursor = conn.cursor()
config = configparser.ConfigParser()
config.read(ConfigFile)
observers = []
for section in config.sections():
if (section == "Global"):
continue
observers += [ watch_section(conn, cursor, section) ]
conn.commit()
cursor.close()
for o in observers:
o.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
for o in observers:
o.stop()
``` |
{
"source": "johnfred-delossantos/OOP-1-2",
"score": 4
} |
#### File: johnfred-delossantos/OOP-1-2/Midterm Exam_2.py
```python
from tkinter import *
window = Tk()
window.title("Special Midterm Exam in OOP")
window.geometry("400x300+20+10")
i = 1
def change_color():
global i
i += 1
if i % 2 == 0:
btn.configure(bg="yellow")
else:
btn.configure(bg="white")
btn = Button(window, text="Click to Change color", bg="white", command=change_color)
btn.place(relx=.5, y=150, anchor="center")
window.mainloop()
``` |
{
"source": "JohnFredwick/DSACancellationChecker-Test",
"score": 3
} |
#### File: JohnFredwick/DSACancellationChecker-Test/DSACheckerClasses.py
```python
import urllib.error
import urllib.error
import urllib.parse
import urllib.parse
import urllib.request
import urllib.request
from bs4 import BeautifulSoup
class Page:
fields = {}
url = None
connection = None
html = None # BeautifulSoup object
cookieJar = None
opener = None
response = None
def __init__(self, url, cj):
self.url = url
self.cookieJar = cj
def connect(self, agent):
print("---> Connecting to %s" % (self.url,))
self.opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(self.cookieJar))
self.opener.addheaders.append(('User-agent', agent))
if self.fields:
data = urllib.parse.urlencode(self.fields)
binary_data = data.encode('ascii')
self.response = self.opener.open(self.url, binary_data)
print("-----> Sending data:")
for c in list(self.fields.keys()):
print("-------> %s = %s" % (c, self.fields[c][:20]))
else:
self.response = self.opener.open(self.url)
self.html = BeautifulSoup(self.response.read(), "html.parser")
# save the pages for diagnostic info
# save = open(re.sub(r'\W+', '', self.html.title.string) + '.html', 'w')
# save.write(str(self.html))
# save.close()
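        # Usage sketch (hypothetical values): populating `fields` before calling
        # connect() turns the request into a POST of the urlencoded form data;
        # with no fields set, a plain GET is issued.
        #   page = Page('https://example.org/login', http.cookiejar.CookieJar())
        #   page.fields['username'] = 'licence-number-here'
        #   page.connect('Mozilla/5.0')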
```
#### File: JohnFredwick/DSACancellationChecker-Test/DSAChecker.py
```python
from datetime import datetime
import http.cookiejar
import time
import random
from DSACheckerClasses import Page
##################################################################
# #
# Update the following variables with your own personal details #
# in info.py #
# #
##################################################################
from info import licenceNumber, theoryNumber, myTestDateString
# Email sending details
from info import emailAddresses, emailUsername, emailPassword
from find_cancellations_selenium import open_web
emailSubject = "DSA Cancellations"
emailFrom = "<EMAIL>"
# Change this (at your own risk) if you don't use gmail (e.g. to hotmail/yahoo/etc smtp servers
emailSMTPserver = 'smtp.gmail.com'
##################################################################
# #
# DO NOT MODIFY ANYTHING BELOW THIS LINE #
# #
##################################################################
myTestDate = datetime.strptime(myTestDateString, '%A %d %B %Y %I:%M%p')
# time to wait between each page request (set to a reasonable number
# to avoid hammering DSA's servers)
pauseTime = 5
cookieJar = http.cookiejar.CookieJar()
#control the number of appointments shown
max_shownum = 10 #
#choose the action when find an available datetime
#0: send an email
#1: open web directly
action_choosen = 1
def isBeforeMyTest(dt):
if dt <= myTestDate:
return True
else:
return False
def sendEmail(datetimeList):
# i should probably point out i pinched this from stackoverflow or something
SMTPserver = emailSMTPserver
sender = emailFrom
destination = emailAddresses
USERNAME = emailUsername
PASSWORD = <PASSWORD>
# typical values for text_subtype are plain, html, xml
text_subtype = 'plain'
content = "Available DSA test slots at your selected test centre:\n\n"
for dt in datetimeList:
content += "* %s\n" % dt.strftime('%A %d %b %Y at %H:%M')
content += "\nChecked at [%s]\n\n" % time.strftime('%d-%m-%Y @ %H:%M')
subject = emailSubject
import sys
from smtplib import SMTP as SMTP # this invokes the secure SMTP protocol (port 465, uses SSL)
# from smtplib import SMTP # use this for standard SMTP protocol (port 25, no encryption)
from email.mime.text import MIMEText
try:
msg = MIMEText(content, text_subtype)
msg['Subject'] = subject
msg['From'] = sender # some SMTP servers will do this automatically, not all
conn = SMTP(SMTPserver, 587)
conn.set_debuglevel(False)
conn.ehlo()
conn.starttls() # Use TLS
conn.login(USERNAME, PASSWORD)
try:
conn.sendmail(sender, destination, msg.as_string())
finally:
conn.close()
except Exception as exc:
sys.exit("mail failed; %s" % str(exc)) # give a error message
soonerDates = []
baseWaitTime = 600
userAgents = [
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36',
'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36',
'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:52.0) Gecko/20100101 Firefox/52.0',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_3) AppleWebKit/602.4.8 (KHTML, like Gecko) Version/10.0.3 Safari/602.4.8',
'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36'
]
def performUpdate():
global baseWaitTime
global userAgents
global soonerDates
global max_shownum
global action_choosen
# this should point at the DSA login page
launchPage = 'https://driverpracticaltest.dvsa.gov.uk/login'
print('[%s]' % (time.strftime('%Y-%m-%d @ %H:%M'),))
print('---> Starting update...')
# use a random agent for each run through
agent = userAgents[random.randint(0, len(userAgents) - 1)]
print("---> Using agent " + agent)
launcher = Page(launchPage, cookieJar)
launcher.connect(agent)
launcher.fields['username'] = licenceNumber
launcher.fields['password'] = <PASSWORD>
# check to see if captcha
captcha = launcher.html.find('div', id='recaptcha-check')
if captcha:
# server is suspicious, back off a bit!
baseWaitTime *= 2
print('Captcha was present, increased baseline wait time to ' + str(baseWaitTime/60) + ' minutes')
# TODO: implement something to solve these or prompt you for them
return
print('')
time.sleep(pauseTime)
launcher.connect(agent)
if captcha:
print(launcher.html.find("Enter details below to access your booking"))
dateChangeURL = launcher.html.find(id="date-time-change").get('href')
# example URL: href="/manage?execution=e1s1&csrftoken=<PASSWORD>&_<PASSWORD>=<PASSWORD>"
# i am probably screwing up the POST bit on the forms
dateChangeURL = 'https://driverpracticaltest.dvsa.gov.uk' + dateChangeURL
slotPickingPage = Page(dateChangeURL, cookieJar)
slotPickingPage.fields = launcher.fields
slotPickingPage.connect(agent)
e1s2URL = slotPickingPage.html.form.get('action')
e1s2URL = 'https://driverpracticaltest.dvsa.gov.uk' + e1s2URL
datePickerPage = Page(e1s2URL, cookieJar)
datePickerPage.fields['testChoice'] = 'ASAP'
datePickerPage.fields['drivingLicenceSubmit'] = 'Continue'
datePickerPage.fields['csrftoken'] = dateChangeURL.split('=')[3]
datePickerPage.connect(agent)
# earliest available date
availableDates = []
for slot in datePickerPage.html.find_all(class_='SlotPicker-slot'):
try:
availableDates.append(datetime.strptime(slot['data-datetime-label'].strip(), '%A %d %B %Y %I:%M%p'))
except Exception as ex:
print("".join(traceback.format_exception(etype=type(ex), value=ex, tb=ex.__traceback__)))
print ('---> Available slots:')
newSoonerDates = []
#control the number of appointments shown
shownum = 0
for dt in availableDates:
# only show / send new appointments
if isBeforeMyTest(dt) and (dt not in soonerDates):
print ('-----> [CANCELLATION] %s' % (dt.strftime('%A %d %b %Y at %H:%M'),))
soonerDates.append(dt)
newSoonerDates.append(dt)
else:
shownum += 1
#control the number of appointments shown
if shownum < max_shownum:
print ('-----> %s' % (dt.strftime('%A %d %b %Y at %H:%M'),))
if len(newSoonerDates):
if action_choosen == 1:
open_web()
elif action_choosen == 0:
print('---> Sending to ' + ', '.join(emailAddresses))
sendEmail(newSoonerDates)
if baseWaitTime > 300:
# decrease the baseline wait time as this was a success
baseWaitTime = int(baseWaitTime / 2)
while True:
print('***************************************')
performUpdate()
# wait for baseline + random time so its less robotic
sleepTime = baseWaitTime + random.randint(60, 300)
print('---> Waiting for ' + str(sleepTime / 60) + ' minutes...')
time.sleep(int(sleepTime))
``` |
{
"source": "johnfrenchxyz/bluebutton-web-server",
"score": 2
} |
#### File: accounts/tests/test_create_account.py
```python
from django.test import TestCase
from django.test.client import Client
from django.contrib.auth.models import Group
from django.urls import reverse
from django.contrib.auth import get_user_model
from apps.accounts.models import UserProfile, UserIdentificationLabel
from apps.fhir.bluebutton.models import Crosswalk
from django.conf import settings
from waffle.testutils import override_switch
class CreateDeveloperAccountTestCase(TestCase):
"""
Test Developer Account Creation
"""
fixtures = ['testfixture']
@override_switch('signup', active=True)
def setUp(self):
Group.objects.create(name='BlueButton')
self.client = Client()
self.url = reverse('accounts_create_account')
# Create user self identification choices
UserIdentificationLabel.objects.get_or_create(name="Self Identification #1",
slug="ident1",
weight=1)
UserIdentificationLabel.objects.get_or_create(name="Self Identification #2",
slug="ident2",
weight=2)
@override_switch('signup', active=True)
@override_switch('login', active=True)
def test_valid_account_create(self):
"""
Create an Account Valid
"""
ident_choice = UserIdentificationLabel.objects.get(slug="ident2")
form_data = {
'email': '<EMAIL>',
'organization_name': 'transhealth',
'password1': '<PASSWORD>',
'password2': '<PASSWORD>',
'first_name': 'BamBam',
'last_name': 'Rubble',
'identification_choice': str(ident_choice.pk),
}
response = self.client.post(self.url, form_data, follow=True)
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'Please check your email')
# verify username is lowercase
User = get_user_model()
u = User.objects.get(email="<EMAIL>")
self.assertEqual(u.username, "<EMAIL>")
self.assertEqual(u.email, "<EMAIL>")
# Ensure developer account has a sample FHIR id crosswalk entry.
self.assertEqual(Crosswalk.objects.filter(user=u,
fhir_id=settings.DEFAULT_SAMPLE_FHIR_ID).exists(), True)
# verify user has identification label chosen
exist = User.objects.filter(useridentificationlabel__users=u).filter(useridentificationlabel__slug='ident2').exists()
self.assertEqual(exist, True)
@override_switch('signup', active=False)
@override_switch('login', active=True)
def test_valid_account_create_flag_off(self):
"""
Create an Account Valid
"""
ident_choice = UserIdentificationLabel.objects.get(slug="ident2")
form_data = {
'email': '<EMAIL>',
'organization_name': 'transhealth',
'password1': '<PASSWORD>',
'password2': '<PASSWORD>',
'first_name': 'BamBam',
'last_name': 'Rubble',
'identification_choice': str(ident_choice.pk),
}
response = self.client.post(self.url, form_data, follow=True)
self.assertEqual(response.status_code, 404)
@override_switch('signup', active=True)
def test_account_create_shold_fail_when_password_too_short(self):
"""
Create account should fail if password is too short
"""
ident_choice = UserIdentificationLabel.objects.get(slug="ident2")
form_data = {
'invitation_code': '1234',
'username': 'fred2',
'organization_name': 'transhealth',
'password1': 'p',
'password2': 'p',
'first_name': 'Fred',
'last_name': 'Flinstone',
'identification_choice': str(ident_choice.pk),
}
response = self.client.post(self.url, form_data, follow=True)
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'too short')
@override_switch('signup', active=True)
def test_account_create_should_fail_when_password_too_common(self):
"""
Create account should fail if password is too common
"""
ident_choice = UserIdentificationLabel.objects.get(slug="ident2")
form_data = {
'invitation_code': '1234',
'username': 'fred',
'organization_name': 'transhealth',
'password1': 'password',
'password2': 'password',
'first_name': 'Fred',
'last_name': 'Flinstone',
'identification_choice': str(ident_choice.pk),
}
response = self.client.post(self.url, form_data, follow=True)
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'too common')
@override_switch('signup', active=True)
def test_valid_account_create_is_a_developer(self):
"""
Account created on the site is a developer and not a beneficiary ('benny')
"""
ident_choice = UserIdentificationLabel.objects.get(slug="ident1")
form_data = {
'invitation_code': '1234',
'email': '<EMAIL>',
'organization_name': 'transhealth',
'password1': '<PASSWORD>',
'password2': '<PASSWORD>',
'first_name': 'Hank',
'last_name': 'Flinstone',
'identification_choice': str(ident_choice.pk),
}
self.client.post(self.url, form_data, follow=True)
up = UserProfile.objects.get(user__email='<EMAIL>')
self.assertEqual(up.user_type, 'DEV')
```
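The tests above hinge on the django-waffle `signup` switch. As a point of reference, here is a minimal sketch of a view gated the same way; the view is hypothetical and not taken from this codebase, but `waffle_switch` raising HTTP 404 while the named switch is inactive is what the flag-off test above relies on.
```python
# Hypothetical example of a waffle-gated view; assumes django-waffle is
# installed and a Switch named 'signup' exists. When the switch is off, the
# decorator short-circuits the view with an HTTP 404.
from django.http import HttpResponse
from waffle.decorators import waffle_switch


@waffle_switch('signup')
def create_account_view(request):
    return HttpResponse("signup form")
```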
#### File: apps/dot_ext/models.py
```python
import sys
import hashlib
import logging
import uuid
from datetime import datetime
from urllib.parse import urlparse
from django.utils.dateparse import parse_duration
from django.urls import reverse
from django.db import models
from django.db.models.signals import post_delete
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth import get_user_model
from django.core.validators import RegexValidator
from apps.capabilities.models import ProtectedCapability
from oauth2_provider.models import (
AbstractApplication,
)
from oauth2_provider.settings import oauth2_settings
from django.conf import settings
from django.template.defaultfilters import truncatechars
from django.core.files.storage import default_storage
logger = logging.getLogger('hhs_server.%s' % __name__)
class Application(AbstractApplication):
scope = models.ManyToManyField(ProtectedCapability)
agree = models.BooleanField(default=False)
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
op_tos_uri = models.CharField(default=settings.TOS_URI, blank=True, max_length=512)
op_policy_uri = models.CharField(default="", blank=True, max_length=512)
# client_uri is deprecated but will continue to be referenced until it can be removed safely
client_uri = models.URLField(default="", blank=True, null=True, max_length=512, verbose_name="Client URI",
help_text="This is typically a home/download website for the application. "
"For example, https://www.example.org or http://www.example.org .")
website_uri = models.URLField(default="", blank=True, null=True, max_length=512, verbose_name="Website URI",
help_text="This is typically a home/download website for the application. "
"For example, https://www.example.org or http://www.example.org .")
help_text = _("Multiple redirect URIs can"
" be separated by a space or on"
" a separate line. Read more"
" about implementing redirect"
" URIs in our documentation.")
redirect_uris = models.TextField(help_text=help_text,
blank=True)
logo_uri = models.CharField(
default="", blank=True, max_length=512, verbose_name="Logo URI")
tos_uri = models.CharField(
default="", blank=True, max_length=512, verbose_name="Client's Terms of Service URI")
policy_uri = models.CharField(default="", blank=True, max_length=512, verbose_name="Client's Policy URI",
help_text="This can be a model privacy notice or other policy document.")
software_id = models.CharField(default="", blank=True, max_length=128,
help_text="A unique identifier for an application defined by its creator.")
contacts = models.TextField(default="", blank=True, max_length=512,
verbose_name="Client's Contacts",
help_text="This is typically an email")
support_email = models.EmailField(blank=True, null=True)
# FROM https://stackoverflow.com/questions/19130942/whats-the-best-way-to-store-phone-number-in-django-models
phone_regex = RegexValidator(
regex=r'^\+?1?\d{9,15}$',
message="Phone number must be entered in the format: '+999999999'. Up to 15 digits allowed.")
support_phone_number = models.CharField(
validators=[phone_regex],
max_length=17,
blank=True,
null=True)
description = models.TextField(default="", blank=True, null=True, verbose_name="Application Description",
help_text="This is plain-text up to 1000 characters in length.")
active = models.BooleanField(default=True)
first_active = models.DateTimeField(blank=True, null=True)
last_active = models.DateTimeField(blank=True, null=True)
def scopes(self):
scope_list = []
for s in self.scope.all():
scope_list.append(s.slug)
return " ".join(scope_list).strip()
def is_valid(self, scopes=None):
return self.active and self.allow_scopes(scopes)
def allow_scopes(self, scopes):
"""
Check if the token allows the provided scopes
:param scopes: An iterable containing the scopes to check
"""
if not scopes:
return True
provided_scopes = set(self.scopes().split())
resource_scopes = set(scopes)
return resource_scopes.issubset(provided_scopes)
def get_absolute_url(self):
return reverse('oauth2_provider:detail', args=[str(self.id)])
def get_allowed_schemes(self):
allowed_schemes = []
redirect_uris = self.redirect_uris.strip().split()
for uri in redirect_uris:
scheme = urlparse(uri).scheme
allowed_schemes.append(scheme)
return allowed_schemes
# Save a file to application media storage
def store_media_file(self, file, filename):
uri = None
if file:
if getattr(file, 'name', False):
file_path = "applications/" + hashlib.sha256(str(self.pk).encode('utf-8')).hexdigest() + "/" + filename
if default_storage.exists(file_path):
default_storage.delete(file_path)
default_storage.save(file_path, file)
if default_storage.exists(file_path):
uri = settings.MEDIA_URL + file_path
return uri
class ApplicationLabel(models.Model):
name = models.CharField(max_length=255, unique=True)
slug = models.SlugField(db_index=True, unique=True)
description = models.TextField()
applications = models.ManyToManyField(Application, null=True, blank=True)
@property
def short_description(self):
return truncatechars(self.description, 80)
class ExpiresInManager(models.Manager):
"""
Provide a `set_expires_in` and `get_expires_in` methods that
work as a cache. The key is generated from `client_id` and `user_id`.
"""
@staticmethod
def make_key(client_id, user_id):
"""
Generate a unique key using client_id and user_id args.
"""
arg = '%s_%s' % (client_id, user_id)
# Python 3 - avoid TypeError: Unicode-objects
# must be encoded before hashing
if sys.version_info > (3, 2):
arg = arg.encode('utf-8')
return hashlib.sha256(arg).hexdigest()
def set_expires_in(self, client_id, user_id, expires_in):
"""
Set the expires_in value for the key generated with
client_id and user_id.
"""
key = self.make_key(client_id, user_id)
instance, _ = self.update_or_create(
key=key,
defaults={'expires_in': expires_in})
def get_expires_in(self, client_id, user_id):
"""
Return the expires_in value for the key generated with
client_id and user_id. Returns None when the key is not
found.
"""
key = self.make_key(client_id, user_id)
try:
return self.get(key=key).expires_in
except self.model.DoesNotExist:
return None
class Approval(models.Model):
uuid = models.UUIDField(
primary_key=True,
default=uuid.uuid4,
editable=False)
user = models.ForeignKey(
get_user_model(),
on_delete=models.CASCADE)
application = models.ForeignKey(
Application,
null=True,
on_delete=models.CASCADE)
created_at = models.DateTimeField(auto_now_add=True)
@property
def expired(self):
return (
self.created_at + parse_duration(
# Default to 600 seconds, 10 min
getattr(settings, 'AUTHORIZATION_EXPIRATION', "600"))).timestamp() < datetime.now().timestamp()
class ArchivedToken(models.Model):
id = models.BigAutoField(primary_key=True)
user = models.ForeignKey(
settings.AUTH_USER_MODEL, on_delete=models.CASCADE, blank=True, null=True, db_constraint=False,
related_name="%(app_label)s_%(class)s"
)
token = models.CharField(max_length=255, unique=True, )
application = models.ForeignKey(
oauth2_settings.APPLICATION_MODEL, on_delete=models.CASCADE, blank=True, null=True, db_constraint=False,
)
expires = models.DateTimeField()
scope = models.TextField(blank=True)
created = models.DateTimeField()
updated = models.DateTimeField()
archived_at = models.DateTimeField(auto_now_add=True)
class ExpiresIn(models.Model):
"""
This model is used to save the expires_in value selected
in the allow form view. Then it can be queried when the token is
issued to the user.
"""
key = models.CharField(max_length=64, unique=True)
expires_in = models.IntegerField()
objects = ExpiresInManager()
def archive_token(sender, instance=None, **kwargs):
tkn = instance
ArchivedToken.objects.get_or_create(
user=tkn.user,
token=tkn.token,
application=tkn.application,
expires=tkn.expires,
scope=tkn.scope,
created=tkn.created,
updated=tkn.updated,
)
post_delete.connect(archive_token, sender='oauth2_provider.AccessToken')
```
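`ExpiresInManager` above is a small cache keyed on a SHA-256 of `client_id` and `user_id`. A hedged usage sketch, assuming a configured Django environment for this project (the ids below are illustrative):
```python
# Store and read back an expires_in choice for a client/user pair via the
# ExpiresIn model's custom manager; an unknown pair returns None.
from apps.dot_ext.models import ExpiresIn

ExpiresIn.objects.set_expires_in(client_id="abc123", user_id=42, expires_in=3600)
assert ExpiresIn.objects.get_expires_in(client_id="abc123", user_id=42) == 3600
assert ExpiresIn.objects.get_expires_in(client_id="missing", user_id=0) is None
```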
#### File: apps/dot_ext/oauth2_backends.py
```python
from oauth2_provider.oauth2_backends import OAuthLibCore
import json
from ..fhir.bluebutton.models import Crosswalk
from oauth2_provider.models import AccessToken
class OAuthLibSMARTonFHIR(OAuthLibCore):
def create_token_response(self, request):
"""
Add items to the access_token response to comply with
SMART on FHIR Authorization
http://docs.smarthealthit.org/authorization/
"""
uri, headers, body, status = super(OAuthLibSMARTonFHIR, self).create_token_response(request)
# cribbed from
# https://github.com/evonove/django-oauth-toolkit/blob/2cd1f0dccadb8e74919a059d9b4985f9ecb1d59f/oauth2_provider/views/base.py#L192
if status == 200:
fhir_body = json.loads(body)
token = AccessToken.objects.get(token=fhir_body.get("access_token"))
if Crosswalk.objects.filter(user=token.user).exists():
fhir_body = json.loads(body)
cw = Crosswalk.objects.get(user=token.user)
fhir_body["patient"] = cw.fhir_id
body = json.dumps(fhir_body)
return uri, headers, body, status
```
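The token-response hook above only splices the crosswalked FHIR patient id into the standard OAuth2 token JSON. A self-contained illustration of that body transformation (all values fabricated):
```python
# Stand-alone illustration of the body rewrite performed above; the patient id
# would come from Crosswalk.fhir_id in the real flow.
import json

body = json.dumps({"access_token": "abc123", "token_type": "Bearer", "expires_in": 36000})
fhir_body = json.loads(body)
fhir_body["patient"] = "-20140000008325"  # fabricated sample value
body = json.dumps(fhir_body)
print(body)
```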
#### File: apps/logging/signals.py
```python
import logging
from oauth2_provider.signals import app_authorized
from django.db.models.signals import (
post_delete,
)
from apps.fhir.bluebutton.signals import (
pre_fetch,
post_fetch
)
from apps.mymedicare_cb.signals import post_sls
from .serializers import (
Token,
DataAccessGrantSerializer,
FHIRRequest,
FHIRResponse,
SLSResponse,
)
token_logger = logging.getLogger('audit.authorization.token')
sls_logger = logging.getLogger('audit.authorization.sls')
fhir_logger = logging.getLogger('audit.data.fhir')
def handle_app_authorized(sender, request, token, **kwargs):
token_logger.info(Token(token, action="authorized"))
def token_removed(sender, instance=None, **kwargs):
token_logger.info(Token(instance, action="revoked"))
def log_grant_removed(sender, instance=None, **kwargs):
token_logger.info(DataAccessGrantSerializer(instance, action="revoked"))
def fetching_data(sender, request=None, **kwargs):
fhir_logger.info(FHIRRequest(request))
def fetched_data(sender, request=None, response=None, **kwargs):
fhir_logger.info(FHIRResponse(response))
def sls_hook(sender, response=None, **kwargs):
sls_logger.info(SLSResponse(response))
app_authorized.connect(handle_app_authorized)
post_delete.connect(token_removed, sender='oauth2_provider.AccessToken')
post_delete.connect(log_grant_removed, sender='authorization.DataAccessGrant')
pre_fetch.connect(fetching_data)
post_fetch.connect(fetched_data)
post_sls.connect(sls_hook)
```
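The receivers above fan audit events out to dedicated loggers. If a test needs to bulk-delete tokens without emitting 'revoked' audit entries, one option is to detach and re-attach the receiver around the operation; a sketch, assuming the app registry has already loaded this signals module:
```python
# Temporarily disconnect the token_removed receiver; re-attach it afterwards.
from django.db.models.signals import post_delete
from oauth2_provider.models import AccessToken

from apps.logging.signals import token_removed

post_delete.disconnect(token_removed, sender=AccessToken)
try:
    pass  # e.g. AccessToken.objects.all().delete() without audit noise
finally:
    post_delete.connect(token_removed, sender=AccessToken)
```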
#### File: bluebutton-web-server/apps/test.py
```python
import json
from django.contrib.auth.models import User, Group
from django.urls import reverse
from django.test import TestCase
from django.utils.text import slugify
from django.conf import settings
from apps.fhir.bluebutton.utils import get_resourcerouter
from apps.fhir.bluebutton.models import Crosswalk
from apps.capabilities.models import ProtectedCapability
from apps.dot_ext.models import Application
class BaseApiTest(TestCase):
"""
This class contains some helper methods useful to test API endpoints
protected with oauth2 using DOT.
"""
def _create_user(self, username, password, **extra_fields):
"""
Helper method that creates a user instance
with `username` and `password` set.
"""
user = User.objects.create_user(username, password=password, **extra_fields)
return user
def _create_group(self, name):
"""
Helper method that creates a group instance
with `name`.
"""
group, _ = Group.objects.get_or_create(name=name)
return group
def _create_application(self, name, client_type=None, grant_type=None,
capability=None, user=None, **kwargs):
"""
Helper method that creates an application instance
with `name`, `client_type` and `grant_type` and `capability`.
The default client_type is 'public'.
The default grant_type is 'password'.
"""
client_type = client_type or Application.CLIENT_PUBLIC
grant_type = grant_type or Application.GRANT_PASSWORD
# This is the user to whom the application is bound.
dev_user = user or self._create_user('dev', '123456')
application = Application.objects.create(
name=name, user=dev_user, client_type=client_type,
authorization_grant_type=grant_type, **kwargs)
# add capability
if capability:
application.scope.add(capability)
return application
def _create_capability(self, name, urls, group=None, default=True):
"""
Helper method that creates a ProtectedCapability instance
that controls the access for the set of `urls`.
"""
group = group or self._create_group('test')
capability = ProtectedCapability.objects.create(
default=default,
title=name,
slug=slugify(name),
protected_resources=json.dumps(urls),
group=group)
return capability
def _get_access_token(self, username, password, application=None, **extra_fields):
"""
Helper method that creates an access_token using the password grant.
"""
# Create an application that supports password grant.
application = application or self._create_application('test')
data = {
'grant_type': 'password',
'username': username,
'password': password,
'client_id': application.client_id,
}
data.update(extra_fields)
# Request the access token
response = self.client.post(reverse('oauth2_provider:token'), data=data)
self.assertEqual(response.status_code, 200)
# Unpack the response and return the token string
content = json.loads(response.content.decode("utf-8"))
return content['access_token']
def create_token(self, first_name, last_name):
passwd = '<PASSWORD>'
user = self._create_user(first_name,
passwd,
first_name=first_name,
last_name=last_name,
email="%<EMAIL>" % (first_name, last_name))
Crosswalk.objects.get_or_create(user=user,
fhir_id=settings.DEFAULT_SAMPLE_FHIR_ID,
fhir_source=get_resourcerouter())
# create an OAuth2 application and add capabilities
application = self._create_application("%s_%s_test" % (first_name, last_name), user=user)
application.scope.add(self.read_capability, self.write_capability)
# get an access token for the newly created user
return self._get_access_token(first_name,
passwd,
application)
def create_token_no_fhir(self, first_name, last_name):
passwd = '<PASSWORD>'
user = self._create_user(first_name,
passwd,
first_name=first_name,
last_name=last_name,
email="<EMAIL>" % (first_name, last_name))
Crosswalk.objects.get_or_create(user=user,
user_id_hash="139e178537ed3bc486e6a7195a47a82a2cd6f46e911660fe9775f6e0dd3f1130",
fhir_source=get_resourcerouter())
# create an OAuth2 application and add capabilities
application = self._create_application("%s_%s_test" % (first_name, last_name), user=user)
application.scope.add(self.read_capability, self.write_capability)
# get an access token for the newly created user
return self._get_access_token(first_name,
passwd,
application)
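# Usage sketch, not from the original file: a concrete test case built on
# BaseApiTest. The endpoint name 'some_endpoint' is hypothetical, and the
# read/write capabilities that create_token expects are prepared in setUp().
#
# class ExampleApiTest(BaseApiTest):
#     def setUp(self):
#         self.read_capability = self._create_capability('Read', [])
#         self.write_capability = self._create_capability('Write', [])
#
#     def test_endpoint_with_token(self):
#         token = self.create_token('fred', 'flintstone')
#         response = self.client.get(reverse('some_endpoint'),
#                                    HTTP_AUTHORIZATION='Bearer %s' % token)
#         self.assertEqual(response.status_code, 200)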
``` |
{
"source": "JohnFunkCode/flask-pi-iot",
"score": 3
} |
#### File: library/pi_iot_data/pi_iot_data.py
```python
class PiIOTData:
'''A class to represent the data we collect from the raspberry Pis'''
_pi_readings = list()
def get_all_readings(self):
return self._pi_readings
def add_reading(self, d):
# TODO: validate the reading dictionary before appending it
self._pi_readings.append(d)
def get_number_of_readings(self):
return len(self._pi_readings)
def __init__(self):
self._pi_readings = list()  # bind to the instance; a bare local assignment here had no effect
if __name__ == "__main__":
aPD=PiIOTData()
l = aPD.get_all_readings()
for reading in l:
serial = reading['serial-no']
timestamp = reading['timestamp']
x = reading['x']
y = reading['y']
z = reading['z']
print("Serial Number:{0}\tTimeStamp:{1}\tX:{2}\tY:{3}\tZ:{4}".format(serial, timestamp, x, y, z))
```
#### File: library/pi_iot_data/pi_iot_data_test.py
```python
import unittest
import pi_iot_data as pd
import time
import datetime
import random
class test_pi_iot_data(unittest.TestCase):
def setUp(self):
return
def test_add_reading(self):
'''add two dummy readings and verify the reading count increments'''
aPD = pd.PiIOTData()
#create a dummy reading and add it
ts = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')
x=random.random()
y=random.random()
z=random.random()
d = {'serial-no': '12345', 'timestamp': ts, 'x': x, 'y': y, 'z': z}
aPD.add_reading(d)
self.assertTrue(aPD.get_number_of_readings()==1)
#create another dummy reading and add it
ts = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')
x=random.random()
y=random.random()
z=random.random()
d = {'serial-no': '23456', 'timestamp': ts, 'x': x, 'y': y, 'z': z}
aPD.add_reading(d)
self.assertTrue(aPD.get_number_of_readings()==2)
def test_get_all_readings(self):
'''fetch all readings and print them without error'''
aPD = pd.PiIOTData()
l=aPD.get_all_readings()
for reading in l:
serial=reading['serial-no']
timestamp=reading['timestamp']
x=reading['x']
y=reading['y']
z=reading['z']
print("Serial Number:{0}\tTimeStamp:{1}\tX:{2}\tY:{3}\tZ:{4}".format(serial,timestamp,x,y,z))
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "JohnFunkCode/FlaskUploadToS3",
"score": 2
} |
#### File: JohnFunkCode/FlaskUploadToS3/flask-s3-upload.py
```python
from flask import Flask, render_template, request, redirect
from werkzeug.utils import secure_filename
import os
import boto3
from cfenv import AppEnv
env=AppEnv()
print env.name
print env.port
s3bucket = env.get_service(name='s3bucket')
print s3bucket.credentials
app = Flask(__name__)
#app.config.from_object("config.config")
#from .helpers import *
ALLOWED_EXTENSIONS = set(['txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif'])
@app.route("/")
def index():
# print "S3_BUCKET: " + app.config["S3_BUCKET"]
# print "S3_ACCESS_KEY: " + app.config["S3_ACCESS_KEY"]
# print "S3_SECRET_KEY: " + app.config["S3_SECRET_KEY"]
# print "S3_LOCATION: " + app.config["S3_LOCATION"]
# print s3
return render_template("index.html")
def upload_file_to_s3(file, bucket_name, acl="public-read"):
"""
Docs: http://boto3.readthedocs.io/en/latest/guide/s3.html
"""
try:
s3.upload_fileobj(
file,
bucket_name,
file.filename,
ExtraArgs={
"ACL": acl,
"ContentType": file.content_type
}
)
except Exception as e:
print("Something Bad Happened: ", e)
print file
print bucket_name
print acl
print file.content_type
return e
return "{}{}".format(S3_LOCATION, file.filename)
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
@app.route("/", methods=["POST"])
def upload_file():
# A: make sure the form actually posted a file under the 'user_file' key
if "user_file" not in request.files:
return "No user_file key in request.files"
# B: grab the uploaded FileStorage object
file = request.files["user_file"]
"""
These attributes are also available
file.filename # The actual name of the file
file.content_type
file.content_length
file.mimetype
"""
# C: reject submissions where no file was selected
if file.filename == "":
return "Please select a file"
# D: sanitize the filename and upload the allowed file to S3
if file and allowed_file(file.filename):
file.filename = secure_filename(file.filename)
# output = upload_file_to_s3(file, app.config["S3_BUCKET"])
output = upload_file_to_s3(file, S3_BUCKET)
return '<html><a href="' + str(output) + '">'+ str(output) +'</a></html>'
else:
return redirect("/")
if __name__ == '__main__':
env = AppEnv()
print "env.name: "+ env.name
print "env.port: "+ str(env.port)
s3bucket = env.get_service(name='s3bucket')
creds = s3bucket.credentials
print "env.creds.bucket: "+creds['S3_BUCKET']
print "env.creds.ACCESS_KEY: "+creds['S3_ACCESS_KEY']
print "env.creds.SECRET_KEY: "+creds['S3_SECRET_KEY']
S3_BUCKET=creds['S3_BUCKET']
S3_ACCESS_KEY=creds['S3_ACCESS_KEY']
S3_SECRET_KEY=creds['S3_SECRET_KEY']
S3_LOCATION = 'http://{}.s3.amazonaws.com/'.format(S3_BUCKET)
print "S3_BUCKET: " + S3_BUCKET
print "S3_ACCESS_KEY: " + S3_ACCESS_KEY
print "S3_SECRET_KEY: " + S3_SECRET_KEY
print "S3_LOCATION: " + S3_LOCATION
s3 = boto3.client(
's3',
aws_access_key_id=S3_ACCESS_KEY,
aws_secret_access_key=S3_SECRET_KEY,
)
port = int(os.getenv("PORT"))
app.run(host='0.0.0.0', port=port)
``` |
{
"source": "JohnFunkCode/getconfig",
"score": 2
} |
#### File: getconfig/importconfig/import_config.py
```python
from importconfig import config as cfg
class ImportConfig:
def read_imported_config_into_dictionary(self):
return cfg.configdict
``` |
{
"source": "John-F-Wagstaff/vv_hgvs",
"score": 2
} |
#### File: vv_hgvs/tests/test_hgvs_location.py
```python
from __future__ import absolute_import, division, print_function, unicode_literals
import unittest
import pytest
from vvhgvs.exceptions import HGVSError, HGVSUnsupportedOperationError
from vvhgvs.enums import Datum
import vvhgvs.location
import vvhgvs.parser
@pytest.mark.quick
@pytest.mark.models
class Test_SimplePosition(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.hp = vvhgvs.parser.Parser()
def test_success(self):
self.assertEqual(str(vvhgvs.location.SimplePosition(5)), "5")
self.assertEqual(str(vvhgvs.location.SimplePosition(5, uncertain=True)), "(5)")
self.assertEqual(str(vvhgvs.location.SimplePosition(None)), "?")
def test_failure(self):
with self.assertRaises(AssertionError):
self.assertEqual(vvhgvs.location.SimplePosition(-1), "SHOULD FAIL")
def test_simple_subtraction(self):
self.assertEqual(vvhgvs.location.SimplePosition(5) - vvhgvs.location.SimplePosition(3), 2)
def test_simple_comparision(self):
var = self.hp.parse_hgvs_variant("NC_000007.13:g.36561662_36561683del")
self.assertFalse(var.posedit.pos.start == var.posedit.pos.end)
self.assertTrue(var.posedit.pos.start < var.posedit.pos.end)
self.assertTrue(var.posedit.pos.start <= var.posedit.pos.end)
self.assertFalse(var.posedit.pos.start > var.posedit.pos.end)
self.assertFalse(var.posedit.pos.start >= var.posedit.pos.end)
var = self.hp.parse_hgvs_variant("NC_000007.13:g.36561662C>T")
self.assertTrue(var.posedit.pos.start == var.posedit.pos.end)
self.assertFalse(var.posedit.pos.start < var.posedit.pos.end)
self.assertTrue(var.posedit.pos.start <= var.posedit.pos.end)
self.assertFalse(var.posedit.pos.start > var.posedit.pos.end)
self.assertTrue(var.posedit.pos.start >= var.posedit.pos.end)
@pytest.mark.quick
class Test_BaseOffsetPosition(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.hp = vvhgvs.parser.Parser()
def test_success(self):
# r.5
cdsp = vvhgvs.location.BaseOffsetPosition(5)
self.assertEqual(cdsp.datum, Datum.SEQ_START)
self.assertEqual(cdsp.base, 5)
self.assertEqual(cdsp.offset, 0)
self.assertEqual(str(cdsp), "5")
self.assertFalse(cdsp.is_intronic)
#r.5+6
cdsp.offset = 6
self.assertEqual(str(cdsp), "5+6")
self.assertTrue(cdsp.is_intronic)
#r.5+?
cdsp.offset = None
self.assertEqual(str(cdsp), "5+?")
self.assertTrue(cdsp.is_intronic)
#r.(5+?)
cdsp.uncertain = True
self.assertEqual(str(cdsp), "(5+?)")
# c.*5
cdsp = vvhgvs.location.BaseOffsetPosition(5, datum=Datum.CDS_END)
self.assertEqual(cdsp.datum, Datum.CDS_END)
self.assertEqual(cdsp.base, 5)
self.assertEqual(cdsp.offset, 0)
self.assertEqual(str(cdsp), "*5")
cdsp.uncertain = True
self.assertEqual(str(cdsp), "(*5)")
cdsp.offset = 7
self.assertEqual(str(cdsp), "(*5+7)")
def test_baseoffset_subtraction(self):
v30 = vvhgvs.location.BaseOffsetPosition(3, 0)
v50 = vvhgvs.location.BaseOffsetPosition(5, 0)
v52 = vvhgvs.location.BaseOffsetPosition(5, 2)
v54 = vvhgvs.location.BaseOffsetPosition(5, 4)
self.assertEqual(v50 - v30, 2)
with self.assertRaises(HGVSError):
_ = v54 - v30
def test_baseoffset_comparision(self):
var = self.hp.parse_hgvs_variant("NM_000030.2:c.669_680del")
self.assertFalse(var.posedit.pos.start == var.posedit.pos.end)
self.assertTrue(var.posedit.pos.start < var.posedit.pos.end)
self.assertTrue(var.posedit.pos.start <= var.posedit.pos.end)
self.assertFalse(var.posedit.pos.start > var.posedit.pos.end)
self.assertFalse(var.posedit.pos.start >= var.posedit.pos.end)
var = self.hp.parse_hgvs_variant("NM_000030.2:c.679_680+2del")
self.assertFalse(var.posedit.pos.start == var.posedit.pos.end)
self.assertTrue(var.posedit.pos.start < var.posedit.pos.end)
self.assertTrue(var.posedit.pos.start <= var.posedit.pos.end)
self.assertFalse(var.posedit.pos.start > var.posedit.pos.end)
self.assertFalse(var.posedit.pos.start >= var.posedit.pos.end)
var = self.hp.parse_hgvs_variant("NM_000030.2:c.-6_680+2del")
self.assertFalse(var.posedit.pos.start == var.posedit.pos.end)
self.assertTrue(var.posedit.pos.start < var.posedit.pos.end)
self.assertTrue(var.posedit.pos.start <= var.posedit.pos.end)
self.assertFalse(var.posedit.pos.start > var.posedit.pos.end)
self.assertFalse(var.posedit.pos.start >= var.posedit.pos.end)
var = self.hp.parse_hgvs_variant("NM_000030.2:c.680+2_680+10del")
self.assertFalse(var.posedit.pos.start == var.posedit.pos.end)
self.assertTrue(var.posedit.pos.start < var.posedit.pos.end)
self.assertTrue(var.posedit.pos.start <= var.posedit.pos.end)
self.assertFalse(var.posedit.pos.start > var.posedit.pos.end)
self.assertFalse(var.posedit.pos.start >= var.posedit.pos.end)
var = self.hp.parse_hgvs_variant("NM_000030.2:c.680+2_*82del")
self.assertFalse(var.posedit.pos.start == var.posedit.pos.end)
self.assertTrue(var.posedit.pos.start < var.posedit.pos.end)
self.assertTrue(var.posedit.pos.start <= var.posedit.pos.end)
self.assertFalse(var.posedit.pos.start > var.posedit.pos.end)
self.assertFalse(var.posedit.pos.start >= var.posedit.pos.end)
var = self.hp.parse_hgvs_variant("NM_000030.2:c.-12_*82del")
self.assertFalse(var.posedit.pos.start == var.posedit.pos.end)
self.assertTrue(var.posedit.pos.start < var.posedit.pos.end)
self.assertTrue(var.posedit.pos.start <= var.posedit.pos.end)
self.assertFalse(var.posedit.pos.start > var.posedit.pos.end)
self.assertFalse(var.posedit.pos.start >= var.posedit.pos.end)
var = self.hp.parse_hgvs_variant("NM_000030.2:c.680+2_681del")
self.assertFalse(var.posedit.pos.start == var.posedit.pos.end)
self.assertTrue(var.posedit.pos.start < var.posedit.pos.end)
self.assertTrue(var.posedit.pos.start <= var.posedit.pos.end)
self.assertFalse(var.posedit.pos.start > var.posedit.pos.end)
self.assertFalse(var.posedit.pos.start >= var.posedit.pos.end)
var = self.hp.parse_hgvs_variant("NM_000030.2:c.680+2_681-32del")
with self.assertRaises(HGVSUnsupportedOperationError):
var.posedit.pos.start < var.posedit.pos.end
@pytest.mark.quick
class Test_AAPosition(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.hp = vvhgvs.parser.Parser()
def test_AAPosition(self):
ap = vvhgvs.location.AAPosition(15, "S")
self.assertEqual(ap.pos, 15)
self.assertEqual(str(ap), "Ser15")
def test_aaposition_subtraction(self):
l1 = vvhgvs.location.AAPosition(15, 'S')
l2 = vvhgvs.location.AAPosition(20, 'S')
self.assertEqual(l2 - l1, 5)
def test_aaposition_comparision(self):
var = self.hp.parse_hgvs_variant("NP_000042.3:p.His1082_Val1085delinsLeuHisGlnAla")
self.assertTrue(var.posedit.pos.start < var.posedit.pos.end)
self.assertFalse(var.posedit.pos.start > var.posedit.pos.end)
var = self.hp.parse_hgvs_variant("NP_000042.3:p.His1082ArgfsTer2")
self.assertFalse(var.posedit.pos.start < var.posedit.pos.end)
self.assertFalse(var.posedit.pos.start > var.posedit.pos.end)
@pytest.mark.quick
class Test_Interval(unittest.TestCase):
def test_Interval(self):
ival = vvhgvs.location.Interval(
vvhgvs.location.BaseOffsetPosition(base=12, offset=+34), vvhgvs.location.BaseOffsetPosition(
base=56, offset=-78))
self.assertEqual(ival.start.base, 12)
self.assertEqual(ival.start.offset, 34)
self.assertEqual(ival.end.base, 56)
self.assertEqual(ival.end.offset, -78)
self.assertEqual(str(ival), "12+34_56-78")
def test_length(self):
ival = vvhgvs.location.Interval(
vvhgvs.location.BaseOffsetPosition(base=12, offset=0), vvhgvs.location.BaseOffsetPosition(base=50, offset=0))
self.assertEqual(ival._length(), 39)
if __name__ == "__main__":
unittest.main()
# <LICENSE>
# Copyright 2018 HGVS Contributors (https://github.com/biocommons/hgvs)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# </LICENSE>
```
#### File: vvhgvs/utils/context.py
```python
from __future__ import absolute_import, division, print_function, unicode_literals
import re
from bioutils.sequences import complement
from ..location import Interval, SimplePosition
from six.moves import range
def full_house(am, var, tx_ac=None):
if var.type == 'g':
var_g = var
if tx_ac is None:
rtx = am.relevant_transcripts(var)
if len(rtx) == 0:
raise RuntimeError("no relevant transcripts for {var.ac}".format(var=var))
if len(rtx) > 1:
raise RuntimeError("{n} relevant transcripts for {var.ac}; you need to pick one".format(
n=len(rtx), var=var))
tx_ac = rtx[0]
var_n = am.g_to_n(var_g, tx_ac)
var_c = am.n_to_c(var_n)
elif var.type == 'n':
var_n = var
var_g = am.n_to_g(var_n)
var_c = am.n_to_c(var_n)
elif var.type == 'c':
var_c = var
var_g = am.c_to_g(var_c)
var_n = am.c_to_n(var_c)
var_p = am.c_to_p(var_c)
return {'g': var_g, 'c': var_c, 'n': var_n, 'p': var_p}
# def variant_context(am, var, margin=20):
# span = _span(var, margin)
# span_g = _ival_to_span(fh['g'])
# span_g[0] -= margin
# span_g[1] += margin
# return '\n'.join([
# seq_line_fmt(var=var,
# span=span,
# content=am.hdp.get_seq(var.ac, *span),
# post=''), pointer_line(var, span)
# ])
def variant_context_w_alignment(am, var, margin=20, tx_ac=None):
"""This module is experimental. It requires the uta_align package from pypi."""
from uta_align.align.algorithms import align, cigar_alignment
fh = full_house(am, var, tx_ac=tx_ac)
tm = am._fetch_AlignmentMapper(fh['n'].ac, fh['g'].ac, am.alt_aln_method)
strand = tm.strand
span_g = _ival_to_span(fh['g'].posedit.pos)
span_g = (span_g[0] - margin, span_g[1] + margin)
ival_g = Interval(SimplePosition(span_g[0]), SimplePosition(span_g[1]))
ival_n = tm.g_to_n(ival_g)
assert ival_n.start.offset == 0 and ival_n.end.offset == 0, "limited to coding variants"
span_n = _ival_to_span(ival_n)
ival_c = tm.g_to_c(ival_g)
span_c = _ival_to_span(ival_c)
seq_gt = am.hdp.get_seq(fh['g'].ac, span_g[0] - 1, span_g[1])
seq_gb = complement(seq_gt)
seq_n = am.hdp.get_seq(fh['n'].ac, span_n[0] - 1, span_n[1])
if strand == 1:
a = align(bytes(seq_gt), bytes(seq_n), b'global', extended_cigar=True)
else:
seq_n = ''.join(reversed(seq_n))
a = align(bytes(seq_gb), bytes(seq_n), b'global', extended_cigar=True)
aseq_gt, _ = cigar_alignment(seq_gt, a.query, a.cigar, hide_match=False)
aseq_gb, aseq_n = cigar_alignment(seq_gb, a.query, a.cigar, hide_match=False)
aln_str = _reformat_aln_str(cigar_alignment(a.ref, a.query, a.cigar, hide_match=True)[1])
s_dir = '>' if strand == 1 else '<'
lines = [
[
1,
0,
seq_line_fmt(var=fh['c'], span=span_c if strand == 1 else list(reversed(span_c)), content='', dir=s_dir),
],
[
2,
0,
seq_line_fmt(
var=fh['n'], span=span_n if strand == 1 else list(reversed(span_n)), content=aseq_n, dir=s_dir),
],
[
3,
0,
_line_fmt.format(pre='', content=aln_str, post=a.cigar.to_string(), comment=''),
],
[
4,
1,
seq_line_fmt(var=fh['g'], span=span_g, content=aseq_gt, dir='>'),
],
[
4,
2,
seq_line_fmt(var=fh['g'], span=span_g, content=aseq_gb, dir='<'),
],
[
5,
0,
pointer_line(var=fh['g'], span=span_g),
],
]
if strand == -1:
lines.sort(key=lambda e: (-e[0], e[1]))
return '\n'.join(r[2] for r in lines)
def _ival_to_span(ival):
return (ival.start.base, ival.end.base)
def _reformat_aln_str(aln_str):
return re.sub(r'[ACGT]', ' ', aln_str.replace('.', '|'))
# pre=[ac c s] d content d post=[end] comment
_line_fmt = "{pre:>30s} {content:45s} {post} {comment}"
_pre_fmt = "{ac:12s} {type:1s} {s:10d} {dir:1s}"
_post_fmt = "{dir:1s} {e:8d}"
def seq_line_fmt(var, span, content, dir=''):
return _line_fmt.format(
pre=_pre_fmt.format(ac=var.ac, type=var.type, s=span[0], dir=dir),
content=content,
post=_post_fmt.format(dir=dir, e=span[1]),
comment=str(var))
def pointer_line(var, span):
s0 = span[0]
o = var.posedit.pos.start.base - s0
l = var.posedit.pos.end.base - var.posedit.pos.start.base + 1
if var.posedit.edit.type == 'ins':
p = ' ' * o + '><'
else:
p = ' ' * o + '*' * l
return _line_fmt.format(pre='', content=p, post='', comment=str(var))
def format_sequence(seq, start=None, end=None, group_size=3):
"""print seq from [start, end) in groups of size
3 6 9 12 15
| | | | |
2001 AAA BBB CCC DDD EEE
"""
width = 100
loc_width = 9
sep = " "
body_sep = " : "
start = start or 0
end = end or len(seq)
bw = width - loc_width - len(body_sep)
assert group_size <= bw, "group size must be less than available line width"
gpl = int((bw + len(sep)) / (group_size + len(sep))) # groups per line
gpl = int(gpl / 5) * 5 if gpl > 20 else gpl
rpl = group_size * gpl
line_fmt = "{{l:>{lw}s}}{body_sep}{{body}}".format(lw=loc_width, body_sep=body_sep)
ge_fmt = "{{ge:>{gs}}}".format(gs=group_size)
blocks = []
for ls in range(start, end, rpl):
le = ls + rpl
groups = [ge_fmt.format(ge=str(gs + group_size)[-group_size + 1:]) for gs in range(ls, le, group_size)]
blocks += [line_fmt.format(l="", body=sep.join(groups)) + "\n"]
groups = [seq[gs:min(gs + group_size, end)] for gs in range(ls, le, group_size)]
blocks += [line_fmt.format(l=str(ls + 1), body=sep.join(groups)) + "\n"]
blocks += ["\n"]
return blocks
```
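A quick way to see what `format_sequence` produces, assuming the module is importable as `vvhgvs.utils.context`; this helper is pure string formatting and needs no data provider:
```python
# Prints the sequence in rows of group_size-character groups, each row preceded
# by a ruler of group-end offsets and prefixed with its 1-based start position.
from vvhgvs.utils.context import format_sequence

seq = "ACGTACGTACGTACGTACGTACGT"
print("".join(format_sequence(seq, group_size=3)), end="")
```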
#### File: vvhgvs/utils/reftranscriptdata.py
```python
from Bio.Seq import Seq
from vvhgvs.exceptions import HGVSDataNotAvailableError
class RefTranscriptData(object):
def __init__(self, hdp, tx_ac, pro_ac):
"""helper for generating RefTranscriptData from for c_to_p"""
tx_info = hdp.get_tx_identity_info(tx_ac)
tx_seq = hdp.get_seq(tx_ac)
if tx_info is None or tx_seq is None:
raise HGVSDataNotAvailableError("Missing transcript data for accession: {}".format(tx_ac))
# use 1-based vvhgvs coords
cds_start = tx_info["cds_start_i"] + 1
cds_stop = tx_info["cds_end_i"]
# coding sequences that are not divisible by 3 are not yet supported
tx_seq_to_translate = tx_seq[cds_start - 1:cds_stop]
if len(tx_seq_to_translate) % 3 != 0:
raise NotImplementedError(
"Transcript {} is not supported because its sequence length of {} is not divisible by 3.".format(
tx_ac, len(tx_seq_to_translate)))
tx_seq_cds = Seq(tx_seq_to_translate)
protein_seq = str(tx_seq_cds.translate())
if pro_ac is None:
# get_acs... will always return at least the MD5_ accession
pro_ac = (hdp.get_pro_ac_for_tx_ac(tx_ac) or hdp.get_acs_for_protein_seq(protein_seq)[0])
self.transcript_sequence = tx_seq
self.aa_sequence = protein_seq
self.cds_start = cds_start
self.cds_stop = cds_stop
self.protein_accession = pro_ac
``` |
{
"source": "johng94/imagedemo2",
"score": 3
} |
#### File: imagedemo2/tf_dataset/pair_generator.py
```python
import os
import glob
import random
class PairGenerator(object):
person1 = 'person1'
person2 = 'person2'
label = 'same_person'
def __init__(self, lfw_path='./tf_dataset/resources' + os.path.sep + 'lfw'):
self.all_people = self.generate_all_people_dict(lfw_path)
def generate_all_people_dict(self, lfw_path):
# generates a dictionary between a person and all the photos of that person
all_people = {}
for person_folder in os.listdir(lfw_path):
person_photos = glob.glob(lfw_path + os.path.sep + person_folder + os.path.sep + '*.jpg')
all_people[person_folder] = person_photos
return all_people
def get_next_pair(self):
all_people_names = list(self.all_people.keys())
while True:
# draw a person at random
person1 = random.choice(all_people_names)
# flip a coin to decide whether we fetch a photo of the same person vs different person
same_person = random.random() > 0.5
if same_person:
person2 = person1
else:
# repeatedly pick random names until we find a different name
person2 = person1
while person2 == person1:
person2 = random.choice(all_people_names)
person1_photo = random.choice(self.all_people[person1])
person2_photo = random.choice(self.all_people[person2])
yield ({self.person1: person1_photo,
self.person2: person2_photo,
self.label: same_person})
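# Usage sketch, not from the original module: get_next_pair() never terminates,
# so take a bounded slice when inspecting pairs by hand. This assumes the
# default lfw_path directory exists and contains one folder of .jpg files per person.
if __name__ == '__main__':
    import itertools
    for pair in itertools.islice(PairGenerator().get_next_pair(), 3):
        print(pair)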
``` |
{
"source": "johnGachihi/tf-video-generators",
"score": 3
} |
#### File: tf-video-generators/tests/testpandasgenerator.py
```python
import math
import shutil
import unittest
from pathlib import Path
from unittest.mock import Mock
import albumentations as A
import numpy as np
import numpy.testing as npt
import pandas as pd
from generatorutils import GeneratorUtils
from pandasgenerator import PandasGenerator
class TestPandasGenerator(unittest.TestCase):
@classmethod
def setUpClass(cls) -> None:
cls.data_path = Path('fake_dataset')
cls.source = pd.DataFrame([[1, 'a'], [2, 'b'], [3, 'c'], [4, 'c']])
cls.nb_samples = len(cls.source.index)
generate_fake_dataset(cls.data_path, cls.source)
@classmethod
def tearDownClass(cls) -> None:
shutil.rmtree('fake_dataset')
def test_when_source_not_dataframe(self):
with self.assertRaises(TypeError):
PandasGenerator('not a DataFrame', Path('images'))
def test_when_data_path_non_existent(self):
with self.assertRaises(FileNotFoundError):
PandasGenerator(self.source, Path('non existent'))
def test__pandas_generator__labels_for_categorical_labels(self):
gen = PandasGenerator(self.source,
self.data_path)
classes = sorted(self.source.iloc[:, 1].unique())
expected = GeneratorUtils.generate_class_to_label_mapper(classes, 'categorical')
npt.assert_equal(expected, gen.class_label_map)
def test__pandas_generator__labels_for_binary_labels(self):
source = pd.DataFrame([[1, 'a'], [2, 'b']])
gen = PandasGenerator(source,
self.data_path,
labelling_strategy='binary')
classes = sorted(source.iloc[:, 1].unique())
expected = GeneratorUtils.generate_class_to_label_mapper(classes, 'binary')
npt.assert_equal(expected, gen.class_label_map)
def test__pandas_generator__yields_sample_images_correctly(self):
transformations = [A.HorizontalFlip(p=1)]
nb_frames = 5
batch_size = self.nb_samples
frame_size = (10, 10)
gen = PandasGenerator(self.source,
self.data_path,
batch_size=batch_size,
nb_frames=nb_frames,
transformations=transformations,
frame_size=frame_size)
expected = []
for i in range(1, batch_size + 1):
imgs = GeneratorUtils.get_sample_images(Path(f'fake_dataset/{i}'))
imgs = GeneratorUtils.pick_at_intervals(imgs, nb_frames, math.floor)
imgs = [GeneratorUtils.process_img(img_path, frame_size)
for img_path in imgs]
imgs = GeneratorUtils.augment(imgs, transformations)
expected.append(imgs)
expected = np.stack(expected)
sample, _ = gen.__getitem__(0)
npt.assert_equal(actual=sample, desired=expected)
def test__pandas_generator__batch_size_yielded_as_specified(self):
batch_size = self.nb_samples
gen = PandasGenerator(self.source,
self.data_path,
nb_frames=2,
batch_size=batch_size)
samples, labels = gen.__getitem__(0)
self.assertEqual(batch_size, samples.shape[0])
self.assertEqual(batch_size, labels.shape[0])
def test__pandas_generator__nb_frames_yielded_as_specified(self):
nb_frames = 2
gen = PandasGenerator(self.source,
self.data_path,
nb_frames=nb_frames,
batch_size=1)
samples, _ = gen.__getitem__(0)
self.assertEqual(nb_frames, samples.shape[1])
def test__pandas_generator__number_of_batches_yielded(self):
batch_size = 2
gen = PandasGenerator(self.source,
self.data_path,
nb_frames=5,
batch_size=batch_size)
batches = []
for samples, _ in gen:
batches.append(samples)
self.assertEqual(math.ceil(self.nb_samples / batch_size), len(batches))
def test__pandas_generator__labels(self):
classes = self.source.loc[:, 1].unique()
class_label_map = GeneratorUtils.generate_class_to_label_mapper(
classes, 'categorical')
expected = list(map(lambda c: class_label_map[c], self.source.iloc[:, 1].values))
gen = PandasGenerator(self.source,
self.data_path,
nb_frames=5,
batch_size=self.nb_samples)
_, labels = gen.__getitem__(0)
npt.assert_equal(actual=labels, desired=expected)
def test__pandas_generator__ignores_samples_that_have_less_frames_than_nb_frames(self):
gen = PandasGenerator(self.source,
self.data_path,
nb_frames=6,
batch_size=1)
batches = []
for samples, _ in gen:
batches.append(samples)
self.assertEqual(0, len(batches))
def test__pandas_generator__prints_sample_stats(self):
mock_printer = Mock()
PandasGenerator(self.source,
self.data_path,
nb_frames=5,
batch_size=1,
printer=mock_printer)
mock_printer.assert_any_call(f'Sample size: {self.nb_samples}')
for _class, count in self.source.iloc[:, 1].value_counts().items():
mock_printer.assert_any_call(f'Class {_class}: {count}')
def test__pandas_generator__returns_data_of_specified_type(self):
gen = PandasGenerator(self.source, self.data_path, nb_frames=3, dtype=np.uint32)
self.assertEqual('uint32', gen.__getitem__(0)[0].dtype)
"""
Creates fake dataset folder
Structure:
- fake_dataset
- 1
- 1.png
- 2.png
- 3.png
- 4.png
- 5.png
- 2
- 1.png
- 2.png
- 3.png
- 4.png
- 5.png
- 3
- ...
"""
def generate_fake_dataset(path: Path, labels: pd.DataFrame):
if not path.exists():
path.mkdir()
for sample_name, _ in labels.values.tolist():
sample_dir = path / str(sample_name)
if not sample_dir.exists():
sample_dir.mkdir()
for img in Path('images').iterdir():
shutil.copy(img, sample_dir)
```
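The suite above pins down the `PandasGenerator` contract: a two-column DataFrame of sample id and class, a data directory containing one folder of frame images per sample id, and the Keras `Sequence` interface. A hedged sketch of the training-side wiring; the paths, sample ids, classes and the commented-out model call are all placeholders:
```python
# Hypothetical training setup; 'dataset/' must contain clip_001/ and clip_002/
# folders of frame images for this to run.
from pathlib import Path

import pandas as pd

from pandasgenerator import PandasGenerator

source = pd.DataFrame([["clip_001", "walk"], ["clip_002", "run"]])
train_gen = PandasGenerator(source, Path("dataset"), batch_size=2,
                            nb_frames=8, frame_size=(224, 224))
# model.fit(train_gen, epochs=10)  # model definition omitted
```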
#### File: tf-video-generators/tests/testpandasgeneratorutils.py
```python
import os
import shutil
import unittest
from pathlib import Path
from random import randrange
import numpy.testing as npt
import albumentations as A
from generatorutils import GeneratorUtils
from pandasgeneratorutils import process_sample
class TestPandasGeneratorUtils(unittest.TestCase):
@classmethod
def setUpClass(cls) -> None:
cls.fake_sample = Path('fake-sample')
get_fake_sample(cls.fake_sample, 4)
@classmethod
def tearDownClass(cls) -> None:
shutil.rmtree('fake-sample')
def test__process_sample__without_transformations(self):
nb_frames = 3
frame_size = (20, 20)
img_paths = GeneratorUtils.pick_at_intervals(
GeneratorUtils.get_sample_images(self.fake_sample),
nb_frames)
expected = [GeneratorUtils.process_img(img_path, frame_size)
for img_path in img_paths]
actual = process_sample(self.fake_sample, nb_frames, frame_size)
npt.assert_equal(desired=expected, actual=actual)
def test__process_sample__with_transformations(self):
nb_frames = 3
frame_size = (20, 20)
transformations = [A.HorizontalFlip(p=1)]
img_paths = GeneratorUtils.pick_at_intervals(
GeneratorUtils.get_sample_images(self.fake_sample),
nb_frames)
img_arrays = [GeneratorUtils.process_img(img_path, frame_size)
for img_path in img_paths]
expected = GeneratorUtils.augment(img_arrays, transformations)
actual = process_sample(self.fake_sample, nb_frames, frame_size, transformations=transformations)
npt.assert_equal(desired=expected, actual=actual)
"""
Creates fake sample folder
Structure:
- <fake_sample_name>
- 1.png
- 2.png
- 3.png
- 4.png
- 5.png
- ...
"""
def get_fake_sample(path: Path, nb_frames: int):
if not path.exists():
path.mkdir()
imgs_path = Path('images')
imgs = os.listdir(imgs_path)
nb_images = len(imgs)
for frame in range(nb_frames):
img_path = imgs_path / imgs[randrange(0, nb_images)]
shutil.copy(img_path, path / f'{frame}.png')
```
#### File: tf-video-generators/tests/testpandaspredictiongenerator.py
```python
import shutil
import unittest
from pathlib import Path
import pandas as pd
import numpy as np
import numpy.testing as npt
from pandaspredictiongenerator import PandasPredictionGenerator
import pandasgeneratorutils
class TestPandasPredictionGenerator(unittest.TestCase):
@classmethod
def setUpClass(cls) -> None:
cls.data_path = Path('fake_dataset')
cls.source = pd.DataFrame([1, 2, 3, 4])
cls.nb_samples = len(cls.source.index)
generate_fake_dataset(cls.data_path, cls.source.values)
@classmethod
def tearDownClass(cls) -> None:
shutil.rmtree(cls.data_path)
def test__pandas_prediction_generator__yields_correct_output(self):
batch_size = self.nb_samples
nb_frames = 3
frame_size = (50, 50)
gen = PandasPredictionGenerator(
self.source,
self.data_path,
batch_size=batch_size,
nb_frames=3,
frame_size=frame_size)
expected = []
for i in range(1, batch_size+1):
sample = pandasgeneratorutils.process_sample(
self.data_path / f'{i}',
nb_frames=nb_frames,
frame_size=frame_size)
expected.append(sample)
expected = np.stack(expected)
actual = gen.__getitem__(0)
npt.assert_equal(actual, expected)
"""
Creates fake dataset folder
Structure:
- fake_dataset
- 1
- 1.png
- 2.png
- 3.png
- 4.png
- 5.png
- 2
- 1.png
- 2.png
- 3.png
- 4.png
- 5.png
- 3
- ...
"""
def generate_fake_dataset(path: Path, samples):
if not path.exists():
path.mkdir()
for sample_name in samples:
sample_dir = path / str(sample_name.item())
if not sample_dir.exists():
sample_dir.mkdir()
for img in Path('images').iterdir():
shutil.copy(img, sample_dir)
``` |
{
"source": "JohnGale87/python-ndb",
"score": 2
} |
#### File: tests/unit/test__transaction.py
```python
import itertools
try:
from unittest import mock
except ImportError: # pragma: NO PY3 COVER
import mock
import pytest
from google.api_core import exceptions as core_exceptions
from google.cloud.ndb import context as context_module
from google.cloud.ndb import exceptions
from google.cloud.ndb import tasklets
from google.cloud.ndb import _transaction
class Test_in_transaction:
@staticmethod
@pytest.mark.usefixtures("in_context")
def test_false():
assert _transaction.in_transaction() is False
@staticmethod
def test_true(in_context):
with in_context.new(transaction=b"tx123").use():
assert _transaction.in_transaction() is True
class Test_transaction:
@staticmethod
@pytest.mark.usefixtures("in_context")
def test_propagation():
with pytest.raises(NotImplementedError):
_transaction.transaction(None, propagation=1)
@staticmethod
def test_already_in_transaction(in_context):
with in_context.new(transaction=b"tx123").use():
with pytest.raises(NotImplementedError):
_transaction.transaction(None)
@staticmethod
def test_transaction_inherits_and_merges_cache(in_context):
original_cache = in_context.cache
in_context.cache["test"] = "original value"
with in_context.new(transaction=b"tx123").use() as new_context:
assert new_context.cache is not original_cache
assert new_context.cache["test"] == original_cache["test"]
new_context.cache["test"] = "new_value"
assert new_context.cache["test"] != original_cache["test"]
assert in_context.cache["test"] == "new_value"
@staticmethod
@mock.patch("google.cloud.ndb._transaction.transaction_async")
def test_success(transaction_async):
transaction_async.return_value.result.return_value = 42
assert _transaction.transaction("callback") == 42
transaction_async.assert_called_once_with(
"callback",
read_only=False,
retries=3,
join=False,
xg=True,
propagation=None,
)
class Test_transaction_async:
@staticmethod
@pytest.mark.usefixtures("in_context")
@mock.patch("google.cloud.ndb._datastore_api")
def test_success(_datastore_api):
context_module.get_context().cache["foo"] = "bar"
on_commit_callback = mock.Mock()
def callback():
context = context_module.get_context()
assert not context.cache
context.call_on_commit(on_commit_callback)
return "I tried, momma."
begin_future = tasklets.Future("begin transaction")
_datastore_api.begin_transaction.return_value = begin_future
commit_future = tasklets.Future("commit transaction")
_datastore_api.commit.return_value = commit_future
future = _transaction.transaction_async(callback)
_datastore_api.begin_transaction.assert_called_once_with(False, retries=0)
begin_future.set_result(b"tx123")
_datastore_api.commit.assert_called_once_with(b"tx123", retries=0)
commit_future.set_result(None)
assert future.result() == "I tried, momma."
on_commit_callback.assert_called_once_with()
@staticmethod
def test_success_join(in_context):
def callback():
return "I tried, momma."
with in_context.new(transaction=b"tx123").use():
future = _transaction.transaction_async(callback, join=True)
assert future.result() == "I tried, momma."
@staticmethod
def test_success_join_callback_returns_future(in_context):
future = tasklets.Future()
def callback():
return future
with in_context.new(transaction=b"tx123").use():
future = _transaction.transaction_async(callback, join=True)
future.set_result("I tried, momma.")
assert future.result() == "I tried, momma."
@staticmethod
@pytest.mark.usefixtures("in_context")
@mock.patch("google.cloud.ndb._datastore_api")
def test_success_no_retries(_datastore_api):
def callback():
return "I tried, momma."
begin_future = tasklets.Future("begin transaction")
_datastore_api.begin_transaction.return_value = begin_future
commit_future = tasklets.Future("commit transaction")
_datastore_api.commit.return_value = commit_future
future = _transaction.transaction_async(callback, retries=0)
_datastore_api.begin_transaction.assert_called_once_with(False, retries=0)
begin_future.set_result(b"tx123")
_datastore_api.commit.assert_called_once_with(b"tx123", retries=0)
commit_future.set_result(None)
assert future.result() == "I tried, momma."
@staticmethod
@pytest.mark.usefixtures("in_context")
@mock.patch("google.cloud.ndb._datastore_api")
def test_success_callback_is_tasklet(_datastore_api):
tasklet = tasklets.Future("tasklet")
def callback():
return tasklet
begin_future = tasklets.Future("begin transaction")
_datastore_api.begin_transaction.return_value = begin_future
commit_future = tasklets.Future("commit transaction")
_datastore_api.commit.return_value = commit_future
future = _transaction.transaction_async(callback)
_datastore_api.begin_transaction.assert_called_once_with(False, retries=0)
begin_future.set_result(b"tx123")
tasklet.set_result("I tried, momma.")
_datastore_api.commit.assert_called_once_with(b"tx123", retries=0)
commit_future.set_result(None)
assert future.result() == "I tried, momma."
@staticmethod
@pytest.mark.usefixtures("in_context")
@mock.patch("google.cloud.ndb._datastore_api")
def test_run_inner_loop(_datastore_api):
begin_futures = [
tasklets.Future("begin transaction 1"),
tasklets.Future("begin transaction 2"),
]
_datastore_api.begin_transaction.side_effect = begin_futures
commit_futures = [
tasklets.Future("commit transaction 1"),
tasklets.Future("commit transaction 2"),
]
_datastore_api.commit.side_effect = commit_futures
@tasklets.tasklet
def callback():
# Scheduling the sleep call here causes control to go back up to
# the main loop before this tasklet, running in the transaction
# loop, has finished, forcing a call to run_inner_loop via the idle
# handler.
yield tasklets.sleep(0)
@tasklets.tasklet
def some_tasklet():
# This tasklet runs in the main loop. In order to get results back
# from the transaction_async calls, the run_inner_loop idle handler
# will have to be run.
yield [
_transaction.transaction_async(callback),
_transaction.transaction_async(callback),
]
# Scheduling this sleep call forces the run_inner_loop idle handler
# to be run again so we can run it in the case when there is no
# more work to be done in the transaction. (Branch coverage.)
yield tasklets.sleep(0)
raise tasklets.Return("I tried, momma.")
future = some_tasklet()
begin_futures[0].set_result(b"tx123")
begin_futures[1].set_result(b"tx234")
commit_futures[0].set_result(None)
commit_futures[1].set_result(None)
assert future.result() == "I tried, momma."
@staticmethod
@pytest.mark.usefixtures("in_context")
@mock.patch("google.cloud.ndb._datastore_api")
def test_error(_datastore_api):
error = Exception("Spurious error.")
def callback():
raise error
begin_future = tasklets.Future("begin transaction")
_datastore_api.begin_transaction.return_value = begin_future
rollback_future = tasklets.Future("rollback transaction")
_datastore_api.rollback.return_value = rollback_future
future = _transaction.transaction_async(callback)
_datastore_api.begin_transaction.assert_called_once_with(False, retries=0)
begin_future.set_result(b"tx123")
_datastore_api.rollback.assert_called_once_with(b"tx123")
rollback_future.set_result(None)
assert future.exception() is error
@staticmethod
@pytest.mark.usefixtures("in_context")
@mock.patch("google.cloud.ndb.tasklets.sleep")
@mock.patch("google.cloud.ndb._retry.core_retry")
@mock.patch("google.cloud.ndb._datastore_api")
def test_transient_error(_datastore_api, core_retry, sleep):
core_retry.exponential_sleep_generator.return_value = itertools.count()
core_retry.if_transient_error.return_value = True
callback = mock.Mock(side_effect=[Exception("Spurious error."), "foo"])
begin_future = tasklets.Future("begin transaction")
begin_future.set_result(b"tx123")
_datastore_api.begin_transaction.return_value = begin_future
rollback_future = tasklets.Future("rollback transaction")
_datastore_api.rollback.return_value = rollback_future
rollback_future.set_result(None)
commit_future = tasklets.Future("commit transaction")
_datastore_api.commit.return_value = commit_future
commit_future.set_result(None)
sleep_future = tasklets.Future("sleep")
sleep_future.set_result(None)
sleep.return_value = sleep_future
future = _transaction.transaction_async(callback)
assert future.result() == "foo"
assert _datastore_api.begin_transaction.call_count == 2
_datastore_api.rollback.assert_called_once_with(b"tx123")
sleep.assert_called_once_with(0)
_datastore_api.commit.assert_called_once_with(b"tx123", retries=0)
@staticmethod
@pytest.mark.usefixtures("in_context")
@mock.patch("google.cloud.ndb.tasklets.sleep")
@mock.patch("google.cloud.ndb._retry.core_retry")
@mock.patch("google.cloud.ndb._datastore_api")
def test_too_many_transient_errors(_datastore_api, core_retry, sleep):
core_retry.exponential_sleep_generator.return_value = itertools.count()
core_retry.if_transient_error.return_value = True
error = Exception("Spurious error.")
def callback():
raise error
begin_future = tasklets.Future("begin transaction")
begin_future.set_result(b"tx123")
_datastore_api.begin_transaction.return_value = begin_future
rollback_future = tasklets.Future("rollback transaction")
_datastore_api.rollback.return_value = rollback_future
rollback_future.set_result(None)
commit_future = tasklets.Future("commit transaction")
_datastore_api.commit.return_value = commit_future
commit_future.set_result(None)
sleep_future = tasklets.Future("sleep")
sleep_future.set_result(None)
sleep.return_value = sleep_future
future = _transaction.transaction_async(callback)
with pytest.raises(core_exceptions.RetryError) as error_context:
future.check_success()
assert error_context.value.cause is error
assert _datastore_api.begin_transaction.call_count == 4
assert _datastore_api.rollback.call_count == 4
assert sleep.call_count == 4
_datastore_api.commit.assert_not_called()
@pytest.mark.usefixtures("in_context")
@mock.patch("google.cloud.ndb._datastore_api")
def test_transactional(_datastore_api):
@_transaction.transactional()
def simple_function(a, b):
return a + b
begin_future = tasklets.Future("begin transaction")
_datastore_api.begin_transaction.return_value = begin_future
commit_future = tasklets.Future("commit transaction")
_datastore_api.commit.return_value = commit_future
begin_future.set_result(b"tx123")
commit_future.set_result(None)
res = simple_function(100, 42)
assert res == 142
@pytest.mark.usefixtures("in_context")
@mock.patch("google.cloud.ndb._datastore_api")
def test_transactional_async(_datastore_api):
@_transaction.transactional_async()
def simple_function(a, b):
return a + b
begin_future = tasklets.Future("begin transaction")
_datastore_api.begin_transaction.return_value = begin_future
commit_future = tasklets.Future("commit transaction")
_datastore_api.commit.return_value = commit_future
begin_future.set_result(b"tx123")
commit_future.set_result(None)
res = simple_function(100, 42)
assert res.result() == 142
@pytest.mark.usefixtures("in_context")
@mock.patch("google.cloud.ndb._datastore_api")
def test_transactional_tasklet(_datastore_api):
@_transaction.transactional_tasklet()
def generator_function(dependency):
value = yield dependency
raise tasklets.Return(value + 42)
begin_future = tasklets.Future("begin transaction")
_datastore_api.begin_transaction.return_value = begin_future
commit_future = tasklets.Future("commit transaction")
_datastore_api.commit.return_value = commit_future
begin_future.set_result(b"tx123")
commit_future.set_result(None)
dependency = tasklets.Future()
dependency.set_result(100)
res = generator_function(dependency)
assert res.result() == 142
@pytest.mark.usefixtures("in_context")
def test_non_transactional_out_of_transaction():
@_transaction.non_transactional()
def simple_function(a, b):
return a + b
res = simple_function(100, 42)
assert res == 142
@pytest.mark.usefixtures("in_context")
def test_non_transactional_in_transaction(in_context):
with in_context.new(transaction=b"tx123").use():
def simple_function(a, b):
return a + b
wrapped_function = _transaction.non_transactional()(simple_function)
res = wrapped_function(100, 42)
assert res == 142
with pytest.raises(exceptions.BadRequestError):
wrapped_function = _transaction.non_transactional(allow_existing=False)(
simple_function
)
wrapped_function(100, 42)
``` |
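The tests above pin down the retry behaviour of `transaction_async`: a transient error triggers a rollback, a sleep, and a fresh `begin_transaction`, until either the callback succeeds or the retry budget is exhausted and a `RetryError` is raised. As a hedged illustration of how that machinery is normally reached from application code (assuming the public `google.cloud.ndb` surface; this snippet is not taken from the tests):
```python
# Minimal sketch: `retries` bounds how many transient failures are retried
# before RetryError propagates; the callback body is hypothetical.
from google.cloud import ndb

@ndb.transactional(retries=3)
def increment_counter(key):
    entity = key.get()
    entity.count += 1  # `count` is an assumed model property
    entity.put()
```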
{
"source": "JohnGarbutt/ironic",
"score": 2
} |
#### File: unit/drivers/third_party_driver_mock_specs.py
```python
DRACCLIENT_SPEC = (
'client',
'constants',
'exceptions'
)
DRACCLIENT_CLIENT_MOD_SPEC = (
'DRACClient',
)
DRACCLIENT_CONSTANTS_MOD_SPEC = (
'POWER_OFF',
'POWER_ON',
'REBOOT'
)
# ironic_inspector
IRONIC_INSPECTOR_CLIENT_SPEC = (
'ClientV1',
)
class InspectorClientV1Specs(object):
def __init__(self, session, inspector_url, api_version):
pass
def introspect(self, uuid):
pass
def get_status(self, uuid):
pass
# proliantutils
PROLIANTUTILS_SPEC = (
'exception',
'ilo',
'utils',
)
# pywsman
PYWSMAN_SPEC = (
'Client',
'ClientOptions',
'EndPointReference',
'FLAG_ENUMERATION_OPTIMIZATION',
'Filter',
'XmlDoc',
'wsman_transport_set_verify_host',
'wsman_transport_set_verify_peer',
)
# pywsnmp
PYWSNMP_SPEC = (
'entity',
'error',
'proto',
)
# scciclient
SCCICLIENT_SPEC = (
'irmc',
)
SCCICLIENT_IRMC_SCCI_SPEC = (
'POWER_OFF',
'POWER_ON',
'POWER_RESET',
'POWER_SOFT_CYCLE',
'POWER_SOFT_OFF',
'MOUNT_CD',
'POWER_RAISE_NMI',
'UNMOUNT_CD',
'MOUNT_FD',
'UNMOUNT_FD',
'SCCIClientError',
'SCCIInvalidInputError',
'get_share_type',
'get_client',
'get_report',
'get_sensor_data',
'get_virtual_cd_set_params_cmd',
'get_virtual_fd_set_params_cmd',
'get_essential_properties',
)
ONEVIEWCLIENT_SPEC = (
'client',
'states',
'exceptions',
'models',
'utils',
)
ONEVIEWCLIENT_CLIENT_CLS_SPEC = (
)
ONEVIEWCLIENT_STATES_SPEC = (
'ONEVIEW_POWER_OFF',
'ONEVIEW_POWERING_OFF',
'ONEVIEW_POWER_ON',
'ONEVIEW_POWERING_ON',
'ONEVIEW_RESETTING',
'ONEVIEW_ERROR',
)
SUSHY_CONSTANTS_SPEC = (
'BOOT_SOURCE_TARGET_PXE',
'BOOT_SOURCE_TARGET_HDD',
'BOOT_SOURCE_TARGET_CD',
'BOOT_SOURCE_TARGET_BIOS_SETUP',
'SYSTEM_POWER_STATE_ON',
'SYSTEM_POWER_STATE_POWERING_ON',
'SYSTEM_POWER_STATE_OFF',
'SYSTEM_POWER_STATE_POWERING_OFF',
'RESET_ON',
'RESET_FORCE_OFF',
'RESET_GRACEFUL_SHUTDOWN',
'RESET_GRACEFUL_RESTART',
'RESET_FORCE_RESTART',
'RESET_NMI',
'BOOT_SOURCE_ENABLED_CONTINUOUS',
'BOOT_SOURCE_ENABLED_ONCE',
)
``` |
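These spec tuples only enumerate attribute names; the unit tests use them to stand in for optional third-party driver libraries that may not be installed. A hedged sketch of the usual pattern follows (the import path is an assumption; ironic's companion `third_party_driver_mocks` module, not shown here, is the authoritative wiring):
```python
# Sketch only: replace an optional dependency with a MagicMock constrained by
# the spec tuple, so a test touching an unlisted attribute fails immediately.
import sys
from unittest import mock

from ironic.tests.unit.drivers import third_party_driver_mock_specs as specs

if 'pywsman' not in sys.modules:
    sys.modules['pywsman'] = mock.MagicMock(spec_set=specs.PYWSMAN_SPEC)
if 'proliantutils' not in sys.modules:
    sys.modules['proliantutils'] = mock.MagicMock(spec_set=specs.PROLIANTUTILS_SPEC)
```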
{
"source": "JohnGarbutt/nova",
"score": 2
} |
#### File: openstack/compute/test_versions.py
```python
import copy
import uuid as stdlib_uuid
from oslo_serialization import jsonutils
import webob
from nova.api.openstack.compute import views
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import matchers
from nova import wsgi
NS = {
'atom': 'http://www.w3.org/2005/Atom',
'ns': 'http://docs.openstack.org/common/api/v1.0'
}
EXP_LINKS = {
'v2.0': {
'html': 'http://docs.openstack.org/',
},
'v2.1': {
'html': 'http://docs.openstack.org/'
},
}
EXP_VERSIONS = {
"v2.0": {
"id": "v2.0",
"status": "SUPPORTED",
"version": "",
"min_version": "",
"updated": "2011-01-21T11:33:21Z",
"links": [
{
"rel": "describedby",
"type": "text/html",
"href": EXP_LINKS['v2.0']['html'],
},
],
"media-types": [
{
"base": "application/json",
"type": "application/vnd.openstack.compute+json;version=2",
},
],
},
"v2.1": {
"id": "v2.1",
"status": "CURRENT",
"version": "2.14",
"min_version": "2.1",
"updated": "2013-07-23T11:33:21Z",
"links": [
{
"rel": "self",
"href": "http://localhost/v2.1/",
},
{
"rel": "describedby",
"type": "text/html",
"href": EXP_LINKS['v2.1']['html'],
},
],
"media-types": [
{
"base": "application/json",
"type": "application/vnd.openstack.compute+json;version=2.1",
}
],
}
}
def _get_self_href(response):
"""Extract the URL to self from response data."""
data = jsonutils.loads(response.body)
for link in data['versions'][0]['links']:
if link['rel'] == 'self':
return link['href']
return ''
class VersionsTestV20(test.NoDBTestCase):
def setUp(self):
super(VersionsTestV20, self).setUp()
self.wsgi_app = fakes.wsgi_app()
def test_get_version_list(self):
req = webob.Request.blank('/')
req.accept = "application/json"
res = req.get_response(self.wsgi_app)
self.assertEqual(200, res.status_int)
self.assertEqual("application/json", res.content_type)
versions = jsonutils.loads(res.body)["versions"]
expected = [
{
"id": "v2.0",
"status": "SUPPORTED",
"version": "",
"min_version": "",
"updated": "2011-01-21T11:33:21Z",
"links": [
{
"rel": "self",
"href": "http://localhost/v2/",
}],
},
{
"id": "v2.1",
"status": "CURRENT",
"version": "2.14",
"min_version": "2.1",
"updated": "2013-07-23T11:33:21Z",
"links": [
{
"rel": "self",
"href": "http://localhost/v2.1/",
}],
},
]
self.assertEqual(expected, versions)
def test_get_version_list_302(self):
req = webob.Request.blank('/v2')
req.accept = "application/json"
res = req.get_response(self.wsgi_app)
self.assertEqual(302, res.status_int)
redirect_req = webob.Request.blank('/v2/')
self.assertEqual(redirect_req.url, res.location)
def _test_get_version_2_detail(self, url, accept=None):
if accept is None:
accept = "application/json"
req = webob.Request.blank(url)
req.accept = accept
res = req.get_response(self.wsgi_app)
self.assertEqual(200, res.status_int)
self.assertEqual("application/json", res.content_type)
version = jsonutils.loads(res.body)
expected = {
"version": {
"id": "v2.0",
"status": "SUPPORTED",
"version": "",
"min_version": "",
"updated": "2011-01-21T11:33:21Z",
"links": [
{
"rel": "self",
"href": "http://localhost/v2/",
},
{
"rel": "describedby",
"type": "text/html",
"href": EXP_LINKS['v2.0']['html'],
},
],
"media-types": [
{
"base": "application/json",
"type": "application/"
"vnd.openstack.compute+json;version=2",
},
],
},
}
self.assertEqual(expected, version)
def test_get_version_2_detail(self):
self._test_get_version_2_detail('/v2/')
def test_get_version_2_detail_content_type(self):
accept = "application/json;version=2"
self._test_get_version_2_detail('/', accept=accept)
def test_get_version_2_versions_invalid(self):
req = webob.Request.blank('/v2/versions/1234')
req.accept = "application/json"
res = req.get_response(self.wsgi_app)
self.assertEqual(404, res.status_int)
def test_multi_choice_image(self):
req = webob.Request.blank('/images/1')
req.accept = "application/json"
res = req.get_response(self.wsgi_app)
self.assertEqual(300, res.status_int)
self.assertEqual("application/json", res.content_type)
expected = {
"choices": [
{
"id": "v2.0",
"status": "SUPPORTED",
"links": [
{
"href": "http://localhost/v2/images/1",
"rel": "self",
},
],
"media-types": [
{
"base": "application/json",
"type": "application/vnd.openstack.compute+json"
";version=2"
},
],
},
{
"id": "v2.1",
"status": "CURRENT",
"links": [
{
"href": "http://localhost/v2.1/images/1",
"rel": "self",
},
],
"media-types": [
{
"base": "application/json",
"type":
"application/vnd.openstack.compute+json;version=2.1",
}
],
},
], }
self.assertThat(jsonutils.loads(res.body),
matchers.DictMatches(expected))
def test_multi_choice_server_atom(self):
"""Make sure multi choice responses do not have content-type
application/atom+xml (should use default of json)
"""
req = webob.Request.blank('/servers')
req.accept = "application/atom+xml"
res = req.get_response(self.wsgi_app)
self.assertEqual(300, res.status_int)
self.assertEqual("application/json", res.content_type)
def test_multi_choice_server(self):
uuid = str(stdlib_uuid.uuid4())
req = webob.Request.blank('/servers/' + uuid)
req.accept = "application/json"
res = req.get_response(self.wsgi_app)
self.assertEqual(300, res.status_int)
self.assertEqual("application/json", res.content_type)
expected = {
"choices": [
{
"id": "v2.0",
"status": "SUPPORTED",
"links": [
{
"href": "http://localhost/v2/servers/" + uuid,
"rel": "self",
},
],
"media-types": [
{
"base": "application/json",
"type": "application/vnd.openstack.compute+json"
";version=2"
},
],
},
{
"id": "v2.1",
"status": "CURRENT",
"links": [
{
"href": "http://localhost/v2.1/servers/" + uuid,
"rel": "self",
},
],
"media-types": [
{
"base": "application/json",
"type":
"application/vnd.openstack.compute+json;version=2.1",
}
],
},
], }
self.assertThat(jsonutils.loads(res.body),
matchers.DictMatches(expected))
class VersionsViewBuilderTests(test.NoDBTestCase):
def test_view_builder(self):
base_url = "http://example.org/"
version_data = {
"v3.2.1": {
"id": "3.2.1",
"status": "CURRENT",
"version": "2.3",
"min_version": "2.1",
"updated": "2011-07-18T11:30:00Z",
}
}
expected = {
"versions": [
{
"id": "3.2.1",
"status": "CURRENT",
"version": "2.3",
"min_version": "2.1",
"updated": "2011-07-18T11:30:00Z",
"links": [
{
"rel": "self",
"href": "http://example.org/v2/",
},
],
}
]
}
builder = views.versions.ViewBuilder(base_url)
output = builder.build_versions(version_data)
self.assertEqual(expected, output)
def _test_view_builder_osapi_compute_link_prefix(self,
href=None):
base_url = "http://example.org/v2.1/"
if href is None:
href = base_url
version_data = {
"id": "v2.1",
"status": "CURRENT",
"version": "2.8",
"min_version": "2.1",
"updated": "2013-07-23T11:33:21Z",
"links": [
{
"rel": "describedby",
"type": "text/html",
"href": EXP_LINKS['v2.1']['html'],
}
],
"media-types": [
{
"base": "application/json",
"type": ("application/vnd.openstack."
"compute+json;version=2.1")
}
],
}
expected_data = copy.deepcopy(version_data)
expected = {'version': expected_data}
expected['version']['links'].insert(0, {
"rel": "self",
"href": href,
})
builder = views.versions.ViewBuilder(base_url)
output = builder.build_version(version_data)
self.assertEqual(expected, output)
def test_view_builder_with_osapi_compute_link_prefix(self):
self.flags(osapi_compute_link_prefix='http://zoo.com:42')
href = "http://zoo.com:42/v2.1/"
self._test_view_builder_osapi_compute_link_prefix(href)
def test_view_builder_without_osapi_compute_link_prefix(self):
self._test_view_builder_osapi_compute_link_prefix()
def test_generate_href(self):
base_url = "http://example.org/app/"
expected = "http://example.org/app/v2/"
builder = views.versions.ViewBuilder(base_url)
actual = builder.generate_href('v2')
self.assertEqual(expected, actual)
def test_generate_href_v21(self):
base_url = "http://example.org/app/"
expected = "http://example.org/app/v2.1/"
builder = views.versions.ViewBuilder(base_url)
actual = builder.generate_href('v2.1')
self.assertEqual(expected, actual)
def test_generate_href_unknown(self):
base_url = "http://example.org/app/"
expected = "http://example.org/app/v2/"
builder = views.versions.ViewBuilder(base_url)
actual = builder.generate_href('foo')
self.assertEqual(expected, actual)
def test_generate_href_with_path(self):
path = "random/path"
base_url = "http://example.org/app/"
expected = "http://example.org/app/v2/%s" % path
builder = views.versions.ViewBuilder(base_url)
actual = builder.generate_href("v2", path)
self.assertEqual(actual, expected)
def test_generate_href_with_empty_path(self):
path = ""
base_url = "http://example.org/app/"
expected = "http://example.org/app/v2/"
builder = views.versions.ViewBuilder(base_url)
actual = builder.generate_href("v2", path)
self.assertEqual(actual, expected)
# NOTE(oomichi): The version API of v2.0 now also covers "/" (root),
# so this class tests only "/v2.1" for the v2.1 API.
class VersionsTestV21(test.NoDBTestCase):
exp_versions = copy.deepcopy(EXP_VERSIONS)
exp_versions['v2.0']['links'].insert(0,
{'href': 'http://localhost/v2.1/', 'rel': 'self'},
)
def test_get_version_list_302(self):
req = webob.Request.blank('/v2.1')
req.accept = "application/json"
res = req.get_response(fakes.wsgi_app_v21())
self.assertEqual(302, res.status_int)
redirect_req = webob.Request.blank('/v2.1/')
self.assertEqual(redirect_req.url, res.location)
def test_get_version_21_detail(self):
req = webob.Request.blank('/v2.1/')
req.accept = "application/json"
res = req.get_response(fakes.wsgi_app_v21())
self.assertEqual(200, res.status_int)
self.assertEqual("application/json", res.content_type)
version = jsonutils.loads(res.body)
expected = {"version": self.exp_versions['v2.1']}
self.assertEqual(expected, version)
def test_get_version_21_versions_v21_detail(self):
req = webob.Request.blank('/v2.1/fake/versions/v2.1')
req.accept = "application/json"
res = req.get_response(fakes.wsgi_app_v21())
self.assertEqual(200, res.status_int)
self.assertEqual("application/json", res.content_type)
version = jsonutils.loads(res.body)
expected = {"version": self.exp_versions['v2.1']}
self.assertEqual(expected, version)
def test_get_version_21_versions_v20_detail(self):
req = webob.Request.blank('/v2.1/fake/versions/v2.0')
req.accept = "application/json"
res = req.get_response(fakes.wsgi_app_v21())
self.assertEqual(200, res.status_int)
self.assertEqual("application/json", res.content_type)
version = jsonutils.loads(res.body)
expected = {"version": self.exp_versions['v2.0']}
self.assertEqual(expected, version)
def test_get_version_21_versions_invalid(self):
req = webob.Request.blank('/v2.1/versions/1234')
req.accept = "application/json"
res = req.get_response(fakes.wsgi_app_v21())
self.assertEqual(404, res.status_int)
def test_get_version_21_detail_content_type(self):
req = webob.Request.blank('/')
req.accept = "application/json;version=2.1"
res = req.get_response(fakes.wsgi_app_v21())
self.assertEqual(200, res.status_int)
self.assertEqual("application/json", res.content_type)
version = jsonutils.loads(res.body)
expected = {"version": self.exp_versions['v2.1']}
self.assertEqual(expected, version)
class VersionBehindSslTestCase(test.NoDBTestCase):
def setUp(self):
super(VersionBehindSslTestCase, self).setUp()
self.flags(secure_proxy_ssl_header='HTTP_X_FORWARDED_PROTO')
def test_versions_without_headers(self):
req = wsgi.Request.blank('/')
req.accept = "application/json"
res = req.get_response(fakes.wsgi_app())
self.assertEqual(200, res.status_int)
href = _get_self_href(res)
self.assertTrue(href.startswith('http://'))
def test_versions_with_header(self):
req = wsgi.Request.blank('/')
req.accept = "application/json"
req.headers['X-Forwarded-Proto'] = 'https'
res = req.get_response(fakes.wsgi_app())
self.assertEqual(200, res.status_int)
href = _get_self_href(res)
self.assertTrue(href.startswith('https://'))
class VersionsTestV21WithV2CompatibleWrapper(VersionsTestV20):
def setUp(self):
super(VersionsTestV21WithV2CompatibleWrapper, self).setUp()
self.wsgi_app = fakes.wsgi_app_v21(v2_compatible=True)
```
#### File: scheduler/filters/test_numa_topology_filters.py
```python
import uuid
from nova import objects
from nova.scheduler.filters import numa_topology_filter
from nova import test
from nova.tests.unit.scheduler import fakes
class TestNUMATopologyFilter(test.NoDBTestCase):
def setUp(self):
super(TestNUMATopologyFilter, self).setUp()
self.filt_cls = numa_topology_filter.NUMATopologyFilter()
def test_numa_topology_filter_pass(self):
instance_topology = objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell(id=0, cpuset=set([1]), memory=512),
objects.InstanceNUMACell(id=1, cpuset=set([3]), memory=512)
])
spec_obj = objects.RequestSpec(numa_topology=instance_topology,
pci_requests=None,
instance_uuid=str(uuid.uuid4()))
host = fakes.FakeHostState('host1', 'node1',
{'numa_topology': fakes.NUMA_TOPOLOGY,
'pci_stats': None,
'cpu_allocation_ratio': 16.0,
'ram_allocation_ratio': 1.5})
self.assertTrue(self.filt_cls.host_passes(host, spec_obj))
def test_numa_topology_filter_numa_instance_no_numa_host_fail(self):
instance_topology = objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell(id=0, cpuset=set([1]), memory=512),
objects.InstanceNUMACell(id=1, cpuset=set([3]), memory=512)
])
spec_obj = objects.RequestSpec(numa_topology=instance_topology,
pci_requests=None,
instance_uuid=str(uuid.uuid4()))
host = fakes.FakeHostState('host1', 'node1', {'pci_stats': None})
self.assertFalse(self.filt_cls.host_passes(host, spec_obj))
def test_numa_topology_filter_numa_host_no_numa_instance_pass(self):
spec_obj = objects.RequestSpec(numa_topology=None,
pci_requests=None,
instance_uuid=str(uuid.uuid4()))
host = fakes.FakeHostState('host1', 'node1',
{'numa_topology': fakes.NUMA_TOPOLOGY})
self.assertTrue(self.filt_cls.host_passes(host, spec_obj))
def test_numa_topology_filter_fail_fit(self):
instance_topology = objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell(id=0, cpuset=set([1]), memory=512),
objects.InstanceNUMACell(id=1, cpuset=set([2]), memory=512),
objects.InstanceNUMACell(id=2, cpuset=set([3]), memory=512)
])
spec_obj = objects.RequestSpec(numa_topology=instance_topology,
pci_requests=None,
instance_uuid=str(uuid.uuid4()))
host = fakes.FakeHostState('host1', 'node1',
{'numa_topology': fakes.NUMA_TOPOLOGY,
'pci_stats': None,
'cpu_allocation_ratio': 16.0,
'ram_allocation_ratio': 1.5})
self.assertFalse(self.filt_cls.host_passes(host, spec_obj))
def test_numa_topology_filter_fail_memory(self):
instance_topology = objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell(id=0, cpuset=set([1]),
memory=1024),
objects.InstanceNUMACell(id=1, cpuset=set([3]), memory=512)
])
spec_obj = objects.RequestSpec(numa_topology=instance_topology,
pci_requests=None,
instance_uuid=str(uuid.uuid4()))
host = fakes.FakeHostState('host1', 'node1',
{'numa_topology': fakes.NUMA_TOPOLOGY,
'pci_stats': None,
'cpu_allocation_ratio': 16.0,
'ram_allocation_ratio': 1})
self.assertFalse(self.filt_cls.host_passes(host, spec_obj))
def test_numa_topology_filter_fail_cpu(self):
instance_topology = objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell(id=0, cpuset=set([1]), memory=512),
objects.InstanceNUMACell(id=1, cpuset=set([3, 4, 5]),
memory=512)])
spec_obj = objects.RequestSpec(numa_topology=instance_topology,
pci_requests=None,
instance_uuid=str(uuid.uuid4()))
host = fakes.FakeHostState('host1', 'node1',
{'numa_topology': fakes.NUMA_TOPOLOGY,
'pci_stats': None,
'cpu_allocation_ratio': 1,
'ram_allocation_ratio': 1.5})
self.assertFalse(self.filt_cls.host_passes(host, spec_obj))
def test_numa_topology_filter_pass_set_limit(self):
instance_topology = objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell(id=0, cpuset=set([1]), memory=512),
objects.InstanceNUMACell(id=1, cpuset=set([3]), memory=512)
])
spec_obj = objects.RequestSpec(numa_topology=instance_topology,
pci_requests=None,
instance_uuid=str(uuid.uuid4()))
host = fakes.FakeHostState('host1', 'node1',
{'numa_topology': fakes.NUMA_TOPOLOGY,
'pci_stats': None,
'cpu_allocation_ratio': 21,
'ram_allocation_ratio': 1.3})
self.assertTrue(self.filt_cls.host_passes(host, spec_obj))
limits = host.limits['numa_topology']
self.assertEqual(limits.cpu_allocation_ratio, 21)
self.assertEqual(limits.ram_allocation_ratio, 1.3)
``` |
{
"source": "JohnGarbutt/os-capacity-probe",
"score": 2
} |
#### File: os_capacity/commands/commands.py
```python
import logging
from cliff.lister import Lister
from os_capacity.data import metrics
from os_capacity import utils
class FlavorList(Lister):
"""List all the flavors."""
log = logging.getLogger(__name__)
def take_action(self, parsed_args):
flavors = utils.get_flavors(self.app)
return (('UUID', 'Name', 'VCPUs', 'RAM MB', 'DISK GB', 'Extra Specs'),
flavors)
class ListResourcesAll(Lister):
"""List all resource providers, with their resources and servers."""
def take_action(self, parsed_args):
inventories = utils.get_providers_with_resources_and_servers(self.app)
return (('Provider Name', 'Resources', 'Servers'), inventories)
class ListResourcesGroups(Lister):
"""Lists counts of resource providers with similar inventories."""
def take_action(self, parsed_args):
groups = utils.group_providers_by_type_with_capacity(self.app)
groups = list(groups) # convert iterator
metrics_to_send = []
for group in groups:
flavors = group[4].replace(", ", "-")
if not flavors:
# skip empty hosts
continue
resources = group[0]
total = group[1]
used = group[2]
free = group[3]
metrics_to_send.append(metrics.Metric(
name="resources.total", value=total,
value_meta={"flavor_resources": resources},
dimensions={"flavor": flavors}))
metrics_to_send.append(metrics.Metric(
name="resources.used", value=used,
value_meta={"flavor_resources": resources},
dimensions={"flavor": flavors}))
metrics_to_send.append(metrics.Metric(
name="resources.free", value=free,
value_meta={"flavor_resources": resources},
dimensions={"flavor": flavors}))
metrics.send_metrics(self.app.monitoring_client, metrics_to_send)
return (
('Resource Class Groups', 'Total', 'Used', 'Free', 'Flavors'),
groups)
class ListUsagesAll(Lister):
"""List all current resource usages."""
def take_action(self, parsed_args):
allocations = utils.get_allocations_with_server_info(self.app,
get_names=True)
return (
('Provider Name', 'Server UUID', 'Resources',
'Flavor', 'Days', 'Project', 'User'), allocations)
class ListUsagesGroup(Lister):
"""Group usage by specified key (by user or project).
NOTE: The usage days figure is incomplete, as it only takes into
account currently active servers. Any previously deleted
servers are not counted.
"""
def get_parser(self, prog_name):
parser = super(ListUsagesGroup, self).get_parser(prog_name)
parser.add_argument('group_by', nargs='?', default='user',
help='Group by user, project, or all',
choices=['user', 'project', 'all'])
return parser
def take_action(self, parsed_args):
usages = utils.group_usage(self.app, parsed_args.group_by)
sort_key_title = parsed_args.group_by.title()
return ((sort_key_title, 'Current Usage', 'Usage Days'), usages)
```
#### File: os_capacity/data/flavors.py
```python
import collections
Flavor = collections.namedtuple(
"Flavor", ("id", "name", "vcpus", "ram_mb", "disk_gb", "extra_specs"))
def get_all(compute_client, include_extra_specs=True):
response = compute_client.get('/flavors/detail').json()
raw_flavors = response['flavors']
extra_specs = {}
if include_extra_specs:
for flavor in raw_flavors:
url = '/flavors/%s/os-extra_specs' % flavor['id']
response = compute_client.get(url).json()
extra_specs[flavor['id']] = response['extra_specs']
return [Flavor(f['id'], f['name'], f['vcpus'], f['ram'],
(f['disk'] + f['OS-FLV-EXT-DATA:ephemeral']),
extra_specs.get(f['id']))
for f in raw_flavors]
```
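The data module above returns plain namedtuples, so it can be exercised outside the cliff app. A hedged standalone usage sketch (credential handling via `os_client_config` mirrors what `shell.py` below does; the printed fields come from the `Flavor` namedtuple):
```python
# Illustrative only: list flavors using a keystoneauth session built from
# clouds.yaml or the OS_* environment variables.
import os_client_config

from os_capacity.data import flavors

config = os_client_config.get_config()
compute = config.get_session_client("compute")
for flavor in flavors.get_all(compute, include_extra_specs=False):
    print(flavor.name, flavor.vcpus, flavor.ram_mb, flavor.disk_gb)
```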
#### File: os-capacity-probe/os_capacity/shell.py
```python
import sys
from cliff.app import App
from cliff.commandmanager import CommandManager
import os_client_config
def get_cloud_config():
# TODO(johngarbutt) consider passing in argument parser
return os_client_config.get_config()
def get_client(cloud_config, service_type):
return cloud_config.get_session_client(service_type)
class CapacityApp(App):
def __init__(self):
super(CapacityApp, self).__init__(
description='OS-Capacity (StackHPC) Command Line Interface (CLI)',
version='0.1',
command_manager=CommandManager('os_capacity.commands'),
deferred_help=True,
)
def initialize_app(self, argv):
self.LOG.debug('initialize_app')
config = os_client_config.get_config()
self.compute_client = config.get_session_client("compute")
self.placement_client = config.get_session_client("placement")
self.monitoring_client = config.get_session_client("monitoring")
self.identity_client = config.get_session_client("identity")
self.LOG.debug('setup Keystone API REST clients')
def prepare_to_run_command(self, cmd):
self.LOG.debug('prepare_to_run_command %s', cmd.__class__.__name__)
def clean_up(self, cmd, result, err):
self.LOG.debug('clean_up %s', cmd.__class__.__name__)
if err:
self.LOG.debug('got an error: %s', err)
def main(argv=sys.argv[1:]):
myapp = CapacityApp()
return myapp.run(argv)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
```
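`CommandManager('os_capacity.commands')` resolves subcommands through setuptools entry points registered under that namespace, with underscores in the entry-point name becoming spaces on the command line. A hedged packaging sketch follows (the console-script name and command aliases are assumptions for illustration, not copied from the project's real setup files):
```python
# setup.py sketch: wire the cliff app and its commands via entry points.
from setuptools import find_packages, setup

setup(
    name='os-capacity-probe',
    packages=find_packages(),
    entry_points={
        'console_scripts': [
            'os-capacity = os_capacity.shell:main',
        ],
        'os_capacity.commands': [
            'flavor_list = os_capacity.commands.commands:FlavorList',
            'resources_all = os_capacity.commands.commands:ListResourcesAll',
            'resources_group = os_capacity.commands.commands:ListResourcesGroups',
            'usages_all = os_capacity.commands.commands:ListUsagesAll',
            'usages_group = os_capacity.commands.commands:ListUsagesGroup',
        ],
    },
)
```
With wiring like this, `os-capacity flavor list` would dispatch to `FlavorList.take_action()`.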
#### File: os-capacity-probe/os_capacity/utils.py
```python
import collections
from datetime import datetime
import os
from os_capacity.data import flavors
from os_capacity.data import metrics
from os_capacity.data import resource_provider
from os_capacity.data import server as server_data
from os_capacity.data import users
IGNORE_CUSTOM_RC = 'OS_CAPACITY_IGNORE_CUSTOM_RC' in os.environ
def get_flavors(app):
app.LOG.debug("Getting flavors")
return flavors.get_all(app.compute_client, include_extra_specs=True)
def get_providers_with_resources_and_servers(app):
resource_providers = resource_provider.get_all(app.placement_client)
for rp in resource_providers:
inventories = resource_provider.get_inventories(
app.placement_client, rp)
allocations = resource_provider.get_allocations(
app.placement_client, rp)
inventory_texts = ["%s:%s" % (i.resource_class, i.total)
for i in inventories]
inventory_texts.sort()
inventory_text = ", ".join(inventory_texts)
allocation_texts = [a.consumer_uuid for a in allocations]
allocation_texts.sort()
allocation_text = ", ".join(allocation_texts)
yield (rp.name, inventory_text, allocation_text)
def group_providers_by_type_with_capacity(app):
# TODO(johngarbutt) this flavor grouping is very ironic specific
all_flavors = flavors.get_all(app.compute_client)
grouped_flavors = collections.defaultdict(list)
for flavor in all_flavors:
custom_rc = None
if not IGNORE_CUSTOM_RC:
for extra_spec in flavor.extra_specs:
if extra_spec.startswith('resources:CUSTOM'):
custom_rc = extra_spec.replace('resources:', '')
break # Assuming a good Ironic setup here
key = (flavor.vcpus, flavor.ram_mb, flavor.disk_gb, custom_rc)
grouped_flavors[key] += [flavor.name]
all_resource_providers = resource_provider.get_all(app.placement_client)
inventory_counts = collections.defaultdict(int)
allocation_counts = collections.defaultdict(int)
for rp in all_resource_providers:
inventories = resource_provider.get_inventories(
app.placement_client, rp)
# TODO(johngarbutt) much refinement needed to be general...
vcpus = 0
ram_mb = 0
disk_gb = 0
custom_rc = None
for inventory in inventories:
if "VCPU" in inventory.resource_class:
vcpus += inventory.total
if "MEMORY" in inventory.resource_class:
ram_mb += inventory.total
if "DISK" in inventory.resource_class:
disk_gb += inventory.total
if inventory.resource_class.startswith('CUSTOM_'):
if not IGNORE_CUSTOM_RC:
custom_rc = inventory.resource_class # Ironic specific
key = (vcpus, ram_mb, disk_gb, custom_rc)
inventory_counts[key] += 1
allocations = resource_provider.get_allocations(
app.placement_client, rp)
if allocations:
allocation_counts[key] += 1
for key, inventory_count in inventory_counts.items():
resources = "VCPU:%s, MEMORY_MB:%s, DISK_GB:%s, %s" % key
matching_flavors = grouped_flavors[key]
matching_flavors.sort()
matching_flavors = ", ".join(matching_flavors)
total = inventory_count
used = allocation_counts[key]
free = total - used
yield (resources, total, used, free, matching_flavors)
def _get_now():
# To make it easy to mock in tests
return datetime.now()
AllocationList = collections.namedtuple(
"AllocationList", ("resource_provider_name", "consumer_uuid",
"usage", "flavor_id", "days",
"project_id", "user_id"))
def get_allocations_with_server_info(app, flat_usage=True, get_names=False):
"""Get allocations, add in server and resource provider details."""
resource_providers = resource_provider.get_all(app.placement_client)
rp_dict = {rp.uuid: rp.name for rp in resource_providers}
all_allocations = resource_provider.get_all_allocations(
app.placement_client, resource_providers)
now = _get_now()
allocation_tuples = []
for allocation in all_allocations:
rp_name = rp_dict[allocation.resource_provider_uuid]
# TODO(johngarbutt) this is too presentation like for here
usage = allocation.resources
if flat_usage:
usage_amounts = ["%s:%s" % (rca.resource_class, rca.amount)
for rca in allocation.resources]
usage_amounts.sort()
usage = ", ".join(usage_amounts)
server = server_data.get(app.compute_client, allocation.consumer_uuid)
delta = now - server.created
days_running = delta.days + 1
allocation_tuples.append(AllocationList(
rp_name, allocation.consumer_uuid, usage,
server.flavor_id, days_running, server.project_id,
server.user_id))
allocation_tuples.sort(key=lambda x: (x.project_id, x.user_id,
x.days * -1, x.flavor_id))
if get_names:
all_users = users.get_all(app.identity_client)
all_projects = users.get_all_projects(app.identity_client)
all_flavors_list = flavors.get_all(app.compute_client,
include_extra_specs=False)
all_flavors = {flavor.id: flavor.name for flavor in all_flavors_list}
updated = []
for allocation in allocation_tuples:
user_id = all_users.get(allocation.user_id)
project_id = all_projects.get(allocation.project_id)
flavor_id = all_flavors.get(allocation.flavor_id)
updated.append(AllocationList(
allocation.resource_provider_name,
allocation.consumer_uuid,
allocation.usage,
flavor_id,
allocation.days,
project_id,
user_id))
allocation_tuples = updated
return allocation_tuples
UsageSummary = collections.namedtuple(
"UsageSummary", ("resource_provider_name", "consumer_uuid",
"usage", "flavor_id", "days",
"project_id", "user_id"))
def group_usage(app, group_by="user"):
all_allocations = get_allocations_with_server_info(app, flat_usage=False)
def get_key(allocation):
if group_by == "user":
return allocation.user_id
if group_by == "project":
return allocation.project_id
return "(All)"
grouped_allocations = collections.defaultdict(list)
for allocation in all_allocations:
grouped_allocations[get_key(allocation)].append(allocation)
all_users = users.get_all(app.identity_client)
all_projects = users.get_all_projects(app.identity_client)
metrics_to_send = []
summary_tuples = []
for key, group in grouped_allocations.items():
grouped_usage = collections.defaultdict(int)
grouped_usage_days = collections.defaultdict(int)
for allocation in group:
for rca in allocation.usage:
grouped_usage[rca.resource_class] += rca.amount
grouped_usage_days[rca.resource_class] += (
rca.amount * allocation.days)
grouped_usage["Count"] += 1
grouped_usage_days["Count"] += allocation.days
usage_amounts = ["%s:%s" % (resource_class, total)
for resource_class, total in grouped_usage.items()]
usage_amounts.sort()
usage = ", ".join(usage_amounts)
usage_days_amounts = [
"%s:%s" % (resource_class, total)
for resource_class, total in grouped_usage_days.items()]
usage_days_amounts.sort()
usage_days = ", ".join(usage_days_amounts)
# Resolve id to name, if possible
key_name = None
if group_by == "user":
key_name = all_users.get(key)
elif group_by == "project":
key_name = all_projects.get(key)
summary_tuples.append((key_name or key, usage, usage_days))
if group_by == "user" or group_by == "project_id":
if group_by == "user":
dimensions = {"user_id": key}
name_key = "username"
else:
dimensions = {"project_id": key}
name_key = "project_name"
if key_name:
dimensions[name_key] = key_name
value_meta = {'usage_summary': usage}
dimensions['version'] = '2.0'
metrics_to_send.append(metrics.Metric(
name="usage.%s.count" % group_by,
value=grouped_usage['Count'],
value_meta=value_meta,
dimensions=dimensions))
metrics_to_send.append(metrics.Metric(
name="usage.%s.days.count" % group_by,
value=grouped_usage_days['Count'],
value_meta=value_meta,
dimensions=dimensions))
# Sort by largest current usage first
summary_tuples.sort(key=lambda x: x[1], reverse=True)
if metrics_to_send:
metrics.send_metrics(app.monitoring_client, metrics_to_send)
return summary_tuples
``` |
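`group_providers_by_type_with_capacity` works because flavors and resource providers are bucketed by the same `(vcpus, ram_mb, disk_gb, custom_resource_class)` key, so the Ironic-style "one flavor per hardware shape" counts line up per bucket. A standalone sketch of that idea with made-up numbers (all data here is purely illustrative):
```python
# Illustrative only: bucket flavors and providers by the same resource key and
# count total/used/free providers per flavor shape.
import collections

flavors = {"baremetal-small": (8, 16384, 400, "CUSTOM_SMALL"),
           "baremetal-big": (32, 131072, 800, "CUSTOM_BIG")}
# (shape key, has_allocation) pairs standing in for placement inventories.
providers = [((8, 16384, 400, "CUSTOM_SMALL"), True),
             ((8, 16384, 400, "CUSTOM_SMALL"), False),
             ((32, 131072, 800, "CUSTOM_BIG"), False)]

flavors_by_key = collections.defaultdict(list)
for name, key in flavors.items():
    flavors_by_key[key].append(name)

totals = collections.Counter(key for key, _ in providers)
used = collections.Counter(key for key, allocated in providers if allocated)

for key, total in totals.items():
    free = total - used[key]
    print(key, "total=%d used=%d free=%d" % (total, used[key], free),
          ", ".join(sorted(flavors_by_key[key])))
```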
{
"source": "JohnGarbutt/python-dracclient",
"score": 2
} |
#### File: dracclient/resources/raid.py
```python
import collections
import copy
import logging
from dracclient import constants
from dracclient import exceptions
from dracclient.resources import uris
from dracclient import utils
LOG = logging.getLogger(__name__)
RAID_LEVELS = {
'non-raid': '1',
'0': '2',
'1': '4',
'5': '64',
'6': '128',
'1+0': '2048',
'5+0': '8192',
'6+0': '16384',
}
REVERSE_RAID_LEVELS = dict((v, k) for (k, v) in RAID_LEVELS.items())
RAID_CONTROLLER_IS_REALTIME = {
'1': True,
'0': False
}
DISK_RAID_STATUS = {
'0': 'unknown',
'1': 'ready',
'2': 'online',
'3': 'foreign',
'4': 'offline',
'5': 'blocked',
'6': 'failed',
'7': 'degraded',
'8': 'non-RAID',
'9': 'missing'
}
VIRTUAL_DISK_PENDING_OPERATIONS = {
'0': None,
'1': 'fast_init',
'2': 'pending_delete',
'3': 'pending_create'
}
PHYSICAL_DISK_MEDIA_TYPE = {
'0': 'hdd',
'1': 'ssd'
}
PHYSICAL_DISK_BUS_PROTOCOL = {
'0': 'unknown',
'1': 'scsi',
'2': 'pata',
'3': 'fibre',
'4': 'usb',
'5': 'sata',
'6': 'sas',
'7': 'pcie',
'8': 'nvme'
}
PhysicalDisk = collections.namedtuple(
'PhysicalDisk',
['id', 'description', 'controller', 'manufacturer', 'model', 'media_type',
'interface_type', 'size_mb', 'free_size_mb', 'serial_number',
'firmware_version', 'status', 'raid_status', 'sas_address',
'device_protocol', 'bus'])
RAIDController = collections.namedtuple(
'RAIDController', ['id', 'description', 'manufacturer', 'model',
'primary_status', 'firmware_version', 'bus',
'supports_realtime'])
VirtualDisk = collections.namedtuple(
'VirtualDisk',
['id', 'name', 'description', 'controller', 'raid_level', 'size_mb',
'status', 'raid_status', 'span_depth', 'span_length',
'pending_operations', 'physical_disks'])
NO_FOREIGN_DRIVES = ["STOR058", "STOR018"]
class RAIDAttribute(object):
"""Generic RAID attribute class"""
def __init__(self, name, instance_id, current_value, pending_value,
read_only, fqdd):
"""Creates RAIDAttribute object
:param name: name of the RAID attribute
:param instance_id: InstanceID of the RAID attribute
:param current_value: list containing the current values of the
RAID attribute
:param pending_value: pending value of the RAID attribute, reflecting
an unprocessed change (eg. config job not completed)
:param read_only: indicates whether this RAID attribute can be changed
:param fqdd: Fully Qualified Device Description of the RAID Attribute
"""
self.name = name
self.instance_id = instance_id
self.current_value = current_value
self.pending_value = pending_value
self.read_only = read_only
self.fqdd = fqdd
def __eq__(self, other):
return self.__dict__ == other.__dict__
@classmethod
def parse(cls, namespace, raid_attr_xml):
"""Parses XML and creates RAIDAttribute object"""
name = utils.get_wsman_resource_attr(
raid_attr_xml, namespace, 'AttributeName')
instance_id = utils.get_wsman_resource_attr(
raid_attr_xml, namespace, 'InstanceID')
current_value = [attr.text for attr in
utils.find_xml(raid_attr_xml, 'CurrentValue',
namespace, find_all=True)]
pending_value = utils.get_wsman_resource_attr(
raid_attr_xml, namespace, 'PendingValue', nullable=True)
read_only = utils.get_wsman_resource_attr(
raid_attr_xml, namespace, 'IsReadOnly')
fqdd = utils.get_wsman_resource_attr(
raid_attr_xml, namespace, 'FQDD')
return cls(name, instance_id, current_value, pending_value,
(read_only == 'true'), fqdd)
class RAIDEnumerableAttribute(RAIDAttribute):
"""Enumerable RAID attribute class"""
namespace = uris.DCIM_RAIDEnumeration
def __init__(self, name, instance_id, current_value, pending_value,
read_only, fqdd, possible_values):
"""Creates RAIDEnumerableAttribute object
:param name: name of the RAID attribute
:param instance_id: InstanceID of the RAID attribute
:param current_value: list containing the current values of the
RAID attribute
:param pending_value: pending value of the RAID attribute, reflecting
an unprocessed change (eg. config job not completed)
:param read_only: indicates whether this RAID attribute can be changed
:param fqdd: Fully Qualified Device Description of the RAID
Attribute
:param possible_values: list containing the allowed values for the RAID
attribute
"""
super(RAIDEnumerableAttribute, self).__init__(name, instance_id,
current_value,
pending_value,
read_only, fqdd)
self.possible_values = possible_values
@classmethod
def parse(cls, raid_attr_xml):
"""Parses XML and creates RAIDEnumerableAttribute object"""
raid_attr = RAIDAttribute.parse(cls.namespace, raid_attr_xml)
possible_values = [attr.text for attr
in utils.find_xml(raid_attr_xml,
'PossibleValues',
cls.namespace, find_all=True)]
return cls(raid_attr.name, raid_attr.instance_id,
raid_attr.current_value, raid_attr.pending_value,
raid_attr.read_only, raid_attr.fqdd, possible_values)
def validate(self, new_value):
"""Validates new value"""
if str(new_value) not in self.possible_values:
msg = ("Attribute '%(attr)s' cannot be set to value '%(val)s'."
" It must be in %(possible_values)r.") % {
'attr': self.name,
'val': new_value,
'possible_values': self.possible_values}
return msg
class RAIDStringAttribute(RAIDAttribute):
"""String RAID attribute class"""
namespace = uris.DCIM_RAIDString
def __init__(self, name, instance_id, current_value, pending_value,
read_only, fqdd, min_length, max_length):
"""Creates RAIDStringAttribute object
:param name: name of the RAID attribute
:param instance_id: InstanceID of the RAID attribute
:param current_value: list containing the current values of the
RAID attribute
:param pending_value: pending value of the RAID attribute, reflecting
an unprocessed change (eg. config job not completed)
:param read_only: indicates whether this RAID attribute can be changed
:param fqdd: Fully Qualified Device Description of the RAID
Attribute
:param min_length: minimum length of the string
:param max_length: maximum length of the string
"""
super(RAIDStringAttribute, self).__init__(name, instance_id,
current_value, pending_value,
read_only, fqdd)
self.min_length = min_length
self.max_length = max_length
@classmethod
def parse(cls, raid_attr_xml):
"""Parses XML and creates RAIDStringAttribute object"""
raid_attr = RAIDAttribute.parse(cls.namespace, raid_attr_xml)
min_length = int(utils.get_wsman_resource_attr(
raid_attr_xml, cls.namespace, 'MinLength'))
max_length = int(utils.get_wsman_resource_attr(
raid_attr_xml, cls.namespace, 'MaxLength'))
return cls(raid_attr.name, raid_attr.instance_id,
raid_attr.current_value, raid_attr.pending_value,
raid_attr.read_only, raid_attr.fqdd,
min_length, max_length)
class RAIDIntegerAttribute(RAIDAttribute):
"""Integer RAID attribute class"""
namespace = uris.DCIM_RAIDInteger
def __init__(self, name, instance_id, current_value, pending_value,
read_only, fqdd, lower_bound, upper_bound):
"""Creates RAIDIntegerAttribute object
:param name: name of the RAID attribute
:param instance_id: InstanceID of the RAID attribute
:param current_value: list containing the current value of the
RAID attribute
:param pending_value: pending value of the RAID attribute,
reflecting an unprocessed change
(eg. config job not completed)
:param read_only: indicates whether this RAID attribute can be
changed
:param fqdd: Fully Qualified Device Description of the RAID
Attribute
:param lower_bound: minimum value for the RAID attribute
:param upper_bound: maximum value for the RAID attribute
"""
super(RAIDIntegerAttribute, self).__init__(name, instance_id,
current_value,
pending_value,
read_only, fqdd)
self.lower_bound = lower_bound
self.upper_bound = upper_bound
@classmethod
def parse(cls, raid_attr_xml):
"""Parses XML and creates RAIDIntegerAttribute object"""
raid_attr = RAIDAttribute.parse(cls.namespace, raid_attr_xml)
lower_bound = utils.get_wsman_resource_attr(
raid_attr_xml, cls.namespace, 'LowerBound')
upper_bound = utils.get_wsman_resource_attr(
raid_attr_xml, cls.namespace, 'UpperBound')
if raid_attr.current_value:
raid_attr.current_value = int(raid_attr.current_value[0])
if raid_attr.pending_value:
raid_attr.pending_value = int(raid_attr.pending_value)
return cls(raid_attr.name, raid_attr.instance_id,
raid_attr.current_value, raid_attr.pending_value,
raid_attr.read_only, raid_attr.fqdd,
int(lower_bound), int(upper_bound))
def validate(self, new_value):
"""Validates new value"""
val = int(new_value)
if val < self.lower_bound or val > self.upper_bound:
msg = ('Attribute %(attr)s cannot be set to value %(val)d.'
' It must be between %(lower)d and %(upper)d.') % {
'attr': self.name,
'val': new_value,
'lower': self.lower_bound,
'upper': self.upper_bound}
return msg
class RAIDManagement(object):
NAMESPACES = [(uris.DCIM_RAIDEnumeration, RAIDEnumerableAttribute),
(uris.DCIM_RAIDString, RAIDStringAttribute),
(uris.DCIM_RAIDInteger, RAIDIntegerAttribute)]
def __init__(self, client):
"""Creates RAIDManagement object
:param client: an instance of WSManClient
"""
self.client = client
def list_raid_settings(self):
"""List the RAID configuration settings
:returns: a dictionary with the RAID settings using InstanceID as the
key. The attributes are either RAIDEnumerableAttribute,
RAIDStringAttribute objects.
:raises: WSManRequestFailure on request failures
:raises: WSManInvalidResponse when receiving invalid response
:raises: DRACOperationFailed on error reported back by the DRAC
interface
"""
return utils.list_settings(self.client, self.NAMESPACES,
by_name=False)
def set_raid_settings(self, raid_fqdd, new_settings):
"""Sets the RAID configuration
It sets the pending_value parameter for each of the attributes
passed in. For the values to be applied, a config job must
be created.
:param raid_fqdd: the FQDD of the RAID setting.
:param new_settings: a dictionary containing the proposed values, with
each key being the name of attribute and the value
being the proposed value.
:returns: a dictionary containing:
- The is_commit_required key with a boolean value indicating
whether a config job must be created for the values to be
applied.
- The is_reboot_required key with a RebootRequired enumerated
value indicating whether the server must be rebooted for the
values to be applied. Possible values are true and false.
:raises: WSManRequestFailure on request failures
:raises: WSManInvalidResponse when receiving invalid response
:raises: DRACOperationFailed on error reported back by the DRAC
interface
"""
return utils.set_settings('RAID',
self.client,
self.NAMESPACES,
new_settings,
uris.DCIM_RAIDService,
"DCIM_RAIDService",
"DCIM:RAIDService",
raid_fqdd,
by_name=False)
def list_raid_controllers(self):
"""Returns the list of RAID controllers
:returns: a list of RAIDController objects
:raises: WSManRequestFailure on request failures
:raises: WSManInvalidResponse when receiving invalid response
:raises: DRACOperationFailed on error reported back by the DRAC
interface
"""
doc = self.client.enumerate(uris.DCIM_ControllerView)
drac_raid_controllers = utils.find_xml(doc, 'DCIM_ControllerView',
uris.DCIM_ControllerView,
find_all=True)
return [self._parse_drac_raid_controller(controller)
for controller in drac_raid_controllers]
def _parse_drac_raid_controller(self, drac_controller):
return RAIDController(
id=self._get_raid_controller_attr(drac_controller, 'FQDD'),
description=self._get_raid_controller_attr(
drac_controller, 'DeviceDescription'),
manufacturer=self._get_raid_controller_attr(
drac_controller, 'DeviceCardManufacturer'),
model=self._get_raid_controller_attr(
drac_controller, 'ProductName'),
primary_status=constants.PRIMARY_STATUS[
self._get_raid_controller_attr(drac_controller,
'PrimaryStatus')],
firmware_version=self._get_raid_controller_attr(
drac_controller, 'ControllerFirmwareVersion'),
bus=self._get_raid_controller_attr(drac_controller, 'Bus').upper(),
supports_realtime=RAID_CONTROLLER_IS_REALTIME[
self._get_raid_controller_attr(
drac_controller, 'RealtimeCapability')])
def _get_raid_controller_attr(self, drac_controller, attr_name):
return utils.get_wsman_resource_attr(
drac_controller, uris.DCIM_ControllerView, attr_name,
nullable=True)
def list_virtual_disks(self):
"""Returns the list of virtual disks
:returns: a list of VirtualDisk objects
:raises: WSManRequestFailure on request failures
:raises: WSManInvalidResponse when receiving invalid response
:raises: DRACOperationFailed on error reported back by the DRAC
interface
"""
doc = self.client.enumerate(uris.DCIM_VirtualDiskView)
drac_virtual_disks = utils.find_xml(doc, 'DCIM_VirtualDiskView',
uris.DCIM_VirtualDiskView,
find_all=True)
return [self._parse_drac_virtual_disk(disk)
for disk in drac_virtual_disks]
def _parse_drac_virtual_disk(self, drac_disk):
fqdd = self._get_virtual_disk_attr(drac_disk, 'FQDD')
drac_raid_level = self._get_virtual_disk_attr(drac_disk, 'RAIDTypes')
size_b = self._get_virtual_disk_attr(drac_disk, 'SizeInBytes')
drac_status = self._get_virtual_disk_attr(drac_disk, 'PrimaryStatus')
drac_raid_status = self._get_virtual_disk_attr(
drac_disk, 'RAIDStatus', allow_missing=True)
if drac_raid_status is None:
drac_raid_status = self._get_virtual_disk_attr(
drac_disk, 'RaidStatus')
drac_pending_operations = self._get_virtual_disk_attr(
drac_disk, 'PendingOperations')
return VirtualDisk(
id=fqdd,
name=self._get_virtual_disk_attr(drac_disk, 'Name',
nullable=True),
description=self._get_virtual_disk_attr(drac_disk,
'DeviceDescription',
nullable=True),
controller=fqdd.split(':')[-1],
raid_level=REVERSE_RAID_LEVELS[drac_raid_level],
size_mb=int(size_b) // 2 ** 20,
status=constants.PRIMARY_STATUS[drac_status],
raid_status=DISK_RAID_STATUS[drac_raid_status],
span_depth=int(self._get_virtual_disk_attr(drac_disk,
'SpanDepth')),
span_length=int(self._get_virtual_disk_attr(drac_disk,
'SpanLength')),
pending_operations=(
VIRTUAL_DISK_PENDING_OPERATIONS[drac_pending_operations]),
physical_disks=self._get_virtual_disk_attrs(drac_disk,
'PhysicalDiskIDs'))
def _get_virtual_disk_attr(
self, drac_disk, attr_name, nullable=False, allow_missing=False):
return utils.get_wsman_resource_attr(
drac_disk, uris.DCIM_VirtualDiskView, attr_name,
nullable=nullable, allow_missing=allow_missing)
def _get_virtual_disk_attrs(self, drac_disk, attr_name):
return utils.get_all_wsman_resource_attrs(
drac_disk, uris.DCIM_VirtualDiskView, attr_name, nullable=False)
def list_physical_disks(self):
"""Returns the list of physical disks
:returns: a list of PhysicalDisk objects
:raises: WSManRequestFailure on request failures
:raises: WSManInvalidResponse when receiving invalid response
:raises: DRACOperationFailed on error reported back by the DRAC
interface
"""
doc = self.client.enumerate(uris.DCIM_PhysicalDiskView)
drac_physical_disks = utils.find_xml(doc, 'DCIM_PhysicalDiskView',
uris.DCIM_PhysicalDiskView,
find_all=True)
physical_disks = [self._parse_drac_physical_disk(disk)
for disk in drac_physical_disks]
drac_pcie_disks = utils.find_xml(doc, 'DCIM_PCIeSSDView',
uris.DCIM_PCIeSSDView,
find_all=True)
pcie_disks = [self._parse_drac_physical_disk(disk,
uris.DCIM_PCIeSSDView) for disk in drac_pcie_disks]
return physical_disks + pcie_disks
def _parse_drac_physical_disk(self,
drac_disk,
uri=uris.DCIM_PhysicalDiskView):
fqdd = self._get_physical_disk_attr(drac_disk, 'FQDD', uri)
size_b = self._get_physical_disk_attr(drac_disk, 'SizeInBytes', uri)
free_size_b = self._get_physical_disk_attr(drac_disk,
'FreeSizeInBytes', uri)
if free_size_b is not None:
free_size_mb = int(free_size_b) // 2 ** 20
else:
free_size_mb = None
drac_status = self._get_physical_disk_attr(drac_disk, 'PrimaryStatus',
uri)
drac_raid_status = self._get_physical_disk_attr(drac_disk,
'RaidStatus', uri)
if drac_raid_status is not None:
raid_status = DISK_RAID_STATUS[drac_raid_status]
else:
raid_status = None
drac_media_type = self._get_physical_disk_attr(drac_disk, 'MediaType',
uri)
drac_bus_protocol = self._get_physical_disk_attr(drac_disk,
'BusProtocol', uri)
bus = self._get_physical_disk_attr(drac_disk,
'Bus', uri, allow_missing=True)
if bus is not None:
bus = bus.upper()
return PhysicalDisk(
id=fqdd,
description=self._get_physical_disk_attr(drac_disk,
'DeviceDescription',
uri),
controller=fqdd.split(':')[-1],
manufacturer=self._get_physical_disk_attr(drac_disk,
'Manufacturer', uri),
model=self._get_physical_disk_attr(drac_disk, 'Model', uri),
media_type=PHYSICAL_DISK_MEDIA_TYPE[drac_media_type],
interface_type=PHYSICAL_DISK_BUS_PROTOCOL[drac_bus_protocol],
size_mb=int(size_b) // 2 ** 20,
free_size_mb=free_size_mb,
serial_number=self._get_physical_disk_attr(drac_disk,
'SerialNumber', uri),
firmware_version=self._get_physical_disk_attr(drac_disk,
'Revision', uri),
status=constants.PRIMARY_STATUS[drac_status],
raid_status=raid_status,
sas_address=self._get_physical_disk_attr(drac_disk, 'SASAddress',
uri, allow_missing=True),
device_protocol=self._get_physical_disk_attr(drac_disk,
'DeviceProtocol',
uri,
allow_missing=True),
bus=bus)
def _get_physical_disk_attr(self, drac_disk, attr_name, uri,
allow_missing=False):
return utils.get_wsman_resource_attr(
drac_disk, uri, attr_name, nullable=True,
allow_missing=allow_missing)
def convert_physical_disks(self, physical_disks, raid_enable):
"""Converts a list of physical disks into or out of RAID mode.
Disks can be enabled or disabled for RAID mode.
:param physical_disks: list of FQDD ID strings of the physical disks
to update
:param raid_enable: boolean flag, set to True if the disk is to
become part of the RAID. The same flag is applied
to all listed disks
:returns: a dictionary containing:
- The is_commit_required key with the value always set to
True indicating that a config job must be created to
complete disk conversion.
- The is_reboot_required key with a RebootRequired enumerated
value indicating whether the server must be rebooted to
complete disk conversion.
"""
invocation = 'ConvertToRAID' if raid_enable else 'ConvertToNonRAID'
selectors = {'SystemCreationClassName': 'DCIM_ComputerSystem',
'CreationClassName': 'DCIM_RAIDService',
'SystemName': 'DCIM:ComputerSystem',
'Name': 'DCIM:RAIDService'}
properties = {'PDArray': physical_disks}
doc = self.client.invoke(uris.DCIM_RAIDService, invocation,
selectors, properties,
expected_return_value=utils.RET_SUCCESS)
return utils.build_return_dict(doc, uris.DCIM_RAIDService,
is_commit_required_value=True)
def create_virtual_disk(self, raid_controller, physical_disks, raid_level,
size_mb, disk_name=None, span_length=None,
span_depth=None):
"""Creates a virtual disk
The created virtual disk will be in pending state. For the changes to
be applied, a config job must be created and the node must be rebooted.
:param raid_controller: id of the RAID controller
:param physical_disks: ids of the physical disks
:param raid_level: RAID level of the virtual disk
:param size_mb: size of the virtual disk in megabytes
:param disk_name: name of the virtual disk (optional)
:param span_length: number of disks per span (optional)
:param span_depth: number of spans in virtual disk (optional)
:returns: a dictionary containing:
- The is_commit_required key with the value always set to
True indicating that a config job must be created to
complete virtual disk creation.
- The is_reboot_required key with a RebootRequired enumerated
value indicating whether the server must be rebooted to
complete virtual disk creation.
:raises: WSManRequestFailure on request failures
:raises: WSManInvalidResponse when receiving invalid response
:raises: DRACOperationFailed on error reported back by the DRAC
interface
:raises: DRACUnexpectedReturnValue on return value mismatch
:raises: InvalidParameterValue on invalid input parameter
"""
virtual_disk_prop_names = []
virtual_disk_prop_values = []
error_msgs = []
# RAID controller validation
if not raid_controller:
error_msgs.append("'raid_controller' is not supplied")
# physical disks validation
if not physical_disks:
error_msgs.append("'physical_disks' is not supplied")
# size validation
utils.validate_integer_value(size_mb, 'size_mb', error_msgs)
virtual_disk_prop_names.append('Size')
virtual_disk_prop_values.append(str(size_mb))
# RAID level validation
virtual_disk_prop_names.append('RAIDLevel')
try:
virtual_disk_prop_values.append(RAID_LEVELS[str(raid_level)])
except KeyError:
error_msgs.append("'raid_level' is invalid")
if disk_name is not None:
virtual_disk_prop_names.append('VirtualDiskName')
virtual_disk_prop_values.append(disk_name)
if span_depth is not None:
utils.validate_integer_value(span_depth, 'span_depth', error_msgs)
virtual_disk_prop_names.append('SpanDepth')
virtual_disk_prop_values.append(str(span_depth))
if span_length is not None:
utils.validate_integer_value(span_length, 'span_length',
error_msgs)
virtual_disk_prop_names.append('SpanLength')
virtual_disk_prop_values.append(str(span_length))
if error_msgs:
msg = ('The following errors were encountered while parsing '
'the provided parameters: %r') % ','.join(error_msgs)
raise exceptions.InvalidParameterValue(reason=msg)
selectors = {'SystemCreationClassName': 'DCIM_ComputerSystem',
'CreationClassName': 'DCIM_RAIDService',
'SystemName': 'DCIM:ComputerSystem',
'Name': 'DCIM:RAIDService'}
properties = {'Target': raid_controller,
'PDArray': physical_disks,
'VDPropNameArray': virtual_disk_prop_names,
'VDPropValueArray': virtual_disk_prop_values}
doc = self.client.invoke(uris.DCIM_RAIDService, 'CreateVirtualDisk',
selectors, properties,
expected_return_value=utils.RET_SUCCESS)
return utils.build_return_dict(doc, uris.DCIM_RAIDService,
is_commit_required_value=True)
def delete_virtual_disk(self, virtual_disk):
"""Deletes a virtual disk
The deleted virtual disk will be in pending state. For the changes to
be applied, a config job must be created and the node must be rebooted.
:param virtual_disk: id of the virtual disk
:returns: a dictionary containing:
- The is_commit_required key with the value always set to
True indicating that a config job must be created to
complete virtual disk deletion.
- The is_reboot_required key with a RebootRequired enumerated
value indicating whether the server must be rebooted to
complete virtual disk deletion.
:raises: WSManRequestFailure on request failures
:raises: WSManInvalidResponse when receiving invalid response
:raises: DRACOperationFailed on error reported back by the DRAC
interface
:raises: DRACUnexpectedReturnValue on return value mismatch
"""
selectors = {'SystemCreationClassName': 'DCIM_ComputerSystem',
'CreationClassName': 'DCIM_RAIDService',
'SystemName': 'DCIM:ComputerSystem',
'Name': 'DCIM:RAIDService'}
properties = {'Target': virtual_disk}
doc = self.client.invoke(uris.DCIM_RAIDService, 'DeleteVirtualDisk',
selectors, properties,
expected_return_value=utils.RET_SUCCESS)
return utils.build_return_dict(doc, uris.DCIM_RAIDService,
is_commit_required_value=True)
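# Hedged usage note (added commentary, not part of the original module): the
# create/delete calls above only stage pending changes; a config job must be
# scheduled afterwards, e.g. through the top-level client's job helpers such
# as commit_pending_raid_changes() (exact helper name assumed):
#
#   result = raid_mgmt.create_virtual_disk('RAID.Integrated.1-1',
#                                          ['Disk.Bay.0:...'], '1', 51200)
#   if result['is_commit_required']:
#       client.commit_pending_raid_changes('RAID.Integrated.1-1', reboot=True)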
def is_jbod_capable(self, raid_controller_fqdd):
"""Find out if raid controller supports jbod
:param raid_controller_fqdd: The raid controller's fqdd
being checked to see if it is jbod
capable.
:raises: DRACRequestFailed if unable to find any disks in the Ready
or non-RAID states
:raises: DRACOperationFailed on error reported back by the DRAC
and the exception message does not contain
NOT_SUPPORTED_MSG constant
"""
is_jbod_capable = False
# Grab all the disks associated with the RAID controller
all_physical_disks = self.list_physical_disks()
physical_disks = [physical_disk for physical_disk in all_physical_disks
if physical_disk.controller == raid_controller_fqdd]
# If there is a disk in the Non-RAID state, then the controller is JBOD
# capable
ready_disk = None
for physical_disk in physical_disks:
if physical_disk.raid_status == 'non-RAID':
is_jbod_capable = True
break
elif not ready_disk and physical_disk.raid_status == 'ready':
ready_disk = physical_disk
if not is_jbod_capable:
if not ready_disk:
msg = "Unable to find a disk in the Ready state"
raise exceptions.DRACRequestFailed(msg)
# Try moving a disk in the Ready state to JBOD mode
try:
self.convert_physical_disks([ready_disk.id], False)
is_jbod_capable = True
# Flip the disk back to the Ready state. This results in the
# pending value being reset to nothing, so it effectively
# undoes the last command and makes the check non-destructive
self.convert_physical_disks([ready_disk.id], True)
except exceptions.DRACOperationFailed as ex:
# Fix for python 3, Exception.message no longer
# a valid attribute, str(ex) works for both 2.7
# and 3.x
if constants.NOT_SUPPORTED_MSG in str(ex):
pass
else:
raise
return is_jbod_capable
def is_raid_controller(self, raid_controller_fqdd, raid_controllers=None):
"""Find out if object's fqdd is for a raid controller or not
:param raid_controller_fqdd: The object's fqdd we are testing to see
if it is a raid controller or not.
:param raid_controllers: A list of RAIDControllers used to check for
the presence of BOSS cards. If None, the
iDRAC will be queried for the list of
controllers.
:returns: boolean, True if the device is a RAID controller,
False if not.
"""
return raid_controller_fqdd.startswith('RAID.') or \
self.is_boss_controller(raid_controller_fqdd, raid_controllers)
def is_boss_controller(self, raid_controller_fqdd, raid_controllers=None):
"""Find out if a RAID controller a BOSS card or not
:param raid_controller_fqdd: The object's fqdd we are testing to see
if it is a BOSS card or not.
:param raid_controllers: A list of RAIDController to scan for presence
of BOSS card, if None the drac will be queried
for the list of controllers which will then be
scanned.
:returns: boolean, True if the device is a BOSS card, False if not.
:raises: WSManRequestFailure on request failures
:raises: WSManInvalidResponse when receiving invalid response
:raises: DRACOperationFailed on error reported back by the DRAC
interface
"""
if raid_controllers is None:
raid_controllers = self.list_raid_controllers()
boss_raid_controllers = [
c.id for c in raid_controllers if c.model.startswith('BOSS')]
return raid_controller_fqdd in boss_raid_controllers
def _check_disks_status(self, mode, physical_disks,
controllers_to_physical_disk_ids):
"""Find disks that failed, need to be configured, or need no change.
        Inspect all of the controllers' drives and:
        - See if there are any disks in a failed or unknown state and raise
          a ValueError where appropriate.
        - If a controller has disks that still need to be configured, add
          them to the controllers_to_physical_disk_ids dict for the
          appropriate controller.
        - If a disk is already in the appropriate state, do nothing; this
          function behaves in an idempotent manner.
:param mode: constants.RaidStatus enumeration used to
determine what raid status to check for.
:param physical_disks: all physical disks
:param controllers_to_physical_disk_ids: Dictionary of controllers and
corresponding disk ids to convert to the requested mode.
:returns: a dictionary mapping controller FQDDs to the list of
physical disks that need to be converted for that controller.
        :raises: ValueError: The exception message lists the failed drives and
            the drives whose state cannot be changed at this time because
            their state is not "ready" or "non-RAID".
"""
controllers_to_physical_disk_ids = copy.deepcopy(
controllers_to_physical_disk_ids)
p_disk_id_to_status = {}
for physical_disk in physical_disks:
p_disk_id_to_status[physical_disk.id] = physical_disk.raid_status
failed_disks = []
bad_disks = []
jbod = constants.RaidStatus.jbod
raid = constants.RaidStatus.raid
for controller, physical_disk_ids \
in controllers_to_physical_disk_ids.items():
final_physical_disk_ids = []
for physical_disk_id in physical_disk_ids:
raid_status = p_disk_id_to_status[physical_disk_id]
LOG.debug("RAID status for disk id: %s is: %s",
physical_disk_id, raid_status)
if ((mode == jbod and raid_status == "non-RAID") or
(mode == raid and raid_status == "ready")):
# This means the disk is already in the desired state,
# so skip it
continue
elif ((mode == jbod and raid_status == "ready") or
(mode == raid and raid_status == "non-RAID")):
# This disk is moving from a state we expect to RAID or
# JBOD, so keep it
final_physical_disk_ids.append(physical_disk_id)
elif raid_status == "failed":
failed_disks.append(physical_disk_id)
else:
# This disk is in one of many states that we don't know
# what to do with, so pitch it
bad_disks.append("{} ({})".format(physical_disk_id,
raid_status))
controllers_to_physical_disk_ids[controller] = (
final_physical_disk_ids)
if failed_disks or bad_disks:
error_msg = ""
if failed_disks:
error_msg += ("The following drives have failed: "
"{failed_disks}. Manually check the status"
" of all drives and replace as necessary, then"
" try again.").format(
failed_disks=" ".join(failed_disks))
if bad_disks:
if failed_disks:
error_msg += "\n"
error_msg += ("Unable to change the state of the following "
"drives because their status is not ready "
"or non-RAID: {}. Bring up the RAID "
"controller GUI on this node and change the "
"drives' status to ready or non-RAID.").format(
", ".join(bad_disks))
raise ValueError(error_msg)
return controllers_to_physical_disk_ids
def change_physical_disk_state(self, mode,
controllers_to_physical_disk_ids=None):
"""Convert disks RAID status
This method intelligently converts the requested physical disks from
RAID to JBOD or vice versa. It does this by only converting the
disks that are not already in the correct state.
:param mode: constants.RaidStatus enumeration that indicates the mode
to change the disks to.
:param controllers_to_physical_disk_ids: Dictionary of controllers and
corresponding disk ids to convert to the requested mode.
:returns: a dictionary containing:
- conversion_results, a dictionary that maps controller ids
to the conversion results for that controller. The
conversion results are a dict that contains:
- The is_commit_required key with the value always set to
True indicating that a config job must be created to
complete disk conversion.
- The is_reboot_required key with a RebootRequired
enumerated value indicating whether the server must be
rebooted to complete disk conversion.
        :raises: DRACOperationFailed on error reported back by the DRAC when
                 the exception message does not contain the NOT_SUPPORTED_MSG
                 constant.
:raises: Exception on unknown error.
"""
physical_disks = self.list_physical_disks()
raid = constants.RaidStatus.raid
if not controllers_to_physical_disk_ids:
controllers_to_physical_disk_ids = collections.defaultdict(list)
all_controllers = self.list_raid_controllers()
for physical_d in physical_disks:
# Weed out disks that are not attached to a RAID controller
if self.is_raid_controller(physical_d.controller,
all_controllers):
physical_disk_ids = controllers_to_physical_disk_ids[
physical_d.controller]
physical_disk_ids.append(physical_d.id)
'''Modify controllers_to_physical_disk_ids dict by inspecting desired
status vs current status of each controller's disks.
Raise exception if there are any failed drives or
drives not in status 'ready' or 'non-RAID'
'''
final_ctls_to_phys_disk_ids = self._check_disks_status(
mode, physical_disks, controllers_to_physical_disk_ids)
controllers_to_results = {}
for controller, physical_disk_ids \
in final_ctls_to_phys_disk_ids.items():
if physical_disk_ids:
LOG.debug("Converting the following disks to {} on RAID "
"controller {}: {}".format(
mode, controller, str(physical_disk_ids)))
try:
conversion_results = \
self.convert_physical_disks(physical_disk_ids,
mode == raid)
except exceptions.DRACOperationFailed as ex:
if constants.NOT_SUPPORTED_MSG in str(ex):
LOG.debug("Controller {} does not support "
"JBOD mode".format(controller))
controllers_to_results[controller] = \
utils.build_return_dict(
doc=None,
resource_uri=None,
is_commit_required_value=False,
is_reboot_required_value=constants.
RebootRequired.false)
else:
raise
else:
controllers_to_results[controller] = conversion_results
else:
controllers_to_results[controller] = \
utils.build_return_dict(
doc=None,
resource_uri=None,
is_commit_required_value=False,
is_reboot_required_value=constants.
RebootRequired.false)
return {'conversion_results': controllers_to_results}
def is_realtime_supported(self, raid_controller_fqdd):
"""Find if controller supports realtime or not
:param raid_controller_fqdd: ID of RAID controller
:returns: True or False
"""
drac_raid_controllers = self.list_raid_controllers()
realtime_controller = [cnt.id for cnt in drac_raid_controllers
if cnt.supports_realtime]
if raid_controller_fqdd in realtime_controller:
return True
return False
def reset_raid_config(self, raid_controller):
"""Delete all virtual disk and unassign all hotspares
The job to reset the RAID controller config will be in pending state.
For the changes to be applied, a config job must be created.
:param raid_controller: id of the RAID controller
:returns: a dictionary containing:
- The is_commit_required key with the value always set to
True indicating that a config job must be created to
reset configuration.
- The is_reboot_required key with a RebootRequired enumerated
value indicating whether the server must be rebooted to
reset configuration.
:raises: WSManRequestFailure on request failures
:raises: WSManInvalidResponse when receiving invalid response
:raises: DRACOperationFailed on error reported back by the DRAC
interface
:raises: DRACUnexpectedReturnValue on return value mismatch
"""
selectors = {'SystemCreationClassName': 'DCIM_ComputerSystem',
'CreationClassName': 'DCIM_RAIDService',
'SystemName': 'DCIM:ComputerSystem',
'Name': 'DCIM:RAIDService'}
properties = {'Target': raid_controller}
doc = self.client.invoke(uris.DCIM_RAIDService, 'ResetConfig',
selectors, properties,
expected_return_value=utils.RET_SUCCESS)
return utils.build_return_dict(doc, uris.DCIM_RAIDService,
is_commit_required_value=True)
def clear_foreign_config(self, raid_controller):
"""Free up foreign drives
The job to clear foreign config will be in pending state.
For the changes to be applied, a config job must be created.
:param raid_controller: id of the RAID controller
:returns: a dictionary containing:
- The is_commit_required key with the value always set to
True indicating that a config job must be created to
clear foreign configuration.
- The is_reboot_required key with a RebootRequired enumerated
value indicating whether the server must be rebooted to
clear foreign configuration.
:raises: WSManRequestFailure on request failures
:raises: WSManInvalidResponse when receiving invalid response
:raises: DRACOperationFailed on error reported back by the DRAC
interface
:raises: DRACUnexpectedReturnValue on return value mismatch
"""
selectors = {'SystemCreationClassName': 'DCIM_ComputerSystem',
'CreationClassName': 'DCIM_RAIDService',
'SystemName': 'DCIM:ComputerSystem',
'Name': 'DCIM:RAIDService'}
properties = {'Target': raid_controller}
doc = self.client.invoke(uris.DCIM_RAIDService, 'ClearForeignConfig',
selectors, properties,
check_return_value=False)
is_commit_required_value = True
is_reboot_required_value = None
ret_value = utils.find_xml(doc,
'ReturnValue',
uris.DCIM_RAIDService).text
if ret_value == utils.RET_ERROR:
message_id = utils.find_xml(doc,
'MessageID',
uris.DCIM_RAIDService).text
# A MessageID 'STOR018'/'STOR058' indicates no foreign drive was
# detected. Return a value which informs the caller nothing
# further needs to be done.
no_foreign_drives_detected = any(
stor_id == message_id for stor_id in NO_FOREIGN_DRIVES)
if no_foreign_drives_detected:
is_commit_required_value = False
is_reboot_required_value = constants.RebootRequired.false
else:
message = utils.find_xml(doc,
'Message',
uris.DCIM_RAIDService).text
raise exceptions.DRACOperationFailed(
drac_messages=message)
return utils.build_return_dict(
doc, uris.DCIM_RAIDService,
is_commit_required_value=is_commit_required_value,
is_reboot_required_value=is_reboot_required_value)
```
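A short, hedged usage sketch of the RAID helpers above (not part of the library: the endpoint values are placeholders, the `DRACClient` constructor arguments are assumed to be the usual host/username/password ones, and `change_physical_disk_state` is assumed to be exposed on the `DRACClient` facade like the other resource methods). It converts eligible disks to RAID mode and reports which controllers still need a config job:

```python
import dracclient.client
from dracclient import constants

# Placeholder credentials for an iDRAC endpoint.
client = dracclient.client.DRACClient('192.0.2.1', 'root', 'calvin')

# Convert every eligible disk to RAID mode; disks already in the requested
# state are skipped, so the call is idempotent.
results = client.change_physical_disk_state(constants.RaidStatus.raid)

for controller, result in results['conversion_results'].items():
    if result['is_commit_required']:
        print('%s needs a config job to apply the conversion' % controller)
```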
#### File: dracclient/tests/test_idrac_card.py
```python
import lxml.etree
import re
from unittest import mock
import requests_mock
import dracclient.client
from dracclient import constants
from dracclient import exceptions
from dracclient.resources import idrac_card
from dracclient.resources import job
from dracclient.resources import uris
from dracclient.tests import base
from dracclient.tests import utils as test_utils
@requests_mock.Mocker()
@mock.patch.object(dracclient.client.WSManClient,
'wait_until_idrac_is_ready', spec_set=True,
autospec=True)
class ClientiDRACCardConfigurationTestCase(base.BaseTest):
def setUp(self):
super(ClientiDRACCardConfigurationTestCase, self).setUp()
self.drac_client = dracclient.client.DRACClient(
**test_utils.FAKE_ENDPOINT)
def test_list_idrac_settings_by_instance_id(
self, mock_requests, mock_wait_until_idrac_is_ready):
expected_enum_attr = idrac_card.iDRACCardEnumerableAttribute(
name='Type',
instance_id='iDRAC.Embedded.1#Info.1#Type',
read_only=True,
current_value='13G Monolithic',
pending_value=None,
fqdd='iDRAC.Embedded.1',
group_id='Info.1',
possible_values=['12G/13G', '12G Monolithic', '12G Modular',
'13G Monolithic', '13G Modular', '12G DCS',
'13G DCS'])
expected_string_attr = idrac_card.iDRACCardStringAttribute(
name='Version',
instance_id='iDRAC.Embedded.1#Info.1#Version',
read_only=True,
current_value='172.16.17.32',
pending_value=None,
fqdd='iDRAC.Embedded.1',
group_id='Info.1',
min_length=0,
max_length=63)
expected_integer_attr = idrac_card.iDRACCardIntegerAttribute(
name='Port',
instance_id='iDRAC.Embedded.1#SSH.1#Port',
read_only=False,
current_value=22,
pending_value=None,
fqdd='iDRAC.Embedded.1',
group_id='SSH.1',
lower_bound=1,
upper_bound=65535)
mock_requests.post('https://1.2.3.4:443/wsman', [
{'text': test_utils.iDracCardEnumerations[
uris.DCIM_iDRACCardEnumeration]['ok']},
{'text': test_utils.iDracCardEnumerations[
uris.DCIM_iDRACCardString]['ok']},
{'text': test_utils.iDracCardEnumerations[
uris.DCIM_iDRACCardInteger]['ok']}])
idrac_settings = self.drac_client.list_idrac_settings()
self.assertEqual(631, len(idrac_settings))
# enumerable attribute
self.assertIn('iDRAC.Embedded.1#Info.1#Type', idrac_settings)
self.assertEqual(expected_enum_attr, idrac_settings[
'iDRAC.Embedded.1#Info.1#Type'])
# string attribute
self.assertIn('iDRAC.Embedded.1#Info.1#Version', idrac_settings)
self.assertEqual(expected_string_attr,
idrac_settings['iDRAC.Embedded.1#Info.1#Version'])
# integer attribute
self.assertIn('iDRAC.Embedded.1#SSH.1#Port', idrac_settings)
self.assertEqual(expected_integer_attr, idrac_settings[
'iDRAC.Embedded.1#SSH.1#Port'])
def test_list_idrac_settings_by_name(
self, mock_requests, mock_wait_until_idrac_is_ready):
expected_enum_attr = idrac_card.iDRACCardEnumerableAttribute(
name='Type',
instance_id='iDRAC.Embedded.1#Info.1#Type',
read_only=True,
current_value='13G Monolithic',
pending_value=None,
fqdd='iDRAC.Embedded.1',
group_id='Info.1',
possible_values=['12G/13G', '12G Monolithic', '12G Modular',
'13G Monolithic', '13G Modular', '12G DCS',
'13G DCS'])
expected_string_attr = idrac_card.iDRACCardStringAttribute(
name='Version',
instance_id='iDRAC.Embedded.1#Info.1#Version',
read_only=True,
current_value='172.16.17.32',
pending_value=None,
fqdd='iDRAC.Embedded.1',
group_id='Info.1',
min_length=0,
max_length=63)
expected_integer_attr = idrac_card.iDRACCardIntegerAttribute(
name='Port',
instance_id='iDRAC.Embedded.1#SSH.1#Port',
read_only=False,
current_value=22,
pending_value=None,
fqdd='iDRAC.Embedded.1',
group_id='SSH.1',
lower_bound=1,
upper_bound=65535)
mock_requests.post('https://1.2.3.4:443/wsman', [
{'text': test_utils.iDracCardEnumerations[
uris.DCIM_iDRACCardEnumeration]['ok']},
{'text': test_utils.iDracCardEnumerations[
uris.DCIM_iDRACCardString]['ok']},
{'text': test_utils.iDracCardEnumerations[
uris.DCIM_iDRACCardInteger]['ok']}])
idrac_settings = self.drac_client.list_idrac_settings(by_name=True)
self.assertEqual(630, len(idrac_settings))
# enumerable attribute
self.assertIn('Info.1#Type', idrac_settings)
self.assertEqual(expected_enum_attr, idrac_settings[
'Info.1#Type'])
# string attribute
self.assertIn('Info.1#Version', idrac_settings)
self.assertEqual(expected_string_attr,
idrac_settings['Info.1#Version'])
# integer attribute
self.assertIn('SSH.1#Port', idrac_settings)
self.assertEqual(expected_integer_attr, idrac_settings[
'SSH.1#Port'])
def test_list_multi_idrac_settings_by_name(
self, mock_requests, mock_wait_until_idrac_is_ready):
expected_enum_attr = idrac_card.iDRACCardEnumerableAttribute(
name='Type',
instance_id='iDRAC.Embedded.2#Info.1#Type',
read_only=True,
current_value='13G Monolithic',
pending_value=None,
fqdd='iDRAC.Embedded.2',
group_id='Info.1',
possible_values=['12G/13G', '12G Monolithic', '12G Modular',
'13G Monolithic', '13G Modular', '12G DCS',
'13G DCS'])
mock_requests.post('https://1.2.3.4:443/wsman', [
{'text': test_utils.iDracCardEnumerations[
uris.DCIM_iDRACCardEnumeration]['ok']},
{'text': test_utils.iDracCardEnumerations[
uris.DCIM_iDRACCardString]['ok']},
{'text': test_utils.iDracCardEnumerations[
uris.DCIM_iDRACCardInteger]['ok']}])
idrac_settings = self.drac_client.list_idrac_settings(
by_name=True, fqdd_filter='iDRAC.Embedded.2')
self.assertEqual(1, len(idrac_settings))
# enumerable attribute
self.assertIn('Info.1#Type', idrac_settings)
self.assertEqual(expected_enum_attr, idrac_settings[
'Info.1#Type'])
@mock.patch.object(dracclient.client.WSManClient, 'invoke',
spec_set=True, autospec=True)
def test_set_idrac_settings(
self, mock_requests, mock_invoke, mock_wait_until_idrac_is_ready):
expected_selectors = {'CreationClassName': 'DCIM_iDRACCardService',
'SystemName': 'DCIM:ComputerSystem',
'Name': 'DCIM:iDRACCardService',
'SystemCreationClassName': 'DCIM_ComputerSystem'}
expected_properties = {'Target': 'iDRAC.Embedded.1',
'AttributeName': ['LDAP.1#GroupAttributeIsDN'],
'AttributeValue': ['Disabled']}
mock_requests.post('https://1.2.3.4:443/wsman', [
{'text': test_utils.iDracCardEnumerations[
uris.DCIM_iDRACCardEnumeration]['ok']},
{'text': test_utils.iDracCardEnumerations[
uris.DCIM_iDRACCardString]['ok']},
{'text': test_utils.iDracCardEnumerations[
uris.DCIM_iDRACCardInteger]['ok']}])
mock_invoke.return_value = lxml.etree.fromstring(
test_utils.iDracCardInvocations[uris.DCIM_iDRACCardService][
'SetAttributes']['ok'])
result = self.drac_client.set_idrac_settings(
{'LDAP.1#GroupAttributeIsDN': 'Disabled'})
self.assertEqual({'is_commit_required': True,
'is_reboot_required':
constants.RebootRequired.false},
result)
mock_invoke.assert_called_once_with(
mock.ANY, uris.DCIM_iDRACCardService, 'SetAttributes',
expected_selectors, expected_properties,
wait_for_idrac=True)
@mock.patch.object(dracclient.client.WSManClient, 'invoke',
spec_set=True, autospec=True)
def test_set_idrac_settings_with_valid_length_string(
self, mock_requests, mock_invoke, mock_wait_until_idrac_is_ready):
expected_selectors = {'CreationClassName': 'DCIM_iDRACCardService',
'SystemName': 'DCIM:ComputerSystem',
'Name': 'DCIM:iDRACCardService',
'SystemCreationClassName': 'DCIM_ComputerSystem'}
expected_properties = {'Target': 'iDRAC.Embedded.1',
'AttributeName': ['Users.16#Password'],
'AttributeValue': ['12345678901234567890']}
mock_requests.post('https://1.2.3.4:443/wsman', [
{'text': test_utils.iDracCardEnumerations[
uris.DCIM_iDRACCardEnumeration]['ok']},
{'text': test_utils.iDracCardEnumerations[
uris.DCIM_iDRACCardString]['ok']},
{'text': test_utils.iDracCardEnumerations[
uris.DCIM_iDRACCardInteger]['ok']}])
mock_invoke.return_value = lxml.etree.fromstring(
test_utils.iDracCardInvocations[uris.DCIM_iDRACCardService][
'SetAttributes']['ok'])
result = self.drac_client.set_idrac_settings(
{'Users.16#Password': '<PASSWORD>'})
self.assertEqual({'is_commit_required': True,
'is_reboot_required':
constants.RebootRequired.false},
result)
mock_invoke.assert_called_once_with(
mock.ANY, uris.DCIM_iDRACCardService, 'SetAttributes',
expected_selectors, expected_properties,
wait_for_idrac=True)
def test_set_idrac_settings_with_too_long_string(
self, mock_requests, mock_wait_until_idrac_is_ready):
expected_message = ("Attribute 'Password' cannot be set to "
"value '123456789012345678901'. It must be "
"between 0 and 20 characters in length.")
mock_requests.post('https://1.2.3.4:443/wsman', [
{'text': test_utils.iDracCardEnumerations[
uris.DCIM_iDRACCardEnumeration]['ok']},
{'text': test_utils.iDracCardEnumerations[
uris.DCIM_iDRACCardString]['ok']},
{'text': test_utils.iDracCardEnumerations[
uris.DCIM_iDRACCardInteger]['ok']}])
self.assertRaisesRegexp(
exceptions.DRACOperationFailed, re.escape(expected_message),
self.drac_client.set_idrac_settings,
{'Users.16#Password': '<PASSWORD>'})
class ClientiDRACCardChangesTestCase(base.BaseTest):
def setUp(self):
super(ClientiDRACCardChangesTestCase, self).setUp()
self.drac_client = dracclient.client.DRACClient(
**test_utils.FAKE_ENDPOINT)
@mock.patch.object(job.JobManagement, 'create_config_job', spec_set=True,
autospec=True)
def test_commit_pending_idrac_changes(self, mock_create_config_job):
self.drac_client.commit_pending_idrac_changes()
mock_create_config_job.assert_called_once_with(
mock.ANY,
resource_uri=uris.DCIM_iDRACCardService,
cim_creation_class_name='DCIM_iDRACCardService',
cim_name='DCIM:iDRACCardService',
target=dracclient.client.DRACClient.IDRAC_FQDD,
reboot=False, start_time='TIME_NOW')
@mock.patch.object(job.JobManagement, 'create_config_job', spec_set=True,
autospec=True)
def test_commit_pending_idrac_changes_with_reboot(
self, mock_create_config_job):
self.drac_client.commit_pending_idrac_changes(
reboot=True)
mock_create_config_job.assert_called_once_with(
mock.ANY,
resource_uri=uris.DCIM_iDRACCardService,
cim_creation_class_name='DCIM_iDRACCardService',
cim_name='DCIM:iDRACCardService',
target=dracclient.client.DRACClient.IDRAC_FQDD,
reboot=True, start_time='TIME_NOW')
@mock.patch.object(job.JobManagement, 'create_config_job', spec_set=True,
autospec=True)
def test_commit_pending_idrac_changes_with_time(
self, mock_create_config_job):
timestamp = '20140924120101'
self.drac_client.commit_pending_idrac_changes(
start_time=timestamp)
mock_create_config_job.assert_called_once_with(
mock.ANY,
resource_uri=uris.DCIM_iDRACCardService,
cim_creation_class_name='DCIM_iDRACCardService',
cim_name='DCIM:iDRACCardService',
target=dracclient.client.DRACClient.IDRAC_FQDD,
reboot=False, start_time=timestamp)
@mock.patch.object(job.JobManagement, 'create_config_job', spec_set=True,
autospec=True)
def test_commit_pending_idrac_changes_with_reboot_and_time(
self, mock_create_config_job):
timestamp = '20140924120101'
self.drac_client.commit_pending_idrac_changes(
reboot=True,
start_time=timestamp)
mock_create_config_job.assert_called_once_with(
mock.ANY,
resource_uri=uris.DCIM_iDRACCardService,
cim_creation_class_name='DCIM_iDRACCardService',
cim_name='DCIM:iDRACCardService',
target=dracclient.client.DRACClient.IDRAC_FQDD,
reboot=True, start_time=timestamp)
@mock.patch.object(job.JobManagement, 'delete_pending_config',
spec_set=True, autospec=True)
def test_abandon_pending_idrac_changes(self, mock_delete_pending_config):
self.drac_client.abandon_pending_idrac_changes()
mock_delete_pending_config.assert_called_once_with(
mock.ANY,
resource_uri=uris.DCIM_iDRACCardService,
cim_creation_class_name='DCIM_iDRACCardService',
cim_name='DCIM:iDRACCardService',
target=dracclient.client.DRACClient.IDRAC_FQDD)
class ClientiDRACCardResetTestCase(base.BaseTest):
def setUp(self):
super(ClientiDRACCardResetTestCase, self).setUp()
self.drac_client = dracclient.client.DRACClient(
**test_utils.FAKE_ENDPOINT)
@mock.patch('dracclient.client.subprocess.call')
def test_ping_host(self, mock_os_system):
mock_os_system.return_value = 0
response = self.drac_client._ping_host('127.0.0.1')
self.assertEqual(mock_os_system.call_count, 1)
self.assertEqual(True, response)
@mock.patch('dracclient.client.subprocess.call')
def test_ping_host_not_pingable(self, mock_os_system):
mock_os_system.return_value = 1
response = self.drac_client._ping_host('127.0.0.1')
self.assertEqual(mock_os_system.call_count, 1)
self.assertEqual(False, response)
@mock.patch('dracclient.client.subprocess.call')
def test_ping_host_name_not_known(self, mock_os_system):
mock_os_system.return_value = 2
response = self.drac_client._ping_host('127.0.0.1')
self.assertEqual(mock_os_system.call_count, 1)
self.assertEqual(False, response)
@mock.patch('time.sleep')
@mock.patch('dracclient.client.DRACClient._ping_host')
def test_wait_for_host_alive(self, mock_ping_host, mock_sleep):
total_calls = 5
ping_count = 3
mock_ping_host.return_value = True
mock_sleep.return_value = None
response = self.drac_client._wait_for_host_state(
'hostname',
alive=True,
ping_count=ping_count,
retries=total_calls)
self.assertEqual(True, response)
self.assertEqual(mock_sleep.call_count, ping_count)
self.assertEqual(mock_ping_host.call_count, ping_count)
@mock.patch('time.sleep')
@mock.patch('dracclient.client.DRACClient._ping_host')
def test_wait_for_host_alive_fail(self, mock_ping_host, mock_sleep):
total_calls = 5
ping_count = 3
mock_ping_host.return_value = False
mock_sleep.return_value = None
response = self.drac_client._wait_for_host_state(
'hostname',
alive=True,
ping_count=ping_count,
retries=total_calls)
self.assertEqual(False, response)
self.assertEqual(mock_sleep.call_count, total_calls)
self.assertEqual(mock_ping_host.call_count, total_calls)
@mock.patch('time.sleep')
@mock.patch('dracclient.client.DRACClient._ping_host')
def test_wait_for_host_dead(self, mock_ping_host, mock_sleep):
total_calls = 5
ping_count = 3
mock_ping_host.return_value = False
mock_sleep.return_value = None
response = self.drac_client._wait_for_host_state(
'hostname',
alive=False,
ping_count=ping_count,
retries=total_calls)
self.assertEqual(True, response)
self.assertEqual(mock_sleep.call_count, ping_count)
self.assertEqual(mock_ping_host.call_count, ping_count)
@mock.patch('time.sleep')
@mock.patch('dracclient.client.DRACClient._ping_host')
def test_wait_for_host_dead_fail(self, mock_ping_host, mock_sleep):
total_calls = 5
ping_count = 3
mock_ping_host.return_value = True
mock_sleep.return_value = None
response = self.drac_client._wait_for_host_state(
'hostname',
alive=False,
ping_count=ping_count,
retries=total_calls)
self.assertEqual(False, response)
self.assertEqual(mock_sleep.call_count, total_calls)
self.assertEqual(mock_ping_host.call_count, total_calls)
@mock.patch('time.sleep')
@mock.patch('dracclient.client.DRACClient._ping_host')
def test_wait_for_host_alive_with_intermittent(
self, mock_ping_host, mock_sleep):
total_calls = 6
ping_count = 3
mock_ping_host.side_effect = [True, True, False, True, True, True]
mock_sleep.return_value = None
response = self.drac_client._wait_for_host_state(
'hostname',
alive=True,
ping_count=ping_count,
retries=total_calls)
self.assertEqual(True, response)
self.assertEqual(mock_sleep.call_count, total_calls)
@mock.patch('time.sleep')
@mock.patch('dracclient.client.DRACClient._ping_host')
def test_wait_for_host_dead_with_intermittent(
self, mock_ping_host, mock_sleep):
total_calls = 6
ping_count = 3
mock_ping_host.side_effect = [False, False, True, False, False, False]
mock_sleep.return_value = None
response = self.drac_client._wait_for_host_state(
'hostname',
alive=False,
ping_count=ping_count,
retries=total_calls)
self.assertEqual(True, response)
self.assertEqual(mock_sleep.call_count, total_calls)
@mock.patch.object(dracclient.client.WSManClient, 'invoke', spec_set=True,
autospec=True)
def test_reset_idrac(self, mock_invoke):
expected_selectors = {
'CreationClassName': "DCIM_iDRACCardService",
'Name': "DCIM:iDRACCardService",
'SystemCreationClassName': 'DCIM_ComputerSystem',
'SystemName': 'DCIM:ComputerSystem'}
expected_properties = {'Force': '0'}
mock_invoke.return_value = lxml.etree.fromstring(
test_utils.iDracCardInvocations[uris.DCIM_iDRACCardService][
'iDRACReset']['ok'])
result = self.drac_client.reset_idrac()
mock_invoke.assert_called_once_with(
mock.ANY, uris.DCIM_iDRACCardService, 'iDRACReset',
expected_selectors, expected_properties,
check_return_value=False)
self.assertTrue(result)
@mock.patch.object(dracclient.client.WSManClient, 'invoke', spec_set=True,
autospec=True)
def test_reset_idrac_force(self, mock_invoke):
expected_selectors = {
'CreationClassName': "DCIM_iDRACCardService",
'Name': "DCIM:iDRACCardService",
'SystemCreationClassName': 'DCIM_ComputerSystem',
'SystemName': 'DCIM:ComputerSystem'}
expected_properties = {'Force': '1'}
mock_invoke.return_value = lxml.etree.fromstring(
test_utils.iDracCardInvocations[uris.DCIM_iDRACCardService][
'iDRACReset']['ok'])
result = self.drac_client.reset_idrac(force=True)
mock_invoke.assert_called_once_with(
mock.ANY, uris.DCIM_iDRACCardService, 'iDRACReset',
expected_selectors, expected_properties,
check_return_value=False)
self.assertTrue(result)
@mock.patch.object(dracclient.client.WSManClient, 'invoke', spec_set=True,
autospec=True)
def test_reset_idrac_bad_result(self, mock_invoke):
expected_selectors = {
'CreationClassName': "DCIM_iDRACCardService",
'Name': "DCIM:iDRACCardService",
'SystemCreationClassName': 'DCIM_ComputerSystem',
'SystemName': 'DCIM:ComputerSystem'}
expected_properties = {'Force': '0'}
expected_message = ("Failed to reset iDRAC")
mock_invoke.return_value = lxml.etree.fromstring(
test_utils.iDracCardInvocations[uris.DCIM_iDRACCardService][
'iDRACReset']['error'])
self.assertRaisesRegexp(
exceptions.DRACOperationFailed, re.escape(expected_message),
self.drac_client.reset_idrac)
mock_invoke.assert_called_once_with(
mock.ANY, uris.DCIM_iDRACCardService, 'iDRACReset',
expected_selectors, expected_properties,
check_return_value=False)
@mock.patch('time.sleep')
@mock.patch('dracclient.client.WSManClient.wait_until_idrac_is_ready')
@mock.patch('dracclient.client.DRACClient._wait_for_host_state')
@mock.patch(
'dracclient.client.idrac_card.iDRACCardConfiguration.reset_idrac')
def test_reset_idrac_wait(
self,
mock_reset_idrac,
mock_wait_for_host_state,
mock_wait_until_idrac_is_ready,
mock_sleep):
mock_reset_idrac.return_value = True
mock_wait_for_host_state.side_effect = [True, True]
mock_wait_until_idrac_is_ready.return_value = True
mock_sleep.return_value = None
self.drac_client.reset_idrac(wait=True)
mock_reset_idrac.assert_called_once()
self.assertEqual(mock_wait_for_host_state.call_count, 2)
mock_wait_until_idrac_is_ready.assert_called_once()
@mock.patch('time.sleep')
@mock.patch('dracclient.client.WSManClient.wait_until_idrac_is_ready')
@mock.patch('dracclient.client.DRACClient._wait_for_host_state')
@mock.patch(
'dracclient.client.idrac_card.iDRACCardConfiguration.reset_idrac')
def test_reset_idrac_wait_failed_reset(
self,
mock_reset_idrac,
mock_wait_for_host_state,
mock_wait_until_idrac_is_ready,
mock_sleep):
mock_reset_idrac.return_value = False
mock_wait_for_host_state.side_effect = [True, True]
mock_wait_until_idrac_is_ready.return_value = False
mock_sleep.return_value = None
expected_message = ("Failed to reset iDRAC")
self.assertRaisesRegexp(
exceptions.DRACOperationFailed, re.escape(expected_message),
self.drac_client.reset_idrac, wait=True)
mock_reset_idrac.assert_called_once()
mock_wait_for_host_state.assert_not_called()
mock_wait_until_idrac_is_ready.assert_not_called()
@mock.patch('time.sleep')
@mock.patch('dracclient.client.WSManClient.wait_until_idrac_is_ready')
@mock.patch('dracclient.client.DRACClient._wait_for_host_state')
@mock.patch(
'dracclient.client.idrac_card.iDRACCardConfiguration.reset_idrac')
def test_reset_idrac_fail_wait_not_pingable(
self,
mock_reset_idrac,
mock_wait_for_host_state,
mock_wait_until_idrac_is_ready,
mock_sleep):
mock_reset_idrac.return_value = True
mock_wait_for_host_state.side_effect = [False, True]
mock_wait_until_idrac_is_ready.return_value = True
mock_sleep.return_value = None
expected_message = (
"Timed out waiting for the 1.2.3.4 iDRAC to become not pingable")
self.assertRaisesRegexp(
exceptions.DRACOperationFailed, re.escape(expected_message),
self.drac_client.reset_idrac, wait=True)
mock_reset_idrac.assert_called_once()
mock_wait_for_host_state.assert_called_once()
mock_wait_until_idrac_is_ready.assert_not_called()
@mock.patch('time.sleep')
@mock.patch('dracclient.client.WSManClient.wait_until_idrac_is_ready')
@mock.patch('dracclient.client.DRACClient._wait_for_host_state')
@mock.patch(
'dracclient.client.idrac_card.iDRACCardConfiguration.reset_idrac')
def test_reset_idrac_fail_wait_pingable(
self,
mock_reset_idrac,
mock_wait_for_host_state,
mock_wait_until_idrac_is_ready,
mock_sleep):
mock_reset_idrac.return_value = True
mock_wait_for_host_state.side_effect = [True, False]
mock_wait_until_idrac_is_ready.return_value = True
mock_sleep.return_value = None
expected_message = (
"Timed out waiting for the 1.2.3.4 iDRAC to become pingable")
self.assertRaisesRegexp(
exceptions.DRACOperationFailed, re.escape(expected_message),
self.drac_client.reset_idrac, wait=True)
mock_reset_idrac.assert_called_once()
self.assertEqual(mock_wait_for_host_state.call_count, 2)
mock_wait_until_idrac_is_ready.assert_not_called()
```
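The iDRAC attribute tests above all exercise the same set-then-commit pattern. A minimal, hedged sketch of that flow (placeholder endpoint and attribute value; constructor arguments assumed):

```python
import dracclient.client

# Placeholder credentials for an iDRAC endpoint.
client = dracclient.client.DRACClient('192.0.2.1', 'root', 'calvin')

result = client.set_idrac_settings({'LDAP.1#GroupAttributeIsDN': 'Disabled'})
if result['is_commit_required']:
    # Queue a config job so the pending iDRAC attribute change is applied.
    client.commit_pending_idrac_changes(reboot=False)
```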
#### File: dracclient/tests/test_lifecycle_controller.py
```python
import lxml.etree
import re
from unittest import mock
import requests_mock
import dracclient.client
from dracclient import constants
from dracclient import exceptions
import dracclient.resources.job
from dracclient.resources import lifecycle_controller
from dracclient.resources import uris
from dracclient.tests import base
from dracclient.tests import utils as test_utils
from dracclient import utils
class ClientLifecycleControllerManagementTestCase(base.BaseTest):
def setUp(self):
super(ClientLifecycleControllerManagementTestCase, self).setUp()
self.drac_client = dracclient.client.DRACClient(
**test_utils.FAKE_ENDPOINT)
@requests_mock.Mocker()
def test_get_lifecycle_controller_version(self, mock_requests):
mock_requests.post(
'https://1.2.3.4:443/wsman',
text=test_utils.LifecycleControllerEnumerations[
uris.DCIM_SystemView]['ok'])
version = self.drac_client.get_lifecycle_controller_version()
self.assertEqual((2, 1, 0), version)
@requests_mock.Mocker()
class ClientLCConfigurationTestCase(base.BaseTest):
def setUp(self):
super(ClientLCConfigurationTestCase, self).setUp()
self.drac_client = dracclient.client.DRACClient(
**test_utils.FAKE_ENDPOINT)
@mock.patch.object(dracclient.client.WSManClient,
'wait_until_idrac_is_ready', spec_set=True,
autospec=True)
def test_list_lifecycle_settings_by_instance_id(
self, mock_requests,
mock_wait_until_idrac_is_ready):
expected_enum_attr = lifecycle_controller.LCEnumerableAttribute(
name='Lifecycle Controller State',
instance_id='LifecycleController.Embedded.1#LCAttributes.1#LifecycleControllerState', # noqa
read_only=False,
current_value='Enabled',
pending_value=None,
possible_values=['Disabled', 'Enabled', 'Recovery'])
expected_string_attr = lifecycle_controller.LCStringAttribute(
name='SYSID',
instance_id='LifecycleController.Embedded.1#LCAttributes.1#SystemID', # noqa
read_only=True,
current_value='639',
pending_value=None,
min_length=0,
max_length=3)
mock_requests.post('https://1.2.3.4:443/wsman', [
{'text': test_utils.LifecycleControllerEnumerations[
uris.DCIM_LCEnumeration]['ok']},
{'text': test_utils.LifecycleControllerEnumerations[
uris.DCIM_LCString]['ok']}])
lifecycle_settings = self.drac_client.list_lifecycle_settings(
by_name=False)
self.assertEqual(14, len(lifecycle_settings))
# enumerable attribute
self.assertIn(
'LifecycleController.Embedded.1#LCAttributes.1#LifecycleControllerState', # noqa
lifecycle_settings)
self.assertEqual(expected_enum_attr, lifecycle_settings[
'LifecycleController.Embedded.1#LCAttributes.1#LifecycleControllerState']) # noqa
# string attribute
self.assertIn(
'LifecycleController.Embedded.1#LCAttributes.1#SystemID',
lifecycle_settings)
self.assertEqual(expected_string_attr,
lifecycle_settings['LifecycleController.Embedded.1#LCAttributes.1#SystemID']) # noqa
@mock.patch.object(dracclient.client.WSManClient,
'wait_until_idrac_is_ready', spec_set=True,
autospec=True)
def test_list_lifecycle_settings_by_name(
self, mock_requests,
mock_wait_until_idrac_is_ready):
expected_enum_attr = lifecycle_controller.LCEnumerableAttribute(
name='Lifecycle Controller State',
instance_id='LifecycleController.Embedded.1#LCAttributes.1#LifecycleControllerState', # noqa
read_only=False,
current_value='Enabled',
pending_value=None,
possible_values=['Disabled', 'Enabled', 'Recovery'])
expected_string_attr = lifecycle_controller.LCStringAttribute(
name='SYSID',
instance_id='LifecycleController.Embedded.1#LCAttributes.1#SystemID', # noqa
read_only=True,
current_value='639',
pending_value=None,
min_length=0,
max_length=3)
mock_requests.post('https://1.2.3.4:443/wsman', [
{'text': test_utils.LifecycleControllerEnumerations[
uris.DCIM_LCEnumeration]['ok']},
{'text': test_utils.LifecycleControllerEnumerations[
uris.DCIM_LCString]['ok']}])
lifecycle_settings = self.drac_client.list_lifecycle_settings(
by_name=True)
self.assertEqual(14, len(lifecycle_settings))
# enumerable attribute
self.assertIn(
'Lifecycle Controller State',
lifecycle_settings)
self.assertEqual(expected_enum_attr, lifecycle_settings[
'Lifecycle Controller State'])
# string attribute
self.assertIn(
'SYSID',
lifecycle_settings)
self.assertEqual(expected_string_attr,
lifecycle_settings['SYSID'])
@mock.patch.object(dracclient.client.WSManClient, 'invoke',
spec_set=True, autospec=True)
def test_is_lifecycle_in_recovery(self, mock_requests,
mock_invoke):
expected_selectors = {'CreationClassName': 'DCIM_LCService',
'SystemName': 'DCIM:ComputerSystem',
'Name': 'DCIM:LCService',
'SystemCreationClassName': 'DCIM_ComputerSystem'}
mock_invoke.return_value = lxml.etree.fromstring(
test_utils.LifecycleControllerInvocations[uris.DCIM_LCService][
'GetRemoteServicesAPIStatus']['is_recovery'])
result = self.drac_client.is_lifecycle_in_recovery()
mock_invoke.assert_called_once_with(
mock.ANY, uris.DCIM_LCService, 'GetRemoteServicesAPIStatus',
expected_selectors, {},
expected_return_value=utils.RET_SUCCESS,
wait_for_idrac=False)
self.assertEqual(True, result)
@mock.patch.object(dracclient.client.WSManClient,
'invoke', spec_set=True,
autospec=True)
def test_set_lifecycle_settings(self, mock_requests,
mock_invoke):
mock_requests.post('https://1.2.3.4:443/wsman', [
{'text': test_utils.LifecycleControllerEnumerations[
uris.DCIM_LCEnumeration]['ok']},
{'text': test_utils.LifecycleControllerEnumerations[
uris.DCIM_LCString]['ok']}])
mock_invoke.return_value = lxml.etree.fromstring(
test_utils.LifecycleControllerInvocations[uris.DCIM_LCService][
'SetAttributes']['ok'])
result = self.drac_client.set_lifecycle_settings(
{'Collect System Inventory on Restart': 'Disabled'})
self.assertEqual({'is_commit_required': True,
'is_reboot_required': constants.RebootRequired.false
},
result)
@mock.patch.object(dracclient.client.WSManClient,
'wait_until_idrac_is_ready', spec_set=True,
autospec=True)
def test_set_lifecycle_settings_with_unknown_attr(
self, mock_requests, mock_wait_until_idrac_is_ready):
mock_requests.post('https://1.2.3.4:443/wsman', [
{'text': test_utils.LifecycleControllerEnumerations[
uris.DCIM_LCEnumeration]['ok']},
{'text': test_utils.LifecycleControllerEnumerations[
uris.DCIM_LCString]['ok']},
{'text': test_utils.LifecycleControllerInvocations[
uris.DCIM_LCService]['SetAttributes']['error']}])
self.assertRaises(exceptions.InvalidParameterValue,
self.drac_client.set_lifecycle_settings,
{'foo': 'bar'})
@mock.patch.object(dracclient.client.WSManClient,
'wait_until_idrac_is_ready', spec_set=True,
autospec=True)
def test_set_lifecycle_settings_with_unchanged_attr(
self, mock_requests, mock_wait_until_idrac_is_ready):
mock_requests.post('https://1.2.3.4:443/wsman', [
{'text': test_utils.LifecycleControllerEnumerations[
uris.DCIM_LCEnumeration]['ok']},
{'text': test_utils.LifecycleControllerEnumerations[
uris.DCIM_LCString]['ok']}])
result = self.drac_client.set_lifecycle_settings(
{'Lifecycle Controller State': 'Enabled'})
self.assertEqual({'is_commit_required': False,
'is_reboot_required':
constants.RebootRequired.false},
result)
@mock.patch.object(dracclient.client.WSManClient,
'wait_until_idrac_is_ready', spec_set=True,
autospec=True)
def test_set_lifecycle_settings_with_readonly_attr(
self, mock_requests, mock_wait_until_idrac_is_ready):
expected_message = ("Cannot set read-only Lifecycle attributes: "
"['Licensed'].")
mock_requests.post('https://1.2.3.4:443/wsman', [
{'text': test_utils.LifecycleControllerEnumerations[
uris.DCIM_LCEnumeration]['ok']},
{'text': test_utils.LifecycleControllerEnumerations[
uris.DCIM_LCString]['ok']}])
self.assertRaisesRegexp(
exceptions.DRACOperationFailed, re.escape(expected_message),
self.drac_client.set_lifecycle_settings, {'Licensed': 'yes'})
@mock.patch.object(dracclient.client.WSManClient,
'wait_until_idrac_is_ready', spec_set=True,
autospec=True)
def test_set_lifecycle_settings_with_incorrect_enum_value(
self, mock_requests, mock_wait_until_idrac_is_ready):
expected_message = ("Attribute 'Lifecycle Controller State' cannot "
"be set to value 'foo'. It must be in "
"['Disabled', 'Enabled', 'Recovery'].")
mock_requests.post('https://1.2.3.4:443/wsman', [
{'text': test_utils.LifecycleControllerEnumerations[
uris.DCIM_LCEnumeration]['ok']},
{'text': test_utils.LifecycleControllerEnumerations[
uris.DCIM_LCString]['ok']}])
self.assertRaisesRegexp(
exceptions.DRACOperationFailed, re.escape(expected_message),
self.drac_client.set_lifecycle_settings,
{'Lifecycle Controller State': 'foo'})
class ClientLCChangesTestCase(base.BaseTest):
def setUp(self):
super(ClientLCChangesTestCase, self).setUp()
self.drac_client = dracclient.client.DRACClient(
**test_utils.FAKE_ENDPOINT)
@mock.patch.object(dracclient.resources.job.JobManagement,
'create_config_job', spec_set=True, autospec=True)
def test_commit_pending_lifecycle_changes(self, mock_create_config_job):
self.drac_client.commit_pending_lifecycle_changes()
mock_create_config_job.assert_called_once_with(
mock.ANY, resource_uri=uris.DCIM_LCService,
cim_creation_class_name='DCIM_LCService',
cim_name='DCIM:LCService', target='',
reboot=False, start_time='TIME_NOW',
wait_for_idrac=False,
method_name='CreateConfigJob')
@mock.patch.object(dracclient.resources.job.JobManagement,
'create_config_job', spec_set=True, autospec=True)
def test_commit_pending_lifecycle_changes_with_time(
self, mock_create_config_job):
timestamp = '20140924140201'
self.drac_client.commit_pending_lifecycle_changes(
start_time=timestamp)
mock_create_config_job.assert_called_once_with(
mock.ANY, resource_uri=uris.DCIM_LCService,
cim_creation_class_name='DCIM_LCService',
cim_name='DCIM:LCService', target='',
reboot=False, start_time=timestamp,
wait_for_idrac=False,
method_name='CreateConfigJob')
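# Hedged usage note (not part of the test suite): outside of the tests, the
# Lifecycle Controller attributes follow the same set-then-commit flow, e.g.
#
#   result = client.set_lifecycle_settings(
#       {'Collect System Inventory on Restart': 'Enabled'})
#   if result['is_commit_required']:
#       client.commit_pending_lifecycle_changes()
#
# where `client` is a DRACClient instance and the attribute value is a
# placeholder.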
``` |
{
"source": "JohnGarbutt/spark-vagrant",
"score": 3
} |
#### File: spark-vagrant/provisioning/create-inventory-from-server.py
```python
import argparse
import openstack
def get_connection():
#openstack.enable_logging(debug=True)
conn = openstack.connect()
return conn
def get_server_floatingips(conn, server_uuid):
floating = []
all_addresses = conn.compute.get_server(server_uuid).addresses
for network, addresses in all_addresses.items():
for address in addresses:
if address['OS-EXT-IPS:type'] == 'floating':
floating.append(address)
return floating
def main():
parser = argparse.ArgumentParser()
parser.add_argument('server_uuid', type=str,
help='Nova server uuid')
args = parser.parse_args()
server_uuid = args.server_uuid
conn = get_connection()
floatingips = get_server_floatingips(conn, server_uuid)
if len(floatingips) == 1:
server_address = str(floatingips[0]['addr'])
else:
raise Exception("Not found just one floating ip, panic!")
print "[all]"
print "%s ansible_user=centos" % server_address
if __name__ == '__main__':
main()
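# Hedged usage sketch (values are placeholders): with OpenStack credentials
# available to openstacksdk (e.g. a clouds.yaml file or OS_* environment
# variables), the script prints an Ansible inventory for the given server:
#
#   $ python create-inventory-from-server.py <server-uuid> > inventory
#   [all]
#   203.0.113.10 ansible_user=centos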
``` |
{
"source": "johngarg/neutrinomass",
"score": 2
} |
#### File: neutrinomass/analysis/utils.py
```python
import sympy as sym
import numpy as np
import functools as ft
# CONSTANTS
pert_bound = np.sqrt(4.0 * np.pi)
GF = 1.16638e-5 # Inverse GeV squared
ALPHA_EM = 1.0 / 127.0 # EM alpha at M_z
mb = 4.18 # Mass of the b-quark in GeV
## Measured neutrino oscillation params from NuFit (http://www.nu-fit.org/?q=node/166)
## arXiv:1611.01514, NuFIT 3.2 (2018), www.nu-fit.org
THETA12 = np.deg2rad(33.82)
THETA23 = np.deg2rad(49.6)
THETA13 = np.deg2rad(8.61)
DELTA_CP = np.deg2rad(215.0)
# classes
class UnitaryMatrix(object):
"""
A unitary matrix, by default the PMNS matrix.
"""
def __init__(
self,
c12=np.cos(THETA12),
c13=np.cos(THETA13),
c23=np.cos(THETA23),
s12=np.sin(THETA12),
s13=np.sin(THETA13),
s23=np.sin(THETA23),
δ_CP=DELTA_CP,
α_1=0.0,
α_2=0.0,
symb=False,
):
"""
Creates a unitary matrix in the parametrisation of eq. 1.1 in 1611.01514.
        Conventions for Majorana phases are from eq. 8 of 1710.00715.
"""
self.symb = symb
if not symb:
# numpy
dtype = np.complex128
matrix_1 = np.matrix(
[[1.0, 0.0, 0.0], [0.0, c23, s23], [0.0, -s23, c23]], dtype=dtype
)
matrix_2 = np.matrix(
[
[c13, 0.0, s13 * np.exp(-1j * δ_CP)],
[0.0, 1.0, 0.0],
[-s13 * np.exp(1j * δ_CP), 0.0, c13],
],
dtype=dtype,
)
matrix_3 = np.matrix(
[[c12, s12, 0.0], [-s12, c12, 0.0], [0.0, 0.0, 1.0]], dtype=dtype
)
# P matrix contains the Majorana phases
matrix_p = np.matrix(
[
[np.exp(1j * α_1), 0.0, 0.0],
[0.0, np.exp(1j * α_2), 0.0],
[0.0, 0.0, 1.0],
],
dtype=dtype,
)
if symb:
# sympy matrices
matrix_1 = sym.Matrix([[1.0, 0.0, 0.0], [0.0, c23, s23], [0.0, -s23, c23]])
matrix_2 = sym.Matrix(
[
[c13, 0.0, s13 * sym.exp(-1j * δ_CP)],
[0.0, 1.0, 0.0],
[-s13 * sym.exp(1j * δ_CP), 0.0, c13],
]
)
matrix_3 = sym.Matrix([[c12, s12, 0.0], [-s12, c12, 0.0], [0.0, 0.0, 1.0]])
# P matrix contains the Majorana phases
matrix_p = sym.Matrix(
[
[sym.exp(1j * α_1), 0.0, 0.0],
[0.0, sym.exp(1j * α_2), 0.0],
[0.0, 0.0, 1.0],
]
)
if not symb:
self.val = ft.reduce(np.dot, [matrix_1, matrix_2, matrix_3, matrix_p])
else:
self.val = ft.reduce(
lambda x, y: x * y, [matrix_1, matrix_2, matrix_3, matrix_p]
)
def cols(self):
return self.val.T
def get_dagger(self):
return self.val.getH()
# Measured squared neutrino mass differences
# sol is \sqrt{Δm_{21}^2} and atm is \sqrt{Δm_{3l}^2} (in GeV)
measured_mv_diffs = {"sol": np.sqrt(7.40e-5) * 1e-9, "atm": np.sqrt(2.494e-3) * 1e-9}
# NO: v1 v2 v3
# <- Δm_sol -> <------ Δm_atm ------>
# IO: v3 v1 v2
# <------ Δm_atm ------>
# <- Δm_sol ->
NEUTRINO_MASSES = [0.0, measured_mv_diffs["sol"], measured_mv_diffs["atm"]]
PMNS = UnitaryMatrix()
# Measured CKM matrix parameters
THETA12_CKM = np.deg2rad(13.04)
THETA13_CKM = np.deg2rad(0.201)
THETA23_CKM = np.deg2rad(2.38)
DELTA_CP_CKM = np.deg2rad(1.20)
CKM = UnitaryMatrix(
c12=np.cos(THETA12_CKM),
c13=np.cos(THETA13_CKM),
c23=np.cos(THETA23_CKM),
s12=np.sin(THETA12_CKM),
s13=np.sin(THETA13_CKM),
s23=np.sin(THETA23_CKM),
δ_CP=DELTA_CP_CKM,
α_1=0.0,
α_2=0.0,
symb=False,
).val
VEV = 246.0 / np.sqrt(2)
```
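Since `UnitaryMatrix` defaults to the PMNS matrix, here is a short standalone check (numpy only, angles copied from the constants above; not part of the module) that the R23 * U13(delta) * R12 parametrisation really is unitary:

```python
import numpy as np

# Mixing angles and CP phase, mirroring the NuFit values used above.
t12, t13, t23, d_cp = map(np.deg2rad, (33.82, 8.61, 49.6, 215.0))
c12, s12 = np.cos(t12), np.sin(t12)
c13, s13 = np.cos(t13), np.sin(t13)
c23, s23 = np.cos(t23), np.sin(t23)

r23 = np.array([[1, 0, 0], [0, c23, s23], [0, -s23, c23]], dtype=complex)
u13 = np.array([[c13, 0, s13 * np.exp(-1j * d_cp)],
                [0, 1, 0],
                [-s13 * np.exp(1j * d_cp), 0, c13]], dtype=complex)
r12 = np.array([[c12, s12, 0], [-s12, c12, 0], [0, 0, 1]], dtype=complex)

# Majorana phases are set to zero, matching the defaults above.
pmns = r23 @ u13 @ r12
assert np.allclose(pmns @ pmns.conj().T, np.eye(3))
```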
#### File: neutrinomass/completions/completions.py
```python
from neutrinomass.tensormethod.core import (
Index,
Field,
IndexedField,
eps,
delta,
is_invariant_symbol,
Operator,
get_dynkin,
D,
)
from neutrinomass.tensormethod.contract import (
lorentz_singlets,
colour_singlets,
invariants,
contract_su2,
)
from neutrinomass.utils import timeit
from neutrinomass.tensormethod.utils import safe_nocoeff
from neutrinomass.completions.utils import (
flatten,
chunks,
factors,
multiple_replace,
allowed_lor_dyn,
)
from neutrinomass.utils.functions import remove_equivalent, remove_equivalent_nopop
from neutrinomass.completions.core import (
Completion,
Model,
FailedCompletion,
EffectiveOperator,
cons_completion_field,
FieldType,
VectorLikeDiracFermion,
MajoranaFermion,
ComplexScalar,
RealScalar,
)
from neutrinomass.completions.topologies import get_topology_data, Leaf
from neutrinomass.utils import pmatch
from neutrinomass.utils.functions import stringify_qns, conjugate_term
from typing import Tuple, List, Dict, Union
import networkx as nx
import networkx.algorithms.isomorphism as iso
from copy import copy, deepcopy
from alive_progress import alive_bar
from collections import Counter, defaultdict
from itertools import permutations, groupby, combinations
from sympy.tensor.tensor import Tensor
from sympy import prime
from functools import lru_cache, reduce
import re
import os
def replace(data, to_replace, replace_with, found=False) -> Tuple[tuple, bool]:
"""Replace first occurance of ``to_replace`` with ``replace_with`` in
``data``.
Example:
>>> replace((("F", 18), ("S", 45), ...), "F", L('u1 i1'))
((L(u1, i1), 18), ("S", 45), ...), True
"""
if found:
return data, found
if isinstance(data, tuple):
new_data = []
for datai in data:
new_datai, found = replace(datai, to_replace, replace_with, found)
new_data.append(new_datai)
f = lambda x: Leaf(*x) if isinstance(data, Leaf) else tuple(x)
return f(new_data), found
if data == to_replace:
return replace_with, True
return data, found
def replace_fields(fields: List[IndexedField], partition):
"""Takes the fields and puts them in place of the strings in the partition
template.
>>> replace_fields([H('i0_'), H('i1_'), L('u0_ i2_'), L('u1_ i3_')], (('F', 18), ('S', 162), (('F', 6), ('S', 54))))
((L(u0_, i2_), 18), (H(i0_), 162), ((L(u1_, i3_), 6), (H(i1_), 54)))
"""
for field in fields:
char = "S" if field.is_boson else "F"
partition, _ = replace(data=partition, to_replace=char, replace_with=field)
return partition
def quick_remove_equivalent_partitions(partitions):
"""Just remove double ups. (For now.)
This is also a good place to remove partitions that you know will be
filtered out.
"""
return list(set(partitions))
def distribute_fields(fields, partition):
"""Takes the fields and puts them in place of the strings in the partition
template in every possible way.
>>> distribute_fields([H('i0_'), H('i1_'), L('u0_ i2_'), L('u1_ i3_')], (('F', 18), ('S', 162), (('F', 6), ('S', 54))))
[((L(u0_, i2_), 18), (H(i0_), 162), ...), ((L(u1_, i3_), 18), (H(i0_), 162), ...), ...]
Returns lots of double ups.
"""
perms = permutations(fields)
parts = [replace_fields(fields, partition) for fields in perms]
return quick_remove_equivalent_partitions(parts)
def node_dictionary(
partition: tuple, field_dict: Dict[IndexedField, int]
) -> Dict[int, str]:
"""Returns a dictionary mapping node to indexed field label.
Example:
>>> node_dictionary((((Q(u364_, c210_, i369_), 6), (L(u362_, i367_), 18)),
((L(u361_, i366_), 54), (Q(u363_, c209_, i368_), 162)),
((db(u368_, -c214_), 486), (db(u366_, -c212_), 1458))))
{6: 'Q', 18: 'L', ...}
"""
flat_data = list(flatten(partition))
tuples = chunks(flat_data, 2)
reversed_data = list(map(reversed, tuples))
return {k: {"particle": v.label + str(field_dict[v])} for k, v in reversed_data}
def set_external_fields(
partition: tuple, graph: nx.Graph, field_dict: Dict[IndexedField, int]
) -> nx.Graph:
"""Add indexed fields as edge attributes on graph through side effect."""
g = deepcopy(graph)
node_attrs = node_dictionary(partition, field_dict)
edge_attrs = {}
for edge in graph.edges:
for n, field_dict in node_attrs.items():
if n in edge:
edge_attrs[edge] = field_dict
nx.set_edge_attributes(g, edge_attrs)
return g
def indexed_fields_with_counters(op: Operator) -> Dict[IndexedField, int]:
"""Return a dictionary mapping indexed fields to an integer labelling distinct
fields to help with isomorphism filtering.
TODO Need to rewrite this to include colour indices! Need to then move
position of call to include operator with colour structure!
"""
# idxs are the pairs of contracted isospin indices
counts = defaultdict(list)
idxs = []
for f in op.tensors:
if isinstance(f, IndexedField):
counts[f.label].append(f)
else:
idxs.append(f.indices)
labelled_counts = {k: [[f, i] for i, f in enumerate(v)] for k, v in counts.items()}
for k, v in labelled_counts.items():
for (f1, i1), (f2, i2) in combinations(v, 2):
if not f1.indices_by_type["Isospin"]:
# fields are interchangeable, replace
f2_idx = labelled_counts[k].index([f2, i2])
labelled_counts[k][f2_idx] = [f2, i1]
continue
iso1 = f1.indices_by_type["Isospin"][0]
iso2 = f2.indices_by_type["Isospin"][0]
if [-iso1, -iso2] in idxs or [-iso2, -iso1] in idxs:
# combination of indices match an epsilon-index pair. In this
# case, need to replace i2 with i1
f2_idx = labelled_counts[k].index([f2, i2])
labelled_counts[k][f2_idx] = [f2, i1]
else:
continue
flat = reduce(lambda x, y: x + y, labelled_counts.values())
return dict(flat)
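# Illustrative note (hypothetical field labels): two copies of the same field
# whose isospin indices are contracted by a single epsilon, e.g. L(u0, i0) and
# L(u1, i1) together with eps(-i0 -i1), are interchangeable and therefore get
# the same counter, so that diagrams differing only by their exchange are
# later recognised as isomorphic.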
def partitions(operator: EffectiveOperator, verbose=False) -> List[dict]:
"""Returns a list of operator partitions, epsilons and graphs of the form:
{"fields": ((L(u0, I_0), 18), ...)
"epsilons": (...),
"graph": ...}
from the partitions of the fields in the operator. This is all of the
information required to find the completion.
"""
topology_data_list = get_topology_data(**operator.topology_type)
colour_ops = colour_singlets([operator.operator], overcomplete=True)
colour_ops = [EffectiveOperator(operator.name, op) for op in colour_ops]
if verbose:
print(
f"Finding partitions of {operator.name}. "
+ f"There are {len(colour_ops)} colour structures and "
+ f"{len(topology_data_list)} relevant topologies."
)
out = []
counter = 1
for topology_data in topology_data_list:
if verbose:
print(f"Furnishing topology {counter}...")
counter += 1
# return counters as well for isomorphism filtering
fields_and_counters = indexed_fields_with_counters(operator.operator)
fields = [f for f, i in fields_and_counters.items()]
perms = distribute_fields(fields, topology_data["partition"])
for op in colour_ops:
# col_out = []
epsilons = op.operator.epsilons
for perm in perms:
g = topology_data["graph"]
g = set_external_fields(perm, g, fields_and_counters)
partition_file = topology_data["partition_file"]
topology_classification = os.path.splitext(
os.path.basename(partition_file)
)[0]
data = {
"operator": op,
"partition": perm,
"epsilons": epsilons,
"graph": g,
"topology": topology_classification,
}
out.append(data)
# if remove_isomorphic_diagrams:
# col_out = remove_isomorphic(col_out)
# out += col_out
return out
def are_equivalent_partitions(a, b):
"""Checks for partition equivalence by checking if the graphs are isomorphic."""
ga = a["graph"]
gb = b["graph"]
if not iso.faster_could_be_isomorphic(ga, gb):
return False
em = iso.categorical_edge_match("particle", "exotic")
return nx.is_isomorphic(ga, gb, edge_match=em)
def graph_fingerprint(part):
g = part["graph"]
degree = dict(g.degree())
return sorted(degree.values())
def remove_isomorphic(partitions: List[dict]) -> List[dict]:
"""Same algorithm as removeIsomorphic in ``wolfram/`` directory. Remove
isomorphic graphs (by side effect) to reduce double-ups of completions.
"""
return remove_equivalent_nopop(partitions, are_equivalent_partitions)
# The approach to finding the completions is the following: contract off fields
# and find corresponding exotic and term. Replace the fields by the exotic and
# keep track of the available epsilons and the terms by mutation. The pipeline is
#
# contract: returns exotic field, new gauge epsilons (fewer) and new lorentz
# epsilons (more)
#
# replace_and_mutate: returns a Leaf structure that enters the partition in
# place of the contracted fields, mutates terms, edge_dict of graph,
# gauge_epsilons and lorentz_epsilons
#
# reduce_partition: applies replace_and_mutate to a partition until last vertex.
def all_scalars(fields: List[Field]) -> bool:
"""Checks if all fields are scalars."""
boolean = True
for f in fields:
boolean = boolean and f.is_boson
return boolean
def all_fermions(fields: List[Field]) -> bool:
"""Checks if all fields are fermions."""
boolean = True
for f in fields:
boolean = boolean and f.is_fermion
return boolean
def drop_scalar(fields: List[Field]) -> List[Field]:
"""Given a list of fields with one scalar, return a list of only the
fermions, i.e. remove the scalar.
"""
scalars, fermions = [], []
for f in fields:
if f.is_boson:
scalars.append(f)
elif f.is_fermion:
fermions.append(f)
assert len(scalars) == 1
return fermions
def get_lorentz_epsilons(fields: Tuple[IndexedField]) -> Tuple[bool, List[Tensor]]:
"""Takes a list of two or three fields (possibly with derivatives) and returns
the lorentz epsilons that contract the fields to as low a Lorentz irrep as
possible as well as a boolean indicating whether the contraction is allowed.
"""
deriv_structure = [f.derivs for f in fields]
n_derivs = sum(deriv_structure)
if n_derivs > 2:
raise Exception(
f"Not currently supporting {n_derivs} derivatives in an operator."
)
if not n_derivs and len(fields) == 4:
return True, []
if not n_derivs and len(fields) == 3:
if all_scalars(fields):
return True, []
elif all_fermions(fields):
return False, []
return get_lorentz_epsilons(drop_scalar(fields))
if n_derivs == 2 and len(fields) == 3:
fields = sorted(fields, key=lambda f: -f.derivs)
prod = reduce(lambda x, y: x * y, fields)
        undotted, dotted, _, _, _ = prod.indices_by_type.values()
# Reject vector contraction
if len(undotted) == 1 and len(dotted) == 1:
return False, []
epsilons = []
for indices in [undotted, dotted]:
# skip single indices (fermion, scalar) contraction
if len(indices) == 1:
continue
# pair up all even indices; if odd, leave last index
if len(indices) % 2 != 0:
indices.pop(-1)
for i, j in chunks(indices, 2):
epsilons.append(eps(f"-{i} -{j}"))
return True, epsilons
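# Descriptive note on the branch above: with two derivatives spread over three
# fields, the contraction is rejected only when exactly one undotted and one
# dotted index survive (a Lorentz vector); otherwise indices of the same kind
# are paired off with epsilons and any leftover odd index is left free.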
def is_contracted_epsilon(eps: Tensor, indices: List[Index]) -> bool:
"""Return True if two indices on epsilon are contracted, False otherwise."""
i, j, *k = eps.indices
# deal with su2 epsilon first
if not k:
if -i in indices and -j in indices:
return True
return False
# su3 epsilon has three indices
to_remove, free = [], []
for idx in eps.indices:
if -idx in indices:
to_remove.append(-idx)
else:
free.append(idx)
# is contracted epsilon
if len(to_remove) == 2:
assert len(free) == 1
indices.append(free[0])
return True
if len(to_remove) == 3:
return True
return False
def separate_gauge_epsilons(
fields: List[IndexedField], epsilons: List[Tensor]
) -> Tuple[List[Tensor], List[Tensor]]:
"""Return a 2-tuple with the spectator epsilon tensors carrying gauge indices
and those that are contracted with the fields passed in.
"""
prod_fields = reduce(lambda x, y: x * y, fields)
free_indices = prod_fields.get_free_indices()
contracted_epsilons, spectator_epsilons = [], []
# contracted epsilons are those all of whose indices are contracted on the
# set of fields passed in. Spectator epsilons are all of the others
for epsilon in epsilons:
if is_contracted_epsilon(epsilon, free_indices):
contracted_epsilons.append(epsilon)
else:
spectator_epsilons.append(epsilon)
return spectator_epsilons, contracted_epsilons
def check_charges(operator: Operator, ignore=[]) -> None:
"""Make sure sum of charges vanishes, i.e. term is a U(1) singlet."""
fields = [f for f in operator.tensors if isinstance(f, IndexedField)]
for q in fields[0].charges:
if q in ignore:
continue
assert not sum(f.charges[q] for f in fields)
def check_singlet(operator: Operator, ignore=["3b"]) -> None:
"""Make sure operator is a SM and Lorentz singlet."""
check_charges(operator, ignore=ignore)
for free in operator.free_indices:
assert free.index_type == "Generation"
def exotic_field_and_term(
op: Operator, symbols: Dict[str, List[str]], field_dict: Dict[tuple, str]
) -> Tuple[IndexedField, IndexedField, Union[Operator, str]]:
"""Returns exotic field, partner (that couples in Lagrangian) and Lagrangian
term. Mutates field_dict with exotic field's symbol.
The last item returned may be a string explaining why the contraction
failed.
"""
fields = [f for f in op.tensors if isinstance(f, IndexedField)]
exotic_charges = {}
pairs = list(map(lambda x: x.charges.items(), fields))
# ensure no fields have missing or extra charges
fst, *rst = pairs
for pair in rst:
assert len(pair) == len(fst)
for n_pairs in zip(*pairs):
# fish out key
(k, _), *rst = n_pairs
# sum over values
exotic_charges[k] = sum(map(lambda x: x[1], n_pairs))
indices_by_type = op.indices_by_type.values()
exotic_undotted, exotic_dotted, exotic_colour, exotic_isospin, _ = map(
sorted, indices_by_type
)
exotic_indices = " ".join(
str(i)
for i in [*exotic_undotted, *exotic_dotted, *exotic_colour, *exotic_isospin]
)
# establish fermion or boson for symbols
lorentz_dynkin = get_dynkin(exotic_indices)[:2]
if lorentz_dynkin in ("10", "01"):
symbols_to_use = symbols["fermion"]
else:
symbols_to_use = symbols["boson"]
fs = sorted([f.field for f in fields], key=lambda x: x.label_with_dagger)
fs = tuple(fs)
if fs in field_dict.keys():
symbol = field_dict[fs]
else:
symbol = symbols_to_use.pop(0)
# field_dict mutated here!
field_dict[fs] = symbol
# for Dirac and Majorana fermions, always keep plain symbol left handed
to_conj = False
if exotic_indices and exotic_indices[0] == "d":
exotic_indices = Index.conj_index_string(exotic_indices)
to_conj = True
exotic_indexed_field = IndexedField(
label=symbol, indices=exotic_indices, charges=exotic_charges
)
# construct MajoranaFermion, VectorLikeDiracFermion, ...
# `partner` is in the Lagrangian, `exotic_field` transforms like contracted pair
exotic_field = cons_completion_field(exotic_indexed_field)
exotic_field = exotic_field.conj_indices if to_conj else exotic_field
partner = exotic_field
if isinstance(exotic_field, ComplexScalar):
partner = exotic_field.conj
elif isinstance(exotic_field, RealScalar):
partner = exotic_field.swap_colour_indices()
elif isinstance(exotic_field, VectorLikeDiracFermion):
partner = exotic_field.dirac_partner()
elif isinstance(exotic_field, MajoranaFermion):
partner = exotic_field.majorana_partner()
# Need additional su2 epsilons to fix su2 indices (since not working with
# lowered indices at all). Won't need to do this if removing a derivative in
# the process
partner, fix_su2_epsilons = partner.lower_su2()
term = reduce(lambda x, y: x * y, fix_su2_epsilons, op * partner)
# construct term and check to see if vanishes. This is a very costly step,
# check first whether there are any doubled up fields in the term and only
# run on those
set_fields = set([f.label for f in term.fields])
if len(set_fields) < len(term.fields) and term.safe_simplify() == 0:
return exotic_field, partner, f"Vanishing coupling at {term}"
# need to construct term again because sympy is annoying
term = reduce(lambda x, y: x * y, fix_su2_epsilons, op * partner)
check_singlet(term)
return exotic_field, partner, term
def process_derivative_term(op: Operator) -> Union[Operator, str]:
"""Process term containing derivatives, return corresponding term that would
appear in the Lagrangian.
"""
deriv_structure = [f.derivs for f in op.fields]
n_derivs = sum(deriv_structure)
if n_derivs == 0:
return op
# Remove derivatives and lorentz epsilons and call contract_su2 on Lorentz
# structure.
#
# There are a number of cases here
# 1. (DH)(DH)SS
# 2. (DH)(DH)S
# 3. (DH)ψF -> in all but this case, affected fields are clear
# 4. S(Dψ)F
# 5. (Dψ)(Dψ)S
# 6. S(Dψ)ψ
scalars, fermions, exotic_fermions, epsilons = [], [], [], []
for t in op.tensors:
if isinstance(t, IndexedField) and t.derivs > 0 and t.is_boson:
no_deriv_field = t.strip_derivs_with_indices()
scalars.append(no_deriv_field)
if isinstance(t, IndexedField) and t.derivs > 0 and t.is_fermion:
no_deriv_field = t.strip_derivs_with_indices()
fermions.append(no_deriv_field)
elif isinstance(t, VectorLikeDiracFermion):
fixed_field = t.dirac_partner().conj
exotic_fermions.append(fixed_field)
elif isinstance(t, MajoranaFermion):
fixed_field = t.conj
exotic_fermions.append(fixed_field)
elif isinstance(t, IndexedField) and t.is_fermion:
fermions.append(t)
elif isinstance(t, IndexedField) and t.is_scalar:
scalars.append(t)
elif not isinstance(t, IndexedField):
# is epsilon, keep gauge ones, not lorentz
if t.indices[0].index_type in ("Undotted", "Dotted"):
continue
else:
epsilons.append(t)
# cases 1 and 2
if len(scalars) > 2:
term = reduce(lambda x, y: x * y, scalars + epsilons)
if term.safe_simplify() == 0:
return "Vanishing structure"
return term
# case 6
if len(fermions) == 2 and n_derivs == 1:
return "Not allowed contraction"
# case 5
if len(fermions) == 2 and n_derivs == 2:
left, right = fermions
# cases 3 and 4
if len(exotic_fermions) == 1:
assert len(fermions) == 1
left, right = exotic_fermions[0], fermions[0]
if len(exotic_fermions) == 2:
left, right = exotic_fermions
# include scalars and epsilons in su2 contraction
right = reduce(lambda x, y: x * y, scalars + epsilons, right)
lu, ld, _, _, _ = left.indices_by_type.values()
ru, rd, _, _, _ = right.indices_by_type.values()
# if the indices are equal after taking the conj, then there will be an
# error. In this case, you can just lower one of them
if lu == ru and ld == rd:
partner, fix_su2_epsilons = left.lower_su2(skip=["Isospin"])
assert len(fix_su2_epsilons) == 1
return right * partner * fix_su2_epsilons[0]
if lu:
if not (len(lu) == 1 and len(ru) == 1):
return "Not allowed contraction"
index_str = " ".join(str(-i) for i in lu + ru)
else:
if not (len(ld) == 1 and len(rd) == 1):
return "Not allowed contraction"
index_str = " ".join(str(-i) for i in ld + rd)
return right * left * eps(index_str)
def contract(
fields: Tuple[IndexedField],
symbols: Dict[str, List[str]],
gauge_epsilons: list,
field_dict: Dict[tuple, str],
) -> Union[Tuple[FieldType, Operator, List[Tensor], List[Tensor]], str]:
"""Takes two or three indexed fields and the epsilons [epsilons and deltas of
SU(2) and SU(3) from the operator] and returns a new indexed field
    transforming in the same way as their tensor product.
Gauge epsilons (and deltas) are going to be potentially used up in this
process, while epsilons carrying Lorentz indices will be introduced
enforcing the contractions between dotted and undotted indices in the
generated operator.
Returns a tuple with the field transforming like the product of `fields`,
the term, and the new gauge and lorentz epsilons.
If the contraction fails, returns a string with the reason it failed.
Example:
    >>> field, term, gauge_epsilons, lorentz_epsilons = contract((H('i0'), H('i1')), {"fermion": [], "boson": ["S"]}, [], {})
>>> field
S(i0, i1)
>>> field.y
1
"""
    if len(fields) != 2 and len(fields) != 3:
        raise Exception("contract expects exactly two or three fields.")
allowed_contraction, lorentz_epsilons = get_lorentz_epsilons(fields)
if not allowed_contraction:
# Bad lorentz contraction
return "Bad Lorentz contraction."
# some gauge epsilons will be removed in the contraction, the others will
# just watch
spectator_gauge_eps, eps_to_remove = separate_gauge_epsilons(fields, gauge_epsilons)
# contracted_fields is the fields (with the derivatives still present) with
# lorentz indices contracted
contracted_fields = reduce(
lambda x, y: x * y, (*fields, *lorentz_epsilons, *eps_to_remove)
)
exotic, partner, maybe_term = exotic_field_and_term(
contracted_fields, symbols, field_dict
)
if isinstance(maybe_term, str):
# return the reason
return maybe_term
check_singlet(maybe_term)
# Check to see if there are any derivatives present, if there are process the term
deriv_structure = [f.derivs for f in maybe_term.fields]
n_derivs = sum(deriv_structure)
if n_derivs == 0:
no_deriv_maybe_term = maybe_term
if isinstance(no_deriv_maybe_term, str):
return no_deriv_maybe_term
else:
no_deriv_maybe_term = process_derivative_term(maybe_term)
if isinstance(no_deriv_maybe_term, str):
return no_deriv_maybe_term
if no_deriv_maybe_term.safe_simplify() == 0:
return f"Vanishing coupling at {maybe_term} after derivative processing."
check_singlet(no_deriv_maybe_term)
return exotic, no_deriv_maybe_term, spectator_gauge_eps, lorentz_epsilons
def get_connecting_edge(graph: nx.Graph, nodes: List[int]) -> Tuple[int, int]:
"""Returns an edge that connects to nodes in ``nodes``.
Taking ``graph`` to be:
               4
               |
               |
               3
              / \
             /   \
            1     2
Example:
>>> get_connecting_edge(graph, (1, 2))
(3, 4)
"""
neighbours = {}
for node in nodes:
neighbours[node] = set(graph.neighbors(node))
fst, *rst = list(neighbours.values())
intersection = fst.intersection(*rst)
assert len(intersection) == 1
connecting_node = list(intersection)[0]
other_nodes = list(graph.neighbors(connecting_node))
for node in nodes:
other_nodes.remove(node)
assert len(other_nodes) == 1
return (connecting_node, other_nodes[0])
def replace_and_mutate(
leaves: Tuple[Tuple[IndexedField, int]],
symbols: List[str],
gauge_epsilons: list,
lorentz_epsilons: list,
terms: list,
edge_dict: Dict[FieldType, Tuple[int, int]],
field_dict: Dict[tuple, str],
graph: nx.Graph,
) -> Leaf:
"""Returns a Leaf structure that enters the partition in place of the contracted
fields. Mutates major state of completion: terms, edge_dict of graph,
gauge_epsilons and lorentz_epsilons. Mutation of the field_dict happens in
`exotic_field_and_term` through `contract`.
For a failed completion, keep reason in first element of leaf-tuple.
"""
fields, nodes = [], []
for leaf in leaves:
        if leaf[1] is None:
return leaf
field, node = leaf
fields.append(field)
nodes.append(node)
# if only one field, SM field at last vertex
if len(fields) == 1:
return Leaf(field, node)
# field_dict is updated in this call
maybe_contract = contract(fields, symbols, gauge_epsilons, field_dict)
# For a failed completion, keep reason in first element of leaf-tuple.
if isinstance(maybe_contract, str):
return Leaf(maybe_contract, None)
# mutate gauge_epsilons immediately
exotic_field, term, gauge_epsilons, new_lorentz_epsilons = maybe_contract
# mutate lorentz_epsilons, terms
lorentz_epsilons += new_lorentz_epsilons
check_singlet(term)
terms.append(term)
# update edge_dict
exotic_edge = get_connecting_edge(graph, nodes)
edge_dict[exotic_field] = exotic_edge
return Leaf(exotic_field, exotic_edge[0])
def contains_only_leaves(xs: tuple) -> bool:
if not isinstance(xs, tuple):
return False
for x in xs:
if not isinstance(x, Leaf):
return False
return True
def reduced_row(row, func):
"""Helper function to apply recursive call until you reach leaves."""
if isinstance(row, Leaf):
return row
# row is a tuple
if contains_only_leaves(row):
return func(row)
return func(tuple(map(lambda a: reduced_row(a, func), row)))
def construct_completion(partition, gauge_epsilons, graph) -> Union[str, tuple]:
"""Returns arguments needed to pass into Completion object contructor, or a
string with the reason the completion failed.
"""
lorentz_epsilons, terms, edge_dict, field_dict = [], [], {}, {}
more_fermion_symbols = ["f" + str(i) for i in range(10)]
more_scalar_symbols = ["S" + str(i) for i in range(10)]
symbols = {
"fermion": ["ψ", "χ", "f", "ζ", "θ"] + more_fermion_symbols,
"boson": ["φ", "η", "s", "ω", "σ"] + more_scalar_symbols,
}
func = lambda leaves: replace_and_mutate(
leaves=leaves,
symbols=symbols,
gauge_epsilons=gauge_epsilons,
lorentz_epsilons=lorentz_epsilons,
terms=terms,
edge_dict=edge_dict,
field_dict=field_dict,
graph=graph,
)
reduced_partition = [reduced_row(row, func) for row in partition]
# construct final interaction term and add to terms
prod = None
for i in reduced_partition:
f = i.field
if isinstance(f, str):
return f
if prod is None:
prod = f
else:
prod *= f
fields = [f for f in prod.tensors if isinstance(f, IndexedField)]
allowed, new_lorentz_epsilons = get_lorentz_epsilons(fields)
if not allowed:
return "Bad Lorentz contraction."
_, eps_to_remove = separate_gauge_epsilons(fields, gauge_epsilons)
for e in [*new_lorentz_epsilons, *eps_to_remove]:
prod *= e
# mutate Lorentz epsilons with last contraction
lorentz_epsilons += new_lorentz_epsilons
# Check to see if there are any derivatives present, if there are process the term
deriv_structure = [f.derivs for f in prod.fields]
n_derivs = sum(deriv_structure)
if n_derivs == 0:
proc_term = prod
if isinstance(proc_term, str):
return proc_term
else:
proc_term = process_derivative_term(prod)
if isinstance(proc_term, str):
return proc_term
if proc_term.safe_simplify() == 0:
return f"Vanishing coupling at {maybe_term} after derivative processing."
# make sure the term is a singlet
check_singlet(proc_term)
# append the processed term to terms
terms.append(proc_term)
return terms, edge_dict, field_dict, lorentz_epsilons
def partition_completion(partition) -> Union[Completion, FailedCompletion]:
"""Return the completion object associated with a partition."""
part = partition["partition"]
gauge_epsilons = partition["epsilons"]
graph = partition["graph"]
op = partition["operator"]
topo = partition["topology"]
# if args is a string, then it's the reason the completion failed
args = construct_completion(part, gauge_epsilons, graph)
if not isinstance(args, str):
terms, edge_dict, field_dict, lorentz_epsilons = args
else:
return FailedCompletion(args)
explicit_op = reduce(lambda a, b: a * b, lorentz_epsilons, op.operator)
exotics = set(f for f in edge_dict.keys())
eff_operator = EffectiveOperator(op.name, explicit_op)
new_edge_attrs = {v: {"particle": k.label} for k, v in edge_dict.items()}
nx.set_edge_attributes(graph, new_edge_attrs)
return Completion(
operator=eff_operator,
partition=part,
graph=graph,
exotics=exotics,
terms=terms,
topology=topo,
)
def operator_completions(
operator: EffectiveOperator, verbose=False
) -> List[Completion]:
"""Return a list of the completions of an effective operator."""
parts = partitions(operator, verbose=verbose)
if verbose:
print(f"Starting with {len(parts)} partitions, removing isomorphic ones...")
# if remove_isomorphic_diagrams:
# parts = remove_isomorphic(parts)
if verbose:
print(f"Finding completions of {len(parts)} partitions...")
with alive_bar(len(parts)) as bar:
for p in parts:
# completions.append(partition_completion(p))
comp = partition_completion(p)
if not isinstance(comp, FailedCompletion):
yield comp
bar()
else:
for p in parts:
comp = partition_completion(p)
if not isinstance(comp, FailedCompletion):
yield comp
# completions = [partition_completion(p) for p in parts]
# good_completions = [c for c in completions if not isinstance(c, FailedCompletion)]
# return good_completions
def sort_strings(terms: List[List[str]]):
"""To account for (anti)symmetric indices, just sort the strings representing
the fields. For use in the function `check_remapping_on_terms`.
"""
data = [["".join(sorted(item)) for item in interaction] for interaction in terms]
return set(map(lambda x: tuple(sorted(x)), data))
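# For example, sort_strings([["ba", "c"], ["c", "ab"]]) gives {("ab", "c")}:
# characters are sorted within each factor and factors are sorted within each
# interaction, so reorderings coming from (anti)symmetric indices compare equal.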
def check_remapping_on_terms(terms1, terms2, remapping):
"""Return the remapping on the field labels in the terms that would get you from
one to the other, i.e. return the isomorphism if one exists, otherwise
return the empty dictionary.
"""
new_terms = set()
for term in terms1:
for k, v in remapping.items():
simple = term.safe_simplify()
s = str(safe_nocoeff(simple))
s = multiple_replace(remapping, s)
# remove generation indices in comparison
s = re.sub(r"g[0-9]+_", "g_", s)
# remove negative signs on indices
s = re.sub(r"-", "", s)
ss = tuple(sorted(s.split("*")))
new_terms.add(ss)
new_terms = sort_strings(new_terms)
comp2_strs = []
for term in terms2:
simple = term.safe_simplify()
s = str(safe_nocoeff(simple))
comp2_strs.append(s)
comp2_strs = [re.sub(r"g[0-9]+_", "g_", s) for s in comp2_strs]
comp2_strs = [re.sub(r"-", "", s) for s in comp2_strs]
comp2_tups = [tuple(sorted(s.split("*"))) for s in comp2_strs]
# sort epsilons and deltas to account for symmetric indices
comp2_tups = sort_strings(comp2_tups)
if new_terms == comp2_tups:
return remapping
# otherwise, no equivalence, return empty dict
return {}
def compare_terms(comp1: Completion, comp2: Completion) -> Dict[str, str]:
"""Returns a dictionary representing the field relabellings that would need to
be applied to the terms of comp1 to make it equivalent to comp2. This
includes the identity remapping. That is, if the terms of comp1 are the same
as the terms in comp2, the function returns a dictionary like
{"φ": "φ", "η": "η", ...}
"""
# make sure field content is the same
if set(comp1.exotic_info().values()) != set(comp2.exotic_info().values()):
return {}
# cannot be equivalent
if len(comp1.terms) != len(comp2.terms):
return {}
# qnumbers -> label
rev_map2 = {
qnumbers: field.label for field, qnumbers in comp2.exotic_info().items()
}
remapping = {
# rev_map2[qnumbers]: field.label
field.label: rev_map2[qnumbers]
for field, qnumbers in comp1.exotic_info().items()
}
return check_remapping_on_terms(comp1.terms, comp2.terms, remapping)
def are_equivalent_completions(comp1: Completion, comp2: Completion) -> bool:
"""Checks to see if the Lagrangian terms describing two completions are
equivalent.
Two completions are equivalent if their Lagrangian terms in canonical form
are the same up to field relabellings.
"""
return bool(compare_terms(comp1, comp2))
def slow_remove_equivalent_completions(
comps: List[Completion], verbose: bool = False
) -> None:
    """Compares completions by comparing Lagrangian terms. Removes duplicates
    from ``comps`` in place.
    """
    remove_equivalent(comps, are_equivalent_completions)
def collect_completions(
completions: List[Completion], key=None
) -> Dict[tuple, Completion]:
"""Return dictionary mapping field content to list of completions.
`key` is a function that takes a completion and returns a dictionary mapping
FieldType to a tuple of numbers representing that field. This defaults to
the `exotic_info` method.
Not for general user interface.
"""
out = {}
if key is None:
key = lambda x: x.exotic_info()
func = lambda c: tuple(sorted(key(c).values()))
for k, g in groupby(completions, key=func):
g_list = list(g)
# slow_remove_equivalent_completions(g_list)
k = tuple(sorted(set(k)))
out[k] = g_list
return out
def prime_registry(sieve: Dict[tuple, List[Completion]]) -> Dict[tuple, int]:
"""Ascribe a unique prime number to each exotic appearing in `sieve`.
`sieve` is a dictionary mapping a tuple of field information to a list of
completions.
"""
reg = {}
counter = 1
for k, v in sieve.items():
for field in k:
if field not in reg:
reg[field] = prime(counter)
counter += 1
return reg
def model_registry(completions, registry) -> Dict[tuple, int]:
"""Assigns an unique integer to every model by multiplying primes of fields."""
reg = {}
for k in completions:
prod = 1
for field in k:
prod *= registry[field]
reg[k] = prod
return reg
def filter_completions(
completions: Dict[tuple, List[Completion]], sieve: Dict[tuple, List[Completion]]
) -> Dict[tuple, List[Completion]]:
# establish prime registry
registry = prime_registry({**sieve, **completions})
# construct dictionaries mapping tuples of field info to integers (products
# of primes)
completions_model_registry = model_registry(completions, registry)
sieve_model_registry = model_registry(sieve, registry)
unique = {}
for k, v in completions_model_registry.items():
factors_ = factors(v)
for ref_val in sieve_model_registry.values():
if ref_val in factors_:
break
else: # no break => unique model
unique[k] = completions[k]
return unique
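# Toy illustration of the sieve: if prime_registry maps field A -> 2 and
# field B -> 3, a model with content (A, B) gets characteristic number 6 in
# model_registry. A sieve model with content (A,) has number 2, and since 2 is
# among factors(6) the (A, B) model is discarded; only models whose numbers
# share no factor with any sieve-model number end up in `unique`.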
def operator_strip_derivs(op: Operator) -> List[Operator]:
"""Removes the derivatives from the operator and returns a dictionary of the
fields, epsilons and number of derivatives. The fields output is an
    association list between Field and list of gauge indices (including
generational indices) for the field in the operator.
"""
tensors = op.tensors
new_fields = []
epsilons = []
n_derivs = 0
for field in tensors:
if isinstance(field, Field):
if field.derivs:
assert field.derivs == 1
n_derivs += 1
data = field.stripped
new_field = Field(**data, is_conj=field.is_conj)
indices = field.gauge_indices
new_fields.append((new_field, " ".join(str(i) for i in indices)))
# new_fields.append((new_field, indices))
else:
indices = field.gauge_indices
new_fields.append((field.field, " ".join(str(i) for i in indices)))
# new_fields.append((field.field, indices))
else:
epsilons.append(field)
return {"fields": new_fields, "epsilons": epsilons, "n_derivs": n_derivs}
def construct_operator(
fields: List[Tuple[Field, str]], epsilons: List[Tensor]
) -> Operator:
"""Helper function to construct operator."""
tensors = []
for field, index_string in fields:
u, d, _, _, _ = field.fresh_indices().indices_by_type.values()
lor_idx_str = " ".join(str(i) for i in u + d)
tensors.append(field(lor_idx_str + " " + index_string))
return reduce(lambda x, y: x * y, tensors + epsilons)
def derivative_combinations(
op: Union[Operator, EffectiveOperator]
) -> Union[List[Operator], List[EffectiveOperator]]:
"""Takes an operator with derivatives and returns a list of operators with
equivalent SU2 structure with the derivative acted in all possible ways.
Function expects a specific kind of input: no double derivatives on a single
field.
"""
eff_op = None
if isinstance(op, EffectiveOperator):
eff_op = op
op = op.operator
fields, epsilons, n_derivs = operator_strip_derivs(op).values()
deriv_id_func = lambda x: x
act_deriv = lambda f: D(f, allowed_lor_dyn(f))
deriv_tuple = [act_deriv for _ in range(n_derivs)] + [
deriv_id_func for _ in range(len(fields) - n_derivs)
]
structs, out = [], []
for perm in permutations(deriv_tuple):
new_structure = []
for field, func in zip(fields, perm):
new_structure.append((func(field[0]), field[1]))
structs.append(new_structure)
remove_equivalent(structs, eq_func=lambda x, y: x == y)
for struct in structs:
new_op = construct_operator(struct, epsilons)
if new_op.safe_simplify():
out.append(new_op)
return [EffectiveOperator(eff_op.name, i) for i in out] if eff_op else out
def deriv_operator_completions(
operator: EffectiveOperator, verbose=False
) -> List[Completion]:
"""Find the completions of a derivative operator. Differs from regular
``operator_completions`` in that it acts the derivatives in all possible
ways. There shouldn't be more than one derivative acting on a single field.
"""
deriv_combos = derivative_combinations(operator)
if verbose:
print(f"Finding completions of {len(deriv_combos)} IBP-related operators...")
comps = []
for combo in deriv_combos:
if combo.operator.simplify() == 0:
continue
comps += list(operator_completions(combo, verbose=verbose))
return comps
def completions(operator: EffectiveOperator, *args, **kwargs):
    """General dispatch function for completions."""
    if "D" in operator.name:
        return deriv_operator_completions(operator, *args, **kwargs)
    return operator_completions(operator, *args, **kwargs)
def collect_models(comps):
"""Group models by particle content.
A bit cumbersome to use. Should be refactored out of tests at some point.
"""
collected = collect_completions(comps)
return [Model(cs) for _, cs in list(collected.items())]
def cons_term_prime_dict(completions: List[Completion]) -> Dict[tuple, int]:
# begin by generating term prime dictionary
term_dict = {}
counter = 1
for comp in completions:
n_terms = len(comp.terms)
for term in comp.terms:
# sort all of the terms by side effect
new_term = tuple(sorted(stringify_qns(f) for f in term.fields))
if new_term not in term_dict:
# add conj term first so that when filtering you keep
# the unconjugated term
term_dict[conjugate_term(new_term)] = prime(counter)
term_dict[new_term] = prime(counter)
counter += 1
return term_dict
def completion_characteristic_number(
comp: Completion, prime_dict: Dict[tuple, int]
) -> int:
prod = 1
for term in comp.terms:
new_term = tuple(sorted(stringify_qns(f) for f in term.fields))
prod *= prime_dict[new_term]
return prod
def clean_completions(completions: List[Completion]) -> List[Completion]:
"""A fast way of removing equivalent completions using prime label method on terms.
"""
completions = list(completions)
prime_dict = cons_term_prime_dict(completions)
comp_dict = {}
for comp in completions:
num = completion_characteristic_number(comp, prime_dict)
comp_dict[(num, comp.topology)] = comp
return sorted((v for k, v in comp_dict.items()), key=lambda x: x.topology)
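# The same prime-labelling idea is applied here at the level of Lagrangian
# terms: each distinct sorted term gets a prime, a completion's characteristic
# number is the product of its term primes, and completions sharing the same
# (number, topology) key are kept only once.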
```
#### File: neutrinomass/completions/operators.py
```python
import pickle
import os
from functools import reduce
from neutrinomass.tensormethod.core import eps, Operator
from neutrinomass.tensormethod.sm import L, Q, db, ub, eb, H
from neutrinomass.completions.core import EffectiveOperator
def prod(lst):
return reduce(lambda x, y: x * y, lst)
# read in pickled data from tensormethod script lnvlatex
with open(os.path.join(os.path.dirname(__file__), "operators.p"), "rb") as f:
pickle_form_ops = pickle.load(f)
with open(os.path.join(os.path.dirname(__file__), "deriv_operators.p"), "rb") as f:
pickle_form_deriv_ops = pickle.load(f)
# define operators
EFF_OPERATORS = {}
for k, op in pickle_form_ops.items():
EFF_OPERATORS[k] = EffectiveOperator(k, Operator.from_pickle_form(op))
DERIV_EFF_OPERATORS = {}
for k, op in pickle_form_deriv_ops.items():
if k in ("D1", "D11"): # non-explosive
continue
if k.startswith("D19"): # four-deriv
continue
DERIV_EFF_OPERATORS[k] = EffectiveOperator(k, Operator.from_pickle_form(op))
# add additional operators not in BL/dGJ list
# aux. Operator objects
o1 = EFF_OPERATORS["1"].operator.tensors
o2 = EFF_OPERATORS["2"].operator.tensors
o3a = EFF_OPERATORS["3a"].operator.tensors
o3b = EFF_OPERATORS["3b"].operator.tensors
o4a = EFF_OPERATORS["4a"].operator.tensors
o4b = EFF_OPERATORS["4b"].operator.tensors
o5a = EFF_OPERATORS["5a"].operator.tensors
o5b = EFF_OPERATORS["5b"].operator.tensors
o6a = EFF_OPERATORS["6a"].operator.tensors
o6b = EFF_OPERATORS["6b"].operator.tensors
o7 = EFF_OPERATORS["7"].operator.tensors
o8 = EFF_OPERATORS["8"].operator.tensors
o61a = EFF_OPERATORS["61a"].operator.tensors
o71 = EFF_OPERATORS["71"].operator.tensors
o76 = [
eb.conj("d0"),
eb.conj("d1"),
ub.conj("d2 c0"),
ub.conj("d3 c1"),
db("u0 -c2"),
db("u1 -c3"),
]
o82 = [
L("u0 i0"),
L.conj("d0 i1"),
eb.conj("d1"),
eb.conj("d2"),
ub.conj("d3 c0"),
db("u1 -c1"),
H("i2"),
H("i3"),
eps("-i0 -i2"),
eps("-i1 -i3"),
]
oyec = [L.conj("d3 i6"), eb.conj("d4"), H("i7"), eps("-i6 -i7")]
oydc = [Q.conj("d3 -c0 i6"), db.conj("d4 c1"), H("i7"), eps("-i6 -i7")]
prime = [H("i4"), H.conj("i5"), eps("-i4 -i5")]
prime_prime = [
H("i4"),
H.conj("i5"),
H("i6"),
H.conj("i7"),
eps("-i6 -i7"),
eps("-i4 -i5"),
]
prime_prime_prime = [
H("i4"),
H.conj("i5"),
H("i6"),
H.conj("i7"),
H("i8"),
H.conj("i9"),
eps("-i8 -i9"),
eps("-i6 -i7"),
eps("-i4 -i5"),
]
# new operators from table in paper
EFF_OPERATORS["77"] = EffectiveOperator("77", prod(o1 + oyec))
EFF_OPERATORS["78"] = EffectiveOperator("78", prod(o1 + oydc))
EFF_OPERATORS["1p"] = EffectiveOperator("1p", prod(o1 + prime))
EFF_OPERATORS["8p"] = EffectiveOperator("8p", prod(o8 + prime))
EFF_OPERATORS["1pp"] = EffectiveOperator("1pp", prod(o1 + prime_prime))
# EFF_OPERATORS["1ppp"] = EffectiveOperator("1ppp", prod(o1 + prime_prime_prime))
EFF_OPERATORS["7p"] = EffectiveOperator("7p", prod(o7 + prime))
EFF_OPERATORS["8pp"] = EffectiveOperator("8pp", prod(o8 + prime_prime))
EFF_OPERATORS["71p"] = EffectiveOperator("71p", prod(o71 + prime))
EFF_OPERATORS["76p"] = EffectiveOperator("76p", prod(o76 + prime))
EFF_OPERATORS["77p"] = EffectiveOperator("77p", prod(o1 + oyec + prime))
EFF_OPERATORS["78p"] = EffectiveOperator("78p", prod(o1 + oydc + prime))
EFF_OPERATORS["79a"] = EffectiveOperator("79a", prod(o61a + prime))
EFF_OPERATORS["79b"] = EffectiveOperator("79b", prod(o2 + prime_prime))
EFF_OPERATORS["80a"] = EffectiveOperator("80a", prod(o5a + prime))
EFF_OPERATORS["80b"] = EffectiveOperator("80b", prod(o5b + prime))
EFF_OPERATORS["80c"] = EffectiveOperator("80c", prod(o3a + prime_prime))
EFF_OPERATORS["80d"] = EffectiveOperator("80d", prod(o3b + prime_prime))
EFF_OPERATORS["81a"] = EffectiveOperator("81a", prod(o6a + prime))
EFF_OPERATORS["81b"] = EffectiveOperator("81b", prod(o6b + prime))
EFF_OPERATORS["81c"] = EffectiveOperator("81c", prod(o4a + prime_prime))
EFF_OPERATORS["81d"] = EffectiveOperator("81d", prod(o4b + prime_prime))
EFF_OPERATORS["82"] = EffectiveOperator("82", prod(o82))
```
#### File: neutrinomass/completions/topologies.py
```python
import os
from glob import glob
import matplotlib.pyplot as plt
import networkx as nx
from typing import NamedTuple
from neutrinomass.tensormethod.core import IndexedField
# PATH_TO_MV = "/Users/johngargalionis/Dropbox/PhD/mv/"
TOPOLOGY_PATH = os.path.join(os.path.dirname(__file__), "topology_data")
# INTERNAL_PATH = "neutrinomass/neutrinomass/completions/topology_data/"
# TOPOLOGY_PATH = PATH_TO_MV + INTERNAL_PATH
PARTITIONS = os.path.join(TOPOLOGY_PATH, "partitions")
DIAGRAMS = os.path.join(TOPOLOGY_PATH, "diagrams")
GRAPHS = os.path.join(TOPOLOGY_PATH, "graphs")
class Leaf(NamedTuple):
field: IndexedField
node: int
def read_topology_file(data_path) -> str:
"""Reads the topology and returns the contents of the data file as a string."""
with open(data_path, "r") as f:
data_string = f.read()
return data_string
def eval_partition(partition: str):
S = lambda x: Leaf("S", x)
F = lambda x: Leaf("F", x)
def List(*args):
return args
structure = eval(partition)
    # evaluating the file contents rebuilds the nested structure of Leaf tuples
return structure
def eval_graph(graph: str):
G = nx.Graph()
for edge in graph.splitlines():
i, j = eval(edge)
G.add_edge(i, j)
return G
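# For example, eval_graph("(1, 2)\n(2, 3)") builds the path graph with edges
# (1, 2) and (2, 3); each line of the graph file is just the string form of an
# edge tuple.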
def get_topology_data(n_scalars, n_fermions):
"""Returns a list of dictionaries with data from topology data files.
[{"partition": parition_string, "graph": graph_string, "img": image}]
"""
partition_files = sorted(glob(PARTITIONS + f"/{n_scalars}s{n_fermions}f_*"))
diagram_files = sorted(glob(DIAGRAMS + f"/{n_scalars}s{n_fermions}f_*"))
graph_files = sorted(glob(GRAPHS + f"/{n_scalars}s{n_fermions}f_*"))
if not partition_files:
raise Exception("Topologies not found, please generate them again.")
out = []
for p, d, g in zip(partition_files, diagram_files, graph_files):
topology = {}
partition_string = eval_partition(read_topology_file(p))
# img = plt.imread(d)
graph_string = eval_graph(read_topology_file(g))
topology["partition"] = partition_string
topology["graph"] = graph_string
# topology["diagram"] = img
topology["partition_file"] = p
out.append(topology)
return out
```
#### File: neutrinomass/database/closures.py
```python
from typing import Union, List
import math
import sympy
from functools import reduce
from matchpy import Operation, Symbol, Arity, match, Pattern, Wildcard, substitute
from neutrinomass.completions import EffectiveOperator, EFF_OPERATORS
from neutrinomass.tensormethod import H, L, Q, Field, IndexedField
from neutrinomass.tensormethod.core import ISOSPIN, GENERATION, Operator
Op = Operation.new("Op", Arity.variadic, commutative=True, associative=True)
Const = Operation.new("Const", Arity.unary)
c = Operation.new("c", Arity.unary) # conjugate
# fields
e = Symbol("e")
nu = Symbol("nu")
u = Symbol("u")
d = Symbol("d")
h0 = Symbol("h0")
hp = Symbol("hp")
db = Symbol("db")
ub = Symbol("ub")
eb = Symbol("eb")
W = Symbol("W")
# masses
ye = Symbol("ye")
yu = Symbol("yu")
yd = Symbol("yd")
v = Symbol("v")
# couplings and symbols
nunu = Symbol("nunu")
g2 = Symbol("g2")
loop = Symbol("loop")
loopv2 = Symbol("loopv2")
# replacement rules for closures
RST = Wildcard.dot("rst")
RULES = {
# neutrinos
Op(nu, nu, RST): Op(Const(nunu), RST),
# free loops
Op(c(h0), h0, RST): Op(Const(loopv2), RST),
Op(c(hp), hp, RST): Op(Const(loop), RST),
Op(c(eb), eb, RST): Op(Const(loop), RST),
Op(c(db), db, RST): Op(Const(loop), RST),
Op(c(ub), ub, RST): Op(Const(loop), RST),
Op(c(d), d, RST): Op(Const(loop), RST),
Op(c(u), u, RST): Op(Const(loop), RST),
Op(c(e), e, RST): Op(Const(loop), RST),
Op(c(nu), nu, RST): Op(Const(loop), RST),
# masses
Op(e, eb, RST): Op(Const(v), Const(ye), Const(loop), RST),
Op(u, ub, RST): Op(Const(v), Const(yu), Const(loop), RST),
Op(d, db, RST): Op(Const(v), Const(yd), Const(loop), RST),
Op(c(e), c(eb), RST): Op(Const(v), Const(ye), Const(loop), RST),
Op(c(u), c(ub), RST): Op(Const(v), Const(yu), Const(loop), RST),
Op(c(d), c(db), RST): Op(Const(v), Const(yd), Const(loop), RST),
# make W
Op(e, nu, RST): Op(Const(g2), W, nu, nu, RST),
Op(c(eb), db, c(ub), RST): Op(
Const(v),
Const(g2),
Const(loop),
Const(loop),
Const(loopv2),
Const(yu),
Const(yd),
Const(ye),
nu,
RST,
),
Op(c(eb), c(d), u, RST): Op(
Const(v),
Const(g2),
Const(loop),
Const(loop),
Const(loopv2),
Const(yu),
Const(yd),
Const(ye),
nu,
RST,
),
Op(db, c(ub), c(hp), RST): Op(
Const(v),
Const(g2),
Const(loop),
Const(loop),
Const(loopv2),
Const(yu),
Const(yd),
RST,
),
# remove W
Op(W, hp, RST): Op(Const(v), Const(loop), RST),
Op(W, u, RST): Op(d, Const(loop), RST),
Op(W, c(d), RST): Op(c(u), Const(loop), RST),
Op(W, c(ub), db, RST): Op(
Const(v), Const(v), Const(yu), Const(yd), Const(loop), Const(loop), RST
),
# remove hp
Op(hp, c(eb), RST): Op(Const(loop), Const(ye), nu, RST),
Op(hp, c(u), RST): Op(Const(yd), db, Const(loop), RST),
Op(c(hp), db, RST): Op(Const(yd), c(u), Const(loop), RST),
Op(hp, d, RST): Op(Const(yu), c(ub), Const(loop), RST),
Op(hp, ub, RST): Op(Const(yu), c(d), Const(loop), RST),
Op(c(hp), c(ub), RST): Op(Const(yu), d, Const(loop), RST),
Op(hp, u, c(d), RST): Op(Const(g2), Const(loop), Const(loop), RST),
# make hp
Op(c(eb), nu, RST): Op(Const(ye), hp, nu, nu, RST),
# vev
Op(h0, RST): Op(Const(v), RST),
Op(c(h0), RST): Op(Const(v), RST),
}
def apply_rules(rules, subject):
for k, v in rules.items():
for substitution in match(subject, Pattern(k)):
subject = substitute(Pattern(v), substitution)
return subject
return subject
def fixed_point(start, rules=RULES, max_iterations=10, verbose=False):
old, new = None, start
counter = 1
if verbose:
print(start)
while new != old:
# Check if max iterations reached
if counter > max_iterations:
print("Maximum iterations reached on fixed_point")
return new
old = new
new = apply_rules(rules, old)
if verbose:
print(new)
counter += 1
return new
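# Sketch of the rewriting on the seesaw-like structure nu nu h0 h0 (using the
# symbols defined above): the nu-nu rule fires first and then the h0 vev rule
# twice, so fixed_point(Op(nu, nu, h0, h0)) should reduce to roughly
# Op(Const(nunu), Const(v), Const(v)), which clean() turns into
# ['nunu', 'v', 'v'] and neutrino_mass_estimate maps to the usual v**2/Λ.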
def clean(op):
lst = list(op)
out = []
while lst:
item = lst.pop(0)
if item.head.name == "Const":
s = str(item.operands[0])
out.append(s)
else:
# raise Exception(f"Non Const symbol {item} encountered in reduced operator.")
return op
return out
h = [hp, h0]
l = [nu, e]
q = [u, d]
hc = [h0, hp]
lc = [e, nu]
qc = [d, u]
eps = [[0, -1], [1, 0]]
def parse_operator(eff_op: Union[EffectiveOperator, Operator]):
"""Parse the operator `eff_op` into matchpy symbols with the SU(2) structure
expanded.
This is an unfortunately messy function and needs to be rewritten in a
cleaner way.
"""
if isinstance(eff_op, EffectiveOperator):
operator = eff_op.operator
else:
operator = eff_op
fields, epsilons = [], []
n_indices = 0
for expr in operator.tensors:
if isinstance(expr, Field):
if expr.derivs:
expr = expr.strip_derivs_with_indices()
i = expr.indices_by_type["Isospin"]
if expr.is_conj:
label = expr.label.lower()[:-1]
label = label + ("c" if i else "")
else:
label = expr.label.lower()
if i:
label += f"[{i[0]}]"
if expr.is_conj:
label = f"c({label})"
fields.append(label)
else:
epsilons.append(expr.indices)
n_indices += 2
field_string = "*".join(fields)
eval_str = ""
indices = [("_i", "_j"), ("_k", "_l"), ("_m", "_n"), ("_p", "_q")]
for (i, j), (a, b) in zip(epsilons, indices):
field_string = field_string.replace(str(-i), a)
field_string = field_string.replace(str(-j), b)
field_string += f"*eps[{a}][{b}]"
loop_ranges = [1, 1, 1, 1, 1, 1, 1, 1]
for i in range(n_indices):
loop_ranges[i] += 1
_a, _b, _c, _d, _e, _f, _g, _h = loop_ranges
res = []
for s1, _i in zip(["_i", "_i"], range(_a)):
for s2, _j in zip(["_j", "_j"], range(_b)):
for s3, _k in zip(["_k", "_k"], range(_c)):
for s4, _l in zip(["_l", "_l"], range(_d)):
for s5, _m in zip(["_m", "_m"], range(_e)):
for s6, _n in zip(["_n", "_n"], range(_f)):
for s7, _p in zip(["_p", "_p"], range(_g)):
for s8, _q in zip(["_q", "_q"], range(_h)):
res.append(
field_string.replace(s1, str(_i))
.replace(s2, str(_j))
.replace(s3, str(_k))
.replace(s4, str(_l))
.replace(s5, str(_m))
.replace(s6, str(_n))
.replace(s7, str(_p))
.replace(s8, str(_q))
)
out = []
for elem in res:
new_elem = elem.replace("*eps[0][1]", "").replace("*eps[1][0]", "")
if not "eps" in new_elem:
out.append(new_elem)
return [eval(f'Op({elem.replace("*", ",")})') for elem in out]
def neutrino_mass_estimate(eff_op: Union[EffectiveOperator, List[Op]], verbose=False):
if isinstance(eff_op, EffectiveOperator):
clean_lst = [clean(fixed_point(op)) for op in parse_operator(eff_op)]
else:
clean_lst = [clean(fixed_point(op)) for op in eff_op]
out = []
for lst in clean_lst:
if not isinstance(lst, list):
if verbose:
print(f"Skipping structure {lst} since it should be negligible")
continue
# make sure only two neutrinos are in the diagram
assert lst.count("nunu") == 1
lst.remove("nunu")
n_vevs = lst.count("v")
assert n_vevs % 2 == 0
n_loopv2 = (n_vevs - 2) // 2
for _ in range(n_vevs):
lst.remove("v")
# need to account for seesaw case
prod = reduce(lambda x, y: x * y, [sympy.Symbol(i) for i in lst]) if lst else 1
prod *= sympy.Symbol("v") * sympy.Symbol("v") / sympy.Symbol("Λ")
for _ in range(n_loopv2):
prod *= sympy.Symbol("loopv2")
out.append(prod)
return out
def numerical_np_scale_estimate(expr):
"""Returns log10 of estimate of Λ in TeV."""
vev = 174
mv = 5e-11
loop = 1.0 / (16 * math.pi ** 2)
subs_list = [
(sympy.Symbol("v"), vev),
(sympy.Symbol("loop"), loop),
(sympy.Symbol("loopv2"), (loop + vev ** 2 / sympy.Symbol("Λ") ** 2),),
(sympy.Symbol("g2"), 0.6295 ** 2),
(sympy.Symbol("yu"), 172.76 / vev),
(sympy.Symbol("yd"), 4.18 / vev),
(sympy.Symbol("ye"), 1.78 / vev),
]
m = expr.subs(subs_list)
sol = sympy.solve(m - mv, sympy.Symbol("Λ"))[0]
scale = abs(sol) * 1e-3
return scale
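# Hedged numerical check: for the tree-level seesaw expression v**2/Λ the
# solver gives Λ = 174**2 / 5e-11 ≈ 6.1e14 GeV ≈ 6.1e11 TeV, so the log10 of
# the scale is about 11.8, consistent with the rounded value of 12 asserted
# for operator "1" in closures_test.py.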
def numerical_mv(expr):
"""Returns estimate of neutrino-mass scale in GeV assuming a NP scale of 1
TeV.
"""
vev = 174
mv = 5e-11
loop = 1.0 / (16 * math.pi ** 2)
subs_list = [
(sympy.Symbol("v"), vev),
(sympy.Symbol("loop"), loop),
(sympy.Symbol("loopv2"), (loop + vev ** 2 / sympy.Symbol("Λ") ** 2),),
(sympy.Symbol("g2"), 0.6295 ** 2),
(sympy.Symbol("yu"), 172.76 / vev),
(sympy.Symbol("yd"), 4.18 / vev),
(sympy.Symbol("ye"), 1.78 / vev),
(sympy.Symbol("Λ"), 1000),
]
return expr.subs(subs_list)
```
#### File: neutrinomass/database/closures_test.py
```python
from neutrinomass.database.closures import *
from neutrinomass.completions import EFF_OPERATORS, DERIV_EFF_OPERATORS
from neutrinomass.database.utils import get_leading_mv, estimate_np_scale
loop = sympy.Symbol("loop")
loopv2 = sympy.Symbol("loopv2")
v = sympy.Symbol("v")
Λ = sympy.Symbol("Λ")
yd = sympy.Symbol("yd")
ye = sympy.Symbol("ye")
yu = sympy.Symbol("yu")
g2 = sympy.Symbol("g2")
SEESAW = v ** 2 / Λ
dGJ_RESULTS = {
"1": SEESAW,
"2": SEESAW * ye * loop,
"3a": SEESAW * yd * loop ** 2 * g2,
"3b": SEESAW * yd * loop,
"4a": SEESAW * yu * loop,
"4b": SEESAW * yu * loop ** 2 * g2,
"5a": SEESAW * yd * loop ** 2,
"6a": SEESAW * yu * loop ** 2,
"7": SEESAW * ye * loop ** 2 * g2 * loopv2,
"8": SEESAW * ye * yd * yu * loop ** 2,
"9": SEESAW * ye ** 2 * loop ** 2,
"10": SEESAW * ye * yd * loop ** 2,
"11a": SEESAW * yd ** 2 * g2 * loop ** 3,
"11b": SEESAW * yd ** 2 * loop ** 2,
"12b": SEESAW * yu ** 2 * loop ** 2,
"12b": SEESAW * yu ** 2 * g2 * loop ** 3,
"13": SEESAW * ye * yu * loop ** 2,
"14a": SEESAW * yd * yu * g2 * loop ** 3,
"14b": SEESAW * yd * yu * loop ** 2,
"15": SEESAW * yd * yu * g2 * loop ** 3,
"16": SEESAW * yd * yu * g2 ** 2 * loop ** 4,
"17": SEESAW * yd * yu * g2 ** 2 * loop ** 4,
"18": SEESAW * yd * yu * g2 ** 2 * loop ** 4,
"19": SEESAW * yd ** 2 * yu * ye * loop ** 3,
"20": SEESAW * yd * yu ** 2 * ye * loop ** 3,
"21a": SEESAW * ye * yu * loop ** 2 * loopv2,
"21b": SEESAW * ye * yu * loop ** 2 * loopv2,
"22a": SEESAW * g2 * loop ** 3,
"23a": SEESAW * loopv2 * ye * yd * loop ** 2,
"24a": SEESAW * yd ** 2 * loop ** 3,
"24b": SEESAW * yd ** 2 * loop ** 3,
"25a": SEESAW * yd * yu * loop ** 2 * loopv2,
"26a": SEESAW * yd * ye * loop ** 3,
"26b": SEESAW * yd * ye * loop ** 2 * loopv2,
"27a": SEESAW * loop ** 3 * g2,
"27b": SEESAW * loop ** 3 * g2,
"28a": SEESAW * yd * yu * loop ** 3,
"28b": SEESAW * yd * yu * loop ** 3,
"28c": SEESAW * yd * yu * loop ** 3,
"29a": SEESAW * yu ** 2 * loop ** 2 * loopv2,
"29b": SEESAW * g2 * loop ** 3,
"30a": SEESAW * ye * yu * loop ** 3,
"30b": SEESAW * ye * yu * loop ** 2 * loopv2,
"31a": SEESAW * yd * yu * loop ** 2 * loopv2,
"31b": SEESAW * yd * yu * loop ** 2 * loopv2,
"37": SEESAW * ye ** 2 * loop ** 5 * yd ** 2 * g2,
"43a": SEESAW * g2 * loop ** 4 * yu * yd,
"43b": SEESAW * g2 * loop ** 4 * yu * yd,
"43c": SEESAW * g2 * loop ** 4 * yu * yd,
"49": g2 * loop ** 3 * SEESAW,
"50a": yd * yu * g2 * loop ** 3 * SEESAW,
"52a": yd * yu * g2 * loop ** 4 * SEESAW,
"53": yd ** 2 * yu ** 2 * g2 * loop ** 5 * SEESAW,
"57": ye * yu * g2 * loop ** 4 * SEESAW,
"59a": ye * yu * yd ** 2 * loop ** 4 * SEESAW,
"60a": ye * yu ** 2 * yd * loop ** 4 * SEESAW,
"65a": yu * yd * loop ** 4 * g2 * SEESAW,
"75": ye * yd * yu ** 2 * loop ** 3 * loopv2 * SEESAW,
}
def test_operators_expr():
seesaw = v ** 2 / Λ
assert seesaw == get_leading_mv(EFF_OPERATORS["1"])
assert ye * loop * seesaw == get_leading_mv(EFF_OPERATORS["2"])
assert yd * g2 * loop ** 2 * seesaw == get_leading_mv(EFF_OPERATORS["3a"])
assert yd * loop * seesaw == get_leading_mv(EFF_OPERATORS["3b"])
assert yu * loop * seesaw == get_leading_mv(EFF_OPERATORS["4a"])
assert yu * g2 * loop ** 2 * seesaw == get_leading_mv(EFF_OPERATORS["4b"])
assert yd * loop ** 2 * seesaw == get_leading_mv(EFF_OPERATORS["5a"])
assert yd * loop * loopv2 * seesaw == get_leading_mv(EFF_OPERATORS["5b"])
assert yd * g2 * loop ** 2 * loopv2 * seesaw == get_leading_mv(EFF_OPERATORS["5c"])
assert yd * loop * loopv2 * seesaw == get_leading_mv(EFF_OPERATORS["5d"])
assert yu * loop ** 2 * seesaw == get_leading_mv(EFF_OPERATORS["6a"])
assert yu * loop * loopv2 * seesaw == get_leading_mv(EFF_OPERATORS["6b"])
assert yu * loop * loopv2 * seesaw == get_leading_mv(EFF_OPERATORS["6c"])
assert yu * g2 * loop ** 2 * loopv2 * seesaw == get_leading_mv(EFF_OPERATORS["6d"])
assert ye * loop ** 2 * seesaw == get_leading_mv(EFF_OPERATORS["7"])
assert ye * yu * yd * g2 * loop ** 2 * seesaw * loopv2 == get_leading_mv(
EFF_OPERATORS["8"]
)
assert ye ** 2 * loop ** 2 * seesaw == get_leading_mv(EFF_OPERATORS["9"])
assert ye * yd * loop ** 2 * seesaw == get_leading_mv(EFF_OPERATORS["10"])
assert yd ** 2 * loop ** 3 * g2 * seesaw == get_leading_mv(EFF_OPERATORS["11a"])
assert yd ** 2 * loop ** 2 * seesaw == get_leading_mv(EFF_OPERATORS["11b"])
assert yu ** 2 * loop ** 2 * seesaw == get_leading_mv(EFF_OPERATORS["12a"])
assert yu ** 2 * loop ** 3 * g2 * seesaw == get_leading_mv(EFF_OPERATORS["12b"])
assert ye * yu * loop ** 2 * seesaw == get_leading_mv(EFF_OPERATORS["13"])
assert yd * yu * loop ** 3 * g2 * seesaw == get_leading_mv(EFF_OPERATORS["14a"])
assert yd * yu * loop ** 2 * seesaw == get_leading_mv(EFF_OPERATORS["14b"])
assert yd * yu * loop ** 3 * g2 * seesaw == get_leading_mv(EFF_OPERATORS["15"])
assert yd * yu * loop ** 3 * g2 * seesaw == get_leading_mv(EFF_OPERATORS["16"])
assert yd * yu * loop ** 3 * g2 * seesaw == get_leading_mv(EFF_OPERATORS["17"])
assert yd * yu * loop ** 3 * g2 * seesaw == get_leading_mv(EFF_OPERATORS["18"])
assert yd ** 2 * yu * ye * g2 * loop ** 3 * loopv2 * seesaw == get_leading_mv(
EFF_OPERATORS["19"]
)
assert yd * yu ** 2 * ye * g2 * loop ** 3 * loopv2 * seesaw == get_leading_mv(
EFF_OPERATORS["20"]
)
assert dGJ_RESULTS["21a"] == get_leading_mv(EFF_OPERATORS["21a"])
assert dGJ_RESULTS["21b"] == get_leading_mv(EFF_OPERATORS["21b"])
assert loop ** 4 * yd * ye ** 2 * yu * seesaw == get_leading_mv(EFF_OPERATORS["37"])
assert g2 * loop ** 3 * loopv2 * yd * yu * seesaw == get_leading_mv(
EFF_OPERATORS["43a"]
)
assert g2 * loop ** 3 * loopv2 * yd * yu * seesaw == get_leading_mv(
EFF_OPERATORS["50a"]
)
assert g2 * loop ** 3 * loopv2 * yd * yu * seesaw == get_leading_mv(
EFF_OPERATORS["52a"]
)
assert (
g2 ** 2 * loop ** 4 * loopv2 ** 2 * yd ** 2 * yu ** 2 * seesaw
== get_leading_mv(EFF_OPERATORS["53"])
)
assert (
seesaw * yd ** 2 * ye ** 2 * yu ** 2 * g2 ** 2 * loop ** 4 * loopv2 ** 2
== get_leading_mv(EFF_OPERATORS["76"])
)
assert loop ** 3 * ye * yu * seesaw == get_leading_mv(EFF_OPERATORS["57"])
assert g2 * loop ** 3 * loopv2 ** 2 * ye * yu * yd ** 2 * seesaw == get_leading_mv(
EFF_OPERATORS["59a"]
)
assert g2 * loop ** 4 * loopv2 * ye * yu ** 2 * yd * seesaw == get_leading_mv(
EFF_OPERATORS["60a"]
)
assert loop ** 3 * yd * ye * seesaw == get_leading_mv(EFF_OPERATORS["75"])
assert seesaw * loopv2 == get_leading_mv(EFF_OPERATORS["1p"])
assert ye * loop * loopv2 * seesaw == get_leading_mv(EFF_OPERATORS["61a"])
assert g2 * loop ** 2 * seesaw == get_leading_mv(DERIV_EFF_OPERATORS["D8i"])
assert yd * yu * g2 * loop ** 2 * loopv2 * seesaw == get_leading_mv(
DERIV_EFF_OPERATORS["D10b"]
)
def test_operators_numerical():
from math import log10
proc = lambda n: round(log10(max(n)))
assert proc(estimate_np_scale(EFF_OPERATORS["1"])) == 12
assert proc(estimate_np_scale(EFF_OPERATORS["2"])) == 8
assert proc(estimate_np_scale(EFF_OPERATORS["3a"])) == 5
assert proc(estimate_np_scale(EFF_OPERATORS["3b"])) == 8
assert proc(estimate_np_scale(EFF_OPERATORS["4a"])) == 10
assert proc(estimate_np_scale(EFF_OPERATORS["5a"])) == 6
assert proc(estimate_np_scale(EFF_OPERATORS["76"])) == -2
def test_dgj():
checked = {
# vanishing loop
"7",
"16",
"17",
"18",
"22a",
"27a",
"27b",
"29a",
"29b",
"49",
"50a",
"52a",
"57",
"75",
# vanishing Higgs combo
"8",
"19",
"20",
"28a",
"28b",
"28c",
"43a",
"43b",
"43c",
"53",
"59a",
"60a",
"65a",
# unclear
"37",
}
for k, v in dGJ_RESULTS.items():
if k in checked:
continue
expr = get_leading_mv(EFF_OPERATORS[k])
assert v == expr
# if v != expr:
# print(f"{k}: {expr}")
```
#### File: neutrinomass/database/database_test.py
```python
from neutrinomass.database.database import *
def test_conjugate_term():
test_terms = [
["L.conj", "F,10,3,1/6,1", "F,10,3,7/6,1"],
["F,11,1,1/2,0", "F,11,2,0,0", "F,20,0,1/3,2", "F,20,1,5/6,2"],
["Q", "S,02,1,7/6,-2", "S,02,2,5/3,-2"],
["S,10,0,2/3,1", "S,11,0,1,0", "S,11,1,1/2,0"],
]
conj_terms = [
["L", "F,01,3,-1/6,-1", "F,01,3,-7/6,-1"],
["F,11,1,-1/2,0", "F,11,2,0,0", "F,02,0,-1/3,-2", "F,02,1,-5/6,-2"],
["Q.conj", "S,20,1,-7/6,2", "S,20,2,-5/3,2"],
["S,01,0,-2/3,-1", "S,11,0,-1,0", "S,11,1,-1/2,0"],
]
proc_terms = [conjugate_term(t) for t in test_terms]
for i, sorted_conj in enumerate(proc_terms):
assert list(sorted_conj) == sorted(conj_terms[i])
```
#### File: neutrinomass/tensormethod/contract_test.py
```python
from neutrinomass.tensormethod.core import Index, delta, eps
from neutrinomass.tensormethod.contract import colour_singlets
from neutrinomass.tensormethod.contract import construct_operators
from neutrinomass.tensormethod.contract import unsimplified_invariants
from neutrinomass.tensormethod.contract import extract_relabellings
from neutrinomass.tensormethod.contract import compare_singlets
from neutrinomass.tensormethod.contract import is_identity_mapping
from neutrinomass.tensormethod.contract import clean_operators
from neutrinomass.tensormethod.contract import remove_relabellings
from neutrinomass.tensormethod.contract import invariants
from neutrinomass.tensormethod.sm import *
def test_colour_singlets():
singlets = colour_singlets([Q("u0 c0 i0") * db("u1 -c1")])
assert singlets[0] == Q("u0 c0 i0") * db("u1 -c1") * delta("c1 -c0")
singlets = colour_singlets([G("u0 u1 c0 -c1") * G("u2 u3 c2 -c3")])
assert len(singlets) == 2
# tensormethod doesn't know that G should be traceless on c0 and c1
ans1 = G("u0 u1 c0 -c1") * G("u2 u3 c2 -c3") * delta("c1 -c0") * delta("c3 -c2")
ans2 = G("u0 u1 c0 -c1") * G("u2 u3 c2 -c3") * delta("c1 -c2") * delta("c3 -c0")
assert singlets == [ans1, ans2]
singlets = colour_singlets(
[Q("u0 c0 i0") * db("u1 -c1"), Q("u0 c0 i0") * ub("u1 -c1")]
)
assert len(singlets) == 2
def test_construct_operators():
prods = [i.walked() for i in L * L]
ans = [
[L("u0 i0") * L("u1 i1")],
[L("u0 i0") * L("u1 i1") * eps("-i0 -i1")],
[L("u0 i0") * L("u1 i1") * eps("-u0 -u1")],
[L("u0 i0") * L("u1 i1") * eps("-u0 -u1") * eps("-i0 -i1")],
]
# need to define a better operator equality to test this better
assert len([construct_operators(i) for i in prods]) == len(ans)
def test_unsimplified_invariants():
pass
def test_extract_relabellings():
a = [eps("-i0 -i2"), eps("-i3 -i1")]
b = [eps("-i3 -i2"), eps("-i2 -i1")]
# relabellings action on a
# 0 -> 2, 2 -> 3, 3 -> 2
# 1 -> 2, 0 -> 1
relabellings1 = [
(Index("-i0"), Index("-i2")),
(Index("-i2"), Index("-i3")),
(Index("-i3"), Index("-i2")),
]
relabellings2 = [(Index("-i1"), Index("-i2")), (Index("-i0"), Index("-i1"))]
assert extract_relabellings(a, b) == [relabellings1, relabellings2]
def test_compare_singlets():
pass
def test_is_identity_mapping():
pass
def test_clean_operators():
pass
def test_remove_relabellings():
pass
def test_invariants():
o1 = invariants(L, L, H, H)
o2 = invariants(L, L, L, eb, H)
o3 = invariants(L, L, Q, db, H)
o4 = invariants(L, L, Q.conj, ub.conj, H)
o5 = invariants(L, L, Q, db, H, H, H.conj)
# o29 = invariants(L, L, Q, Q.conj, ub, ub.conj, H, H)
assert len(o1) == 1
assert len(o2) == 1
assert len(o3) == 2
assert len(o4) == 2
assert len(o5) == 4
# assert len(o29) == 4
```
#### File: neutrinomass/tensormethod/lagrangian.py
```python
from itertools import combinations_with_replacement
from alive_progress import alive_bar
from sympy import Matrix
from sympy.tensor.tensor import tensorhead
from collections import Counter
import numpy as np
from typing import List
from neutrinomass.tensormethod.contract import invariants, unsimplified_invariants
from neutrinomass.utils import remove_equivalent
from neutrinomass.tensormethod.core import (
Operator,
GENERATION,
Index,
decompose_product,
Prod,
Field,
)
from neutrinomass.tensormethod.sm import L, Q, db, H, ub, eb
def make_coupling(symbol: str, indices: str, sym=None):
indices_list = indices.split()
if sym is None:
sym = [[1]] * len(indices_list)
return tensorhead(symbol, [Index(i) for i in indices_list], sym)
def npoint_fieldstrings(n, fields=(L, eb, Q, db, ub, H), derivs=False, func=None):
L.charges["l"] = 1
eb.charges["l"] = -1
Q.charges["l"] = 0
ub.charges["l"] = 0
db.charges["l"] = 0
H.charges["l"] = 0
conjs = tuple([f.conj for f in fields])
if derivs:
T = Field("D", "11000", charges={"y": 0, "3b": 0, "l": 0})
fields += (T,)
combos = list(combinations_with_replacement(fields + conjs, n))
terms = []
with alive_bar(len(combos)) as bar:
while combos:
combo = combos.pop(0)
if func is not None:
if not func(combo):
bar()
continue
prods = decompose_product(*combo)
singlets = [i for i in prods if i.is_singlet]
if singlets:
terms += [singlets[0]]
bar()
# for f in [L, eb, Q, ub, db, H]:
# del f.charges["l"]
return terms
def prod_mass_dim(prod: Prod) -> int:
"""Returns the mass dimension of a Prod object"""
out = []
while True:
irrep, left, right = prod
out.append(right)
if not isinstance(left, Prod):
out.append(left)
return sum(map(lambda x: x.mass_dim, out))
prod = left
def generate_fieldstrings(max_dim, fields, derivs=True):
"""Returns a product of fields of maximum dimension `max_dim` built out of
`fields`.
"""
out = []
for n_fields in range(2, max_dim + 1):
for fieldstring in npoint_fieldstrings(n_fields, fields, derivs=derivs):
if prod_mass_dim(fieldstring.walked()) <= max_dim:
out.append(fieldstring)
return out
def npoint_terms(n, fields, nf=3, ignore=[]):
conjs = [f.conj for f in fields]
combos = combinations_with_replacement([*fields, *conjs], n)
terms = []
for combo in combos:
invs = unsimplified_invariants(*combo, ignore=ignore)
terms += invs
return terms
def clean_fields(exotics: set):
"""Returns fields with Dirac partners separated and duplicates removed"""
out = []
for f in exotics:
out.append(f)
if f.is_fermion and f.y != 0:
out.append(f.dirac_partner())
eq = lambda x, y: x.field == y.field
remove_equivalent(out, eq_func=eq)
return sorted(out)
class Lagrangian:
def __init__(self, exotics: set, interaction_terms: list):
"""Exotics is a set containing only one Dirac partner for a Dirac fermion."""
self.exotics = exotics
self.interaction_terms = interaction_terms
self.fields = clean_fields(self.exotics)
@property
def terms(self):
exotic_mass_terms = [f.mass_term for f in self.fields]
return self.interaction_terms + exotic_mass_terms
def u1_symmetries(self):
exotics = [f.field for f in self.fields]
extra_0s = [0 for _ in range(len(self.fields))]
# H Q ub db L eb
yukawas = [[1, 1, 1, 0, 0, 0], [-1, 1, 0, 1, 0, 0], [-1, 0, 0, 0, 1, 1]]
new_yukawas = [list(yuk) + extra_0s for yuk in yukawas]
exotic_indices_nonconj = {k: v for k, v in zip(exotics, range(0, len(exotics)))}
exotic_indices_conj = {
k: v for k, v in zip([f.conj for f in exotics], range(0, len(exotics)))
}
exotic_indices = {**exotic_indices_conj, **exotic_indices_nonconj}
matrix = []
for term in self.terms:
if contains(term, exotics):
matrix += [term_to_row(term, exotics, exotic_indices)]
matrix += new_yukawas
X = Matrix(matrix)
return X.nullspace()
def num_u1_symmetries(self):
return len(self.u1_symmetries())
def generate_full(self):
interaction_terms = generate_uv_terms(self.fields)
exotic_mass_terms = [f.mass_term for f in self.fields]
return Lagrangian(self.exotics, interaction_terms + exotic_mass_terms)
def term_to_row(term, exotics, exotic_indices):
sm_matter = (H, Q, ub, db, L, eb)
sm_matter_conj = [f.conj for f in sm_matter]
index_dict_nonconj = dict(zip(sm_matter, range(6)))
index_dict_conj = dict(zip(sm_matter_conj, range(6)))
index_dict = {**index_dict_conj, **index_dict_nonconj}
n_fields = len(exotics) + 6
row = np.zeros(n_fields)
for field, mult in Counter(term.fields).items():
if field in [*sm_matter, *sm_matter_conj]:
row[index_dict[field]] += mult if not field.is_conj else -mult
else:
row[6 + exotic_indices[field]] += mult if not field.is_conj else -mult
return [int(i) for i in row]
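# For instance, the SM up-type Yukawa term H Q ub (with no exotics present)
# maps to the row [1, 1, 1, 0, 0, 0, 0, ...], which is exactly the first entry
# of the `yukawas` matrix used in Lagrangian.u1_symmetries; conjugated fields
# enter with a minus sign instead.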
def generate_uv_terms(fields: set):
sm_matter = [H, Q, ub, db, L, eb]
all_fields = sm_matter + [f.field for f in fields]
cubic_terms = npoint_terms(3, all_fields)
quartic_terms = npoint_terms(4, all_fields)
out = []
for term in [*cubic_terms, *quartic_terms]:
if term.mass_dim <= 4:
out.append(term)
eq = lambda x, y: x.nocoeff.safe_simplify() == y.nocoeff.safe_simplify()
remove_equivalent(out, eq_func=eq)
# only keep terms that contain exotic fields
return [i for i in out if i != 0 and contains(i, [f.field for f in fields])]
def contains(term: Operator, fields: List[Field]):
term_fields = term.fields
for f in fields:
if f in term_fields or f.conj in term_fields:
return True
return False
```
#### File: neutrinomass/tensormethod/parse_hs.py
```python
from copy import copy
from functools import reduce
from itertools import product
import sympy
import neutrinomass.tensormethod.hs as hs
import neutrinomass.tensormethod.sm as sm
import neutrinomass.tensormethod.core as tm
from neutrinomass.tensormethod.contract import invariants
from neutrinomass.tensormethod.hs import X
# plug in 3 fermion generations
H7_LNV_NF3 = hs.H7_LNV.xreplace({hs.Nf: 3})
H9_LNV_NF3 = hs.H9_LNV.xreplace({hs.Nf: 3})
H11_LNV_NF3 = hs.H11_LNV.xreplace({hs.Nf: 3})
FIELD_LOOKUP = {
hs.L(X): sm.L,
hs.Ld(X): sm.L.conj,
hs.H(X): sm.H,
hs.Hd(X): sm.H.conj,
hs.Q(X): sm.Q,
hs.Qd(X): sm.Q.conj,
hs.eb(X): sm.eb,
hs.ebd(X): sm.eb.conj,
hs.ub(X): sm.ub,
hs.ubd(X): sm.ub.conj,
hs.db(X): sm.db,
hs.dbd(X): sm.db.conj,
hs.G(X): sm.G,
hs.Gb(X): sm.Gb,
hs.W(X): sm.W,
hs.Wb(X): sm.Wb,
hs.B(X): sm.B,
hs.Bb(X): sm.Bb,
}
def distribute_derivatives(expr):
"""Returns a new Hilbert Series with the derivatives distributed across each
term.
For a single term, pass it in wrapped in a list.
"""
new_terms = []
    f = lambda x: x.args if not isinstance(x, list) else x
for term in f(expr):
# derivatives will never be outside of Mul
if not isinstance(term, sympy.Mul):
new_terms.append(term)
continue
# iterate through items in a term to extract derivative order if present
for item in term.args:
if not str(item).startswith("D"):
continue
if isinstance(item, sympy.Pow):
base, power = item.args
if base == hs.D:
new_term = term / (hs.D ** power)
for _ in range(power):
new_term = new_term.diff(X)
else:
new_term = term / hs.D
new_term = new_term.diff(X)
new_terms.append(new_term)
break
else:
new_terms.append(term)
return sum(new_terms)
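# Hedged example: for a term carrying a factor hs.D**2, the block above strips
# the D**2 factor and differentiates the remaining term twice with respect to
# X, which is how the derivative count gets attached to the field symbols; a
# single term should be passed in wrapped in a list, as the docstring notes.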
def is_number(expr):
return isinstance(expr, sympy.Integer) or isinstance(expr, sympy.Rational)
def is_field(expr, term):
if term:
if isinstance(expr, sympy.Pow):
expr = expr.args[0]
return isinstance(expr, sympy.Function)
def is_deriv(expr):
if isinstance(expr, sympy.Pow):
expr = expr.args[0]
return isinstance(expr, sympy.Derivative)
def is_term(expr):
return isinstance(expr, sympy.Pow) or isinstance(expr, sympy.Mul)
def proc_number(expr):
return [1]
def proc_field(expr):
if isinstance(expr, sympy.Function):
return [FIELD_LOOKUP[expr]]
if isinstance(expr, sympy.Pow):
base, power = expr.args
return [FIELD_LOOKUP[base]] * power
def proc_deriv(expr):
if isinstance(expr, sympy.Derivative):
field, (_, n) = expr.args
return [("D", n, FIELD_LOOKUP[field])]
if isinstance(expr, sympy.Pow):
base, power = expr.args
return proc_deriv(base) * power
def is_sum(expr):
return isinstance(expr, sympy.Add)
def is_symbolic_deriv(expr):
# derivatives represented by tuples:
# ("D", order, field)
return isinstance(expr, tuple)
def no_numbers(expr):
return [i for i in expr if not isinstance(i, int)]
def deriv_possibilities(field, order):
if order < 1:
return [field]
if field.is_fermion:
deltas = [(1, 1), (-1, 1), (1, -1)]
else:
deltas = [(1, 1), (-1, -1)]
dynkin_options = []
for delta_u, delta_d in deltas:
u, d = field.lorentz_irrep
sum_u = delta_u + u
sum_d = delta_d + d
if sum_u >= 0 and sum_d >= 0:
            new_dynkin = str(sum_u) + str(sum_d)
dynkin_options.append(new_dynkin)
result = [deriv_possibilities(tm.D(field, d), order - 1) for d in dynkin_options]
return sympy.flatten(result)
def proc_term(expr):
flat_term = reduce(lambda x, y: x + y, expr)
# expand derivative possibilities and find invariants, return as a list
contains_deriv = False
for item in flat_term:
if is_symbolic_deriv(item):
contains_deriv = True
if not contains_deriv:
# return [invariants(*no_numbers(flat_term))]
return [no_numbers(flat_term)]
# build new lists with derivative possibilities
new_terms = [[]]
for i, item in enumerate(flat_term):
if not is_symbolic_deriv(item):
for new_term in new_terms:
new_term.append(item)
if is_symbolic_deriv(item):
_, order, field = item
possible_fields = deriv_possibilities(field, order)
new_terms = list(product(new_terms, possible_fields))
# product leaves the list a bit dirty, need to clean:
# ([ old list ], new_field) -> [*old_list, new_field]
new_terms = [[*old_list, new_field] for old_list, new_field in new_terms]
return [no_numbers(term) for term in new_terms]
def proc_sum(expr):
return reduce(lambda x, y: x + y, expr)
def parse_hs(expr, term=False):
if is_number(expr):
return proc_number(expr)
if is_field(expr, term=term):
return proc_field(expr)
if is_deriv(expr):
return proc_deriv(expr)
# recursive calls
# term is a product of fields (not power)
if is_term(expr):
args = expr.args if not isinstance(expr, sympy.Pow) else [expr]
return proc_term([parse_hs(item, term=True) for item in args])
if is_sum(expr):
return proc_sum([parse_hs(item) for item in expr.args])
raise Exception(f"Missed a case for {expr} when parsing Hilbert Series.")
def parse(hs):
"""Parses Hilbert Series into a list of lists of fields."""
return parse_hs(distribute_derivatives(hs))
``` |
{
"source": "johngathure/africastalking-python",
"score": 3
} |
#### File: africastalking-python/africastalking/Token.py
```python
import json
from Service import APIService, validate_phone
class TokenService(APIService):
def __init__(self, username, api_key):
super(TokenService, self).__init__(username, api_key)
def create_checkout_token(self, phone_number, callback=None):
if not validate_phone(phone_number):
raise ValueError('Invalid phone number')
url = self._make_url('/checkout/token/create')
headers = dict(self._headers)
data = {'phoneNumber': phone_number}
return self._make_request(url, 'POST', headers, data=data, params=None, callback=callback)
def generate_auth_token(self, callback=None):
url = self._make_url('/auth-token/generate')
headers = dict(self._headers)
headers['Content-Type'] = 'application/json'
data = json.dumps({
'username': self._username
})
return self._make_request(url, 'POST', headers, params=None, data=data, callback=callback)
```
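A brief usage sketch for the token service above, mirroring the pattern in test_account.py below. The credentials are placeholders, and exposing `TokenService` as `africastalking.Token` is an assumption rather than something shown in this file:
```python
# Illustrative sketch; assumes the SDK registers TokenService as `africastalking.Token`
# (an assumption, mirroring how test_account.py uses `africastalking.Account`).
import africastalking

africastalking.initialize('sandbox', 'my_api_key')  # placeholder credentials
token_service = africastalking.Token
print(token_service.generate_auth_token())          # performs a live API call
```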
#### File: africastalking-python/test/test_account.py
```python
import africastalking
import unittest
from test import USERNAME, API_KEY
africastalking.initialize(USERNAME, API_KEY)
service = africastalking.Account
class TestAccountService(unittest.TestCase):
def test_fetch_account(self):
res = service.fetch_account()
assert res['UserData']['balance'] is not None
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "johngatop/DeepLearning",
"score": 3
} |
#### File: 1-MLP_and_CNN/code/custom_batchnorm.py
```python
import numpy as np
import torch
import torch.nn as nn
"""
The modules/function here implement custom versions of batch normalization in PyTorch.
In contrast to more advanced implementations no use of a running mean/variance is made.
You should fill in code into indicated sections.
"""
######################################################################################
# Code for Question 3.1
######################################################################################
class CustomBatchNormAutograd(nn.Module):
"""
This nn.module implements a custom version of the batch norm operation for MLPs.
The operations called in self.forward track the history if the input tensors have the
flag requires_grad set to True. The backward pass does not need to be implemented, it
is dealt with by the automatic differentiation provided by PyTorch.
"""
def __init__(self, n_neurons, eps=1e-5):
"""
Initializes CustomBatchNormAutograd object.
Args:
n_neurons: int specifying the number of neurons
eps: small float to be added to the variance for stability
TODO:
Save parameters for the number of neurons and eps.
Initialize parameters gamma and beta via nn.Parameter
"""
super(CustomBatchNormAutograd, self).__init__()
########################
# PUT YOUR CODE HERE #
#######################
self.n_neurons = n_neurons
self.eps = eps
self.gamma = torch.nn.Parameter(torch.ones(n_neurons))
self.beta = torch.nn.Parameter(torch.zeros(n_neurons))
########################
# END OF YOUR CODE #
#######################
def forward(self, input):
"""
Compute the batch normalization
Args:
input: input tensor of shape (n_batch, n_neurons)
Returns:
out: batch-normalized tensor
TODO:
Check for the correctness of the shape of the input tensor.
Implement batch normalization forward pass as given in the assignment.
For the case that you make use of torch.var be aware that the flag unbiased=False should be set.
"""
########################
# PUT YOUR CODE HERE #
#######################
assert input.shape[1] == self.n_neurons, "The shape of the input tensor is not correct."
# compute mean
mean = input.mean(dim=0)
# compute variance
var = input.var(dim=0, unbiased=False)
# normalize
input_norm = (input-mean)/(torch.sqrt(var + self.eps))
# scale and shift
out = self.gamma*input_norm + self.beta
########################
# END OF YOUR CODE #
#######################
return out
######################################################################################
# Code for Question 3.2 b)
######################################################################################
class CustomBatchNormManualFunction(torch.autograd.Function):
"""
This torch.autograd.Function implements a functional custom version of the batch norm operation for MLPs.
Using torch.autograd.Function allows you to write a custom backward function.
The function will be called from the nn.Module CustomBatchNormManualModule
Inside forward the tensors are (automatically) not recorded for automatic differentiation since the backward
pass is done via the backward method.
The forward pass is not called directly but via the apply() method. This makes sure that the context objects
are dealt with correctly. Example:
my_bn_fct = CustomBatchNormManualFunction()
normalized = fct.apply(input, gamma, beta, eps)
"""
@staticmethod
def forward(ctx, input, gamma, beta, eps=1e-5):
"""
Compute the batch normalization
Args:
          ctx: context object handling storing and retrieval of tensors and constants and specifying
whether tensors need gradients in backward pass
input: input tensor of shape (n_batch, n_neurons)
          gamma: variance scaling tensor, applied per neuron, shape (n_neurons)
          beta: mean bias tensor, applied per neuron, shape (n_neurons)
eps: small float added to the variance for stability
Returns:
out: batch-normalized tensor
TODO:
Implement the forward pass of batch normalization
Store constant non-tensor objects via ctx.constant=myconstant
Store tensors which you need in the backward pass via ctx.save_for_backward(tensor1, tensor2, ...)
          Intermediate results can either be recomputed in the backward pass or stored now for reuse there.
          Do not store tensors which are unnecessary for the backward pass to save memory!
For the case that you make use of torch.var be aware that the flag unbiased=False should be set.
"""
########################
# PUT YOUR CODE HERE #
#######################
####### Forward pass of batch normalization ######
# In this section, we have to perform the forward pass of batch normalization
# with more intermediate steps, since we want to propagate error terms.
        # To illustrate it better, we begin from the bottom and work our way to the top.
        # In that way, every function is unfolded step by step.
# Step 3.2.3: Calculate variance
var = input.var(dim=0, unbiased=False)
# Step 3.2.2: add eps for numerical stability, then sqrt
sqrt_var = torch.sqrt(var + eps)
        # Step 3.2: invert sqrt_var
inv_sqrt_var = 1./sqrt_var
# Step 3.1.1: Calculate mean
mean = input.mean(dim=0)
        # Step 3.1: subtract the mean vector from every training example
input_mean = input - mean
# Step 3 - Execute normalization
input_norm = input_mean * inv_sqrt_var
        # Step 2: scale by gamma
        scaled_input_norm = gamma * input_norm
        # Step 1: shift by beta
        out = scaled_input_norm + beta
#################################################
# store tensors and non-tensorial constants
ctx.save_for_backward(gamma, inv_sqrt_var, mean, input)
        ctx.eps = eps
########################
# END OF YOUR CODE #
#######################
return out
@staticmethod
def backward(ctx, grad_output):
"""
Compute backward pass of the batch normalization.
Args:
          ctx: context object handling storing and retrieval of tensors and constants and specifying
whether tensors need gradients in backward pass
Returns:
out: tuple containing gradients for all input arguments
TODO:
Retrieve saved tensors and constants via ctx.saved_tensors and ctx.constant
Compute gradients for inputs where ctx.needs_input_grad[idx] is True. Set gradients for other
inputs to None. This should be decided dynamically.
"""
########################
# PUT YOUR CODE HERE #
#######################
# Retrieve saved tensors and constants
gamma, ivar, mean, input = ctx.saved_tensors
        eps = ctx.eps  # non-tensor constant stored in forward (not needed below)
# Check which inputs need gradients
input_needs_grad, gamma_needs_grad, beta_needs_grad = ctx.needs_input_grad
# Get the batch size (=N)
N, _ = grad_output.shape
# reconstruct the input_norm
input_norm = (input - mean) * ivar
        grad_input_norm = grad_output * gamma  # gradient w.r.t. the normalized input (dL/dxhat)
##### Gradient wrt beta #####
grad_beta = grad_output.sum(dim=0) if beta_needs_grad else None
#### Gradient wrt gamma ####
grad_gamma = (input_norm*grad_output).sum(dim=0) if gamma_needs_grad else None
#### Gradient wrt input ####
        term1 = N*grad_input_norm
        term2 = torch.sum(grad_input_norm, dim=0)
        term3 = input_norm*torch.sum(grad_input_norm*input_norm, dim=0)
grad_input = (1. / N) * ivar * (term1 - term2 - term3) if input_needs_grad else None
########################
# END OF YOUR CODE #
#######################
# return gradients of the three tensor inputs and None for the constant eps
return grad_input, grad_gamma, grad_beta, None
######################################################################################
# Code for Question 3.2 c)
######################################################################################
class CustomBatchNormManualModule(nn.Module):
"""
This nn.module implements a custom version of the batch norm operation for MLPs.
In self.forward the functional version CustomBatchNormManualFunction.forward is called.
The automatic differentiation of PyTorch calls the backward method of this function in the backward pass.
"""
def __init__(self, n_neurons, eps=1e-5):
"""
Initializes CustomBatchNormManualModule object.
Args:
n_neurons: int specifying the number of neurons
eps: small float to be added to the variance for stability
TODO:
Save parameters for the number of neurons and eps.
Initialize parameters gamma and beta via nn.Parameter
"""
super(CustomBatchNormManualModule, self).__init__()
########################
# PUT YOUR CODE HERE #
#######################
self.n_neurons = n_neurons
self.eps = eps
self.gamma = torch.nn.Parameter(torch.ones(n_neurons))
self.beta = torch.nn.Parameter(torch.zeros(n_neurons))
########################
# END OF YOUR CODE #
#######################
def forward(self, input):
"""
Compute the batch normalization via CustomBatchNormManualFunction
Args:
input: input tensor of shape (n_batch, n_neurons)
Returns:
out: batch-normalized tensor
TODO:
Check for the correctness of the shape of the input tensor.
Instantiate a CustomBatchNormManualFunction.
Call it via its .apply() method.
"""
########################
# PUT YOUR CODE HERE #
#######################
assert input.shape[1] == self.n_neurons, "The shape of the input tensor is not correct."
bn_fct = CustomBatchNormManualFunction()
out = bn_fct.apply(input, self.gamma, self.beta, self.eps)
########################
# END OF YOUR CODE #
#######################
return out
```
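A quick way to exercise the classes above is to compare the manual Function against the autograd module and let `torch.autograd.gradcheck` verify the hand-written backward pass. This is a minimal sketch, assuming the file above is importable as `custom_batchnorm`:
```python
# Sanity check for the custom batch norm implementations above (illustrative only).
import torch
from custom_batchnorm import CustomBatchNormAutograd, CustomBatchNormManualFunction

torch.manual_seed(0)
x = torch.randn(8, 4, dtype=torch.double, requires_grad=True)
gamma = torch.rand(4, dtype=torch.double, requires_grad=True)
beta = torch.rand(4, dtype=torch.double, requires_grad=True)

# Forward agreement with the autograd-based nn.Module version.
auto_bn = CustomBatchNormAutograd(4).double()
with torch.no_grad():
    auto_bn.gamma.copy_(gamma)
    auto_bn.beta.copy_(beta)
manual_out = CustomBatchNormManualFunction.apply(x, gamma, beta, 1e-5)
print(torch.allclose(manual_out, auto_bn(x)))  # should print True if both agree

# gradcheck compares the analytic backward pass against finite differences.
fn = lambda a, b, c: CustomBatchNormManualFunction.apply(a, b, c, 1e-5)
print(torch.autograd.gradcheck(fn, (x, gamma, beta)))
```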
#### File: 2-RNNs/part1/lstm.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
import math
################################################################################
class LSTM(nn.Module):
def __init__(self, seq_length, input_dim, num_hidden, num_classes, batch_size, device='cpu'):
super(LSTM, self).__init__()
# Initialization here ...
self.seq_length = seq_length
self.input_dim = input_dim
# Initialize cell memory layer and hidden layer
self.h_init = nn.Parameter(torch.zeros(num_hidden, batch_size), requires_grad=False)
self.c_init = nn.Parameter(torch.zeros(num_hidden, batch_size), requires_grad=False)
stdv_h = 1.0 / math.sqrt(num_hidden)
stdv_c = 1.0 / math.sqrt(num_classes)
# LSTM parameters initialization w/ Xavier uniform
self.W_gx = torch.nn.Parameter(torch.randn(num_hidden, input_dim).uniform_(-stdv_h, stdv_h))
self.W_gh = torch.nn.Parameter(torch.randn(num_hidden, num_hidden).uniform_(-stdv_h, stdv_h))
self.W_ix = torch.nn.Parameter(torch.randn(num_hidden, input_dim).uniform_(-stdv_h, stdv_h))
self.W_ih = torch.nn.Parameter(torch.randn(num_hidden, num_hidden).uniform_(-stdv_h, stdv_h))
self.W_fx = torch.nn.Parameter(torch.randn(num_hidden, input_dim).uniform_(-stdv_h, stdv_h))
self.W_fh = torch.nn.Parameter(torch.randn(num_hidden, num_hidden).uniform_(-stdv_h, stdv_h))
self.W_ox = torch.nn.Parameter(torch.randn(num_hidden, input_dim).uniform_(-stdv_h, stdv_h))
self.W_oh = torch.nn.Parameter(torch.randn(num_hidden, num_hidden).uniform_(-stdv_h, stdv_h))
# LSTM biases initialization w/ zeros
self.b_g = torch.nn.Parameter(torch.zeros(num_hidden))
self.b_i = torch.nn.Parameter(torch.zeros(num_hidden))
self.b_f = torch.nn.Parameter(torch.zeros(num_hidden))
self.b_o = torch.nn.Parameter(torch.zeros(num_hidden))
# From hidden to output parameters and biases initialization w/ Xavier uniform and w/ zeros respectively
self.W_ph = torch.nn.Parameter(torch.randn(num_classes, num_hidden).uniform_(-stdv_c, stdv_c))
self.b_p = torch.nn.Parameter(torch.zeros(num_classes, 1))
def forward(self, x):
"""
x is (batch, input_size) --> (input_size x batch)
hx is ((batch, hidden_size), (batch, hidden_size))
"""
# x.shape: (batch x input_size) --> (input_size x batch)
x = x.permute(1, 0)
# Initialize hidden and cell states
h = self.h_init
c = self.c_init
        # Recurrent pass
for step in range(self.seq_length):
# Compute gates
g = nn.Tanh()(self.W_gx @ x[step, :].unsqueeze(0) + self.W_gh @ h + self.b_g)
i = nn.Sigmoid()(self.W_ix @ x[step, :].unsqueeze(0) + self.W_ih @ h + self.b_i)
f = nn.Sigmoid()(self.W_fx @ x[step, :].unsqueeze(0) + self.W_fh @ h + self.b_f)
o = nn.Sigmoid()(self.W_ox @ x[step, :].unsqueeze(0) + self.W_oh @ h + self.b_o)
# Update hidden and cell layers
c = g*i + c*f
h = nn.Tanh()(c)*o
# Calculate predictions
p = self.W_ph @ h + self.b_p
return p.t()
```
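For context, a minimal forward pass through the hand-rolled LSTM above (a sketch only, assuming the file is importable as `lstm` and that `input_dim == 1`, as in the palindrome task it was written for):
```python
# Illustrative usage of the LSTM above on one batch of digit sequences.
import torch
from lstm import LSTM

batch_size, seq_length = 4, 10
model = LSTM(seq_length=seq_length, input_dim=1, num_hidden=128,
             num_classes=10, batch_size=batch_size)

# Shape (batch, seq_length); forward() permutes this to (seq_length, batch) and
# unrolls the gated recurrence step by step.
x = torch.randint(0, 10, (batch_size, seq_length)).float()
logits = model(x)
print(logits.shape)  # expected (batch, num_classes), i.e. torch.Size([4, 10])
```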
#### File: 2-RNNs/part1/train.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import time
from datetime import datetime
import numpy as np
import torch
from torch.utils.data import DataLoader
from dataset import PalindromeDataset
from vanilla_rnn import VanillaRNN
from lstm import LSTM
# You may want to look into tensorboardX for logging
# from tensorboardX import SummaryWriter
################################################################################
################################ Accuracy ######################################
def accuracy(predictions, targets):
""" Computes the prediction accuracy, i.e. the average of correct predictions
of the network.
"""
    # Get the top probability and the predicted class for each example
top_p, top_class = predictions.topk(1, dim=1)
# Check if the predicted classes match the labels
equals = top_class == targets.view(*top_class.shape)
# Calculate the percentage of correct predictions
accuracy = torch.mean(equals.type(torch.FloatTensor)).item()
return accuracy
################################################################################
################################## Train #######################################
def train(config):
assert config.model_type in ('RNN', 'LSTM')
# Initialize the device which to run the model on
# if GPU was chosen, check if CUDA is available
if str(config.device) != "cpu":
if not torch.cuda.is_available():
print('\n* GPU was selected but CUDA is not available.\nTraining on CPU ...')
device = torch.device("cpu")
else:
print('\nCUDA is available! Training on GPU ...')
device = torch.device(config.device)
else:
        print('\nTraining on CPU ...')
device = torch.device(config.device)
# Initialize the model that we are going to use
if config.model_type == 'RNN':
model = VanillaRNN(config.input_length, config.input_dim,
config.num_hidden, config.num_classes, config.batch_size, device)
else:
model = LSTM(config.input_length, config.input_dim,
config.num_hidden, config.num_classes, config.batch_size, device)
# Print Configuration
print("Model Type: {!s:5} Input Length: {!s:5} Learning Rate: {}\n"
.format(config.model_type, config.input_length, config.learning_rate))
# Initialize model
model = torch.nn.DataParallel(model).to(device)
# Initialize the dataset and data loader (note the +1)
dataset = PalindromeDataset(config.input_length+1)
data_loader = DataLoader(dataset, config.batch_size, num_workers=1)
# Setup the loss and optimizer
criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.RMSprop(model.parameters(), lr=config.learning_rate)
train_loss, train_accuracy, train_steps = [], [], []
# Enable train mode
model.train()
for step, (batch_inputs, batch_targets) in enumerate(data_loader):
# Only for time measurement of step through network
t1 = time.time()
# move tensors to GPU, if enabled
batch_targets = batch_targets.long().to(device)
batch_inputs = batch_inputs.to(device)
# Forward pass
predictions = model(batch_inputs)
# Calculate loss
loss = criterion(predictions, batch_targets)
# Back-propagate
loss.backward()
############################################################################
# QUESTION: what happens here and why?
# ANSWER: `clip_grad_norm` helps prevent the exploding gradient problem in RNNs / LSTMs.
# ref: https://medium.com/usf-msds/deep-learning-best-practices-1-weight-initialization-14e5c0295b94
############################################################################
        torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=config.max_norm)
############################################################################
# Update weights
optimizer.step()
# Clear weights gradients
optimizer.zero_grad()
# Just for time measurement
t2 = time.time()
examples_per_second = config.batch_size/float(t2-t1)
if step % 10 == 0:
# Store accuracy and loss
train_steps.append(step)
train_loss.append(loss.item())
train_accuracy.append(accuracy(predictions, batch_targets))
if step % 100 == 0:
print("[{}] Train Step {:04d}/{:04d}, Batch Size = {}, Examples/Sec = {:.2f}, "
"Accuracy = {:.2f}, Loss = {:.3f}".format(
datetime.now().strftime("%Y-%m-%d %H:%M"), step,
config.train_steps, config.batch_size, examples_per_second,
train_accuracy[-1], train_loss[-1]))
if step == config.train_steps:
# If you receive a PyTorch data-loader error, check this bug report:
# https://github.com/pytorch/pytorch/pull/9655
# Save Train and Test accuracies and losses
file_name = str(config.model_type) + '_' + str(config.input_length) + '.npz'
np.savez(file_name,
train_steps=train_steps,
train_accuracy=train_accuracy,
model_type=config.model_type,
input_length=config.input_length)
break
print('Done training.')
################################################################################
if __name__ == "__main__":
# Parse training configuration
parser = argparse.ArgumentParser()
# Model params
parser.add_argument('--model_type', type=str, default="RNN", help="Model type, should be 'RNN' or 'LSTM'")
parser.add_argument('--input_length', type=int, default=20, help='Length of an input sequence')
parser.add_argument('--input_dim', type=int, default=1, help='Dimensionality of input sequence')
parser.add_argument('--num_classes', type=int, default=10, help='Dimensionality of output sequence')
parser.add_argument('--num_hidden', type=int, default=128, help='Number of hidden units in the model')
parser.add_argument('--batch_size', type=int, default=128, help='Number of examples to process in a batch')
parser.add_argument('--learning_rate', type=float, default=0.001, help='Learning rate')
parser.add_argument('--train_steps', type=int, default=10000, help='Number of training steps')
parser.add_argument('--max_norm', type=float, default=10.0)
parser.add_argument('--device', type=str, default="cuda:0", help="Training device 'cpu' or 'cuda:0'")
config = parser.parse_args()
# Train the model
train(config)
```
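To make the `accuracy()` helper above concrete, here is a standalone rerun of its logic on a tiny batch (same operations, no repository imports needed):
```python
import torch

preds = torch.tensor([[0.1, 0.9], [0.8, 0.2], [0.3, 0.7]])  # (batch, num_classes) scores
targets = torch.tensor([1, 0, 0])                            # ground-truth class indices

top_p, top_class = preds.topk(1, dim=1)                      # predicted class per example
equals = top_class == targets.view(*top_class.shape)
print(torch.mean(equals.type(torch.FloatTensor)).item())     # 2 of 3 correct -> ~0.667
```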
#### File: 2-RNNs/part2/model.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
class TextGenerationModel(nn.Module):
def __init__(self, batch_size, seq_length, vocabulary_size,
lstm_num_hidden=256, lstm_num_layers=2, drop_prob=0.5, device='cuda:0'):
super(TextGenerationModel, self).__init__()
# Initialization here...
        # Save model configs
self.batch_size = batch_size
self.seq_length = seq_length
self.vocabulary_size = vocabulary_size
self.lstm_num_hidden = lstm_num_hidden
self.lstm_num_layers = lstm_num_layers
self.drop_prob = drop_prob
self.device = device
# Define the LSTM
self.lstm = nn.LSTM(input_size=vocabulary_size,
hidden_size=lstm_num_hidden,
num_layers=lstm_num_layers,
dropout=drop_prob,
batch_first=True)
# Define the Fully Connected output layer
self.fc = nn.Linear(lstm_num_hidden, vocabulary_size)
def one_hot_encode(self, x, vocab):
        # Initialize the encoded tensor
one_hot = torch.zeros((torch.mul(*x.shape), vocab), dtype=torch.float32).to(self.device)
# Fill the appropriate elements with ones
one_hot[torch.arange(one_hot.shape[0]), x.flatten()] = 1.
# Finally reshape it to get back to the original tensor
one_hot = one_hot.reshape((*x.shape, vocab))
return one_hot
def init_hidden(self, batch_size):
'''
Initializes hidden state.
Create two new tensors with sizes n_layers x batch_size x n_hidden,
initialized to zero, for hidden state and cell state of LSTM.
# Within the batch loop, we detach the hidden state from its history;
# this time setting it equal to a new tuple variable because an LSTM has
# a hidden state that is a tuple of the hidden and cell states.
Comments:
'next': returns the first parameter from the class.
'new' : constructs a new tensor of the same data type (as the first parameter).
'''
weight = next(self.parameters()).data
hidden = (weight.new(self.lstm_num_layers, batch_size, self.lstm_num_hidden).zero_().to(self.device),
weight.new(self.lstm_num_layers, batch_size, self.lstm_num_hidden).zero_().to(self.device))
return hidden
def forward(self, x):
""" Forward pass of model
----------------------
x.shape = batch x sequ x vocabulary_size --> 128x50x83
lstm_output.shape = batch x sequ x hidden
"""
if self.training:
# Initialized lstm hidden layers to zero, for hidden state and cell state of LSTM.
# Otherwise we'd backprop through the entire training history
self.lstm_hidden = self.init_hidden(x.shape[0])
# x to one-hot vector
x = self.one_hot_encode(x, self.vocabulary_size)
# Recurrent pass
lstm_output, self.lstm_hidden = self.lstm(x, self.lstm_hidden)
# Stack LSTM output
lstm_output = lstm_output.contiguous().view(-1, self.lstm_num_hidden)
# Forward pass from the fc layer
predictions = self.fc(lstm_output)
return predictions
```
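A minimal training-mode forward pass through the model above (a sketch, assuming the file is importable as `model`; vocabulary size, batch size and sequence length are made up for the example):
```python
import torch
from model import TextGenerationModel

batch_size, seq_length, vocab = 8, 30, 83
net = TextGenerationModel(batch_size, seq_length, vocab, device='cpu')
net.train()  # in training mode forward() (re)initializes the hidden state itself

tokens = torch.randint(0, vocab, (batch_size, seq_length))   # integer character indices
logits = net(tokens)                                         # one-hot encoded internally
print(logits.shape)  # expected (batch * seq_length, vocab) after the view + fc layer
```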
#### File: 3-Generative_models/code/unittests.py
```python
import unittest
import numpy as np
import torch
from a3_nf_template import Coupling, Flow, get_mask
def mean_error(x, y):
return np.mean(np.abs(x - y))
def mean_rel_error(x, y):
return np.mean(np.abs(x - y) / (np.maximum(1e-8, np.abs(x) + np.abs(y))))
def f_layer(layer, x, logdet):
with torch.no_grad():
z, logdet = layer(x, logdet, reverse=False)
recon, logdet = layer(z, logdet, reverse=True)
x, recon, logdet = x.numpy(), recon.numpy(), logdet.numpy()
return x, recon, logdet
class TestLayers(unittest.TestCase):
def test_flow(self):
np.random.seed(42)
error_max = 1e-5
for test_num in range(10):
N = np.random.choice(range(1, 20))
C = 784
x = torch.randn(N, C)
logdet = torch.zeros(N)
layer = Flow([C], n_flows=2)
x, recon, logdet = f_layer(layer, x, logdet)
self.assertLess(mean_rel_error(x, recon), error_max)
self.assertLess(mean_error(logdet, np.zeros(N)), error_max)
def test_coupling(self):
np.random.seed(42)
error_max = 1e-5
for test_num in range(10):
N = np.random.choice(range(1, 20))
C = 784
x = torch.randn(N, C)
logdet = torch.zeros(N)
layer = Coupling(c_in=C, mask=get_mask())
x, recon, logdet = f_layer(layer, x, logdet)
self.assertLess(mean_rel_error(x, recon), error_max)
self.assertLess(mean_error(logdet, np.zeros(N)), error_max)
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestLayers)
unittest.TextTestRunner(verbosity=2).run(suite)
``` |
{
"source": "johngatop/sudoku_SAT",
"score": 4
} |
#### File: johngatop/sudoku_SAT/check_sudoku.py
```python
def check_sudoku(true_vars):
"""
Check sudoku.
:param true_vars: List of variables that your system assigned as true. Each var should be in the form of integers.
    :return: True if the encoded grid is a valid Sudoku solution, False otherwise.
"""
import math as m
s = []
row = []
for i in range(len(true_vars)):
row.append(str(int(true_vars[i]) % 10))
if (i + 1) % 9 == 0:
s.append(row)
row = []
correct = True
for i in range(len(s)):
for j in range(len(s[0])):
for x in range(len(s)):
if i != x and s[i][j] == s[x][j]:
correct = False
print("Repeated value in column:", j)
for y in range(len(s[0])):
if j != y and s[i][j] == s[i][y]:
correct = False
print("Repeated value in row:", i)
top_left_x = int(i-i%m.sqrt(len(s)))
top_left_y = int(j-j%m.sqrt(len(s)))
for x in range(top_left_x, top_left_x + int(m.sqrt(len(s)))):
for y in range(top_left_y, top_left_y + int(m.sqrt(len(s)))):
if i != x and j != y and s[i][j] == s[x][y]:
correct = False
print("Repeated value in cell:", (top_left_x, top_left_y))
return correct
```
#### File: johngatop/sudoku_SAT/read_files.py
```python
def read_DIMACS_file(file):
with open(file, 'r') as f:
lines = f.readlines()
# Remove '\n', zeroes, last char and make a list out of it
for i in range(len(lines)):
lines[i] = lines[i].rstrip()[0:-1].split(" ")
del lines[i][-1]
return lines
def init_database(rules):
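    # Builds the solver's bookkeeping structures from the clause list:
    #   rules_dict:    clause index -> {literal: assignment}, all assignments start as '?'
    #   literals_dict: abs(literal) -> [assignment, set of clause indices containing it]
    #   truth_values:  literals taken from unit clauses, which can be assigned immediately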
# pop the element with 'p' and 'cnf' values
if rules[0][0] == 'p':
rules.pop(0)
rules_dict, disjunction, literals_dict = {}, {}, {}
truth_values = set()
assign = '?' # we are going to make them all unknowns initially
for idx, clause in enumerate(rules):
for unknowns, literal in enumerate(clause):
temp_set = set()
literal = int(literal)
disjunction[literal] = assign
            literal = abs(literal)  # index by absolute value so a literal and its negation share one entry
try: # if it was already in the dictionary
assign, temp_set = literals_dict[literal]
temp_set.add(idx)
            except KeyError:  # if it was not there yet, create the entry
temp_set.add(idx)
literals_dict[literal] = [assign, temp_set]
rules_dict[idx] = disjunction
if len(disjunction) == 1:
truth_values.add(literal)
if literal > 0:
literals_dict[literal][0] = '1'
else:
literals_dict[-literal][0] = '0'
rules_dict[idx] = disjunction
disjunction = dict()
return rules_dict, literals_dict, truth_values
############################# READ SUDOKU IN DIMACS #################################
def read_sudoku_DIMACS_file(file):
truth_values = set()
with open(file, 'r') as f:
lines = f.readlines()
# Remove '\n', zeroes, last char and make a list out of it
for i in range(len(lines)):
lines[i] = lines[i].rstrip().replace("0", "")[0:-1].split(" ")
truth_values.add(int(lines[i][0]))
return truth_values
############################# READ SUDOKU IN TXT (DOTS) #############################
def read_sudokus_file(file):
truth_values = set()
with open(file, 'r') as f:
truth_values = dict()
truth_values[1] = set()
lines = f.readlines()
# Remove '\n', zeroes, last char and make a list out of it
k = 1 # no. of sudoku
for i in range(len(lines)):
truth_values[k] = set()
sudoku = lines[i].rstrip()
i, j = 1, 1
for literal in sudoku:
if literal != '.':
truth_values[k].add(i*100 + j*10 + int(literal))
j+=1
if j == 10:
j=1
i+=1
k+=1
return truth_values
``` |
{
"source": "john-gaughan/unfurl",
"score": 2
} |
#### File: unfurl/tests/test_conditional_imports.py
```python
from click.testing import CliRunner
from unfurl.job import JobOptions, Runner
from unfurl.yamlmanifest import YamlManifest
from unfurl.localenv import LocalEnv
from .utils import init_project, print_config
ensemble = """
apiVersion: unfurl/v1alpha1
kind: Ensemble
spec:
service_template:
imports:
- file: ./gcp.yaml
when: .primary_provider[type=unfurl.relationships.ConnectsTo.GoogleCloudProject]
- file: ./aws.yaml
when: .primary_provider[type=unfurl.relationships.ConnectsTo.AWSAccount]
"""
aws_import = """
node_types:
aws:
derived_from: tosca:Root
"""
gcp_import = """
node_types:
gcp:
derived_from: tosca:Root
"""
def test_conditional_imports():
cli_runner = CliRunner()
with cli_runner.isolated_filesystem():
init_project(
cli_runner,
args=["init", "--mono", "--template=aws"],
env=dict(UNFURL_HOME=""),
)
with open("ensemble-template.yaml", "w") as f:
f.write(ensemble)
with open("aws.yaml", "w") as f:
f.write(aws_import)
with open("gcp.yaml", "w") as f:
f.write(gcp_import)
manifest = YamlManifest(localEnv=LocalEnv(".", homePath="./unfurl_home"))
# print_config(".")
assert "aws" in manifest.tosca.template.topology_template.custom_defs
assert "gcp" not in manifest.tosca.template.topology_template.custom_defs
```
#### File: unfurl/tests/test_decorators.py
```python
import unittest
from click.testing import CliRunner
from unfurl.yamlmanifest import YamlManifest
from unfurl.eval import Ref, map_value, RefContext
# expressions evaluate on tosca nodespecs (ignore validation errors)
# a compute instant that supports cloudinit and hosts a DockerComposeApp
# root __reflookup__ matches node templates by compatible type or template name
# nodes match relationships by requirement names
# relationships match source by compatible type or template name
class DecoratorTest(unittest.TestCase):
def test_decorator(self):
cliRunner = CliRunner()
with cliRunner.isolated_filesystem():
path = __file__ + "/../examples/decorators-ensemble.yaml"
manifest = YamlManifest(path=path)
ctx = RefContext(manifest.tosca.topology)
result1 = Ref("my_server::dependency::tosca.nodes.Compute").resolve(ctx)
self.assertEqual("my_server", result1[0].name)
self.assertEqual(
{"foo": "bar", "test": "annotated"},
manifest.tosca.nodeTemplates["my_server"].properties,
)
for name in ["anode", "anothernode"]:
node = manifest.tosca.nodeTemplates[name]
self.assertEqual(
{"ports": [], "private_address": "annotated", "imported": "foo"},
node.properties,
)
assert {"foo": "bar"} == (
manifest.tosca.template.tpl["topology_template"]["node_templates"][
"node3"
]["requirements"][0]["a_connection"]["relationship"]["properties"]
)
```
#### File: unfurl/tests/test_docker_cmd.py
```python
import getpass
import os
from pathlib import Path
from unfurl import __version__, version_tuple
from unfurl.__main__ import DockerCmd
class TestDockerCmd:
def test_parse_image(self):
assert DockerCmd.parse_image("docker", "0.2.1") == "onecommons/unfurl:0.2.1"
assert (
DockerCmd.parse_image("docker:unfurl_local", "0.2.1")
== "unfurl_local:0.2.1"
)
assert (
DockerCmd.parse_image("docker:onecommons/unfurl:0.2.0", "0.2.1")
== "onecommons/unfurl:0.2.0"
)
assert (
DockerCmd.parse_image("docker --privileged", "0.2.1")
== "onecommons/unfurl:0.2.1"
)
def test_parse_docker_arrgs(self):
assert DockerCmd.parse_docker_args("docker") == []
assert DockerCmd.parse_docker_args("docker:unfurl_local") == []
assert DockerCmd.parse_docker_args("docker:onecommons/unfurl:0.2.0") == []
assert DockerCmd.parse_docker_args("docker --privileged") == ["--privileged"]
assert DockerCmd.parse_docker_args("docker --privileged -e A=B") == [
"--privileged",
"-e",
"A=B",
]
def test_build(self, monkeypatch):
monkeypatch.setattr(os, "getuid", lambda: 1000)
monkeypatch.setattr(os, "getgid", lambda: 1000)
monkeypatch.setattr(getpass, "getuser", lambda: "joe")
monkeypatch.setattr(Path, "home", lambda: "/home/joe")
monkeypatch.setattr(Path, "cwd", lambda: "/home/joe/project")
cmd = DockerCmd("docker --privileged", {"ANSWER": 42}).build()
tag = "latest" if len(version_tuple()) > 3 else __version__()
assert (
" ".join(cmd)
== "docker run --rm -w /data -u 1000:1000 -e HOME=/home/joe -e USER=joe -e ANSWER=42 "
"-v /home/joe/project:/data -v /home/joe:/home/joe "
"-v /var/run/docker.sock:/var/run/docker.sock "
f"--privileged onecommons/unfurl:{tag} unfurl --no-runtime "
f"--version-check {__version__(True)}"
)
```
#### File: unfurl/tests/test_octodns.py
```python
from pathlib import Path
import os
from unittest.mock import patch
from moto import mock_route53
from unfurl.job import JobOptions, Runner
from unfurl.support import Status
from unfurl.yamlmanifest import YamlManifest
from .utils import lifecycle, DEFAULT_STEPS, Step
class TestOctoDnsConfigurator:
@mock_route53
def test_configure(self):
runner = Runner(YamlManifest(ENSEMBLE_ROUTE53))
job = runner.run(JobOptions(workflow="deploy"))
assert job.status == Status.ok
assert not job.unexpectedAbort, job.unexpectedAbort.get_stack_trace()
node = job.rootResource.find_resource("test_node")
assert node.attributes["zone"][""]["type"] == "A"
assert node.attributes["zone"][""]["values"] == [
"2.3.4.5",
"2.3.4.6",
]
assert node.attributes["zone"]["www"]["values"] == [
"2.3.4.5",
"2.3.4.6",
]
@mock_route53
def test_relationships(self):
runner = Runner(YamlManifest(ENSEMBLE_WITH_RELATIONSHIPS))
job = runner.run(JobOptions(workflow="deploy"))
assert job.status == Status.ok
assert not job.unexpectedAbort, job.unexpectedAbort.get_stack_trace()
node = job.rootResource.find_resource("test_zone")
assert node
assert node.attributes["zone"]["www"]["type"] == "A"
assert node.attributes["zone"]["www"]["value"] == "10.10.10.1"
assert node.attributes["managed_records"]["www"]["value"] == "10.10.10.1"
        # if the compute ip address changes (here via check), the zone should be updated
try:
os.environ["OCTODNS_TEST_IP"] = "10.10.10.2"
job = runner.run(JobOptions(workflow="check"))
finally:
del os.environ["OCTODNS_TEST_IP"]
assert job.status == Status.ok
assert not job.unexpectedAbort, job.unexpectedAbort.get_stack_trace()
compute = job.rootResource.find_resource("compute")
assert compute
assert compute.attributes["public_address"] == "10.10.10.2"
node = job.rootResource.find_resource("test_zone")
assert node.status == Status.error # it's now out of sync
assert node.attributes["zone"]["www"]["value"] == "10.10.10.1"
assert node.attributes["managed_records"]["www"]["value"] == "10.10.10.2"
job = runner.run(JobOptions(workflow="undeploy"))
assert job.status == Status.ok
assert not job.unexpectedAbort, job.unexpectedAbort.get_stack_trace()
node = job.rootResource.find_resource("test_zone")
assert dict(node.attributes["zone"]) == {}
@mock_route53
def test_lifecycle_relationships(self):
manifest = YamlManifest(ENSEMBLE_WITH_RELATIONSHIPS)
steps = list(DEFAULT_STEPS)
# steps[0] = Step("check", Status.ok)
jobs = lifecycle(manifest, steps)
for job in jobs:
assert job.status == Status.ok, job.workflow
@mock_route53
def test_delete(self):
runner = Runner(YamlManifest(ENSEMBLE_ROUTE53))
job = runner.run(JobOptions(workflow="deploy"))
assert job.status == Status.ok
assert not job.unexpectedAbort, job.unexpectedAbort.get_stack_trace()
node = job.rootResource.find_resource("test_node")
assert node and len(node.attributes["zone"]) == 2
job = runner.run(JobOptions(workflow="undeploy"))
assert job.status == Status.ok
assert not job.unexpectedAbort, job.unexpectedAbort.get_stack_trace()
node = job.rootResource.find_resource("test_node")
assert dict(node.attributes["zone"]) == {}
@mock_route53
def test_check(self):
runner = Runner(YamlManifest(ENSEMBLE_ROUTE53))
runner.run(JobOptions(workflow="deploy"))
job = runner.run(JobOptions(workflow="check"))
assert job.status == Status.ok
task = list(job.workDone.values())[0]
# this means that dns records were correctly set during deploy:
assert task.target_status == Status.ok
assert task.result.result == "DNS records in sync"
@mock_route53
def test_lifecycle(self):
manifest = YamlManifest(ENSEMBLE_ROUTE53)
jobs = lifecycle(manifest)
for job in jobs:
assert job.status == Status.ok, job.workflow
@patch("unfurl.configurators.dns.Manager.sync")
def test_exclusive(self, manager_sync):
runner = Runner(YamlManifest(ENSEMBLE_EXCLUSIVE))
job = runner.run(JobOptions(workflow="deploy"))
assert job.status == Status.ok
node = job.rootResource.find_resource("test_node")
# records are replaced by instance
assert len(node.attributes["zone"]) == 1
assert manager_sync.called
@mock_route53
def test_lifecycle_exclusive(self):
manifest = YamlManifest(
ENSEMBLE_ROUTE53.replace("exclusive: false", "exclusive: true")
)
jobs = lifecycle(manifest)
for job in jobs:
assert job.rootResource.find_resource("test_node").attributes["exclusive"]
assert job.status == Status.ok, job.workflow
DNS_FIXTURE = Path(__file__).parent / "fixtures" / "dns"
ENSEMBLE_ROUTE53 = """
apiVersion: unfurl/v1alpha1
kind: Ensemble
spec:
service_template:
imports:
- repository: unfurl
file: configurators/dns-template.yaml
topology_template:
node_templates:
test_node:
type: unfurl.nodes.DNSZone
properties:
name: test-domain.com.
exclusive: false
provider:
class: octodns.provider.route53.Route53Provider
access_key_id: my_AWS_ACCESS_KEY_ID
secret_access_key: my_AWS_SECRET_ACCESS_KEY
records:
'':
ttl: 60
type: A
values:
- 172.16.58.3
- 192.168.127.12
www:
type: A
values:
- 172.16.58.3
- 192.168.127.12
"""
ENSEMBLE_EXCLUSIVE = f"""
apiVersion: unfurl/v1alpha1
kind: Ensemble
spec:
service_template:
imports:
- repository: unfurl
file: configurators/dns-template.yaml
topology_template:
node_templates:
test_node:
type: unfurl.nodes.DNSZone
properties:
name: test-domain.com.
exclusive: true
provider:
class: octodns.source.axfr.ZoneFileSource
directory: {DNS_FIXTURE}
file_extension: .tst
records:
'':
type: A
values:
- 172.16.58.3
- 192.168.127.12
"""
ENSEMBLE_WITH_RELATIONSHIPS = """
apiVersion: unfurl/v1alpha1
kind: Ensemble
spec:
service_template:
imports:
- repository: unfurl
file: configurators/dns-template.yaml
decorators:
tosca.nodes.WebServer::dns:
relationship:
type: unfurl.relationships.DNSRecords
properties:
records:
www:
type: A
value:
q:
eval: .source::.requirements::[.name=host]::.target::public_address
topology_template:
node_templates:
test_zone:
type: unfurl.nodes.DNSZone
properties:
name: test-domain.com.
provider:
class: octodns.provider.route53.Route53Provider
access_key_id: my_AWS_ACCESS_KEY_ID
secret_access_key: my_AWS_SECRET_ACCESS_KEY
test_app:
type: tosca.nodes.WebServer
requirements:
- host: compute
- dns:
node: test_zone
compute:
type: tosca.nodes.Compute
interfaces:
Install:
operations:
check:
inputs:
done:
status: "{%if '.status' | eval == 4 %}absent{%endif%}"
Standard:
operations:
create:
delete:
inputs:
done:
status: absent
defaults:
implementation: Template
inputs:
done:
status: ok
resultTemplate: |
- name: .self
attributes:
public_address: {get_env: [OCTODNS_TEST_IP, 10.10.10.1]}
"""
```
#### File: unfurl/unfurl/init.py
```python
import datetime
import os
import os.path
import random
import shutil
import string
import sys
import uuid
import logging
from jinja2.loaders import FileSystemLoader
from pathlib import Path
from . import DefaultNames, __version__, get_home_config_path, is_version_unreleased
from .localenv import LocalEnv, Project, LocalConfig
from .repo import GitRepo, Repo, is_url_or_git_path, split_git_url, commit_secrets
from .util import UnfurlError
from .yamlloader import make_yaml, make_vault_lib
_templatePath = os.path.join(os.path.abspath(os.path.dirname(__file__)), "templates")
def rename_for_backup(dir):
ctime = datetime.datetime.fromtimestamp(os.stat(dir).st_ctime)
new = dir + "." + ctime.strftime("%Y-%m-%d-%H-%M-%S")
os.rename(dir, new)
return new
def get_random_password(count=12, prefix="uv", extra=None):
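    # The first generated character is drawn from letters and digits only (``start``),
    # so the suffix after ``prefix`` never begins with punctuation; later characters may
    # also use the punctuation characters in ``extra``.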
srandom = random.SystemRandom()
start = string.ascii_letters + string.digits
if extra is None:
extra = "%&()*+,-./:<>?=@^_`~"
source = string.ascii_letters + string.digits + extra
return prefix + "".join(
srandom.choice(source if i else start) for i in range(count)
)
def _write_file(folder, filename, content):
if not os.path.isdir(folder):
os.makedirs(os.path.normpath(folder))
filepath = os.path.join(folder, filename)
with open(filepath, "w") as f:
f.write(content)
return filepath
def write_template(folder, filename, template, vars, templateDir=None):
from .eval import RefContext
from .runtime import NodeInstance
from .support import apply_template
if templateDir and not (os.path.isabs(templateDir) or templateDir[0] == "."):
# built-in template
templateDir = os.path.join(_templatePath, templateDir)
if templateDir:
searchPath = [templateDir, _templatePath]
else:
searchPath = [_templatePath]
if not templateDir or not os.path.exists(os.path.join(templateDir, template)):
# use default file if missing from templateDir
templateDir = _templatePath
with open(os.path.join(templateDir, template)) as f:
source = f.read()
instance = NodeInstance()
instance._baseDir = _templatePath
overrides = dict(loader=FileSystemLoader(searchPath))
content = apply_template(source, RefContext(instance, vars), overrides)
return _write_file(folder, filename, content)
def write_project_config(
projectdir,
filename=DefaultNames.LocalConfig,
templatePath=DefaultNames.LocalConfig + ".j2",
vars=None,
templateDir=None,
):
_vars = dict(include="", manifestPath=None)
if vars:
_vars.update(vars)
return write_template(projectdir, filename, templatePath, _vars, templateDir)
def create_home(
home=None, render=False, replace=False, runtime=None, no_runtime=None, **kw
):
"""
Create the home project if missing
"""
homePath = get_home_config_path(home)
if not homePath:
return None
exists = os.path.exists(homePath)
if exists and not replace:
return None
homedir, filename = os.path.split(homePath)
if render: # just render
repo = Repo.find_containing_repo(homedir)
# XXX if repo and update: git stash; git checkout rendered
ensembleDir = os.path.join(homedir, DefaultNames.EnsembleDirectory)
ensembleRepo = Repo.find_containing_repo(ensembleDir)
configPath, ensembleDir, password_vault = render_project(
homedir, repo, ensembleRepo, None, "home"
)
# XXX if repo and update: git commit -m"updated"; git checkout master; git stash pop
return configPath
else:
if exists:
rename_for_backup(homedir)
newHome, configPath, repo = create_project(
homedir,
template="home",
runtime=runtime or "venv:",
no_runtime=no_runtime,
msg="Create the unfurl home repository",
creating_home=True,
)
if repo:
repo.repo.git.branch("rendered") # now create a branch
return configPath
def _create_repo(gitDir, ignore=True):
import git
if not os.path.isdir(gitDir):
os.makedirs(gitDir)
repo = git.Repo.init(gitDir)
repo.index.add(add_hidden_git_files(gitDir))
repo.index.commit(f"Initial Commit for {uuid.uuid1()}")
if ignore:
Repo.ignore_dir(gitDir)
return GitRepo(repo)
def write_service_template(projectdir):
from .tosca import TOSCA_VERSION
vars = dict(version=TOSCA_VERSION)
return write_template(
projectdir, "service-template.yaml", "service-template.yaml.j2", vars
)
def write_ensemble_manifest(
destDir, manifestName, specRepo, specDir=None, extraVars=None, templateDir=None
):
if specDir:
specDir = os.path.abspath(specDir)
else:
specDir = ""
vars = dict(specRepoUrl=specRepo.get_url_with_path(specDir))
if extraVars:
vars.update(extraVars)
return write_template(destDir, manifestName, "manifest.yaml.j2", vars, templateDir)
def add_hidden_git_files(gitDir):
# write .gitignore and .gitattributes
gitIgnorePath = write_template(gitDir, ".gitignore", "gitignore.j2", {})
gitAttributesContent = (
f"**/*{DefaultNames.JobsLog} merge=union\n*.remotelock lockable\n"
)
gitAttributesPath = _write_file(gitDir, ".gitattributes", gitAttributesContent)
return [os.path.abspath(gitIgnorePath), os.path.abspath(gitAttributesPath)]
def _set_ensemble_vars(vars, externalProject, ensemblePath, context):
if externalProject:
vars["manifestPath"] = externalProject.get_relative_path(ensemblePath)
vars["external"] = externalProject.name
vars["context"] = context
def _warn_about_new_password(localProjectConfig):
logger = logging.getLogger("unfurl")
logger.warning(
"A password was generated and included in the local config file at %s -- "
"please keep this password safe, without it you will not be able to decrypt any encrypted files "
"committed to the repository.",
localProjectConfig,
)
def render_project(
projectdir,
repo,
ensembleRepo,
homePath,
templateDir=None,
names=DefaultNames,
use_context=None,
mono=False,
):
"""
Creates a folder named `projectdir` with a git repository with the following files:
unfurl.yaml
local/unfurl.yaml
ensemble-template.yaml
ensemble/ensemble.yaml
Returns the absolute path to unfurl.yaml
"""
assert os.path.isabs(projectdir), projectdir + " must be an absolute path"
# write the project files
localConfigFilename = names.LocalConfig
externalProject = None
ensembleDir = os.path.join(projectdir, names.EnsembleDirectory)
if ensembleRepo:
if ensembleRepo.working_dir not in projectdir:
externalProject = find_project(ensembleRepo.working_dir, homePath)
if externalProject:
dirname, ensembleDirName = os.path.split(projectdir)
if ensembleDirName == DefaultNames.ProjectDirectory:
ensembleDirName = os.path.basename(dirname)
relPath = externalProject.get_relative_path(
os.path.join(ensembleRepo.working_dir, ensembleDirName)
)
ensembleDir = externalProject.get_unique_path(relPath)
manifestName = names.Ensemble
ensemblePath = os.path.join(ensembleDir, manifestName)
vaultpass = get_random_password()
# use project name plus a couple of random digits to avoid collisions
vaultid = (
Project.get_name_from_dir(projectdir) + get_random_password(2, "", "").upper()
)
vars = dict(vaultpass=vaultpass, vaultid=vaultid)
# only commit external ensembles references if we are creating a mono repo
# otherwise record them in the local config:
localExternal = use_context and externalProject and not mono
if ensembleRepo and (ensembleRepo.is_local_only() or localExternal):
_set_ensemble_vars(vars, externalProject, ensemblePath, use_context)
if localExternal:
# since this is specified while creating the project set this as the default context
vars["default_context"] = use_context
localProjectConfig = write_project_config(
os.path.join(projectdir, "local"),
localConfigFilename,
"unfurl.local.yaml.j2",
vars,
templateDir,
)
_warn_about_new_password(localProjectConfig)
write_project_config(
os.path.join(projectdir, "secrets"),
names.SecretsConfig,
"secrets.yaml.j2",
vars,
templateDir,
)
localInclude = "+?include-local: " + os.path.join("local", localConfigFilename)
secretsInclude = "+?include-secrets: " + os.path.join(
"secrets", names.SecretsConfig
)
# note: local overrides secrets
vars = dict(include=secretsInclude + "\n" + localInclude, vaultid=vaultid)
if use_context and not localExternal:
# since this is specified while creating the project set this as the default context
vars["default_context"] = use_context
if ensembleRepo and not (ensembleRepo.is_local_only() or localExternal):
_set_ensemble_vars(vars, externalProject, ensemblePath, use_context)
projectConfigPath = write_project_config(
projectdir,
names.LocalConfig,
"unfurl.yaml.j2",
vars,
templateDir,
)
write_project_config(
projectdir,
names.LocalConfigTemplate,
"local-unfurl-template.yaml.j2",
vars,
templateDir,
)
# write ensemble-template.yaml
write_template(
projectdir,
names.EnsembleTemplate,
"manifest-template.yaml.j2",
{},
templateDir,
)
if ensembleRepo:
extraVars = dict(
ensembleUri=ensembleRepo.get_url_with_path(ensemblePath),
# include the ensembleTemplate in the root of the specDir
ensembleTemplate=names.EnsembleTemplate,
)
# write ensemble/ensemble.yaml
write_ensemble_manifest(
ensembleDir,
manifestName,
repo,
projectdir,
extraVars=extraVars,
templateDir=templateDir,
)
if externalProject:
# add the external project to the project and localRepositories configuration sections
# split repos should not have references to ensembles
# so register it with the local project config if not a mono repo
configPath = localProjectConfig if localExternal else projectConfigPath
LocalConfig(configPath).register_project(externalProject)
externalProject.register_ensemble(
ensemblePath, managedBy=find_project(projectdir, homePath)
)
return projectConfigPath, ensembleDir, make_vault_lib(vaultpass, vaultid)
def _find_project_repo(projectdir):
repo = Repo.find_containing_repo(projectdir)
if not repo:
raise UnfurlError("Could not find an existing repository")
if not repo.repo.head.is_valid():
raise UnfurlError(
"Existing repository is empty: unable to create project in empty git repositories"
)
return repo
def _find_ensemble_repo(projectdir, shared, submodule, ensemble_name):
if shared:
ensembleRepo = Repo.find_containing_repo(shared)
if not ensembleRepo:
raise UnfurlError("can not find shared repository " + shared)
else:
ensembleDir = os.path.join(projectdir, ensemble_name)
ensembleRepo = _create_repo(ensembleDir, not submodule)
return ensembleRepo
def _commit_repos(projectdir, repo, ensembleRepo, shared, kw, ensembleDir, newHome):
if ensembleRepo:
ensembleRepo.add_all(ensembleDir)
if shared:
message = "Adding ensemble"
else:
message = "Default ensemble repository boilerplate"
ensembleRepo.repo.index.commit(message)
if kw.get("submodule"):
repo.add_sub_module(ensembleDir)
if not newHome and not kw.get("no_runtime") and kw.get("runtime"):
# if runtime was explicitly set and we aren't creating the home project
# then initialize the runtime here
try:
init_engine(projectdir, kw.get("runtime"))
        except Exception:
pass # don't stop even if this fails
repo.add_all(projectdir)
repo.repo.index.commit(kw.get("msg") or "Create a new Unfurl project")
def _get_shared(kw, homePath):
shared = kw.get("shared_repository")
if shared:
return shared
context = kw.get("use_environment")
if context and homePath:
homeProject = find_project(homePath, None)
assert homeProject
return homeProject.get_default_project_path(context)
return None
def create_project(
projectdir,
ensemble_name=None,
home=None,
mono=False,
existing=False,
empty=False,
template=None,
creating_home=False,
**kw,
):
create_context = kw.get("create_environment")
use_context = kw.get("use_environment")
if existing:
repo = _find_project_repo(projectdir)
else:
repo = None
if create_context:
# set context to the project name
create_context = os.path.basename(projectdir)
# defaults for a repository for an entire context
mono = True
if not ensemble_name:
empty = True
names = DefaultNames(EnsembleDirectory=ensemble_name)
newHome = ""
homePath = get_home_config_path(home)
# don't try to create the home project if we are already creating the home project
if (
not creating_home
and homePath is not None
and projectdir != os.path.dirname(homePath)
):
# create the home project (but only if it doesn't exist already)
newHome = create_home(
home, runtime=kw.get("runtime"), no_runtime=kw.get("no_runtime")
)
if repo:
add_hidden_git_files(projectdir)
else:
repo = _create_repo(projectdir)
shared = _get_shared(kw, homePath)
submodule = kw.get("submodule")
if mono and not shared:
ensembleRepo = repo
else:
ensembleRepo = _find_ensemble_repo(
projectdir, shared, submodule, names.EnsembleDirectory
)
projectConfigPath, ensembleDir, password_vault = render_project(
projectdir,
repo,
not empty and ensembleRepo,
homePath,
template,
names,
create_context or use_context,
mono,
)
if homePath and create_context:
newProject = find_project(projectConfigPath, homePath)
assert newProject
homeProject = newProject.parentProject
assert homeProject
homeProject.localConfig.register_project(newProject, create_context)
if password_vault:
yaml = make_yaml(password_vault)
commit_secrets(os.path.dirname(projectConfigPath), yaml)
_commit_repos(
projectdir,
repo,
not mono and ensembleRepo,
shared,
kw,
ensembleDir,
newHome,
)
return newHome, projectConfigPath, repo
def clone_local_repos(manifest, sourceProject, targetProject):
# We need to clone repositories that are local to the source project
# otherwise we won't be able to find them
for repoView in manifest.repositories.values():
repoSpec = repoView.repository
if repoSpec.name == "self":
continue
repo = sourceProject.find_git_repo_from_repository(repoSpec)
if repo:
targetProject.find_or_clone(repo)
def _create_ensemble_repo(manifest, repo):
destDir = os.path.dirname(manifest.manifest.path)
if not repo:
repo = _create_repo(destDir)
elif not os.path.isdir(destDir):
os.makedirs(destDir)
manifest.metadata["uri"] = repo.get_url_with_path(manifest.manifest.path)
with open(manifest.manifest.path, "w") as f:
manifest.dump(f)
repo.repo.index.add([manifest.manifest.path])
repo.repo.index.commit("Default ensemble repository boilerplate")
return repo
def _looks_like(path, name):
# in case path is a directory:
if os.path.isfile(os.path.join(path, name)):
return path, name
if path.endswith(name): # name is explicit so don't need to check if file exists
return os.path.split(path)
return None
def _get_ensemble_paths(sourcePath, sourceProject):
"""
    Returns either a pointer to the ensemble to clone,
    or a dict of variables to pass to an ensemble template to create a new one.

    Looks for an ensemble given sourcePath (unless sourcePath looks like a service template);
    if that fails and sourcePath is a directory, looks for an ensemble-template or
    service-template there. If nothing suitable is found, returns {}.
"""
template = None
relPath = sourcePath or "."
if not os.path.exists(relPath):
raise UnfurlError(
f'Given clone source "{os.path.abspath(relPath)}" does not exist.'
)
# we only support cloning TOSCA service templates if their names end in "service-template.yaml"
isServiceTemplate = sourcePath.endswith(DefaultNames.ServiceTemplate)
if not isServiceTemplate:
try:
localEnv = LocalEnv(relPath, project=sourceProject)
sourceDir = sourceProject.get_relative_path(
os.path.dirname(localEnv.manifestPath)
)
# note: if sourceDir.startswith("..") then ensemble lives in another's project's repo
return dict(sourceDir=sourceDir, localEnv=localEnv)
except UnfurlError:
# XXX if UnfurlError is "could not find external project", reraise
pass
# didn't find the specified file (or the default ensemble if none was specified)
# so if sourcePath was a directory try for one of the default template files
if isServiceTemplate or os.path.isdir(relPath):
# look for an ensemble-template or service-template in source path
if os.path.isdir(os.path.join(sourcePath, DefaultNames.ProjectDirectory)):
sourcePath = os.path.join(sourcePath, DefaultNames.ProjectDirectory)
template = _looks_like(sourcePath, DefaultNames.EnsembleTemplate)
if template:
sourceDir = sourceProject.get_relative_path(template[0])
return dict(sourceDir=sourceDir, ensembleTemplate=template[1])
template = _looks_like(sourcePath, DefaultNames.ServiceTemplate)
if template:
sourceDir = sourceProject.get_relative_path(template[0])
return dict(sourceDir=sourceDir, serviceTemplate=template[1])
# nothing valid found
return {}
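# Illustrative summary of the possible return values (the concrete file names come from DefaultNames):
#   existing ensemble found   -> dict(sourceDir=..., localEnv=<LocalEnv instance>)
#   ensemble template found   -> dict(sourceDir=..., ensembleTemplate=DefaultNames.EnsembleTemplate)
#   service template found    -> dict(sourceDir=..., serviceTemplate=DefaultNames.ServiceTemplate)
#   nothing found             -> {}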
def _create_ensemble_from_template(templateVars, project, destDir, manifestName):
from unfurl import yamlmanifest
assert project
sourceDir = os.path.normpath(
os.path.join(project.projectRoot, templateVars["sourceDir"])
)
specRepo, relPath, revision, bare = project.find_path_in_repos(sourceDir)
if not specRepo:
raise UnfurlError(
'"%s" is not in a git repository. Cloning from plain file directories not yet supported'
% os.path.abspath(sourceDir)
)
manifestPath = write_ensemble_manifest(
os.path.join(project.projectRoot, destDir),
manifestName,
specRepo,
sourceDir,
templateVars,
)
localEnv = LocalEnv(manifestPath, project=project)
manifest = yamlmanifest.ReadOnlyManifest(localEnv=localEnv)
return localEnv, manifest
def find_project(source, home_path):
sourceRoot = Project.find_path(source)
if sourceRoot:
if home_path:
return Project(sourceRoot, Project(home_path))
return Project(sourceRoot)
return None
def _get_context_and_shared_repo(project, options):
# when creating ensemble, get the default project for the given context if set
# XXX if not --new-repository
shared_repo = None
shared = options.get("shared_repository")
context = options.get("use_environment")
if not context:
context = project.get_default_context()
if not shared and context:
shared = project.get_default_project_path(context)
if shared:
shared_repo = Repo.find_containing_repo(shared)
if not shared_repo:
raise UnfurlError("can not find shared repository " + shared)
return context, shared_repo
class EnsembleBuilder:
def __init__(self, source: str, ensemble_name: str, options: dict):
# user specified url or path
self.input_source = source
self.options = options
self.ensemble_name = ensemble_name
self.mono = options.get("mono") or options.get("existing")
self.home_path = get_home_config_path(options.get("home"))
self.source_project = None # step 1
self.source_path = None # step 1 relative path in source_project
self.templateVars = None # step 2
self.environment = None # step 2 environment name
self.shared_repo = None # step 2
self.dest_project = None # step 3
self.dest_path = None # step 3 relative path in dest_project
self.manifest = None # final step
def create_project_from_ensemble(self, dest):
# XXX create a new project from scratch for the ensemble
# if os.path.exists(dest) and os.listdir(dest):
# raise UnfurlError(
# 'Can not create a project in "%s": folder is not empty' % dest
# )
# newHome, projectConfigPath, repo = createProject(
# dest, empty=True, **options
# )
# return Project(projectConfigPath)
raise UnfurlError(
f"Can't clone \"{self.input_source}\": it isn't in an Unfurl project or repository"
)
def configure(self):
assert not self.templateVars
# source is a path into the project relative to the current directory
source_path = os.path.join(self.source_project.projectRoot, self.source_path)
self.templateVars = _get_ensemble_paths(
source_path,
self.source_project,
)
(self.environment, self.shared_repo) = _get_context_and_shared_repo(
self.source_project, self.options
)
@staticmethod
def _get_ensemble_dir(targetPath):
assert not os.path.isabs(targetPath)
if not targetPath or targetPath == ".":
destDir, manifestName = (
DefaultNames.EnsembleDirectory,
DefaultNames.Ensemble,
)
elif targetPath.endswith(".yaml") or targetPath.endswith(".yml"):
destDir, manifestName = os.path.split(targetPath)
else:
destDir = targetPath
manifestName = DefaultNames.Ensemble
return destDir, manifestName
def create_new_ensemble(self):
"""
If "localEnv" is in templateVars, clone that ensemble;
otherwise create one from a template with templateVars
"""
from unfurl import yamlmanifest
if self.shared_repo:
destProject = find_project(self.shared_repo.working_dir, self.home_path)
assert destProject
else:
destProject = self.dest_project
assert destProject
assert self.templateVars
assert not self.manifest
assert self.dest_path is not None
destDir, manifestName = self._get_ensemble_dir(self.dest_path)
# choose a destDir that doesn't conflict with an existing folder
# (i.e. if default ensemble already exists)
destDir = destProject.get_unique_path(destDir)
# destDir is now absolute
targetPath = os.path.normpath(os.path.join(destDir, manifestName))
assert (not self.shared_repo) or targetPath.startswith(
self.shared_repo.working_dir
), (
targetPath,
self.shared_repo.working_dir,
)
templateVars = self.templateVars
if "localEnv" not in templateVars:
# we found a template file to clone
localEnv, manifest = _create_ensemble_from_template(
self.templateVars, destProject, destDir, manifestName
)
else:
# didn't find a template file
# look for an ensemble at the given path or use the source project's default
localEnv = templateVars["localEnv"]
manifest = yamlmanifest.clone(localEnv, targetPath)
_create_ensemble_repo(
manifest,
self.shared_repo or self.mono and self.dest_project.project_repoview.repo,
)
if destProject.projectRoot != self.dest_project.projectRoot:
# cross reference each other
destProject.register_ensemble(
manifest.path, managedBy=self.dest_project, context=self.environment
)
self.dest_project.register_ensemble(
manifest.path, project=destProject, context=self.environment
)
else:
destProject.register_ensemble(manifest.path, context=self.environment)
self.manifest = manifest
return destDir
def clone_local_project(self, sourceProject, dest_dir):
# clone the source project's git repo
self.source_path = sourceProject.get_relative_path(self.input_source)
assert not self.source_path.startswith(
".."
), f"{self.source_path} should be inside the project"
newrepo = sourceProject.project_repoview.repo.clone(dest_dir)
search = os.path.join(
dest_dir, sourceProject.project_repoview.path, self.source_path
)
self.source_project = find_project(search, self.home_path)
assert (
self.source_project
), f"project not found in {search}, cloned to {newrepo.working_dir}"
return self.source_project
def clone_remote_project(self, destDir):
# check if source is a git url
repoURL, filePath, revision = split_git_url(self.input_source)
if os.path.exists(destDir) and os.listdir(destDir):
raise UnfurlError(
f'Can not clone project into "{destDir}": folder is not empty'
)
        # clone the remote repo to destDir
Repo.create_working_dir(repoURL, destDir, revision)
targetDir = os.path.join(destDir, filePath)
sourceRoot = Project.find_path(targetDir)
if not sourceRoot:
raise UnfurlError(
f'Error: cloned "{self.input_source}" to "{destDir}" but couldn\'t find an Unfurl project'
)
self.source_project = find_project(sourceRoot, self.home_path)
# set source to point to the cloned project
self.source_path = self.source_project.get_relative_path(targetDir)
return self.source_project
def set_dest_project_and_path(
self, existingSourceProject, existingDestProject, dest
):
assert self.dest_project is None
new_project = self.source_project is not existingSourceProject
if existingDestProject:
# set that as the dest_project
self.dest_project = existingDestProject
if existingSourceProject is not existingDestProject and new_project:
# we cloned a new source project inside of an existing project
# add the cloned project's repo to the currentProject so we can find it later
# to set it as the ensemble's spec repository
existingDestProject.workingDirs[
self.source_project.projectRoot
] = self.source_project.project_repoview
# path from dest to source
else:
# otherwise set source_project as the dest_project
self.dest_project = self.source_project
if new_project:
                # finish creating the new project
# create local/unfurl.yaml in the new project
_create_local_config(self.source_project)
# set "" as dest because we already "consumed" dest by cloning the project to that location
dest = ""
if os.path.isabs(dest):
relDestDir = self.dest_project.get_relative_path(dest)
assert not relDestDir.startswith(".."), relDestDir
else:
relDestDir = dest.lstrip(".")
if (
self.ensemble_name
and self.ensemble_name != DefaultNames.EnsembleDirectory
or relDestDir == "."
or not relDestDir
):
relDestDir = self.ensemble_name
self.dest_path = relDestDir
def set_existing_ensemble(self, sourceProject):
from unfurl import yamlmanifest
if self.source_project is not sourceProject and not self.shared_repo:
if "localEnv" in self.templateVars and os.path.exists(
Path(self.dest_project.projectRoot) / self.dest_path
):
# the ensemble is already part of the source project repository or a submodule
localEnv = self.templateVars["localEnv"]
self.manifest = yamlmanifest.ReadOnlyManifest(localEnv=localEnv)
return self.manifest
return None
def set_source(self, sourceProject):
self.source_project = sourceProject
# make source relative to the source project
source_path = sourceProject.get_relative_path(self.input_source)
assert not source_path.startswith("..")
self.source_path = source_path
def set_ensemble(self, isRemote, existingSourceProject, existingDestProject):
sourceWasCloned = self.source_project is not existingSourceProject
destIsNew = not existingDestProject
if destIsNew and self.set_existing_ensemble(existingSourceProject):
# if dest_project is new (we just cloned it)
# check if we cloned the ensemble already
            # if so we're done; we don't need to create a new one
return (
"Cloned project with a pre-existing ensemble to "
+ self.dest_project.projectRoot
)
if not self.templateVars:
# source wasn't pointing to an ensemble to clone
if sourceWasCloned:
# but we cloned a project
return "Cloned empty project to " + self.dest_project.projectRoot
else:
# can't find anything to do, so raise an error
raise UnfurlError(
f'Can\'t find anything to clone in "{self.input_source}"'
)
destDir = self.create_new_ensemble()
assert self.manifest
if not isRemote and existingSourceProject is not self.source_project:
# we need to clone the referenced local repos so the new project has access to them
clone_local_repos(self.manifest, existingSourceProject, self.source_project)
return f'Created new ensemble at "{os.path.abspath(destDir)}"'
def clone(source, dest, ensemble_name=DefaultNames.EnsembleDirectory, **options):
"""
Clone the ``source`` ensemble to ``dest``. If ``dest`` isn't in a project, create one.
``source`` can point to an ensemble_template, a service_template, an existing ensemble
or a folder containing one of those. If it points to a project its default ensemble will be cloned.
    Referenced `repositories` will be cloned if they are git repositories or copied if they are regular file folders.
    If the folders already exist they will be copied to a new folder unless the git repositories have the same HEAD,
    but the local repository names will remain the same.
======================= =============================================
dest result
======================= =============================================
Inside source project new ensemble
missing or empty folder clone project, new or cloned ensemble
another project new or cloned ensemble with source as spec
non-empty folder error
======================= =============================================
"""
if not dest:
dest = Repo.get_path_for_git_repo(source) # choose dest based on source url
# XXX else: # we're assuming dest is directory, handle case where filename is included
builder = EnsembleBuilder(source, ensemble_name, options)
currentProject = find_project(dest, builder.home_path)
    ### step 1: clone the source repository and set the source path
sourceProject = None
isRemote = is_url_or_git_path(source)
if isRemote:
builder.clone_remote_project(dest)
else:
sourceProject = find_project(source, builder.home_path)
if not sourceProject:
# source wasn't in a project
# XXX currently just raises error
return builder.create_project_from_ensemble(dest)
relDestDir = sourceProject.get_relative_path(dest)
if relDestDir.startswith(".."):
# dest is outside the source project, so clone the source project
builder.clone_local_project(sourceProject, dest)
else:
# dest is in the source project
# so don't need to clone, just need to create an ensemble
builder.set_source(sourceProject)
assert builder.source_project
##### step 2: examine source for template details and determine destination project
builder.configure()
    ##### step 3: create destination project if necessary
builder.set_dest_project_and_path(sourceProject, currentProject, dest)
    ##### step 4: create ensemble in destination project if needed
if options.get("empty"):
# don't create an ensemble
return "Cloned empty project to " + builder.dest_project.projectRoot
return builder.set_ensemble(isRemote, sourceProject, currentProject)
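# Illustrative usage (hypothetical URL): clone("https://example.com/blueprints.git", "./mydir")
# would clone the remote project into ./mydir and then create or clone an ensemble there,
# per the table in the docstring above.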
def _create_local_config(clonedProject):
local_template = os.path.join(
clonedProject.projectRoot, DefaultNames.LocalConfigTemplate
)
PLACEHOLDER = "$generate_new_vault_password"
if os.path.isfile(local_template):
with open(local_template) as s:
contents = s.read()
contents = "\n".join(
[line for line in contents.splitlines() if not line.startswith("##")]
)
dest = os.path.join(
clonedProject.projectRoot, "local", DefaultNames.LocalConfig
)
if PLACEHOLDER in contents:
contents = contents.replace(PLACEHOLDER, get_random_password())
_warn_about_new_password(dest)
_write_file(
os.path.join(clonedProject.projectRoot, "local"),
DefaultNames.LocalConfig,
contents,
)
logging.info(
            f'Generated a new local project configuration file at "{dest}"\n'
"Please review it for any instructions on configuring this project."
)
def _get_unfurl_requirement_url(spec):
"""Expand the given string in an URL for installing the local Unfurl package.
If @ref is omitted the tag for the current release will be used,
if empty ("@") the latest revision will be used
If no path or url is specified https://github.com/onecommons/unfurl.git will be used.
For example:
@tag
./path/to/local/repo
./path/to/local/repo@tag
./path/to/local/repo@
git+https://example.com/forked/unfurl.git
@
Args:
spec (str): can be a path to a git repo, git url or just a revision or tag.
Returns:
        str: A pip requirement URL for installing unfurl (ending in "#egg=unfurl").
"""
if not spec:
return spec
if "egg=unfurl" in spec:
# looks fully specified, just return it
return spec
url, sep, ref = spec.rpartition("@")
if sep:
if ref:
ref = "@" + ref
else:
ref = "@" + __version__()
if not url:
return "git+https://github.com/onecommons/unfurl.git" + ref + "#egg=unfurl"
if not url.startswith("git+"):
return "git+file://" + os.path.abspath(url) + ref + "#egg=unfurl"
else:
return url + ref + "#egg=unfurl"
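# Illustrative expansions (the tag name "v1.0" and the path "./localrepo" are placeholders):
#   "@v1.0"            -> "git+https://github.com/onecommons/unfurl.git@v1.0#egg=unfurl"
#   "./localrepo@v1.0" -> "git+file://<abspath of ./localrepo>@v1.0#egg=unfurl"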
def init_engine(projectDir, runtime):
runtime = runtime or "venv:"
kind, sep, rest = runtime.partition(":")
if kind == "venv":
pipfileLocation, sep, unfurlLocation = rest.partition(":")
return create_venv(
projectDir, pipfileLocation, _get_unfurl_requirement_url(unfurlLocation)
)
# XXX else kind == 'docker':
return "unrecognized runtime uri"
def _run_pip_env(do_install, kw):
    # create the virtualenv and install the dependencies specified in the Pipfiles
sys_exit = sys.exit
try:
retcode = 0
        def noexit(code):
            nonlocal retcode
            retcode = code
sys.exit = noexit
do_install(**kw)
finally:
sys.exit = sys_exit
return retcode
# XXX provide an option for an unfurl installation that can be shared across runtimes.
def _add_unfurl_to_venv(projectdir):
"""
Set the virtualenv inside `projectdir` to use the unfurl package currently being executed.
"""
    # this should only be used when the current unfurl is installed in editable mode
    # otherwise it will expose all packages in the current python's site-packages
base = os.path.dirname(os.path.dirname(_templatePath))
sitePackageDir = None
libDir = os.path.join(projectdir, os.path.join(".venv", "lib"))
for name in os.listdir(libDir):
sitePackageDir = os.path.join(libDir, name, "site-packages")
if os.path.isdir(sitePackageDir):
break
else:
return "Pipenv failed: can't find site-package folder"
_write_file(sitePackageDir, "unfurl.pth", base)
_write_file(sitePackageDir, "unfurl.egg-link", base)
return ""
def create_venv(projectDir, pipfileLocation, unfurlLocation):
"""Create a virtual python environment for the given project."""
os.environ["PIPENV_IGNORE_VIRTUALENVS"] = "1"
VIRTUAL_ENV = os.environ.get("VIRTUAL_ENV")
os.environ["PIPENV_VENV_IN_PROJECT"] = "1"
if "PIPENV_PYTHON" not in os.environ:
os.environ["PIPENV_PYTHON"] = sys.executable
if pipfileLocation:
pipfileLocation = os.path.abspath(pipfileLocation)
try:
cwd = os.getcwd()
os.chdir(projectDir)
# need to set env vars and change current dir before importing pipenv
from pipenv import environments
from pipenv.core import do_install
from pipenv.utils import python_version
pythonPath = os.environ["PIPENV_PYTHON"]
assert pythonPath, pythonPath
if not pipfileLocation:
versionStr = python_version(pythonPath)
assert versionStr, versionStr
version = versionStr.rpartition(".")[0] # 3.8.1 => 3.8
# version = subprocess.run([pythonPath, "-V"]).stdout.decode()[
# 7:10
# ] # e.g. Python 3.8.1 => 3.8
pipfileLocation = os.path.join(
_templatePath, "python" + version
) # e.g. templates/python3.8
if not os.path.isdir(pipfileLocation):
return f'Pipfile location is not a valid directory: "{pipfileLocation}"'
# copy Pipfiles to project root
if os.path.abspath(projectDir) != os.path.abspath(pipfileLocation):
for filename in ["Pipfile", "Pipfile.lock"]:
path = os.path.join(pipfileLocation, filename)
if os.path.isfile(path):
shutil.copy(path, projectDir)
kw = dict(python=pythonPath)
# need to run without args first so lock isn't overwritten
retcode = _run_pip_env(do_install, kw)
if retcode:
return f"Pipenv (step 1) failed: {retcode}"
# we need to set these so pipenv doesn't try to recreate the virtual environment
environments.PIPENV_USE_SYSTEM = 1
environments.PIPENV_IGNORE_VIRTUALENVS = False
os.environ["VIRTUAL_ENV"] = os.path.join(projectDir, ".venv")
environments.PIPENV_VIRTUALENV = os.path.join(projectDir, ".venv")
# we need to set skip_lock or pipenv will not honor the existing lock
kw["skip_lock"] = True
if unfurlLocation:
kw["editable_packages"] = [unfurlLocation]
else:
if is_version_unreleased():
return _add_unfurl_to_venv(projectDir)
else:
kw["packages"] = [
"unfurl==" + __version__()
] # use the same version as current
retcode = _run_pip_env(do_install, kw)
if retcode:
return f"Pipenv (step 2) failed: {retcode}"
return ""
finally:
if VIRTUAL_ENV:
os.environ["VIRTUAL_ENV"] = VIRTUAL_ENV
else:
os.environ.pop("VIRTUAL_ENV", None)
os.chdir(cwd)
```
#### File: unfurl/unfurl/repo.py
```python
import os
import os.path
from pathlib import Path
import git
from git.repo.fun import is_git_dir
import logging
from six.moves.urllib.parse import urlparse
from .util import UnfurlError, save_to_file, to_text
import toscaparser.repositories
from ruamel.yaml.comments import CommentedMap
logger = logging.getLogger("unfurl")
def normalize_git_url(url):
if url.startswith("git-local://"): # truncate url after commit digest
return "git-local://" + urlparse(url).netloc.partition(":")[0]
if "://" not in url: # not an absolute URL, convert some common patterns
if url.startswith("/"):
return "file://" + url
elif "@" in url: # scp style used by git: user@server:project.git
# convert to ssh://user@server/project.git
return "ssh://" + url.replace(":", "/", 1)
return url
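# Illustrative conversions:
#   "git@github.com:onecommons/unfurl.git" -> "ssh://git@github.com/onecommons/unfurl.git"
#   "/home/user/repo"                      -> "file:///home/user/repo"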
def is_url_or_git_path(url):
if "://" in url and not url.startswith("file:"):
return True
if "@" in url:
return True
candidate, sep, frag = url.partition("#")
if frag or candidate.rstrip("/").endswith(".git"):
return True
return False
def split_git_url(url):
"""
Returns (repoURL, filePath, revision)
RepoURL will be an empty string if it isn't a path to a git repo
"""
parts = urlparse(url)
if parts.scheme == "git-local":
return parts.scheme + "://" + parts.netloc, parts.path[1:], parts.fragment
if parts.fragment:
# treat fragment as a git revision spec; see https://git-scm.com/docs/gitrevisions
# or https://docs.docker.com/engine/reference/commandline/build/#git-repositories
# just support <ref>:<path> for now
# e.g. myrepo.git#mybranch, myrepo.git#pull/42/head, myrepo.git#:myfolder, myrepo.git#master:myfolder
revision, sep, path = parts.fragment.partition(":")
giturl, sep, frag = url.partition("#")
return giturl, path, revision
return url, "", ""
class _ProgressPrinter(git.RemoteProgress):
def update(self, op_code, cur_count, max_count=None, message=""):
# we use print instead of logging because we don't want to clutter logs with this message
if message and logger.getEffectiveLevel() <= logging.INFO:
print(f"fetching from {self.gitUrl}, received: {message} ")
class Repo:
@staticmethod
def find_containing_repo(rootDir, gitDir=".git"):
"""
Walk parents looking for a git repository.
"""
current = os.path.abspath(rootDir)
while current and current != os.sep:
if is_git_dir(os.path.join(current, gitDir)):
return GitRepo(git.Repo(current))
current = os.path.dirname(current)
return None
@staticmethod
def find_git_working_dirs(rootDir, gitDir=".git"):
working_dirs = {}
for root, dirs, files in os.walk(rootDir):
if Repo.update_git_working_dirs(working_dirs, root, dirs, gitDir):
del dirs[:] # don't visit sub directories
return working_dirs
@staticmethod
def update_git_working_dirs(working_dirs, root, dirs, gitDir=".git"):
if gitDir in dirs and is_git_dir(os.path.join(root, gitDir)):
assert os.path.isdir(root), root
repo = GitRepo(git.Repo(root))
key = os.path.abspath(root)
working_dirs[key] = repo.as_repo_view()
return key
return None
@staticmethod
def ignore_dir(dir):
parent = Repo.find_containing_repo(os.path.dirname(dir))
if parent:
path = parent.find_repo_path(dir)
if path: # can be None if dir is already ignored
parent.add_to_local_git_ignore("/" + path)
return path
return None
def find_repo_path(self, path):
localPath = self.find_path(path)[0]
if localPath is not None and not self.is_path_excluded(localPath):
return localPath
return None
def is_path_excluded(self, localPath):
return False
def find_path(self, path, importLoader=None):
base = self.working_dir
if not base: # XXX support bare repos
return None, None, None
repoRoot = os.path.abspath(base)
abspath = os.path.abspath(path).rstrip("/")
if repoRoot in abspath:
# XXX find pinned
# if importLoader:
# revision = importLoader.getRevision(self)
# else:
if True:
revision = self.revision
bare = not self.working_dir or revision != self.revision
return abspath[len(repoRoot) + 1 :], revision, bare
return None, None, None
def as_repo_view(self, name=""):
return RepoView(dict(name=name, url=self.url), self)
def is_local_only(self):
return self.url.startswith("git-local://") or os.path.isabs(self.url)
@staticmethod
def get_path_for_git_repo(gitUrl):
parts = urlparse(gitUrl)
if parts.scheme == "git-local":
# e.g. extract spec from git-local://0cfeee6571c4276ce1a63dc37aa8cbf8b8085d60:spec
            name = parts.netloc.partition(":")[2]
else:
# e.g. extract tosca-parser from https://github.com/onecommons/tosca-parser.git
name = (
os.path.splitext(os.path.basename(parts.path.strip("/")))[0]
or parts.netloc
)
assert not name.endswith(".git"), name
return name
@classmethod
def create_working_dir(cls, gitUrl, localRepoPath, revision=None):
localRepoPath = localRepoPath or "."
if os.path.exists(localRepoPath):
if not os.path.isdir(localRepoPath) or os.listdir(localRepoPath):
raise UnfurlError(
f"couldn't create directory, it already exists and isn't empty: {localRepoPath}"
)
logger.info("Fetching %s %s to %s", gitUrl, revision or "", localRepoPath)
progress = _ProgressPrinter()
progress.gitUrl = gitUrl
try:
kwargs = dict(recurse_submodules=True)
if revision:
kwargs["branch"] = revision
repo = git.Repo.clone_from(gitUrl, localRepoPath, progress, **kwargs)
except git.exc.GitCommandError as err:
raise UnfurlError(
f'couldn\'t create working directory, clone failed: "{err._cmdline}"\nTry re-running that command to diagnose the problem.'
)
Repo.ignore_dir(localRepoPath)
return GitRepo(repo)
def commit_secrets(working_dir, yaml):
vault = yaml and getattr(yaml.representer, "vault", None)
if not vault:
return []
saved = []
for filepath, dotsecrets in find_dirty_secrets(working_dir):
with open(filepath, "r") as vf:
vaultContents = vf.read()
encoding = None if vaultContents.startswith("$ANSIBLE_VAULT;") else "vault"
secretpath = dotsecrets / filepath.name
logger.verbose("encrypting file to %s with %s", secretpath, vault.secrets[0][0])
save_to_file(str(secretpath), vaultContents, yaml, encoding)
saved.append(secretpath)
return saved
def find_dirty_secrets(working_dir):
# compare .secrets with secrets
for root, dirs, files in os.walk(working_dir):
if "secrets" not in Path(root).parts:
continue
for filename in files:
dotsecrets = Path(root.replace("secrets", ".secrets"))
filepath = Path(root) / filename
if (
not dotsecrets.is_dir()
or filename not in list([p.name for p in dotsecrets.iterdir()])
or filepath.stat().st_mtime > (dotsecrets / filename).stat().st_mtime
):
yield filepath, dotsecrets
class RepoView:
# view of Repo optionally filtered by path
# XXX and revision too
def __init__(self, repository, repo, path=""):
if isinstance(repository, dict):
# required keys: name, url
tpl = repository.copy()
name = tpl.pop("name")
tpl["url"] = normalize_git_url(tpl["url"])
repository = toscaparser.repositories.Repository(name, tpl)
assert repository or repo
self.repository = repository
self.repo = repo
self.path = path
if repo and path and repository:
self.repository.url = repo.get_url_with_path(path)
self.readOnly = not repo
self.yaml = None
@property
def working_dir(self):
if self.repo:
return os.path.join(self.repo.working_dir, self.path)
else:
return os.path.join(self.repository.url, self.path)
@property
def name(self):
return self.repository.name if self.repository else ""
@property
def url(self):
return self.repository.url if self.repository else self.repo.url
def is_local_only(self):
        # if it doesn't have a repo then it must be local
return not self.repo or self.repo.is_local_only()
@property
def origin(self):
if (
self.repo
and normalize_git_url(self.repo.url) != split_git_url(self.url)[0]
and self.repo.url != self.repo.working_dir
):
return self.repo.url
return ""
def is_dirty(self):
if self.readOnly:
return False
for filepath, dotsecrets in find_dirty_secrets(self.working_dir):
return True
return self.repo.is_dirty(untracked_files=True, path=self.path)
def add_all(self):
assert not self.readOnly
self.repo.repo.git.add("--all", self.path or ".")
def load_secrets(self, _loader):
logger.trace("looking for secrets %s", self.working_dir)
for root, dirs, files in os.walk(self.working_dir):
if ".secrets" not in Path(root).parts:
continue
logger.trace("checking if secret files where changed or added %s", files)
for filename in files:
secretsdir = Path(root.replace(".secrets", "secrets"))
filepath = Path(root) / filename
stinfo = filepath.stat()
target = secretsdir / filename
if not target.is_file() or stinfo.st_mtime > target.stat().st_mtime:
target = secretsdir / filename
try:
contents = _loader.load_from_file(str(filepath))
except Exception as err:
logger.warning("could not decrypt %s: %s", filepath, err)
continue
with open(str(target), "w") as f:
f.write(contents)
os.utime(target, (stinfo.st_atime, stinfo.st_mtime))
logger.verbose("decrypted secret file to %s", target)
def save_secrets(self):
return commit_secrets(self.working_dir, self.yaml)
def commit(self, message, addAll=False):
assert not self.readOnly
if self.yaml:
for saved in self.save_secrets():
self.repo.repo.git.add(str(saved.relative_to(self.repo.working_dir)))
if addAll:
self.add_all()
self.repo.repo.index.commit(message)
return 1
def get_default_commit_message(self):
return "Commit by Unfurl"
def git_status(self):
assert not self.readOnly
return self.repo.run_cmd(["status", self.path or "."])[1]
def _secrets_status(self):
modified = "\n ".join(
[
str(filepath.relative_to(self.repo.working_dir))
for filepath, dotsecrets in find_dirty_secrets(self.working_dir)
]
)
if modified:
return f"\n\nSecrets to be committed:\n {modified}"
return ""
def get_repo_status(self, dirty=False):
if self.repo and (not dirty or self.is_dirty()):
git_status = self.git_status()
if self.name:
header = f"for {self.name} at {self.working_dir}"
else:
header = f"for {self.working_dir}"
secrets_status = self._secrets_status()
return f"Status {header}:\n{git_status}{secrets_status}\n\n"
else:
return ""
def get_initial_revision(self):
if not self.repo:
return ""
return self.repo.get_initial_revision()
def get_current_revision(self):
if not self.repo:
return ""
if self.is_dirty():
return self.repo.revision + "-dirty"
else:
return self.repo.revision
def lock(self):
record = CommentedMap(
[
("url", self.url),
("revision", self.get_current_revision()),
("initial", self.get_initial_revision()),
]
)
if self.name:
record["name"] = self.name
if self.origin:
record["origin"] = self.origin
return record
class GitRepo(Repo):
def __init__(self, gitrepo):
self.repo = gitrepo
self.url = self.working_dir or gitrepo.git_dir
if gitrepo.remotes:
# note: these might not look like absolute urls, e.g. [email protected]:onecommons/unfurl.git
try:
remote = gitrepo.remotes["origin"]
except:
remote = gitrepo.remotes[0]
self.url = remote.url
@property
def working_dir(self):
dir = self.repo.working_tree_dir
if not dir or dir[-1] == "/":
return dir
else:
return dir + "/"
@property
def revision(self):
if not self.repo.head.is_valid():
return ""
return self.repo.head.commit.hexsha
def resolve_rev_spec(self, revision):
try:
return self.repo.commit(revision).hexsha
except:
return None
def get_url_with_path(self, path):
if is_url_or_git_path(self.url):
if os.path.isabs(path):
# get path relative to repository's root
path = os.path.relpath(path, self.working_dir)
return normalize_git_url(self.url) + "#:" + path
else:
return self.get_git_local_url(path)
def find_excluded_dirs(self, root):
root = os.path.relpath(root, self.working_dir)
status, stdout, stderr = self.run_cmd(
[
"ls-files",
"--exclude-standard",
"-o",
"-i",
"--full-name",
"--directory",
root,
]
)
for file in stdout.splitlines():
path = os.path.join(self.working_dir, file)
yield path
def is_path_excluded(self, localPath):
# XXX cache and test
# excluded = list(self.findExcludedDirs(self.working_dir))
        # a zero (success) exit code means the path is ignored
return not self.run_cmd(["check-ignore", "-q", localPath])[0]
def run_cmd(self, args, **kw):
"""
:return:
tuple(int(status), str(stdout), str(stderr))
"""
gitcmd = self.repo.git
call = [gitcmd.GIT_PYTHON_GIT_EXECUTABLE]
# add persistent git options
call.extend(gitcmd._persistent_git_options)
call.extend(list(args))
# note: sets cwd to working_dir
return gitcmd.execute(
call, with_exceptions=False, with_extended_output=True, **kw
)
def add_to_local_git_ignore(self, rule):
with open(os.path.join(self.repo.git_dir, "info", "exclude"), "a") as f:
f.write("\n" + rule + "\n")
def show(self, path, commitId):
if self.working_dir and os.path.isabs(path):
path = os.path.abspath(path)[len(self.working_dir) :]
# XXX this won't work if path is in a submodule
# if in path startswith a submodule: git log -1 -p [commitid] -- [submodule]
# submoduleCommit = re."\+Subproject commit (.+)".group(1)
# return self.repo.submodules[submodule].git.show(submoduleCommit+':'+path[len(submodule)+1:])
return self.repo.git.show(commitId + ":" + path)
def checkout(self, revision=""):
# if revision isn't specified and repo is not pinned:
# save the ref of current head
self.repo.git.checkout(revision)
logger.info(
"checking out '%s' at %s to %s",
self.url,
revision or "HEAD",
self.working_dir,
)
return self.working_dir
def add_sub_module(self, gitDir):
gitDir = os.path.abspath(gitDir)
status, stdout, stderr = self.run_cmd(["submodule", "add", gitDir])
success = not status
if success:
logging.debug("added submodule %s: %s %s", gitDir, stdout, stderr)
else:
logging.error("failed to add submodule %s: %s %s", gitDir, stdout, stderr)
return success
def get_initial_revision(self):
if not self.repo.head.is_valid():
return "" # an uninitialized repo
firstCommit = next(self.repo.iter_commits("HEAD", max_parents=0))
return firstCommit.hexsha
def add_all(self, path="."):
path = os.path.relpath(path, self.working_dir)
self.repo.git.add("--all", path)
def commit_files(self, files, msg):
# note: this will also commit existing changes in the index
index = self.repo.index
index.add([os.path.abspath(f) for f in files])
return index.commit(msg)
def is_dirty(self, untracked_files=False, path=None):
# diff = self.repo.git.diff() # "--abbrev=40", "--full-index", "--raw")
# https://gitpython.readthedocs.io/en/stable/reference.html?highlight=is_dirty#git.repo.base.Repo.is_dirty
return self.repo.is_dirty(untracked_files=untracked_files, path=path or None)
def clone(self, newPath):
# note: repo.clone uses bare path, which breaks submodule path resolution
cloned = git.Repo.clone_from(
self.working_dir, os.path.abspath(newPath), recurse_submodules=True
)
Repo.ignore_dir(newPath)
return GitRepo(cloned)
def get_git_local_url(self, path, name=""):
if os.path.isabs(path):
# get path relative to repository's root
path = os.path.relpath(path, self.working_dir)
return f"git-local://{self.get_initial_revision()}:{name}/{path}"
# XXX: def getDependentRepos()
# XXX: def canManage()
# def canMakeClean(self):
# for repo in self.getDependentRepos():
# if not repo.canMakeClean():
# return False
# elif repo.isDirty() and not self.canManage(repo):
# return False
# return True
#
# def _commitAll(self, parent=None):
# committed = []
# for repo in self.getDependentRepos():
# if repo.isDirty():
# assert self.canManage(repo)
# repo._commitAll(self)
# committed.append(repo)
# self.updateChildCommits(committed)
# self._commit()
#
# def getDirtyDependents(self):
# for repo in self.getDependentRepos():
# if repo.isDirty():
# yield repo
# XXX unused.. currently yamlmanifest.commitJob() calls commitFiles()
# def commit(self):
# # before run referenced dirty repos should be committed?
# # at the very least the state of any declared repo should be saved
# # otherwise two different runs of the same commit could pull different versions
# # this is true for the spec repos also -- save in spec's manifest-template?
# repo = self.repo
# repo.index.add("*")
# # commit the manifest first so we can get a commit ref for the changerecord
# commit = repo.git.commit("")
# changeFiles = self.manifest.saveChanges(commit.hexsha)
# repo.index.add(changeFiles)
# repo.git.commit("")
class RevisionManager:
def __init__(self, manifest, localEnv=None):
self.manifest = manifest
self.revisions = None
self.localEnv = localEnv
def get_revision(self, change):
if self.revisions is None:
self.revisions = {self.manifest.specDigest: self.manifest}
digest = change["specDigest"]
commitid = change["startCommit"]
if digest in self.revisions:
return self.revisions[digest]
else:
from .manifest import SnapShotManifest
manifest = SnapShotManifest(self.manifest, commitid)
self.revisions[digest] = manifest
return manifest
``` |
{
"source": "JohnGBaker/GLEAM",
"score": 2
} |
#### File: GLEAM/script/tests.py
```python
import sys
import os
import numpy as np
import math
import subprocess
import shlex
import argparse
import time
#functions:
#read test info from file and do runs
def make_runs(fname,commandbase,tag,postprocess_only):
filenames=[];
lines=[];
with open(fname) as f:
for line in f:
if(not line.startswith("#")):
#print "line is: '"+line+"'"
lines.append(line)
cols=line.split()
if(len(cols)<4):
continue
center=int(cols[0])
width=float(cols[1])
qval=float(cols[2])
Lval=float(cols[3])
outname=tag+"_"+str(center)+"_"+str(width)+"_"+str(qval)+"_"+str(Lval)
if(not postprocess_only):
command=commandbase+" -mm_center="+str(center)+" -mm_log_q="+str(qval)+" -mm_log_L="+str(Lval)+" -mm_width="+str(width)+" "+str(outname)
print "\n**********************************************"
print command
sys.stdout.flush()
subprocess.call(command.split())
filenames.append(outname)
return filenames,lines
# make_runs
#compare the relevant runs numerically and collect results
def compare_runs(filesA,filesB,lines):
suf="_mmap.dat"
good=True
absrel=[]
for i in range(len(filesA)):
result=""
dataA=np.loadtxt(filesA[i]+suf).flatten();
dataB=np.loadtxt(filesB[i]+suf).flatten();
deltas=np.array([[abs(a-b),2.0*abs(a-b)/(abs(a)+abs(b))] for a,b in zip(dataA,dataB)])
#print deltas[:,0]
#print deltas[:,1]
absreli=[max(deltas[:,0]),max(deltas[:,1])]
print "Compared files "+filesA[i]+suf+" and "+filesB[i]+suf+" : "+str(absreli)
absrel.append(absreli);
return absrel
# compare_runs
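#Each entry of the returned list holds [max absolute difference, max symmetric relative
#difference 2|a-b|/(|a|+|b|)] over all values in the _mmap.dat output for one test case;
#these are the numbers later checked against the requested precision in the summaries.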
##########
# setup stuff
# determine basename from first argument.
scriptpath=os.path.dirname(os.path.realpath(__file__))
parser = argparse.ArgumentParser(description='Run tests on gleam lens inversion.')
parser.add_argument('testlist',
help='filename for the test list')
parser.add_argument('prec', type=float,
help='log precision level of test')
parser.add_argument('--tag', default="",
help='an extra tag for the output')
parser.add_argument('--post', action='store_true',
help='run only post-processing')
parser.add_argument('--exec', default=scriptpath+"/../gleam",dest='executable',
help='path to the executable (default to expected location relative script)')
parser.add_argument('--altex', default="",dest='altex',
help='path to an alternate executable relative to standard executable dir (default=none)')
parser.add_argument('--exec-arg', default="",dest='exarg',
help='additional arguments for the gleam executable')
parser.add_argument('--no-quad', action='store_false',dest='do_quad',
help='don\'t do quad tests')
args = parser.parse_args()
#if(len(sys.argv)<3 or len(sys.argv)>4):
# print "Usage:\n"+sys.argv[0]+" testlistfile prec_digits [execname]"
# exit()
#testfname = sys.argv[1]
testfname=args.testlist
basename=testfname.replace(".dat","")
outdir =basename+"-"+args.tag+"-tests/"
reportfile =basename+"-"+args.tag+"-report.dat"
print "basename=",testfname
print "outdir=",outdir
#prec=10**-float(sys.argv[2])
prec=10**-float(args.prec)
postprocess_only=args.post
#get execname
#if(len(sys.argv)>3):
# execname=sys.argv[3]
#else:
# execname="../src/gleam"
execname=args.executable
print "execname="+execname
#create/empty output directory
if(execname.count("echo")<1 and not postprocess_only):
command= "mkdir "+outdir
print command
subprocess.call(command.split())
#os.chdir(outdir)
for fileName in os.listdir(outdir):
os.remove(outdir+fileName)
#process different types of runs
command= execname+" "+args.exarg+" -magmap -poly=true -precision=16 -GLB_rWide=5 "
tag="poly_r5.0"
start = time.time()
poly5files,lines=make_runs(testfname,command,outdir+tag,postprocess_only)
end=time.time()
timeP5=end-start;
command= execname+" "+args.exarg+" -magmap -poly=true -precision=16 -GLB_rWide=4.5 "
tag="poly_r4.5"
start = time.time()
poly4files,lines=make_runs(testfname,command,outdir+tag,postprocess_only)
timeP4=time.time()-start;
command= execname+" "+args.exarg+" -magmap -poly=true -precision=16 -GLB_rWide=7.5 "
tag="poly_r7.5"
start = time.time()
poly7files,lines=make_runs(testfname,command,outdir+tag,postprocess_only)
timeP7=time.time()-start;
if(args.do_quad):
command= execname+"_quad "+args.exarg+" -magmap -poly=true -precision=16 -GLB_rWide=5 "
tag="qpoly_r5.0"
start = time.time()
qpoly5files,lines=make_runs(testfname,command,outdir+tag,postprocess_only)
timeQP5=time.time()-start;
if(not args.altex==""):
command= scriptpath+"/../"+args.altex+" "+args.exarg+" -magmap -poly=true -precision=16 -GLB_rWide=5 "
tag="altex_r5.0"
start = time.time()
altex5files,lines=make_runs(testfname,command,outdir+tag,postprocess_only)
timeAX=time.time()-start;
#command= execname+"_quad "+args.exarg+" -magmap -poly=true -precision=16 -GLB_rWide=4.5 "
#tag="qpoly_r4.5"
#start = time.time()
#qpoly4files,lines=make_runs(testfname,command,outdir+tag,postprocess_only)
#timeQP4=time.time()-start;
command= execname+" "+args.exarg+" -magmap -precision=16 -GLB_rWide=5 "
tag="int_r5.0"
start = time.time()
int5files,lines=make_runs(testfname,command,outdir+tag,postprocess_only)
timeI5=time.time()-start;
#compare
good=goodr=True
with open(reportfile,'w') as report:
print" Comparing WideBinary versus WittMao (4.5/5)"
report.write("\n# Comparing WideBinary versus WittMao.\n")
resultWBWM=compare_runs(poly5files,poly4files,lines)
for l,r in zip(lines,resultWBWM):
ls=l.split()
report.write(ls[0]+" "+ls[1]+" "+ls[2]+" "+ls[3]+" "+str(r[0])+" "+str(r[1])+"\n")
print" Comparing WideBinary versus WittMao 7.5/5)"
report.write("\n# Comparing WideBinary versus WittMao.\n")
resultWBWM=compare_runs(poly5files,poly7files,lines)
for l,r in zip(lines,resultWBWM):
ls=l.split()
report.write(ls[0]+" "+ls[1]+" "+ls[2]+" "+ls[3]+" "+str(r[0])+" "+str(r[1])+"\n")
print"\n Comparing Polynomial versus Integration"
report.write("\n# Comparing Polynomial versus Integration")
resultPI=compare_runs(poly5files,int5files,lines)
for l,r in zip(lines,resultPI):
ls=l.split()
report.write(ls[0]+" "+ls[1]+" "+ls[2]+" "+ls[3]+" "+str(r[0])+" "+str(r[1])+"\n")
if(args.do_quad):
print"\n Comparing quad versus double precision."
report.write("\n# Comparing quad versus double precision.")
resultQD=compare_runs(poly5files,qpoly5files,lines)
for l,r in zip(lines,resultQD):
ls=l.split()
report.write(ls[0]+" "+ls[1]+" "+ls[2]+" "+ls[3]+" "+str(r[0])+" "+str(r[1])+"\n")
print"\n Comparing Quad-precision Polynomial versus Integration"
report.write("\n# Comparing Quad-precision Polynomial versus Integration")
resultQPI = compare_runs(qpoly5files,int5files,lines)
for l,r in zip(lines,resultQPI):
ls=l.split()
report.write(ls[0]+" "+ls[1]+" "+ls[2]+" "+ls[3]+" "+str(r[0])+" "+str(r[1])+"\n")
if(not args.altex==""):
print"\n Comparing versus alternate executable "+args.altex+"."
report.write("\n# Comparing versus alternate executable "+args.altex+".")
resultAX=compare_runs(poly5files,altex5files,lines)
for l,r in zip(lines,resultAX):
ls=l.split()
report.write(ls[0]+" "+ls[1]+" "+ls[2]+" "+ls[3]+" "+str(r[0])+" "+str(r[1])+"\n")
if( not postprocess_only):
print "\nTiming summary:"
print " Poly 7.5 - ",timeP7
print " Poly 5.0 - ",timeP5
print " Poly 4.5 - ",timeP4
if(args.do_quad):
print " Quad Poly 5.0 - ",timeQP5
# print " Quad Poly 4.5 - ",timeQP4
print " Int 5.0 - ",timeI5
if(not args.altex==""):
print " AltEx Poly 5.0 - ",timeAX
print "\nTest summary at relative prec="+str(prec)+":"
print " WB vs WM (7.5/5): "+("OK "+str(max(np.array(resultWBWM)[:,1])) if max(np.array(resultWBWM)[:,1])<=prec else "FAIL")
for l,r in zip(lines,resultWBWM):
ls=l.split()
if(r[1]>prec):
print ls[0]+" "+ls[1]+" "+ls[2]+" "+ls[3]+" "+str(r[1])
print " WB vs WM (4.5/5): "+("OK "+str(max(np.array(resultWBWM)[:,1])) if max(np.array(resultWBWM)[:,1])<=prec else "FAIL")
for l,r in zip(lines,resultWBWM):
ls=l.split()
if(r[1]>prec):
print ls[0]+" "+ls[1]+" "+ls[2]+" "+ls[3]+" "+str(r[1])
print " poly vs int: "+("OK "+str(max(np.array(resultPI)[:,1])) if max(np.array(resultPI)[:,1])<=prec else "FAIL")
for l,r in zip(lines,resultPI):
ls=l.split()
if(r[1]>prec):
print ls[0]+" "+ls[1]+" "+ls[2]+" "+ls[3]+" "+str(r[1])
if(args.do_quad):
print " quad vs double: "+("OK "+str(max(np.array(resultQD)[:,1])) if max(np.array(resultQD)[:,1])<=prec else "FAIL")
for l,r in zip(lines,resultQD):
ls=l.split()
if(r[1]>prec):
print ls[0]+" "+ls[1]+" "+ls[2]+" "+ls[3]+" "+str(r[1])
print " quad poly vs int: "+("OK "+str(max(np.array(resultQPI)[:,1])) if max(np.array(resultQPI)[:,1])<=prec else "FAIL")
for l,r in zip(lines,resultQPI):
ls=l.split()
if(r[1]>prec):
print ls[0]+" "+ls[1]+" "+ls[2]+" "+ls[3]+" "+str(r[1])
if(not args.altex==""):
print " gleam vs "+args.altex+": "+("OK "+str(max(np.array(resultAX)[:,1])) if max(np.array(resultAX)[:,1])<=prec else "FAIL")
for l,r in zip(lines,resultAX):
ls=l.split()
if(r[1]>prec):
print ls[0]+" "+ls[1]+" "+ls[2]+" "+ls[3]+" "+str(r[1])
print "\nTest summary at absolute prec="+str(prec)+":"
print " WB vs WM: "+("OK "+str(max(np.array(resultWBWM)[:,0])) if max(np.array(resultWBWM)[:,0])<=prec else "FAIL")
for l,r in zip(lines,resultWBWM):
ls=l.split()
if(r[0]>prec):
print ls[0]+" "+ls[1]+" "+ls[2]+" "+ls[3]+" "+str(r[0])
print " poly vs int: "+("OK "+str(max(np.array(resultPI)[:,0])) if max(np.array(resultPI)[:,0])<=prec else "FAIL")
for l,r in zip(lines,resultPI):
ls=l.split()
if(r[0]>prec):
print ls[0]+" "+ls[1]+" "+ls[2]+" "+ls[3]+" "+str(r[0])
if(args.do_quad):
print " quad vs double: "+("OK "+str(max(np.array(resultQD)[:,0])) if max(np.array(resultQD)[:,0])<=prec else "FAIL")
for l,r in zip(lines,resultQD):
ls=l.split()
if(r[0]>prec):
print ls[0]+" "+ls[1]+" "+ls[2]+" "+ls[3]+" "+str(r[0])
print " quad poly vs int: "+("OK "+str(max(np.array(resultQPI)[:,0])) if max(np.array(resultQPI)[:,0])<=prec else "FAIL")
for l,r in zip(lines,resultQPI):
ls=l.split()
if(r[0]>prec):
print ls[0]+" "+ls[1]+" "+ls[2]+" "+ls[3]+" "+str(r[0])
if(not args.altex==""):
print " gleam vs "+args.altex+": "+("OK "+str(max(np.array(resultAX)[:,0])) if max(np.array(resultAX)[:,0])<=prec else "FAIL")
for l,r in zip(lines,resultAX):
ls=l.split()
if(r[0]>prec):
print ls[0]+" "+ls[1]+" "+ls[2]+" "+ls[3]+" "+str(r[0])
``` |
{
"source": "JohnGBaker/ptmcmc",
"score": 2
} |
#### File: ptmcmc/cython/exampleGaussian.py
```python
import numpy as np
import ptmcmc
import random
import pyximport; pyximport.install()
import math
from scipy.stats import wishart
import sys
MTSUN_SI=4.9254923218988636432342917247829673e-6
PI=3.1415926535897932384626433832795029
I = complex(0.0, 1.0)
class gaussian_likelihood(ptmcmc.likelihood):
def __init__(self,opt):
#Note: We define a multivariate Gaussian likelihood with the covariance provided
#The prior is uniform over a hyper-rectangular larger than the Gaussian core by priorscale*sigma
opt.add("priorscale","Factor by which the prior is larger than the Gaussian 1-sigma scale. (Default=100)","100")
opt.add("fisher_cov_rescale","Factor by which 'fisher' proposal is rescaled from nominal value (Default=1,theoretical optimum for Gaussian target dist.)","1")
opt.add("fisher_basescale_fac","Factor by prior widths are rescaled for addition to fisher_proposal_precision matrix. (Default=0, nothing added)","0")
opt.add("fisher_update_len","Mean number of steps before drawing an update of the Fisher-matrix based proposal. Default 0 (Never update)","0");
self.opt=opt
def setup(self,cov,reporting=True):
cov=np.array(cov)
self.cov=cov
npar=cov.shape[0]
self.npar=npar
lndetcov=np.linalg.slogdet(self.cov)[1]
self.like0=-0.5*(self.npar*np.log(2*np.pi)+lndetcov)
if reporting:
print("Seting up likelihood with ln(max)=",self.like0)
sig=np.sqrt(np.diag(self.cov))
print("Sigmas:",sig)
print("Corr:\n"+"\n".join( ('{:6.2f}'*npar).format(*[self.cov[i,j]/sig[i]/sig[j] for j in range(npar)]) for i in range(npar)),'\n')
self.invcov=np.linalg.inv(self.cov)
self.reporting=reporting
        #Set up stateSpace with trivial boundaries
space=ptmcmc.stateSpace(dim=npar);
names=["x"+str(i) for i in range(npar)]
self.names=names
space.set_names(names);
#Set up prior
priorscale=100
centers= [0]*npar
scales= [np.sqrt(self.cov[i,i])*priorscale for i in range(npar)]
types= [ "uni" ]*npar
self.basic_setup(space, types, centers, scales);
#Set up "Fisher" proposal stuff
propspace=ptmcmc.stateSpace(dim=npar)
propspace.set_names(self.names)
#See Optimal Proposal Distributions and Adaptive MCMC,<NAME>* [Chapter for MCMC Handbook]
# ... based on <NAME>., et al, "WEAK CONVERGENCE AND OPTIMAL SCALING OF RANDOM WALK METROPOLIS ALGORITHMS" Ann Appl Prob,Vol. 7, No. 1, 110-120 (1997)
#Expect optimal convergence for gaussian with large ndim with fisher_cov_rescale=1.
self.fisher_update_len=int(self.opt.value("fisher_update_len"))
self.fisher_cov_rescale=float(self.opt.value("fisher_cov_rescale"))
self.fisher_basescale_factor=float(self.opt.value("fisher_basescale_fac"))
self.fish_cov_fac=2.38**2/npar*self.fisher_cov_rescale
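        #The 2.38**2/npar factor is the asymptotically optimal random-walk Metropolis
        #scaling for an npar-dimensional Gaussian target (per the optimal-scaling paper
        #cited above); fisher_cov_rescale lets the user adjust it.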
self.basescale_invcov=0
if self.fisher_basescale_factor>0: #We simulate the effect of the prior, pretending it is Gaussian.
basescales=self.fisher_basescale_factor*np.array(scales)
self.basescale_invcov=np.diag(basescales**-2)
fish_cov=np.linalg.inv(self.invcov+self.basescale_invcov)*self.fish_cov_fac
else:
fish_cov=self.cov*self.fish_cov_fac
if self.fisher_update_len>0:
default_data={}
proposal=ptmcmc.gaussian_prop(self,fisher_check_update,propspace,fish_cov, 2, "Evolving Fisher-like proposal",default_instance_data=default_data)
else:
proposal=ptmcmc.gaussian_prop(self,frozen_fisher_check_update,propspace,fish_cov, 0, "Frozen Fisher-like proposal")
self.addProposal(proposal)
def evaluate_log(self,s):
params=s.get_params()
params=np.array(params)
llike=self.like0-0.5*np.dot(params,np.dot(self.invcov,params))
return llike
def writeCovar(self,filename,pars=None):
cov=self.cov
names=self.names
n=cov.shape[0]
with open(filename,'w') as f:
if names is not None:
f.write('#')
for name in names[:n]: f.write(name+' ')
f.write('\n')
if pars is not None:
f.write('#State ')
for par in pars[:n]: f.write(str(par)+' ')
f.write('\n')
f.write("#Covariance\n")
for i in range(n):
for j in range(n):
f.write(str(cov[i,j])+" ")
f.write('\n')
f.write("#Sigmas\n")
sigmas=[np.sqrt(cov[i,i]) for i in range(n)]
for i in range(n):
f.write(str(sigmas[i])+" ")
f.write('\n')
f.write("#Correlation\n")
for i in range(n):
for j in range(n):
f.write(str(cov[i,j]/sigmas[i]/sigmas[j])+" ")
f.write('\n')
#This will be the callback for a gaussian_prop, so it must be declared outside the class
def fisher_check_update(likelihood, instance, s, invtemp, randoms, covarray):
if likelihood.fisher_update_len<=0: return False #Frozen
if randoms[0]*likelihood.fisher_update_len>1:return False #No update this time
cov=np.linalg.inv( likelihood.invcov * invtemp + likelihood.basescale_invcov ) * likelihood.fish_cov_fac
np.copyto(covarray,cov)
verbose=(likelihood.reporting and randoms[1]<0.1) or randoms[1]<0.01
if verbose:
print("Fisher Covariance: temp =",1/invtemp)
#print(cov)
sigs=np.sqrt(np.diag(cov))
print("New Fisher, sigmas:",sigs)
n=len(sigs)
print("Corr:\n"+"\n".join( ('{:6.2f}'*n).format(*[cov[i,j]/sigs[i]/sigs[j] for j in range(n)]) for i in range(n)),'\n')
return True
def frozen_fisher_check_update(likelihood, s, invtemp, randoms, covarray):return False
#//***************************************************************************************8
#//main test program
def main(argv):
ptmcmc.Init()
#//prep command-line options
#Options opt(true);
opt=ptmcmc.Options()
#//Add some command more line options
opt.add("seed","Pseudo random number grenerator seed in [0,1). (Default=-1, use clock to seed.)","-1")
opt.add("outname","Base name for output files (Default 'mcmc_output').","mcmc_output")
opt.add("p","Parameter dimension for the test.(Default 3)","3")
#//Create the sampler and likelihood
s0=ptmcmc.sampler(opt)
like=gaussian_likelihood(opt)
print('calling opt.parse')
opt.parse(argv)
print("flags=\n"+opt.report())
#setup
p=int(opt.value("p"))
nu=5+p
cov=wishart.rvs(nu,np.diag([1]*p))
like.setup(cov,s0.reporting());
seed=float(opt.value('seed'))
if seed<0:seed=random.random();
outname=opt.value('outname')
#//report
#cout.precision(output_precision);
print("\noutname = '"+outname+"'")
#cout<<"seed="<<seed<<endl;
#cout<<"Running on "<<omp_get_max_threads()<<" thread"<<(omp_get_max_threads()>1?"s":"")<<"."<<endl;
#//Should probably move this to ptmcmc/bayesian
ptmcmc.resetRNGseed(seed);
#globalRNG.reset(ProbabilityDist::getPRNG());//just for safety to keep us from deleting main RNG in debugging.
#//Get the space/prior for use here
#stateSpace space;
#shared_ptr<const sampleable_probability_function> prior;
space=like.getObjectStateSpace();
print("like.nativeSpace=\n"+space.show())
like.writeCovar(outname+"_covar.dat")
#//Read Params
Npar=space.size();
print("Npar=",Npar)
#//Bayesian sampling
s0.setup(like)
s=s0.clone();
s.initialize();
print('initialization done')
s.run(outname,0);
if __name__ == "__main__":
import sys
argv=sys.argv[:]
del argv[0]
main(argv)
```
#### File: ptmcmc/python/ptmcmc_analysis.py
```python
import sys
import os
import numpy as np
import math
import subprocess
import argparse
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import matplotlib.cm as cm
import ess as esspy
#import matplotlib
#matplotlib.use('TkAgg')
#from matplotlib.backends.backend_pdf import PdfPages
from matplotlib.widgets import Slider, Button, RadioButtons
filestyle=0
useLikeDefault=False
noPostDefault=False
class chainData:
def __init__(self,fname,noPost=None,useLike=None):
if noPost is not None:
self.noPost=noPost #set true if data does not include Post and Like in first two cols
else:self.noPost=noPostDefault
if useLike is not None:
self.useLike=useLike #set true to retain like if present
else:self.useLike=useLikeDefault
self.have_ess=False
print('noPost=',self.noPost)
self.basename,self.chainfilepath,self.filestyle=self.get_basename(fname)
self.read_chain(self.chainfilepath)
print("Chain:", self.N,"steps, every",self.dSdN)
def get_basename(self,fname):
filestyle=0 #ptmcmc style
if(fname.endswith(".out")):
#we assume ".out does not otherwise appear in the name... that
#could check... if(fname.count(".out")>1):...
basename=fname.replace(".out","")
fname=fname.replace(".out","_t0.dat")
elif(fname.endswith("_t0.dat")):
basename=fname.replace("_t0.dat","")
elif(fname.endswith(".dat")):
basename=fname.replace(".dat","")
filestyle=1 #general style (post,like, pars,...) by default
print('general style detected')
print ("basename="+basename)
return basename,fname,filestyle
def read_chain(self, chainfilepath):
print("Reading data from :",chainfilepath)
i0=0
if self.filestyle==0: #ptmcmc style
data=np.loadtxt(chainfilepath,converters={4: lambda s:-1})
if self.noPost: print("Warning noPost incompatible with ptmcmc chainstyle!")
while(i0<len(data) and data[i0,0]<0):i0+=1
data=data[i0:]
self.dSdN=data[4,0]-data[3,0]
parnames=["post",]+self.get_par_names(chainfilepath)
data=np.delete(data,[3,4,len(data[0])-1],1)
parnames=["samp","post",]
if self.useLike:
parnames+=['like']
self.ipar0=3
else:
self.ipar0=2
data=np.delete(data,[2],1)
parnames+=self.get_par_names(chainfilepath)
elif self.filestyle==1:
data=np.loadtxt(chainfilepath)
self.dSdN=1
N=len(data)
data=np.hstack((np.reshape(range(N),(N,1)),data))
if self.noPost: #pars only
parnames=["samp"]+self.get_par_names(chainfilepath,startcol=0)
self.ipar0=1
else: #post, like, pars
havelike=False
i0=0
varparnames=self.get_par_names(chainfilepath,startcol=0)
if 'samp' in varparnames:
isamp=varparnames.index('samp')
if isamp>i0-1:i0=isamp+1
data=np.delete(data,[isamp],1)
del varparnames[isamp]
i0-=1
if 'like' in varparnames:
ilike=varparnames.index('like')
if ilike>i0-1:i0=ilike+1
if not self.useLike:
data=np.delete(data,[ilike],1)
del varparnames[ilike]
i0-=1
if 'post' in varparnames:
ipost=varparnames.index('post')
if ipost>i0-1:i0=ipost+1
parnames=['samp']+varparnames
self.ipar0=i0+1
self.npar=len(parnames)-self.ipar0
self.names=parnames
self.N=len(data)
print ("Data have ",self.N," rows representing ",self.N*self.dSdN," steps.")
#if "post" in parnames:
# self.maxPost=max(data[:,parnames.index("post")])
self.data=data
print ("data[1]=",data[1])
print(self.npar,"params:",parnames)
def get_par_names(self,fname,startcol=5):
with open(fname) as f:
names = read_par_names(f,startcol)
for i in range(len(names)):
if names[i].startswith('#'): names[i]=names[i][1:]
return names
def getSteps(self):
return self.N*self.dSdN
def getState(self,idx): #fixme for non-filetype==0
i=int(idx/self.dSdN)
return self.data[i,self.ipar0:]
def get_samples(self,nsamp,good_length,nmax=None,return_rows=False):
ngood=int(good_length/self.dSdN)
if(nsamp>ngood):nsamp=ngood
if nmax is None:
nmax=len(self.data)
else:
nmax=int(nmax/self.dSdN)
n0=int(nmax-ngood)
rows=n0+np.random.choice(int(ngood),nsamp)
i0=self.names.index('post')+1
if self.useLike: i0+=1
if return_rows:
return rows,self.data[rows,i0:]
else:
return self.data[rows,i0:]
def readCovar(self,filename=None):
if filename is None:
filename=self.basename+"_covar.dat"
pars=[]
done=False
with open(filename,'r') as f:
names=read_par_names(f)
idxs=[]
try:
for name in names:
tryname=name
                    idxs.append(self.names.index(name))
except:
if names[0]!='p0': print("Failed to find '"+tryname+"' among chain's parnames.")
print("Assuming params align with chain params")
idxs=range(len(names))
idxs=[self.ipar0+idx for idx in idxs]
line=f.readline()
while(not "#Covariance" in line): line=f.readline() #Skip until the good stuff
covar=np.zeros((self.npar,self.npar))
for iidx in idxs:
line=f.readline()
print(i,":",line)
elems=np.array(line.split())
                for j,val in enumerate(elems):
                    covar[iidx,idxs[j]]=float(val)
return covar
def estimate_ess(self,esslimit=10000):
if not self.have_ess:
ess,length=esspy.report_param_effective_samples(self,esslimit=esslimit)
self.ess=ess
self.acl=(1.0*length)/ess
print("ess,acl:",self.ess,self.acl)
self.have_ess=True
return self.ess,self.acl
def KLdivergence(self,other,upsample=1,esslimit=10000):
ess,acl=self.estimate_ess(esslimit=esslimit)
nP=int(ess*upsample)
#print("nP:",ess*upsample,nP,ess,acl)
length=int(ess*acl)
samplesP=self.get_samples(nP,length)
ess,acl=other.estimate_ess(esslimit=esslimit)
nQ=int(ess*upsample)
length=int(ess*acl)
samplesQ=other.get_samples(nQ,length)
#print("nP,nQ:",nP,nQ)
return KL_divergence(samplesP,samplesQ)
def fakeKLdivergence(self,other,upsample=1,esslimit=10000):
ess,acl=self.estimate_ess(esslimit=esslimit)
nP=int(ess*upsample)
#print("nP:",ess*upsample,nP,ess,acl)
length=int(ess*acl)
samplesP=self.get_samples(nP,length)
ess,acl=other.estimate_ess(esslimit=esslimit)
nQ=int(ess*upsample)
length=int(ess*acl)
samplesQ=other.get_samples(nQ,length)
#print("nP,nQ:",nP,nQ)
i0=self.names.index('post')+1
#print('pars:',self.names[i0:])
return fake_KL_divergence(samplesP,samplesQ)
#####################################
#general functions
def read_par_names(f,startcol=0):
pos=f.tell()
line1=f.readline()
if(line1[0]=='#'):
pos=f.tell()
line2=f.readline()
if(line2[0]=='#'):
names=line2.split()
names=names[startcol:]
return names
else:
f.seek(pos)
names=line1.split()
names=names[startcol:]
return names
else:
f.seek(pos)
names=['p'+str(i) for i in range(len(line1.split()))]
return names
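#Note on the header convention read_par_names (above) assumes: if the file opens with two comment
#lines, parameter names are taken from the second one; with a single comment line they come from
#that line; with no comment header, generic names p0,p1,... are generated. For example:
#  #chain metadata
#  #samp post like p0 p1
#  0 -12.3 -10.1 0.5 1.2
#(get_par_names additionally strips a leading '#' from the first name.)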
def KL_divergence(samplesP, samplesQ):
    '''
    Kullback-Leibler divergence estimated directly from samples; interesting to apply
    to both target samples and transformed samples.
    Implements eqn 2 of Perez-Cruz 2008 "Kullback-Leibler Divergence Estimation of Continuous Distributions",
    see also https://www.princeton.edu/~kulkarni/Papers/Journals/j068_2009_WangKulVer_TransIT.pdf
    with k=1. We infer that the sign is wrong on the sum term there.
    '''
    result=0
    N=len(samplesP)
    r1sqs=all_nnd2(samplesP) #squared 1-NN distances within P
    #print('r1sqs',r1sqs)
    s1sqs=np.zeros(N) #squared 1-NN distances from each P point to the Q set
    for i in range(N):s1sqs[i]=one_nnd2(samplesP[i],samplesQ)
    #print('s1sqs',s1sqs)
    #Here we put a floor on the smallest value of all NN distances
    #based on the kfloorth smallest distance within the P set
    kfloor=5
    if(kfloor>0):
        sdists=np.sort(r1sqs)
        floor=sdists[kfloor-1] #the kfloor-th smallest, per the comment above
        r1sqs[r1sqs<floor]=floor
        s1sqs[s1sqs<floor]=floor
    result+=-sum(np.log(r1sqs/s1sqs))
    dim=len(samplesP[0])
    M=len(samplesQ)
    result *= (0.5*dim)/N #factor of 1/2 because we use nearest-neighbor dist^2
    result += np.log(M/(N-1.0))
    return result
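#For reference, with k=1 nearest neighbors the estimator above amounts to
#  D(P||Q) ~ (d/(2N)) * sum_i log( s_i^2 / r_i^2 ) + log( M/(N-1) )
#where r_i^2 (s_i^2) is the squared distance from the i-th P sample to its nearest neighbor
#in P (in Q), d is the dimension, N=len(samplesP) and M=len(samplesQ).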
def all_nnd2(samples):
#Computation of all nearest neighbor distances, brute force.
N=len(samples)
nni=[-1]*N
nnd2=np.zeros(N)-1
for i in range(N):
for j in range(i+1,N):
#print("i,j=",i,j)
diff=samples[i]-samples[j]
dist2=np.dot(diff.T,diff)
if (nnd2[i]<0 or nnd2[i]>dist2):
nni[i]=j
nnd2[i]=dist2;
            if(nnd2[j]<0 or nnd2[j]>dist2):
nni[j]=i
nnd2[j]=dist2;
return nnd2
def one_nnd2(x,samples):
#Computation of nearest neighbor distance, brute force.
N=len(samples)
i0=-1
for i in range(N):
diff=samples[i]-x
dist2=np.dot(diff.T,diff)
if(i0<0 or nnd2>dist2):
i0=i
nnd2=dist2;
return nnd2
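#Optional sketch (not used by this script): the same squared 1-NN distances can be obtained with
#a KD-tree, avoiding the O(N^2) Python loops above; this assumes scipy is available.
#  from scipy.spatial import cKDTree
#  def all_nnd2_kdtree(samples):
#      d,_=cKDTree(samples).query(samples,k=2) #d[:,0] is the zero self-distance
#      return d[:,1]**2
#  def one_nnd2_kdtree(x,samples):
#      d,_=cKDTree(samples).query(x,k=1)
#      return d**2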
def KLdivergence_Wang09(samplesP, samplesQ, k=1):
"""
KL-Divergence estimator based on Wang09:
https://www.princeton.edu/~kulkarni/Papers/Journals/j068_2009_WangKulVer_TransIT.pdf
Using brute-force kNN
k: Number of neighbours considered (default 1)
"""
    n, m = len(samplesP), len(samplesQ)
    KLdiv = np.log(m / (n - 1.0))
    d = float(samplesP.shape[1])
    for p1 in samplesP:
        nu = np.sort(np.linalg.norm(samplesQ - p1, axis=1))[k - 1]
        rho = np.sort(np.linalg.norm(samplesP - p1, axis=1))[k] #sorted so the zero self-distance is skipped
        KLdiv += (d / n) * np.log(nu / rho)
    return KLdiv
def knn_distance(point, sample, k):
""" Euclidean distance from `point` to it's `k`-Nearest
Neighbour in `sample` """
norms = np.linalg.norm(sample-point, axis=1)
return np.sort(norms)[k]
def get_sample_cov(samples):
N=len(samples)
dim=len(samples[0])
cov=np.zeros((dim,dim));
mean=samples.mean(axis=0)
out_mean=np.copy(mean)
ssum=np.zeros(dim)
for s in samples:
for j in range(dim):
jdiff=s[j]-mean[j]
cov[j,j]+=jdiff*jdiff
for i in range(j+1,dim):
idiff=s[i]-mean[i]
val=idiff*jdiff
cov[i,j]+=val
cov[j,i]+=val
cov/=(N-1.0)
return cov,out_mean
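#Note: for ndarray input this is equivalent (up to floating point) to
#  cov=np.cov(samples,rowvar=False); mean=samples.mean(axis=0)
#since np.cov also normalizes by N-1; the explicit loop above spells the calculation out.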
def fake_KL_divergence(samplesP, samplesQ, verbose=False):
    #This applies a simplified alternative to the KL divergence (which is difficult to compute accurately from samples).
    #The calculation is based on the means and covariances of the two samples and would agree with the KL divergence
    #for large samples from Gaussian distributions.
    #The KL-divergence between two Gaussians is
    # 2 KLdiv(P,Q) = Tr[ cov(P) cov(Q)^-1 ] - dim - log| cov(P) cov(Q)^-1 | + (mu(P)-mu(Q))^T cov(Q)^-1 (mu(P)-mu(Q))
covP,meanP=get_sample_cov(samplesP)
covQ,meanQ=get_sample_cov(samplesQ)
dim=len(covP)
if verbose:
print("meanP:",[x for x in meanP])
print("meanQ:",[x for x in meanQ])
print("sigmaP:",[covP[i,i] for i in range(dim)])
print("sigmaQ:",[covQ[i,i] for i in range(dim)])
nQ=len(samplesQ)
nP=len(samplesP)
unbiasing_factor=(nQ-dim-2.0)/(nQ-1.0) #The final factor is to make unbiased for finite nQ, assuming nQ=nP
#unbiasing_factor=1
invCovQ=np.linalg.pinv(covQ)*unbiasing_factor
covPinvCovQ=np.matmul(covP,invCovQ)
dmu=meanP-meanQ
result=0
    result += -dim + covPinvCovQ.trace()
    s,val = np.linalg.slogdet(covPinvCovQ/unbiasing_factor)
    result += -val
    result += np.dot(np.dot(dmu,invCovQ),dmu)
    result -= (dim + covPinvCovQ.trace())/nP
    #result += (0.5*dim*(dim+1)+1)*(1.0/nP-1.0/nQ) - covPinvCovQ.trace()/nP;
    return 0.5*result
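#Quick sanity check (hypothetical, not executed by the script): for two unit-covariance 2-D
#Gaussians whose means differ by 1 in one coordinate the exact KL divergence is 0.5, so e.g.
#  P=np.random.multivariate_normal([0,0],np.eye(2),5000)
#  Q=np.random.multivariate_normal([1,0],np.eye(2),5000)
#  print(fake_KL_divergence(P,Q),KL_divergence(P,Q)) #both should come out near 0.5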
#Read in a set of chain files
def read_all_chains(names):
global allparnames,Nmax,Smax
datanames=[]
Nmax=0
Smax=0
allparnames=[]
allchains=[]
for fname in names:
chain=chainData(fname)
#print("chain:",chain)
allchains.append(chain)
#print("allchains-->",allchains)
if(chain.N>Nmax):Nmax=chain.N
S=chain.getSteps()
if(S>Smax):Smax=S
#maxPost=max(data[:,1])
print (chain.names)
for name in chain.names[1:]:
if(not name in allparnames):
allparnames.append(name)
return allchains
#make sample names from the most relevant part of the filenames
def make_short_labels(files):
longnames=[os.path.basename(filename) for filename in files]
if(len(set(longnames))<len(longnames)):
longnames=[os.path.basename(os.path.dirname(os.path.abspath(filename))) for filename in files]
si=0;ei=1
sgood=True;egood=True
if(len(longnames)>1):
for c in longnames[0]:
#print("sitest",[name[si] for name in longnames])
#print("s set:",set([name[si] for name in longnames]))
if(sgood and len(set([name[si] for name in longnames]))==1):
si+=1
else:sgood=False
#print("eitest",[name[-ei] for name in longnames])
#print("e set:",set([name[-ei] for name in longnames]))
if(egood and len(set([name[-ei] for name in longnames]))==1):
ei+=1
else:egood=False
#print("si,ei=",si,ei)
if(not (sgood or egood)):break
#print([si+ei-len(name) for name in longnames])
if(np.min([si+ei-len(name) for name in longnames])<0):
si=0
ei=1
if(ei<=1):
sample_labels=[name[si:] for name in longnames]
else:
sample_labels=[name[si:-(ei-1)] for name in longnames]
#print("si/ei:",si,ei)
print(sample_labels)
return sample_labels
#get an appropriate set of data for a plot
def get_xydata(data,i,j,dens,samps):
d=data[data[:,0]>samps]
#d=d[d[:,6]>4]
Nd=len(d)
#print("Reduced data len =",Nd)
every=max([1,int(Nd/dens)])
#print(i,j,every)
#print(Nd,dens,every)
x=d[::every,i]
y=d[::every,j]
#print (len(x))
return x,y
##################
#Widget functions
##################
import matplotlib.patches as mpatches
class viewer:
def __init__(self,fnames,selectX=False):
self.fnames=fnames
self.labels=make_short_labels(fnames)
self.allchains=read_all_chains(self.fnames)
#print('allchains:',self.allchains)
self.fig, self.ax = plt.subplots()
if selectX:
leftlim=0.35
bottomlim=0.30
else:
leftlim=0.25
bottomlim=0.25
plt.subplots_adjust(left=leftlim, bottom=bottomlim)
cx0 = 1
cy0 = 0
s0 = 3
d0 = 2
x,y=get_xydata(self.allchains[0].data,cx0,cy0,10**d0,10**s0)
try:
cmapname='tab10'
self.cmap = matplotlib.cm.get_cmap(cmapname)
except ValueError:
cmapname='Vega10'
self.cmap = matplotlib.cm.get_cmap(cmapname)
self.cmap_norm=10
self.scat = plt.scatter(x, y, s=1, c=x, cmap=cmapname,norm=colors.Normalize(0,self.cmap_norm))
#scat = plt.scatter([], [], s=1,cmap="tab10")
axcolor = 'lightgoldenrodyellow'
axstart = plt.axes([leftlim, 0.1, 0.9-leftlim, 0.03])
axdens = plt.axes([leftlim, 0.15, 0.9-leftlim, 0.03])
height=(len(allparnames)-1)*0.05
resetax = plt.axes([0.8, 0.025, 0.1, 0.04])
if not selectX:
#rax = plt.axes([0.025, 0.5-height/2, 0.1, height], facecolor=axcolor)
rYax = plt.axes([0.025, 0.5-height/2, 0.1, height])
else:
rXax = plt.axes([0.025, 0.5-height/2, 0.1, height])
rXax.text(0.9, 0.95, "X", transform=rXax.transAxes, fontsize=11,
verticalalignment='top',horizontalalignment='right')
rYax = plt.axes([0.15, 0.5-height/2, 0.1, height])
rYax.text(0.9, 0.95, "Y", transform=rYax.transAxes, fontsize=11,
verticalalignment='top',horizontalalignment='right')
ilogS=(math.log10(Smax))
ilogSamps=int(math.log10(Nmax))
print("ilogSmax=",ilogS)
#Start slider
print('axstart',axstart)
self.sstart = Slider(axstart, 'log-Start', 2, ilogS, valinit=s0)
self.sstart.on_changed(self.update)
#Density slider
self.sdens = Slider(axdens, 'log-Density', 1, ilogSamps, valinit=d0)
self.sdens.on_changed(self.update)
#X/y-axis radio buttons
if selectX:
self.radioX = RadioButtons(rXax, allparnames, active=cx0)
self.radioY = RadioButtons(rYax, allparnames, active=cy0)
parnameswidth=max([len(x) for x in allparnames])
fontsize=self.radioX.labels[0].get_fontsize()/max([1,parnameswidth/5.])
#print("fontsize=",fontsize)
for label in self.radioX.labels:
label.set_fontsize(fontsize)
for label in self.radioY.labels:
label.set_fontsize(fontsize)
for circle in self.radioX.circles: # adjust radius here. The default is 0.05
circle.set_radius(0.03)
self.radioX.on_clicked(self.update)
self.haveX=True
#print('set radio')
else:
self.radioY = RadioButtons(rYax, allparnames, active=0)
parnameswidth=max([len(x) for x in allparnames])
fontsize=self.radioY.labels[0].get_fontsize()/max([1,parnameswidth/5.])
#print("fontsize=",fontsize)
for label in self.radioY.labels:
label.set_fontsize(fontsize)
self.haveX=False
for circle in self.radioY.circles: # adjust radius here. The default is 0.05
circle.set_radius(0.03)
self.radioY.on_clicked(self.update)
#Reset button
self.button = Button(resetax, 'Reset', color=axcolor, hovercolor='0.975')
self.button.on_clicked(self.reset)
#print('calling update')
self.update()
#print('save fig')
#plt.savefig('junk.png')
#print('calling show')
plt.show()
#print('finished init')
def update(self,val=0): #argument is not used
#c0=1+allparnames[1:].index(radio.value_selected)
#print("index->",c0)
start = int(10**self.sstart.val)
samps = int(10**self.sdens.val)
xmin=1e100;xmax=-1e100
ymin=1e100;ymax=-1e100
xy=np.array([])
cc=np.array([])
ic0=0.5;
ind=0
plotlabels=[]
for chain in self.allchains:
includeChain=True
if self.haveX:
if(self.radioX.value_selected in chain.names):
cx=chain.names.index(self.radioX.value_selected)
else: includeChain=False
else: cx=0
if(self.radioY.value_selected in chain.names):
cy=chain.names.index(self.radioY.value_selected)
print("have par name '"+self.radioY.value_selected+"' in",chain.names)
else: includeChain=False
if includeChain:
x,y=get_xydata(chain.data,cx,cy,samps,start)
n=len(x)
#xy=np.array([xyi for xyi in xy if np.all(np.isfinite(xyi))])
colorval=ic0+ind
if(cc.size>0):
cc = np.concatenate((cc,[colorval]*n))
xy = np.vstack((xy,np.vstack((x, y)).T))
else:
cc=np.array([colorval]*n)
xy = np.vstack((x, y)).T
if(n==0):
ind+=1
continue
lim=x.min()
if(xmin>lim):xmin=lim
lim=x.max()
if(xmax<lim):xmax=lim
lim=y.min()
if(ymin>lim):ymin=lim
lim=y.max()
if(ymax<lim):ymax=lim
plotlabels.append(mpatches.Patch(color=self.cmap(colorval/self.cmap_norm), label=self.labels[ind],hatch='.'))
ind=ind+1
else: ind+=1
self.scat.set_offsets(xy)
self.scat.set_array(cc)
self.ax.set_xlim(xmin-0.1*(xmax-xmin),xmax+0.1*(xmax-xmin))
self.ax.set_ylim(ymin-0.1*(ymax-ymin),ymax+0.1*(ymax-ymin))
self.ax.legend(handles=plotlabels,fontsize=8)
for tick in self.ax.get_xticklabels():
tick.set_rotation(20)
self.fig.canvas.draw_idle()
def reset(self,event): #fixme
self.allchains=read_all_chains(self.fnames)
self.sstart.reset()
self.sdens.reset()
###################
#main code
###################
if __name__ == "__main__":
##########
    # Arguments and argument ingest
# determine basename from first argument.
parser = argparse.ArgumentParser(description='Provide snapshot of chain state.')
parser.add_argument('fname', metavar='chain_file', nargs='+', type=str,
help='chain file path')
parser.add_argument('-uselike',action='store_true',help='Include the likelihood')
parser.add_argument('-noPost',action='store_true',help='Data has no Posterior or Likelihood in first columns ')
parser.add_argument('-selectX',action='store_true',help='View two param projections')
args = parser.parse_args()
print(args)
useLikeDefault=args.uselike
noPostDefault=args.noPost
viewer(args.fname,selectX=args.selectX)
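    #Example invocations (file names hypothetical), assuming this script is saved as chainview.py:
    #  python chainview.py chain.dat                      #single chain, choose the y-axis parameter
    #  python chainview.py chainA.dat chainB.dat -selectX #overlay chains, choose both axes
    #  python chainview.py chain.dat -uselike             #also load the likelihood column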
```