ext | sha | content |
---|---|---|
py | 7dff68d6263045ec5066bc609ff8f3e9a87c65ca | import tempfile
import pytest
from pgbackup import storage
@pytest.fixture
def infile():
f = tempfile.TemporaryFile()
f.write(b"Testing")
f.seek(0)
return f
def test_storing_file_locally(infile):
"""
Writes content from one file-like to another
"""
outfile = tempfile.NamedTemporaryFile(delete=False)
storage.local(infile, outfile)
with open(outfile.name, "rb") as f:
assert f.read() == b"Testing"
def test_storing_file_on_s3(mocker, infile):
"""
Writes content from one file-like to S3
"""
client = mocker.Mock()
storage.s3(client, infile, "bucket", "file-name")
client.upload_fileobj.assert_called_with(infile, "bucket", "file-name")
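# Illustrative sketch (an assumption, not part of pgbackup): the storage API
# exercised by the two tests above could be implemented roughly like this.
def _sketch_local(infile, outfile):
    # Copy the bytes from one file-like object to another and close the target.
    outfile.write(infile.read())
    outfile.close()
def _sketch_s3(client, infile, bucket, name):
    # Delegate the upload to the provided boto3-style client.
    client.upload_fileobj(infile, bucket, name)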
|
py | 7dff68d87bff36a10d0276f2955e31b40b0af611 | #!/usr/bin/env python
# Creates Earth and Mars planet instances
# Author: Sandeep Baskar
# Import native modules
import numpy as np
import sys, os
# Add root directory of SPOTS
sys.path.insert(0, os.path.abspath('../..'))
from common.astro.Planet import Planet
from common.astro.planets.earth import *
from common.astro.planets.mars import *
def earth(km = True):
earth = Planet('earth', 6378, 398600, earth_atmo, km = km)
return earth
def mars(km = True):
mars = Planet('mars', 3389.5, 0.042828e6, mars_atmo, km = km)
return mars
|
py | 7dff6965cf0efe8752a0d0f6f31d59f12a93334a | # -*- coding: utf-8 -*-
# Copyright (c) 2017, openetech and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class Thickness(Document):
pass
|
py | 7dff69aa04779f12b23140673abba81f4bfa4a08 | # Generated by Django 3.2.8 on 2021-11-08 03:05
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('ProjectManager', '0017_auto_20211104_0206'),
]
operations = [
migrations.RemoveField(
model_name='task',
name='fieldEnabled',
),
migrations.AddField(
model_name='task',
name='fileEnabled',
field=models.BooleanField(default=False),
),
]
|
py | 7dff6a27a513bd9a77acf5b6f353aa5a38f6d2f2 | try:
import torch
import xentropy_cuda
from .softmax_xentropy import SoftmaxCrossEntropyLoss
del torch
del xentropy_cuda
del softmax_xentropy
except ImportError as err:
print("apex was installed without --xentropy flag, contrib.xentropy is not available")
|
py | 7dff6a375504c3bc28b8c78b1e4878332d6932c6 | # RL models from elegantrl
import torch
from elegantrl.agents import AgentDDPG
from elegantrl.agents import AgentPPO
from elegantrl.agents import AgentSAC
from elegantrl.agents import AgentTD3
from elegantrl.train.run import train_and_evaluate, init_agent
from elegantrl.train.config import Arguments
MODELS = {"ddpg": AgentDDPG, "td3": AgentTD3, "sac": AgentSAC, "ppo": AgentPPO}
#MODELS = {"ddpg": AgentDDPG, "td3": AgentTD3, "sac": AgentSAC, "ppo": AgentPPO, "a2c": AgentA2C}
OFF_POLICY_MODELS = ["ddpg", "td3", "sac"]
ON_POLICY_MODELS = ["ppo"]
#ON_POLICY_MODELS = ["ppo", "a2c"]
"""MODEL_KWARGS = {x: config.__dict__[f"{x.upper()}_PARAMS"] for x in MODELS.keys()}
NOISE = {
"normal": NormalActionNoise,
"ornstein_uhlenbeck": OrnsteinUhlenbeckActionNoise,
}"""
class DRLAgent:
"""Provides implementations for DRL algorithms
Attributes
----------
env: gym environment class
user-defined class
Methods
-------
get_model()
setup DRL algorithms
train_model()
train DRL algorithms in a train dataset
and output the trained model
DRL_prediction()
make a prediction in a test dataset and get results
"""
def __init__(self, env, price_array, tech_array, turbulence_array):
self.env = env
self.price_array = price_array
self.tech_array = tech_array
self.turbulence_array = turbulence_array
def get_model(self, model_name, model_kwargs):
env_config = {
"price_array": self.price_array,
"tech_array": self.tech_array,
"turbulence_array": self.turbulence_array,
"if_train": True,
}
env = self.env(config=env_config)
env.env_num = 1
if model_name not in MODELS:
    raise NotImplementedError("NotImplementedError")
agent = MODELS[model_name]
model = Arguments(agent=agent, env=env)
if model_name in OFF_POLICY_MODELS:
model.if_off_policy = True
else:
model.if_off_policy = False
if model_kwargs is not None:
try:
model.learning_rate = model_kwargs["learning_rate"]
model.batch_size = model_kwargs["batch_size"]
model.gamma = model_kwargs["gamma"]
model.seed = model_kwargs["seed"]
model.net_dim = model_kwargs["net_dimension"]
model.target_step = model_kwargs["target_step"]
model.eval_gap = model_kwargs["eval_gap"]
model.eval_times = model_kwargs["eval_times"]
except BaseException:
raise ValueError(
"Fail to read arguments, please check 'model_kwargs' input."
)
return model
def train_model(self, model, cwd, total_timesteps=5000):
model.cwd = cwd
model.break_step = total_timesteps
train_and_evaluate(args=model)
@staticmethod
def DRL_prediction(model_name, cwd, net_dimension, environment):
if model_name not in MODELS:
raise NotImplementedError("NotImplementedError")
agent = MODELS[model_name]
environment.env_num = 1
args = Arguments(agent=agent, env=environment)
args.cwd = cwd
args.net_dim = net_dimension
# load agent
try:
agent = init_agent(args, gpu_id = 0)
act = agent.act
device = agent.device
except BaseException:
raise ValueError("Fail to load agent!")
# test on the testing env
_torch = torch
state = environment.reset()
episode_returns = list() # the cumulative_return / initial_account
episode_total_assets = list()
episode_total_assets.append(environment.initial_total_asset)
with _torch.no_grad():
for i in range(environment.max_step):
s_tensor = _torch.as_tensor((state,), device=device)
a_tensor = act(s_tensor) # action_tanh = act.forward()
action = (
a_tensor.detach().cpu().numpy()[0]
) # detach() is not needed here because torch.no_grad() is active
state, reward, done, _ = environment.step(action)
total_asset = (
environment.cash
+ (
environment.price_array[environment.time] * environment.stocks
).sum()
)
episode_total_assets.append(total_asset)
episode_return = total_asset / environment.initial_total_asset
episode_returns.append(episode_return)
if done:
break
print("Test Finished!")
# return episode total_assets on testing data
print("episode_return", episode_return)
return episode_total_assets
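# Usage sketch (illustrative assumption; the environment class and hyperparameter
# values below are placeholders, not taken from this file):
#   agent = DRLAgent(env=StockTradingEnv, price_array=price_array,
#                    tech_array=tech_array, turbulence_array=turbulence_array)
#   model = agent.get_model("ppo", model_kwargs={
#       "learning_rate": 3e-4, "batch_size": 2048, "gamma": 0.99, "seed": 312,
#       "net_dimension": 512, "target_step": 5000, "eval_gap": 30, "eval_times": 1})
#   agent.train_model(model, cwd="./trained_ppo", total_timesteps=50000)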
|
py | 7dff6ac68f5fa6683d9013fd09bde5bf8ec53423 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Management command entry point for working with migrations
"""
import sys
import django
from django.conf import settings
INSTALLED_APPS = [
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sites",
# The ordering here, the apps using the organization base models
# first and *then* the organizations app itself is an implicit test
# that the organizations app need not be installed in order to use
# its base models.
"test_accounts",
"test_abstract",
"test_vendors",
"organizations",
"test_custom",
]
settings.configure(
DEBUG=True,
USE_TZ=True,
DATABASES={
"default": {
"ENGINE": "django.db.backends.sqlite3",
}
},
MIDDLEWARE_CLASSES=(), # Silence Django 1.7 warnings
SITE_ID=1,
FIXTURE_DIRS=['tests/fixtures'],
ORGS_SLUGFIELD='autoslug.AutoSlugField',
# ORGS_SLUGFIELD='django_extensions.db.fields.AutoSlugField',
INSTALLED_APPS=INSTALLED_APPS,
ROOT_URLCONF="tests.urls",
)
django.setup()
if __name__ == '__main__':
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
py | 7dff6b82afd874ed170688b4da5f50215974e37e | # Generated by Django 3.1.5 on 2021-01-17 12:18
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0011_auto_20210117_1215'),
]
operations = [
migrations.AddField(
model_name='product',
name='product_source',
field=models.CharField(choices=[('LT', 'Lt'), ('DN', 'Dn')], default='LT', editable=False, max_length=2),
),
]
|
py | 7dff6e5f7f1ad24d8b0c24574f21e738ae390c99 | import os
import re
import logging
import multiprocessing as mp
bind = '0.0.0.0:5000'
worker_class = 'gevent'
workers = os.environ.get('WORKERS', min(10, (int(mp.cpu_count()) * 2) + 1))
# Logging
# Read log config environment variables
version = os.environ.get('VERSION')
artifact_id = os.environ.get('MARATHON_APP_DOCKER_IMAGE', f'qvantel/kafka-explorer:{version}?')
service_name = os.environ.get('SERVICE_5000_NAME', os.environ.get('SERVICE_NAME', 'kafka-explorer'))
# Set log formats
log_fmt = re.sub(
r'\s+',
'',
f"""
{{
"@timestamp": "%(asctime)s",
"@version": "1",
"log_type": "LOG",
"log_level": "%(levelname)s",
"level_value": %(levelno)s000,
"service_name": "{service_name}",
"logger_name": "%(name)s",
"artifact_id": "{artifact_id}",
"thread_name": "%(process)d",
"message": "%(message)s"
}}
""".rstrip()
)
access_fmt = re.sub(
r'\s+',
'',
f"""
{{
"@timestamp": "%(asctime)s",
"@version": "1",
"log_type": "LOG",
"log_level": "TRACE",
"level_value": 5000,
"service_name": "{service_name}",
"logger_name": "%(name)s",
"artifact_id": "{artifact_id}",
"thread_name": "%(process)d",
"message": "%(message)s"
}}
""".rstrip()
)
access_log_format = "%(h)s %(l)s %(u)s '%(r)s' %(s)s %(b)s '%(f)s' '%(a)s'"
date_fmt = '%Y-%m-%dT%H:%M:%S.000%z'
loggers = {}
# Set level (pseudo supporting the TRACE level)
if (log_level := os.environ.get('LOG_LEVEL', 'INFO')) == 'TRACE':
# Enable and configure access logging on stdout
disable_redirect_access_to_syslog = True
loggers.update({
"gunicorn.access": {
"level": 'DEBUG',
"handlers": ["access_console"],
"propagate": 0,
"qualname": "gunicorn.access"
}
})
# Set level to the closest supported value
log_level = 'DEBUG'
loggers.update({
'app': {
'level': log_level,
'handlers': [],
'propagate': True,
'qualname': 'app'
},
'gunicorn.error': {
'level': os.environ.get('GUNICORN_LOG_LEVEL', 'INFO'),
'handlers': [],
'propagate': True,
'qualname': 'gunicorn.error'
},
'kafka': {
'level': os.environ.get('KAFKA_LOG_LEVEL', 'ERROR'),
'handlers': [],
'propagate': True,
'qualname': 'kafka'
}
})
class JsonEscape(logging.Filter):
"""
Python logging filter created for making sure that we have no double quotes or line breaks in the message field of
our log records (this would result in syntactically invalid json objects)
"""
def filter(self, record: logging.LogRecord):
record.msg = record.msg.replace('"', '\\"').replace('\n', '\\n')
return True
logconfig_dict = {
'root': {'level': log_level, 'handlers': ['console']},
'loggers': loggers,
'filters': {
'json_escape': {
'()': JsonEscape
}
},
'handlers': {
'console': {
'class': 'logging.StreamHandler',
'formatter': 'generic',
'filters': ['json_escape'],
'stream': 'ext://sys.stdout'
},
'access_console': {
'class': 'logging.StreamHandler',
'formatter': 'access',
'filters': ['json_escape'],
'stream': 'ext://sys.stdout'
}
},
'formatters': {
'generic': {
'format': log_fmt,
'datefmt': date_fmt,
'class': 'logging.Formatter'
},
'access': {
'format': access_fmt,
'datefmt': date_fmt,
'class': 'logging.Formatter'
}
}
}
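# Usage sketch (assumption, not stated in this file): this module is a gunicorn
# configuration file and would typically be loaded with something like
#   gunicorn -c <this_file>.py <wsgi_module>:<app>
# With LOG_LEVEL=TRACE, access records are written to stdout as single-line JSON
# following the access_fmt template above, e.g.
#   {"@timestamp": "...", "@version": "1", "log_type": "LOG", "log_level": "TRACE", ...}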
|
py | 7dff6e73a5a9a37e015017820f71ada90e203ad8 | from __future__ import absolute_import, division, print_function
from builtins import super, range, zip, round, map
from .base import (
DiTToHasTraits,
Float,
Complex,
Unicode,
Any,
Int,
List,
observe,
Instance,
)
from .position import Position
from .wire import Wire
class Line(DiTToHasTraits):
"""
Inheritance:
Line (self._line)
-> Equipment: ACLineSegment (self._acls)
->BaseVoltage (self._bv)
->PSRType (self._psr)
Asset (self._asset)
-> Location (self._loc)
-> WireSpacingInfo (self._wsi)
"""
name = Unicode(help="""Name of the line object""")
nominal_voltage = Float(
help="""This parameter defines the base voltage of the wire.""",
default_value=None,
)
line_type = Unicode(
help="""Whether the line is overhead or underground""", default_value="overhead"
)
length = Float(
help="""This parameter is the length of the Line.""", default_value=0
)
from_element = Any(
help="""Name of the node which connects to the 'from' end of the line""",
default_value=None,
)
to_element = Any(
help="""'Name of the node which connects to the 'to' end of the line""",
default_value=None,
)
from_element_connection_index = Int(
help="""Index of the position in the node that the 'from' end of the line connects to (e.g. for a long bus)""",
default_value=None,
)
to_element_connection_index = Int(
help="""Index of the position in the node that the 'to' end of the line connects to (e.g. for a long bus)""",
default_value=None,
)
is_fuse = Int(
help="""This flag indicates whether or not the line is also a fuse""",
default_value=None,
)
is_switch = Int(
help="""This flag indicates whether or not the line is also a switch""",
default_value=None,
)
is_banked = Int(
help="""This flag indicates whether or not the switch is banked. If this is true, the switch objects are controlled together""",
default_value=None,
)
faultrate = Float(
help="""The number of faults that occur per year""", default_value=None
)
wires = List(
Instance(Wire),
help="""This parameter is a list of all the wires included on the line. The wires are objects containing elements of phase, X and Y. """,
default_value=None,
)
positions = List(
Instance(Position),
help="""This parameter is a list of positional points describing the line. The positions are objects containing elements of long, lat and elevation. The points can be used to map the position of the line. """,
default_value=None,
)
impedance_matrix = List(
List(Complex),
help="""This provides the matrix representation of the line impedance in complex form. Computed from the values of GMR and distances of individual wires. Kron reduction is applied to make this a 3x3 matrix.""",
)
capacitance_matrix = List(
List(Complex),
help="""This provides the matrix representation of the line capacitance in complex form. Computed from the values of diameters and distances of individual wires. Kron reduction is applied to make this a 3x3 matrix.""",
)
# Modification: Nicolas (December 2017)
# Multiple feeder support. Each element keeps track of the name of the substation it is connected to, as well as the name of the feeder.
# I think we need both since a substation might have multiple feeders attached to it.
# These attributes are filled once the DiTTo model has been created using the Network module
substation_name = Unicode(
help="""The name of the substation to which the object is connected.""",
default_value=None,
)
feeder_name = Unicode(
help="""The name of the feeder the object is on.""", default_value=None
)
# Modification: Nicolas (December 2017)
# Add a is_recloser attribute as an easy and quick way to handle reclosers in DiTTo
is_recloser = Int(
help="""This flag indicates whether or not the line is also a recloser""",
default_value=None,
)
# Modification: Nicolas (January 2018)
is_breaker = Int(
help="""This flag indicates whether or not the line is also a breaker""",
default_value=None,
)
# Modification: Nicolas (March 2018)
is_sectionalizer = Int(
help="""This flag indicates whether or not the line is also a sectionalizer""",
default_value=None,
)
# Modification: Nicolas (March 2018)
nameclass = Unicode(help="""Nameclass of the line object.""", default_value=None)
# Modification: Nicolas (May 2018)
is_substation = Int(
help="""Flag that indicates wheter the element is inside a substation or not.""",
default_value=0,
)
# Modification: Nicolas (June 2018)
is_network_protector = Int(
help="""This flag indicates whether or not the line is also a network protector.""",
default_value=None,
)
def build(
self,
model,
Asset=None,
Line=None,
ACLineSegment=None,
PSRType=None,
baseVoltage=None,
wireSpacingInfo=None,
Location=None,
Terminal1=None,
Terminal2=None,
):
pass
#
# self._model = model
#
# if Line is None:
# self._line = self._model.env.Line()
# else:
# self._line = Line
#
# if Asset is None:
# self._asset = self._model.env.Asset()
# else:
# self._asset=Asset
# self._asset.PowerSystemResources = self._asset.PowerSystemResources + (self._line, )
#
# if ACLineSegment is None:
# self._acls = self._model.env.ACLineSegment()
# else:
# self._acls = ACLineSegment
# self._line.Equipments = self._line.Equipments + (self._acls, )
# self._acls.EquipmentContainer = self._line
#
# if baseVoltage is None:
# self._bv = self._model.env.BaseVoltage()
# else:
# self._bv = baseVoltage
# self._acls.BaseVoltage = self._bv
# self._bv.ConductingEquipment = self._bv.ConductingEquipment + (self._acls, )
#
# if PSRType is None:
# self._psr = self._model.env.PSRType()
# else:
# self._psr = PSRType
# self._acls.PSRType = self._psr
# self._psr.PowerSystemResources = self._psr.PowerSystemResources + (self._acls, )
#
# if wireSpacingInfo is None:
# self._wsi = self._model.env.WireSpacingInfo()
# else:
# self._wsi = wireSpacingInfo
# self._asset.AssetInfo = self._wsi
# self._wsi.Assets = self._wsi.Assets + (self._asset, )
#
# if Location is None:
# self._loc = self._model.env.Location()
# else:
# self._loc = Location
# self._asset.Location = self._loc
# self._loc.Assets = self._loc.Assets + (self._asset, )
#
# if Terminal1 is None:
# self._t1 = self._model.env.Terminal()
# else:
# self._t1 = Terminal1
#
# if Terminal2 is None:
# self._t2 = self._model.env.Terminal()
# else:
# self._t2 = Terminal2
#
# self._model.model_store[self.name] = self
# self._model.model_store[self.name] = self
#
# @observe('name', type='change')
# def _set_name(self, bunch):
# self._line.name = bunch['new']
#
# @observe('name', type='fetch')
# def _get_name(self, bunch):
# return self._line.name
#
# @observe('line_type', type='change')
# def _set_line_type(self, bunch):
# self._psr.aliasName = bunch['new']
#
# @observe('line_type', type='fetch')
# def _get_line_type(self, bunch):
# return self._psr.aliasName
#
# @observe('nominal_voltage', type='change')
# def _set_nominal_voltage(self, bunch):
# self._bv.nominal_voltage = self._model.env.Voltage(value=bunch['new'])
#
# @observe('nominal_voltage', type='fetch')
# def _get_nominal_voltage(self, bunch):
# if self._bv.nominal_voltage is None:
# return None
# return self._bv.nominalVoltage.value
#
# @observe('length', type='change')
# def _set_length(self, bunch):
# self._acls.length = self._model.env.Length(value=bunch['new'])
#
# @observe('length', type='fetch')
# def _get_length(self, bunch):
# if self._acls.length is None:
# return None
# return self._acls.length.value
#
# @observe('resistance', type='change')
# def _set_resistance(self, bunch):
# self._acls.r = self._model.env.Resistance(value=bunch['new'])
#
# @observe('resistance', type='fetch')
# def _get_resistance(self, bunch):
# if self._acls.r is None:
# return None
# return self._acls.r.value
#
# @observe('resistance0', type='change')
# def _set_resistance0(self, bunch):
# self._acls.r0 = self._model.env.Resistance(value=bunch['new'])
#
# @observe('resistance0', type='fetch')
# def _get_resistance0(self, bunch):
# if self._acls.r0 is None:
# return None
# return self._acls.r0.value
#
# @observe('reactance', type='change')
# def _set_reactance(self, bunch):
# self._acls.x = self._model.env.Reactance(value=bunch['new'])
#
# @observe('reactance', type='fetch')
# def _get_reactance(self, bunch):
# if self._acls.x is None:
# return None
# return self._acls.x.value
#
# @observe('reactance0', type='change')
# def _set_reactance0(self, bunch):
# self._acls.x0 = self._model.env.Reactance(value=bunch['new'])
#
# @observe('reactance0', type='fetch')
# def _get_reactance0(self, bunch):
# if self._acls.x0 is None:
# return None
# return self._acls.x0.value
#
# @observe('susceptance', type='change')
# def _set_susceptance(self, bunch):
# self._acls.bch = self._model.env.Susceptance(value=bunch['new'])
#
# @observe('susceptance', type='fetch')
# def _get_susceptance(self, bunch):
# if self._acls.bch is None:
# return None
# return self._acls.bch.value
#
# @observe('susceptance0', type='change')
# def _set_susceptance0(self, bunch):
# self._acls.b0ch = self._model.env.Susceptance(value=bunch['new'])
#
# @observe('susceptance0', type='fetch')
# def _get_susceptance0(self, bunch):
# if self._acls.b0ch is None:
# return None
# return self._acls.b0ch.value
#
# @observe('conductance', type='change')
# def _set_conductance(self, bunch):
# self._acls.gch = self._model.env.Conductance(value=bunch['new'])
#
# @observe('conductance', type='fetch')
# def _get_conductance(self, bunch):
# if self._acls.gch is None:
# return None
# return self._acls.gch.value
#
# @observe('conductance0', type='change')
# def _set_conductance0(self, bunch):
# self._acls.g0ch = self._model.env.Conductance(value=bunch['new'])
#
# @observe('conductance0', type='fetch')
# def _get_conductance0(self, bunch):
# if self._acls.g0ch is None:
# return None
# return self._acls.g0ch.value
#
# @observe('max_temperature', type='change')
# def _set_max_temperature(self, bunch):
# self._acls.shortCircuitEndTemperature = self._model.env.Temperature(value=bunch['new'])
#
# @observe('max_temperature', type='fetch')
# def _get_max_temperature(self, bunch):
# if self._acls.shortCircuitEndTemperature is None:
# return None
# return self._acls.shortCircuitEndTemperature.value
#
# @observe('wires', type='change')
# def _set_wires(self, bunch):
# wire_list = bunch['new']
# self._wsi.WirePositions=[]
# for wire in wire_list:
# wp = self._model.env.WirePosition()
# wp.phase = wire.phase
# wp.xCoord = self._model.Displacement(value=wire.X)
# wp.yCoord = self._model.Displacement(value=wire.Y)
# wp.WireSpacingInfo = self._wsi
# self._wsi.WirePositions = self._wsi.WirePositions + (wp, )
#
# @observe('wires', type='fetch')
# def _get_wires(self, bunch):
# wires = []
# for wp in self._wsi.WirePositions:
# wire = Wire()
# wire.phase = wp.phase
# wire.X = wp.XCoord.value
# wire.Y = wp.YCoord.value
# wires.append(wire)
# return wires
#
# @observe('positions', type='change')
# def _set_positions(self, bunch):
# position_list = bunch['new']
# self._loc.PositionPoints=[]
# for position in position_list:
# p = self._model.env.PositionPoint()
# p.xPosition = position.long
# p.yPosition = position.lat
# p.zPosition = position.elevation
# p.Location = self._loc
# self._loc.PositionPoints = self._loc.PositionPoints + (p, )
#
# @observe('positions', type='fetch')
# def _get_positions(self, bunch):
# positions = []
# for p in self._loc.PositionPoints:
# position = Position()
# position.lat = p.xPosition
# position.long = p.yPosition
# position.elevation = p.zPosition
# positions.append(position)
# return positions
#
# @observe('from_element', type='change')
# def _set_from_element(self, bunch):
# self._t1.ConnectivityNode = bunch['new']
# self._t1.ConductingEquipment = self._acls
#
# @observe('from_element', type='fetch')
# def _get_from_element(self, bunch):
# return self._t1.ConnectivityNode
#
# @observe('to_element', type='change')
# def _set_to_element(self, bunch):
# self._t2.ConnectivityNode = bunch['new']
# self._t2.ConductingEquipment = self._acls
#
# @observe('to_element', type='fetch')
# def _get_to_element(self, bunch):
# return self._t2.ConnectivityNode
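# Construction sketch (illustrative assumption, not taken from the DiTTo documentation):
# a reader typically instantiates a Line against the model store and then fills in the
# traits declared above, for example:
#   line = Line(model)
#   line.name = "line_12"
#   line.length = 150.0
#   line.from_element = "node_a"
#   line.to_element = "node_b"
#   wire = Wire(model); wire.phase = "A"
#   line.wires = [wire]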
|
py | 7dff6e76f57b96d303ab782832e159a805d125bd | from __future__ import unicode_literals
from django.db.models.base import ModelBase
try:
from django.contrib.contenttypes.fields import GenericRelation
except ImportError:
# Django < 1.7
from django.contrib.contenttypes.generic import GenericRelation
from .exceptions import ModelNotRegistered, ModelAlreadyRegistered
from .models import MaatRanking
def get_handler_instance(model, handler_class, options):
""" Returns an handler instance for the given *model*. """
handler = handler_class(model)
for key, value in options.items():
setattr(handler, key, value)
return handler
def contribute_to_class(model):
"""
Adds a 'maat_ranking' attribute to each instance of model.
The attribute is a generic relation to MaatRanking, used by the
handler to retrieve the ordered queryset.
"""
try:
generic_relation = GenericRelation(MaatRanking)
except TypeError:
# Django < 1.7
generic_relation = GenericRelation(MaatRanking)
generic_relation.contribute_to_class(model, 'maat_ranking')
class MaatRegister(object):
"""
Register class.
"""
def __init__(self):
self._registry = {}
def get_handler_for_model(self, model):
"""
Returns a handler for the given *model*. If the model has not been
registered, it raises a *ModelNotRegistered* exception.
"""
try:
return self._registry[model]
except KeyError:
raise ModelNotRegistered('Model {} is not handled'.format(model))
def get_registered_handlers(self):
""" Returns a list of all the registered handlers. """
return list(self._registry.values())
def register(self, model_or_iterable, handler_class, **kwargs):
"""
Registers a model or a list of models to be handled by *handler_class*.
Once registered, a model gains a new attribute *maat* which can be
used to retrieve an ordered queryset.
Eg:
from djangomaat.register import maat
maat.register(Article, ArticleMaatHandler)
ordered_article_list = Article.maat.ordered_by('popularity')
Plus, the management command `populate_maat_ranking` will
automatically process the model.
"""
if isinstance(model_or_iterable, ModelBase):
model_or_iterable = [model_or_iterable]
for model in model_or_iterable:
if model in self._registry:
try:
model_name = model._meta.model_name
except AttributeError:
# Django < 1.6
model_name = model._meta.module_name
raise ModelAlreadyRegistered(
"The model {} is already registered.".format(model_name))
handler = get_handler_instance(model, handler_class, kwargs)
self._registry[model] = handler
contribute_to_class(model)
def unregister(self, model_or_iterable):
""" Do not use it. Just for testing, really. """
if isinstance(model_or_iterable, ModelBase):
model_or_iterable = [model_or_iterable]
for model in model_or_iterable:
if model in self._registry:
del self._registry[model]
def flush(self):
""" Remove all registered models. """
self._registry = {}
maat = MaatRegister()
|
py | 7dff6ebc78640dd16f64df8571adb2693d0afb01 | #!/usr/bin/env python3
import os, pathlib, sys
sys.path.append(os.path.relpath(pathlib.Path(__file__).resolve().parent.parent))
from common import CoordPuzzle, default_main
class MutohTiles(CoordPuzzle):
x_range = -1, 1
y_range = -1, 1
coord_to_object = "colors"
def object_char(self, obj):
if obj == ("Red", 0):
return "O"
if obj == ("Blue", 0):
return "X"
if __name__ == "__main__":
default_main(MutohTiles)
|
py | 7dff6ee85fd3ea64c3474a1adbaeb4bc89986ca6 | from django.contrib import admin
from nonprofit.mailroom.models import Slot
class SlotAdmin(admin.ModelAdmin):
list_display = ('description','forward_to','enabled')
admin.site.register(Slot, SlotAdmin) |
py | 7dff6ee9db8d8e6ae0060641aeab5c9ea57a3c90 | # -*- coding: utf-8 -*-
import unittest
import os, sys
import json
import time
import requests
import subprocess
import shutil
from os import environ
try:
from ConfigParser import ConfigParser # py2
except:
from configparser import ConfigParser # py3
from pprint import pprint
from biokbase.workspace.client import Workspace as workspaceService
from jrb_fastqc.jrb_fastqcImpl import jrb_fastqc
from jrb_fastqc.jrb_fastqcServer import MethodContext
from ReadsUtils.ReadsUtilsClient import ReadsUtils
from jrb_fastqc.authclient import KBaseAuth as _KBaseAuth
class kb_fastqcTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
token = environ.get('KB_AUTH_TOKEN', None)
config_file = environ.get('KB_DEPLOYMENT_CONFIG', None)
cls.cfg = {}
config = ConfigParser()
config.read(config_file)
for nameval in config.items('jrb_fastqc'):
cls.cfg[nameval[0]] = nameval[1]
authServiceUrl = cls.cfg.get('auth-service-url',
"https://kbase.us/services/authorization/Sessions/Login")
auth_client = _KBaseAuth(authServiceUrl)
user_id = auth_client.get_user(token)
# WARNING: don't call any logging methods on the context object,
# it'll result in a NoneType error
cls.ctx = MethodContext(None)
cls.ctx.update({'token': token,
'user_id': user_id,
'provenance': [
{'service': 'jrb_fastqc',
'method': 'please_never_use_it_in_production',
'method_params': []
}],
'authenticated': 1})
cls.wsURL = cls.cfg['workspace-url']
cls.wsClient = workspaceService(cls.wsURL, token=token)
cls.serviceImpl = jrb_fastqc(cls.cfg)
# Retrieve and set up test files
test_fq_filename = "test_1.fastq.gz"
output = subprocess.check_output(["curl",
"-o",
test_fq_filename,
"http://bioseed.mcs.anl.gov/~seaver/Files/Sample_Reads/WT1_S1_L001_R2_001.fastq.gz"])
cls.large_fq_test_file1 = os.path.join(cls.cfg['scratch'], test_fq_filename)
shutil.copy(test_fq_filename, cls.large_fq_test_file1)
fq_filename = "interleaved.fq"
cls.small_fq_test_file2 = os.path.join(cls.cfg['scratch'], fq_filename)
shutil.copy(os.path.join("data", fq_filename), cls.small_fq_test_file2)
@classmethod
def tearDownClass(cls):
if hasattr(cls, 'wsName'):
print("Test run on workspace "+cls.wsName)
cls.wsClient.delete_workspace({'workspace': cls.wsName})
print('Test workspace was deleted')
def getWsClient(self):
return self.__class__.wsClient
def getWsName(self):
if hasattr(self.__class__, 'wsName'):
return self.__class__.wsName
suffix = int(time.time() * 1000)
wsName = "test_jrb_fastqc_" + str(suffix)
self.getWsClient().create_workspace({'workspace': wsName})
self.__class__.wsName = wsName
return wsName
def getImpl(self):
return self.__class__.serviceImpl
def getContext(self):
return self.__class__.ctx
# NOTE: According to Python unittest naming rules, test method names should start with 'test'.
def test_local_fastqc(self):
# This assumes, and apparently rightly so, that we're still in the /kb/module/test directory
output = subprocess.check_output(["fastqc", self.large_fq_test_file1])
self.assertTrue("Analysis complete" in output)
pass
def test_fastqc_app(self):
# create ws, and load test reads
wsName = self.getWsName()
ru = ReadsUtils(os.environ['SDK_CALLBACK_URL'])
input_file_ref = ru.upload_reads({'fwd_file': self.small_fq_test_file2,
'sequencing_tech': 'tech1',
'wsname': wsName,
'name': 'reads1',
'interleaved': 1
})['obj_ref']
input_params = {'input_ws': wsName, 'input_file_ref': input_file_ref}
output = self.getImpl().runFastQC(self.getContext(), input_params)[0]
self.assertIn('report_name', output)
self.assertIn('report_ref', output)
# pprint(output)
report = self.getWsClient().get_objects2({'objects': [{'ref': output['report_ref']}]})['data'][0]['data']
# pprint(report)
self.assertIn('direct_html', report)
self.assertIn('file_links', report)
self.assertIn('html_links', report)
self.assertIn('objects_created', report)
self.assertIn('text_message', report)
|
py | 7dff6f8237f870b5b44d2b5bab3bf5cf382bd566 | # coding: utf-8
"""
API v1
DocSpring is a service that helps you fill out and sign PDF templates. # noqa: E501
OpenAPI spec version: v1
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class TemplateData(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'expiration_interval': 'str',
'webhook_url': 'str',
'scss': 'str',
'allow_additional_properties': 'bool',
'expire_after': 'float',
'description': 'str',
'public_submissions': 'bool',
'slack_webhook_url': 'str',
'header_html': 'str',
'public_web_form': 'bool',
'editable_submissions': 'bool',
'expire_submissions': 'bool',
'name': 'str',
'footer_html': 'str',
'html': 'str',
'redirect_url': 'str'
}
attribute_map = {
'expiration_interval': 'expiration_interval',
'webhook_url': 'webhook_url',
'scss': 'scss',
'allow_additional_properties': 'allow_additional_properties',
'expire_after': 'expire_after',
'description': 'description',
'public_submissions': 'public_submissions',
'slack_webhook_url': 'slack_webhook_url',
'header_html': 'header_html',
'public_web_form': 'public_web_form',
'editable_submissions': 'editable_submissions',
'expire_submissions': 'expire_submissions',
'name': 'name',
'footer_html': 'footer_html',
'html': 'html',
'redirect_url': 'redirect_url'
}
def __init__(self, expiration_interval=None, webhook_url=None, scss=None, allow_additional_properties=None, expire_after=None, description=None, public_submissions=None, slack_webhook_url=None, header_html=None, public_web_form=None, editable_submissions=None, expire_submissions=None, name=None, footer_html=None, html=None, redirect_url=None): # noqa: E501
"""TemplateData - a model defined in OpenAPI""" # noqa: E501
self._expiration_interval = None
self._webhook_url = None
self._scss = None
self._allow_additional_properties = None
self._expire_after = None
self._description = None
self._public_submissions = None
self._slack_webhook_url = None
self._header_html = None
self._public_web_form = None
self._editable_submissions = None
self._expire_submissions = None
self._name = None
self._footer_html = None
self._html = None
self._redirect_url = None
self.discriminator = None
if expiration_interval is not None:
self.expiration_interval = expiration_interval
self.webhook_url = webhook_url
self.scss = scss
if allow_additional_properties is not None:
self.allow_additional_properties = allow_additional_properties
if expire_after is not None:
self.expire_after = expire_after
self.description = description
if public_submissions is not None:
self.public_submissions = public_submissions
self.slack_webhook_url = slack_webhook_url
self.header_html = header_html
if public_web_form is not None:
self.public_web_form = public_web_form
if editable_submissions is not None:
self.editable_submissions = editable_submissions
if expire_submissions is not None:
self.expire_submissions = expire_submissions
self.name = name
self.footer_html = footer_html
self.html = html
self.redirect_url = redirect_url
@property
def expiration_interval(self):
"""Gets the expiration_interval of this TemplateData. # noqa: E501
:return: The expiration_interval of this TemplateData. # noqa: E501
:rtype: str
"""
return self._expiration_interval
@expiration_interval.setter
def expiration_interval(self, expiration_interval):
"""Sets the expiration_interval of this TemplateData.
:param expiration_interval: The expiration_interval of this TemplateData. # noqa: E501
:type: str
"""
allowed_values = ["minutes", "hours", "days"] # noqa: E501
if expiration_interval not in allowed_values:
raise ValueError(
"Invalid value for `expiration_interval` ({0}), must be one of {1}" # noqa: E501
.format(expiration_interval, allowed_values)
)
self._expiration_interval = expiration_interval
@property
def webhook_url(self):
"""Gets the webhook_url of this TemplateData. # noqa: E501
:return: The webhook_url of this TemplateData. # noqa: E501
:rtype: str
"""
return self._webhook_url
@webhook_url.setter
def webhook_url(self, webhook_url):
"""Sets the webhook_url of this TemplateData.
:param webhook_url: The webhook_url of this TemplateData. # noqa: E501
:type: str
"""
self._webhook_url = webhook_url
@property
def scss(self):
"""Gets the scss of this TemplateData. # noqa: E501
:return: The scss of this TemplateData. # noqa: E501
:rtype: str
"""
return self._scss
@scss.setter
def scss(self, scss):
"""Sets the scss of this TemplateData.
:param scss: The scss of this TemplateData. # noqa: E501
:type: str
"""
self._scss = scss
@property
def allow_additional_properties(self):
"""Gets the allow_additional_properties of this TemplateData. # noqa: E501
:return: The allow_additional_properties of this TemplateData. # noqa: E501
:rtype: bool
"""
return self._allow_additional_properties
@allow_additional_properties.setter
def allow_additional_properties(self, allow_additional_properties):
"""Sets the allow_additional_properties of this TemplateData.
:param allow_additional_properties: The allow_additional_properties of this TemplateData. # noqa: E501
:type: bool
"""
self._allow_additional_properties = allow_additional_properties
@property
def expire_after(self):
"""Gets the expire_after of this TemplateData. # noqa: E501
:return: The expire_after of this TemplateData. # noqa: E501
:rtype: float
"""
return self._expire_after
@expire_after.setter
def expire_after(self, expire_after):
"""Sets the expire_after of this TemplateData.
:param expire_after: The expire_after of this TemplateData. # noqa: E501
:type: float
"""
self._expire_after = expire_after
@property
def description(self):
"""Gets the description of this TemplateData. # noqa: E501
:return: The description of this TemplateData. # noqa: E501
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this TemplateData.
:param description: The description of this TemplateData. # noqa: E501
:type: str
"""
self._description = description
@property
def public_submissions(self):
"""Gets the public_submissions of this TemplateData. # noqa: E501
:return: The public_submissions of this TemplateData. # noqa: E501
:rtype: bool
"""
return self._public_submissions
@public_submissions.setter
def public_submissions(self, public_submissions):
"""Sets the public_submissions of this TemplateData.
:param public_submissions: The public_submissions of this TemplateData. # noqa: E501
:type: bool
"""
self._public_submissions = public_submissions
@property
def slack_webhook_url(self):
"""Gets the slack_webhook_url of this TemplateData. # noqa: E501
:return: The slack_webhook_url of this TemplateData. # noqa: E501
:rtype: str
"""
return self._slack_webhook_url
@slack_webhook_url.setter
def slack_webhook_url(self, slack_webhook_url):
"""Sets the slack_webhook_url of this TemplateData.
:param slack_webhook_url: The slack_webhook_url of this TemplateData. # noqa: E501
:type: str
"""
self._slack_webhook_url = slack_webhook_url
@property
def header_html(self):
"""Gets the header_html of this TemplateData. # noqa: E501
:return: The header_html of this TemplateData. # noqa: E501
:rtype: str
"""
return self._header_html
@header_html.setter
def header_html(self, header_html):
"""Sets the header_html of this TemplateData.
:param header_html: The header_html of this TemplateData. # noqa: E501
:type: str
"""
self._header_html = header_html
@property
def public_web_form(self):
"""Gets the public_web_form of this TemplateData. # noqa: E501
:return: The public_web_form of this TemplateData. # noqa: E501
:rtype: bool
"""
return self._public_web_form
@public_web_form.setter
def public_web_form(self, public_web_form):
"""Sets the public_web_form of this TemplateData.
:param public_web_form: The public_web_form of this TemplateData. # noqa: E501
:type: bool
"""
self._public_web_form = public_web_form
@property
def editable_submissions(self):
"""Gets the editable_submissions of this TemplateData. # noqa: E501
:return: The editable_submissions of this TemplateData. # noqa: E501
:rtype: bool
"""
return self._editable_submissions
@editable_submissions.setter
def editable_submissions(self, editable_submissions):
"""Sets the editable_submissions of this TemplateData.
:param editable_submissions: The editable_submissions of this TemplateData. # noqa: E501
:type: bool
"""
self._editable_submissions = editable_submissions
@property
def expire_submissions(self):
"""Gets the expire_submissions of this TemplateData. # noqa: E501
:return: The expire_submissions of this TemplateData. # noqa: E501
:rtype: bool
"""
return self._expire_submissions
@expire_submissions.setter
def expire_submissions(self, expire_submissions):
"""Sets the expire_submissions of this TemplateData.
:param expire_submissions: The expire_submissions of this TemplateData. # noqa: E501
:type: bool
"""
self._expire_submissions = expire_submissions
@property
def name(self):
"""Gets the name of this TemplateData. # noqa: E501
:return: The name of this TemplateData. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this TemplateData.
:param name: The name of this TemplateData. # noqa: E501
:type: str
"""
self._name = name
@property
def footer_html(self):
"""Gets the footer_html of this TemplateData. # noqa: E501
:return: The footer_html of this TemplateData. # noqa: E501
:rtype: str
"""
return self._footer_html
@footer_html.setter
def footer_html(self, footer_html):
"""Sets the footer_html of this TemplateData.
:param footer_html: The footer_html of this TemplateData. # noqa: E501
:type: str
"""
self._footer_html = footer_html
@property
def html(self):
"""Gets the html of this TemplateData. # noqa: E501
:return: The html of this TemplateData. # noqa: E501
:rtype: str
"""
return self._html
@html.setter
def html(self, html):
"""Sets the html of this TemplateData.
:param html: The html of this TemplateData. # noqa: E501
:type: str
"""
self._html = html
@property
def redirect_url(self):
"""Gets the redirect_url of this TemplateData. # noqa: E501
:return: The redirect_url of this TemplateData. # noqa: E501
:rtype: str
"""
return self._redirect_url
@redirect_url.setter
def redirect_url(self, redirect_url):
"""Sets the redirect_url of this TemplateData.
:param redirect_url: The redirect_url of this TemplateData. # noqa: E501
:type: str
"""
self._redirect_url = redirect_url
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, TemplateData):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
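# Usage sketch (illustrative, not part of the generated client):
#   data = TemplateData(name="Invoice", html="<p>Hello</p>", expiration_interval="days")
#   payload = data.to_dict()  # plain dict ready for JSON serialization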
|
py | 7dff6f8a494b6ebb01bb70dd7ec22d74591d1b70 | """
Send Harry Hillaker a weekly email summarizing the past seven days' worth of
RR3 products.
"""
from __future__ import print_function
import os
import datetime
import smtplib
from email import encoders
from email.mime.base import MIMEBase
from email.mime.multipart import MIMEMultipart
from pyiem.util import get_dbconn
WFOS = ['KDMX', 'KARX', 'KDVN', 'KFSD', 'KOAX']
def main():
"""Go Main Go"""
pgconn = get_dbconn('afos', user='nobody')
acursor = pgconn.cursor()
now = datetime.datetime.now()
sts = now + datetime.timedelta(days=-7)
sts = sts.replace(hour=0)
acursor.execute("""
SELECT data, source from products where
pil in ('RR3DMX','RR3DVN','RR3ARX','RR3FSD','RR3OAX','RR1FSD')
and entered > '%s' ORDER by entered ASC
""" % (sts.strftime("%Y-%m-%d %H:%M"), ))
files = {}
for wfo in WFOS:
files[wfo] = open('/tmp/%sRR3.txt' % (wfo,), 'w')
for row in acursor:
files[row[1]].write(row[0].replace("\001", ""))
files[row[1]].write("\n")
for wfo in WFOS:
files[wfo].close()
msg = MIMEMultipart()
msg['Subject'] = 'NWS RR3 Data for %s - %s' % (sts.strftime("%d %b %Y"),
now.strftime("%d %b %Y"))
msg['From'] = '[email protected]'
msg['To'] = '[email protected]'
# msg['To'] = 'akrherz@localhost'
msg.preamble = 'RR3 Report'
fn = "RR3-%s-%s.txt" % (sts.strftime("%Y%m%d"), now.strftime("%Y%m%d"))
for wfo in WFOS:
fp = open('/tmp/%sRR3.txt' % (wfo,), 'rb')
b = MIMEBase('Text', 'Plain')
b.set_payload(fp.read())
encoders.encode_base64(b)
fp.close()
b.add_header('Content-Disposition',
'attachment; filename="%s-%s"' % (wfo, fn))
msg.attach(b)
os.unlink('/tmp/%sRR3.txt' % (wfo,))
# Send the email via our own SMTP server.
s = smtplib.SMTP('localhost')
s.sendmail(msg['From'], msg['To'], msg.as_string())
s.quit()
if __name__ == '__main__':
main()
|
py | 7dff701bcb6da4caa0b0c6dee9c01aa8030c1e1b | #!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.script import *
from test_framework.mininode import *
import sys
RPC_INVALID_PARAMETER = -8
class CreditRPCWaitforlogs(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
self.extra_args = [["-logevents=1"]]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def create_contracts_with_logs(self):
contract_addresses = []
send_result = []
block_hashes = []
self.nodes[0].generate(100+COINBASE_MATURITY)
contract_address = self.nodes[0].createcontract("6060604052600d600055341561001457600080fd5b61017e806100236000396000f30060606040526004361061004c576000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff168063027c1aaf1461004e5780635b9af12b14610058575b005b61005661008f565b005b341561006357600080fd5b61007960048080359060200190919050506100a1565b6040518082815260200191505060405180910390f35b60026000808282540292505081905550565b60007fc5c442325655248f6bccf5c6181738f8755524172cea2a8bd1e38e43f833e7f282600054016000548460405180848152602001838152602001828152602001935050505060405180910390a17fc5c442325655248f6bccf5c6181738f8755524172cea2a8bd1e38e43f833e7f282600054016000548460405180848152602001838152602001828152602001935050505060405180910390a1816000540160008190555060005490509190505600a165627a7a7230582015732bfa66bdede47ecc05446bf4c1e8ed047efac25478cb13b795887df70f290029")['address']
self.nodes[0].generate(1)
contract_addresses.append(contract_address)
send_result.append(self.nodes[0].sendtocontract(contract_address,"5b9af12b"))
block_hashes.append(self.nodes[0].generate(1))
contract_address = self.nodes[0].createcontract("6060604052341561000f57600080fd5b61029b8061001e6000396000f300606060405260043610610062576000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff16806394e8767d14610067578063b717cfe6146100a6578063d3b57be9146100bb578063f7e52d58146100d0575b600080fd5b341561007257600080fd5b61008860048080359060200190919050506100e5565b60405180826000191660001916815260200191505060405180910390f35b34156100b157600080fd5b6100b961018e565b005b34156100c657600080fd5b6100ce6101a9565b005b34156100db57600080fd5b6100e36101b3565b005b600080821415610117577f30000000000000000000000000000000000000000000000000000000000000009050610186565b5b600082111561018557610100816001900481151561013257fe5b0460010290507f01000000000000000000000000000000000000000000000000000000000000006030600a8481151561016757fe5b06010260010281179050600a8281151561017d57fe5b049150610118565b5b809050919050565b60008081548092919060010191905055506101a76101b3565b565b6101b161018e565b565b7f746f7069632034000000000000000000000000000000000000000000000000007f746f7069632033000000000000000000000000000000000000000000000000007f746f7069632032000000000000000000000000000000000000000000000000007f746f70696320310000000000000000000000000000000000000000000000000060405180807f3700000000000000000000000000000000000000000000000000000000000000815250600101905060405180910390a45600a165627a7a72305820262764914338437fc49c9f752503904820534b24092308961bc10cd851985ae50029")['address']
self.nodes[0].generate(1)
contract_addresses.append(contract_address)
send_result.append(self.nodes[0].sendtocontract(contract_address,"d3b57be9"))
block_hashes.append(self.nodes[0].generate(1))
return contract_addresses, send_result, block_hashes
def check_topics(self, contract_addresses,block_hashes,send_result):
filters = {}
address = []
address.append(contract_addresses[0])
filters["addresses"] = address
topic = []
topic.append("c5c442325655248f6bccf5c6181738f8755524172cea2a8bd1e38e43f833e7f2")
filters["topics"] = topic
ret = self.nodes[0].waitforlogs(COINBASE_MATURITY+102,COINBASE_MATURITY+105,filters)
assert_equal(ret['entries'][0]['blockHash'], block_hashes[0][0])
assert_equal(ret['entries'][0]['blockNumber'], COINBASE_MATURITY+102)
assert_equal(ret['entries'][0]['transactionHash'], send_result[0]['txid'])
assert_equal(ret['entries'][0]['transactionIndex'], 1)
assert_equal(ret['entries'][0]['from'], send_result[0]['hash160'])
assert_equal(ret['entries'][0]['to'], contract_addresses[0])
assert_equal(ret['entries'][0]['gasUsed'], 30183)
assert_equal(ret['entries'][0]['contractAddress'], contract_addresses[0])
assert_equal(ret['entries'][0]['topics'], ["c5c442325655248f6bccf5c6181738f8755524172cea2a8bd1e38e43f833e7f2"])
assert_equal(ret['entries'][0]['data'], "000000000000000000000000000000000000000000000000000000000000000d000000000000000000000000000000000000000000000000000000000000000d0000000000000000000000000000000000000000000000000000000000000000")
assert_equal(ret['count'], 2)
assert_equal(ret['nextblock'], COINBASE_MATURITY+106)
topic.clear()
topic.append("c5c442325655248f6bccf5c6181738f8755524172cea2a8bd1e38e43f833e7f3") #error topics
ret = self.nodes[0].waitforlogs(COINBASE_MATURITY+102,COINBASE_MATURITY+105,filters)
assert_equal(ret,{"entries":[],"count":0,"nextblock":COINBASE_MATURITY+106})
def check_waitforlogs(self, contract_addresses, send_result, block_hashes):
self.nodes[0].sendtocontract(contract_addresses[1],"d3b57be9")
self.nodes[0].generate(10)
ret = self.nodes[0].waitforlogs(COINBASE_MATURITY+104,COINBASE_MATURITY+104)
assert_equal(ret['entries'][0]['blockHash'], block_hashes[1][0])
assert_equal(ret['entries'][0]['blockNumber'], COINBASE_MATURITY+104)
assert_equal(ret['entries'][0]['transactionHash'], send_result[1]['txid'])
assert_equal(ret['entries'][0]['transactionIndex'], 1)
assert_equal(ret['entries'][0]['from'], send_result[1]['hash160'])
assert_equal(ret['entries'][0]['to'], contract_addresses[1])
assert_equal(ret['entries'][0]['gasUsed'], 44071)
assert_equal(ret['entries'][0]['contractAddress'], contract_addresses[1])
assert_equal(ret['entries'][0]['topics'], ["746f706963203100000000000000000000000000000000000000000000000000","746f706963203200000000000000000000000000000000000000000000000000"
,"746f706963203300000000000000000000000000000000000000000000000000","746f706963203400000000000000000000000000000000000000000000000000"])
assert_equal(ret['entries'][0]['data'], "37")
assert_equal(ret['count'], 1)
assert_equal(ret['nextblock'], COINBASE_MATURITY+105)
try:
self.nodes[0].waitforlogs(0,0)
except JSONRPCException as exp:
assert_equal(exp.error["code"], RPC_INVALID_PARAMETER)
try:
self.nodes[0].waitforlogs(1,0)
except JSONRPCException as exp:
assert_equal(exp.error["code"], RPC_INVALID_PARAMETER)
try:
self.nodes[0].waitforlogs(200,100)
except JSONRPCException as exp:
assert_equal(exp.error["code"], RPC_INVALID_PARAMETER)
def run_test(self):
contract_addresses, send_result, block_hashes = self.create_contracts_with_logs()
self.check_waitforlogs(contract_addresses, send_result, block_hashes)
self.check_topics(contract_addresses, block_hashes, send_result)
self.stop_nodes()
self.start_nodes() #start node again
self.check_topics(contract_addresses, block_hashes,send_result)
if __name__ == '__main__':
CreditRPCWaitforlogs().main()
|
py | 7dff72b8f37515984831fd94150257c7bc5c46c3 | import numpy as np
import SlowQuant as HFrun
import slowquant.basissets.BasisSet as BS
import slowquant.hartreefock.DIIS as DIIS
import slowquant.hartreefock.runHartreeFock as HF
from slowquant.hartreefock.HartreeFock import HartreeFock
import slowquant.molecularintegrals.runMolecularIntegrals as MI
from slowquant.molecularintegrals.runMIcython import boysPrun
import slowquant.mollerplesset.runMPn as MP
import slowquant.properties.runProperties as prop
import slowquant.qfit.Qfit as QFIT
import slowquant.integraltransformation.IntegralTransform as UF
## UNIT TESTS
def test_magvec():
check = 5.196152422706632
calc = QFIT.magvec([1,2,3],[4,5,6])
assert abs(check - calc) < 10**-12
def test_centerofcharge():
check1 = 0.1020452034
check2 = 0.162516435
check3 = 0.0
input1 = np.array([[10,0,0,0],[8.0, 0.0, 0.0, 0],[1.0, 1.70075339, 0.0, 0],[1.0, -0.68030136, 1.62516435, 0.0]])
calc1, calc2, calc3 = QFIT.centerofcharge(input1)
assert abs(check1 - calc1) < 10**-8
assert abs(check2 - calc2) < 10**-8
assert abs(check3 - calc3) < 10**-8
def test_solveFit():
check = np.array([-4.0, 4.5])
A = np.array([[1.0,2.0],[3.0,4.0]])
B = np.array([5.0,6.0])
calc = QFIT.solveFit(A,B)
assert abs(check[0] - calc[0]) < 10**-12
assert abs(check[1] - calc[1]) < 10**-12
def test_makepoints():
check = np.array([[ 4.92471737e-16, 0.00000000e+00, -4.02133690e+00,
0.00000000e+00, 0.00000000e+00],
[ 0.00000000e+00, 0.00000000e+00, 4.02133690e+00,
0.00000000e+00, 0.00000000e+00]])
settting = {'Griddensity':0.07, 'vdW scaling':1.4}
input1 = np.array([[10,0,0,0],[8.0, 0.0, 0.0, 0],[1.0, 1.70075339, 0.0, 0],[1.0, -0.68030136, 1.62516435, 0.0]])
calc = QFIT.makepoints(settting, input1)
assert np.sum(np.abs(check-calc)) < 10**-8
def test_boys():
m = [0.5, 13.0, 20.6, 25.0, 64.0, 75.5, 80.3, 4.0, 8.5, 15.3, 1.8, 30, 46.8, 100.0]
x = [6.8, 14.1, 32.4, 6.4, 50.0, 40.8, 78.2, 7.0, 3.6, 20.7, 25.3, 26.0, 37.6, 125.1]
scale = [2,7,14,5,24,20,36,4,3,10,4,13,18,55]
check = [7.34475165333247E-02,
1.56775160456192E-07,
2.17602798734846E-14,
4.28028518677348E-05,
5.67024356263279E-24,
2.63173492081630E-20,
6.35062774057122E-36,
8.03538503977806E-04,
2.31681539108704E-03,
5.40914879973724E-10,
3.45745419193244E-04,
3.57321060811178E-13,
1.91851951160577E-18,
7.75391047694625E-55]
for i in range(0, len(x)):
assert abs(boysPrun(m[i], x[i])-check[i])*10**scale[i] < 10**-8
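# Cross-check sketch (an addition for illustration, not part of the original suite):
# the Boys function F_m(x) = \int_0^1 t^(2m) exp(-x t^2) dt equals
# 1F1(m + 1/2; m + 3/2; -x) / (2m + 1), so SciPy's confluent hypergeometric function
# gives an independent reference value for boysPrun.
def boys_reference(m, x):
    from scipy.special import hyp1f1
    return hyp1f1(m + 0.5, m + 1.5, -x) / (2.0 * m + 1.0)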
def test_HartreeFock1():
settings = np.genfromtxt('slowquant/Standardsettings.csv', delimiter = ';', dtype='str')
results = {}
set = {}
for i in range(len(settings)):
set.update({settings[i][0]:settings[i][1]})
input = np.genfromtxt('data/testfiles/inputH2O.csv', delimiter=';')
set['DIIS'] = 'No'
set['basisset'] = 'STO3G'
results['VNN'] = np.load('data/testfiles/enucH2O_STO3G.npy')
results['Te'] = np.load('data/testfiles/EkinH2O_STO3G.npy')
results['S'] = np.load('data/testfiles/overlapH2O_STO3G.npy')
results['VNe'] = np.load('data/testfiles/nucattH2O_STO3G.npy')
results['Vee'] = np.load('data/testfiles/twointH2O_STO3G.npy')
Dcheck = np.genfromtxt('data/testfiles/dH2O_STO3G.csv',delimiter=';')
basis = BS.bassiset(input, set['basisset'])
results = HF.runHartreeFock(input, set, results, print_SCF='No')
D = results['D']
for i in range(0, len(D)):
for j in range(0, len(D)):
assert abs(2*Dcheck[i,j] - D[i,j]) < 10**-7
def test_HartreeFock2():
settings = np.genfromtxt('slowquant/Standardsettings.csv', delimiter = ';', dtype='str')
set = {}
for i in range(len(settings)):
set.update({settings[i][0]:settings[i][1]})
input = np.genfromtxt('data/testfiles/inputCH4.csv', delimiter=';')
set['DIIS'] = 'No'
set['basisset'] = 'STO3G'
results = {}
results['VNN'] = np.load('data/testfiles/enucCH4_STO3G.npy')
results['Te'] = np.load('data/testfiles/EkinCH4_STO3G.npy')
results['S'] = np.load('data/testfiles/overlapCH4_STO3G.npy')
results['VNe'] = np.load('data/testfiles/nucattCH4_STO3G.npy')
results['Vee'] = np.load('data/testfiles/twointCH4_STO3G.npy')
Dcheck = np.genfromtxt('data/testfiles/dCH4_STO3G.csv',delimiter=';')
basis = BS.bassiset(input, set['basisset'])
results = HF.runHartreeFock(input, set, results, print_SCF='No')
D = results['D']
for i in range(0, len(D)):
for j in range(0, len(D)):
assert abs(2*Dcheck[i,j] - D[i,j]) < 10**-7
def test_HartreeFock3():
settings = np.genfromtxt('slowquant/Standardsettings.csv', delimiter = ';', dtype='str')
set = {}
for i in range(len(settings)):
set.update({settings[i][0]:settings[i][1]})
input = np.genfromtxt('data/testfiles/inputH2O.csv', delimiter=';')
set['DIIS'] = 'No'
set['basisset'] = 'DZ'
results = {}
results['VNN'] = np.load('data/testfiles/enucH2O_DZ.npy')
results['Te'] = np.load('data/testfiles/EkinH2O_DZ.npy')
results['S'] = np.load('data/testfiles/overlapH2O_DZ.npy')
results['VNe'] = np.load('data/testfiles/nucattH2O_DZ.npy')
results['Vee'] = np.load('data/testfiles/twointH2O_DZ.npy')
Dcheck = np.genfromtxt('data/testfiles/dH2O_DZ.csv',delimiter=';')
basis = BS.bassiset(input, set['basisset'])
results = HF.runHartreeFock(input, set, results, print_SCF='No')
D = results['D']
for i in range(0, len(D)):
for j in range(0, len(D)):
assert abs(2*Dcheck[i,j] - D[i,j]) < 10**-7
def test_MP2_1():
settings = np.genfromtxt('slowquant/Standardsettings.csv', delimiter = ';', dtype='str')
set = {}
for i in range(len(settings)):
set.update({settings[i][0]:settings[i][1]})
set['basisset'] = 'STO3G'
set['MPn'] = 'MP2'
results = {}
input = np.genfromtxt('data/testfiles/inputCH4.csv', delimiter=';')
basis = BS.bassiset(input, set['basisset'])
results['F'] = np.load('data/testfiles/faoCH4_STO3G.npy')
results['C_MO'] = np.load('data/testfiles/cmoCH4_STO3G.npy')
results['Vee'] = np.load('data/testfiles/twointCH4_STO3G.npy')
results = MP.runMPn(input, results, set)
check = -0.056046676165
assert abs(results['EMP2'] - check) < 10**-7
def test_MP2_2():
settings = np.genfromtxt('slowquant/Standardsettings.csv', delimiter = ';', dtype='str')
set = {}
for i in range(len(settings)):
set.update({settings[i][0]:settings[i][1]})
set['basisset'] = 'DZ'
set['MPn'] = 'MP2'
input = np.genfromtxt('data/testfiles/inputH2O.csv', delimiter=';')
basis = BS.bassiset(input, set['basisset'])
results = {}
results['F'] = np.load('data/testfiles/faoH2O_DZ.npy')
results['C_MO'] = np.load('data/testfiles/cmoH2O_DZ.npy')
results['Vee'] = np.load('data/testfiles/twointH2O_DZ.npy')
results = MP.runMPn(input, results, set)
check = -0.152709879075
assert abs(results['EMP2'] - check) < 10**-7
def test_derivative():
    # Tests that a single atom has no geometric gradient
settings = np.genfromtxt('slowquant/Standardsettings.csv', delimiter = ';', dtype='str')
set = {}
results = {}
for i in range(len(settings)):
set.update({settings[i][0]:settings[i][1]})
input = np.array([[8, 0, 0, 0],[8, 0.0, 0.0, 0.0]])
basis = BS.bassiset(input, set['basisset'])
results = MI.rungeometric_derivatives(input, basis, set, results)
VNe = results['1dyVNe']
S = results['1dyS']
Te = results['1dyTe']
VNN = results['1dyVNN']
ERI = results['1dyVee']
assert np.max(np.abs(ERI)) < 10**-12
assert np.max(np.abs(VNN)) < 10**-12
assert np.max(np.abs(Te)) < 10**-12
assert np.max(np.abs(S)) < 10**-12
assert np.max(np.abs(VNe)) < 10**-12
## REGRESSION TESTS
def test_prop():
results = HFrun.run('data/testfiles/inputH2O.csv','data/testfiles/settingsPROP.csv')
check = open('data/testfiles/outPROP.txt','r')
calc = open('out.txt')
for line in check:
if line[0:3] == 'MP2':
checkMP2 = float(line[12:])
if line[0:5] == 'Total':
checkMolDip = float(line[7:])
if line[0:5] == 'Atom1':
checkMulChr = float(line[7:])
for line in calc:
if line[0:3] == 'MP2':
calcMP2 = float(line[12:])
if line[0:5] == 'Total':
calcMolDip = float(line[7:])
if line[0:5] == 'Atom1':
calcMulChr = float(line[7:])
assert checkMP2 == calcMP2
assert checkMolDip == calcMolDip
assert checkMulChr == calcMulChr
def test_qfit():
HFrun.run('data/testfiles/input2_H2O.csv','data/testfiles/settingsQFIT.csv')
check = open('data/testfiles/outQFIT.txt','r')
calc = open('out.txt')
for line in check:
if line[0:4] == 'RMSD':
checkRMSD = float(line[6:])
for line in calc:
if line[0:4] == 'RMSD':
calcRMSD = float(line[6:])
assert checkRMSD == calcRMSD
def test_geoopt():
results = HFrun.run('data/testfiles/H2.csv','data/testfiles/settingsGEO.csv')
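    # Each analytic first derivative below is checked against a central finite
    # difference, (f(+e) - f(-e)) / (2*e), built from the precomputed +e/-e runs.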
e = 0.000001
dp = np.load('data/testfiles/enucp.npy')
dm = np.load('data/testfiles/enucm.npy')
dVNN = results['1dxVNN']
dnVNN = (dp-dm)/(2*e)
assert np.max(np.abs(dVNN-dnVNN)) < 10**-9
e = 0.000001
dp = np.load('data/testfiles/overlapp.npy')
dm = np.load('data/testfiles/overlapm.npy')
dS = results['1dxS']
dnS = (dp-dm)/(2*e)
assert np.max(np.abs(dS-dnS)) < 10**-9
e = 0.000001
dp = np.load('data/testfiles/Ekinp.npy')
dm = np.load('data/testfiles/Ekinm.npy')
dTe = results['1dxTe']
dnTe = (dp-dm)/(2*e)
assert np.max(np.abs(dTe-dnTe)) < 10**-9
e = 0.000001
dp = np.load('data/testfiles/nucattp.npy')
dm = np.load('data/testfiles/nucattm.npy')
dVNe = results['1dxVNe']
dnVNe = (dp-dm)/(2*e)
assert np.max(np.abs(dVNe-dnVNe)) < 10**-9
e = 0.000001
dp = np.load('data/testfiles/twointp.npy')
dm = np.load('data/testfiles/twointm.npy')
dERI = results['1dxVee']
dnERI = (dp-dm)/(2*e)
assert np.max(np.abs(dERI-dnERI)) < 10**-9
def test_UHF():
results = HFrun.run('data/testfiles/inputH2_UHF.csv','data/testfiles/settingsUHF.csv')
check = open('data/testfiles/outUHF.txt','r')
for line in check:
if line[0:2] == '27':
checkUHF = float(line[23:30])
assert abs(checkUHF - results['UHFenergy']) < 10**-4
def test_Lowdin():
HFrun.run('data/testfiles/inputH2O.csv','data/testfiles/settingsLowdin.csv')
check = open('data/testfiles/outLowdin.txt','r')
calc = open('out.txt')
for line in check:
if line[0:5] == 'Atom1':
checkLow = float(line[7:])
for line in calc:
if line[0:5] == 'Atom1':
calcLow = float(line[7:])
assert calcLow == checkLow
def test_Ffunction():
results = HFrun.run('data/testfiles/Hm.csv','data/testfiles/settingFfunctions.csv')
    assert abs(results['HFenergy'] + 0.475129018306) < 10**-5
def test_CIS():
results = HFrun.run('data/testfiles/inputH2O.csv','data/testfiles/settingsCIS.csv')
check = [0.2872555,0.2872555,0.2872555,0.344424996,0.344424996,0.344424996,0.356461759,0.365988995,0.365988995,0.365988995,0.394513799,0.394513799,0.394513799,0.416071739,0.505628288,0.514289997,0.514289997,0.514289997,0.555191886,0.563055764,0.563055764,0.563055764,0.655318449,0.910121689,1.108770966,1.108770966,1.108770966,1.200096133,1.200096133,1.200096133,1.300785195,1.325762065,19.95852641,19.95852641,19.95852641,20.01097942,20.01134209,20.01134209,20.01134209,20.05053194]
for i in range(len(results['CIS Exc'])):
        assert abs(results['CIS Exc'][i] - check[i]) < 10**-6
def test_RPA():
results = HFrun.run('data/testfiles/inputH2O.csv','data/testfiles/settingRPA.csv')
check = [0.285163717,0.285163717,0.285163717,0.299743447,0.299743447,0.299743447,0.352626661,0.352626661,0.352626661,0.354778253,0.365131311,0.365131311,0.365131311,0.415317495,0.50010114,0.510661051,0.510661051,0.510661051,0.546071909,0.546071909,0.546071909,0.551371885,0.650270712,0.873425371,1.103818796,1.103818796,1.103818796,1.195787071,1.195787071,1.195787071,1.283205318,1.323742189,19.95850406,19.95850406,19.95850406,20.01094716,20.01130746,20.01130746,20.01130746,20.05049194]
for i in range(len(results['RPA Exc'])):
        assert abs(results['RPA Exc'][i] - check[i]) < 10**-6
def test_MP3():
results = HFrun.run('data/testfiles/inputH2.csv','data/testfiles/settingMP3.csv')
print(results)
calc = results['EMP2'] + results['EMP3']
check = -0.0180
assert abs(calc - check) < 10**-5
def test_CCSD():
results = HFrun.run('data/testfiles/inputH2O.csv','data/testfiles/settingCCSD.csv')
check = -0.070680088376-0.000099877272
calc = results['ECCSD']+results['E(T)']
assert abs(calc-check) < 10**-10
def test_GeoOptimization():
# New geometry optimization test, after bug was found
results = HFrun.run('data/testfiles/inputH2O.csv','data/testfiles/settingFullGeoOpt.csv')
check = -74.9658980993
assert abs(check-results['HFenergy']) < 10**-10
def test_GeoOptimizationNUM():
# New geometry optimization test, after bug was found
results = HFrun.run('data/testfiles/inputH2O.csv','data/testfiles/settingsGEONUM.csv')
check = -74.9658980993
assert abs(check-results['HFenergy']) < 10**-9
def test_BOMD():
results = HFrun.run('data/testfiles/inputH2O.csv','data/testfiles/settingBOMD.csv')
check = -75.5667945588
assert abs(check-results['HFenergy']) < 10**-10
|
py | 7dff75101d6b1104a99a52b6a7280abf6fedd7a4 | import unittest
from pathlib import Path
from pdb2sql import interface
from pdb2sql import pdb2sql
from . import pdb_folder
class Test_1_ContactAtoms(unittest.TestCase):
"""Test function get_contact_atoms."""
def setUp(self):
self.pdb = Path(pdb_folder, '3CRO.pdb')
self.db = interface(self.pdb)
def test_get_contact_atoms_default(self):
""""verify get_contact_atoms default."""
contact_atoms = self.db.get_contact_atoms()
self.assertIsInstance(contact_atoms, dict)
self.assertEqual(len(contact_atoms), 2)
self.assertEqual(list(contact_atoms.keys()), ['A', 'B'])
# in pymol `select natoms, chain A within 8.5 of chain B`
# to get the number of contact atoms
self.assertEqual(len(contact_atoms['A']), 341)
self.assertEqual(len(contact_atoms['B']), 333)
def test_get_contact_atoms_cutoff(self):
""""verify get_contact_atoms(cutoff=5.5)"""
cutoff = 5.5
contact_atoms = self.db.get_contact_atoms(cutoff=cutoff)
self.assertIsInstance(contact_atoms, dict)
self.assertEqual(len(contact_atoms), 2)
self.assertEqual(list(contact_atoms.keys()), ['A', 'B'])
self.assertEqual(len(contact_atoms['A']), 185)
self.assertEqual(len(contact_atoms['B']), 174)
def test_get_contact_atoms_allchains(self):
""""verify get_contact_atoms(allchains=True)"""
contact_atoms = self.db.get_contact_atoms(allchains=True)
self.assertIsInstance(contact_atoms, dict)
self.assertEqual(len(contact_atoms), 4)
self.assertEqual(list(contact_atoms.keys()), ['A', 'B', 'L', 'R'])
self.assertEqual(len(contact_atoms['A']), 367)
self.assertEqual(len(contact_atoms['B']), 372)
self.assertEqual(len(contact_atoms['L']), 314)
self.assertEqual(len(contact_atoms['R']), 304)
def test_get_contact_atoms_chain1chain2(self):
""""verify get_contact_atoms(chain1='L', chain2='R')"""
contact_atoms = self.db.get_contact_atoms(chain1='L', chain2='R')
self.assertIsInstance(contact_atoms, dict)
self.assertEqual(len(contact_atoms), 2)
self.assertEqual(list(contact_atoms.keys()), ['L', 'R'])
self.assertEqual(len(contact_atoms['L']), 132)
self.assertEqual(len(contact_atoms['R']), 132)
def test_get_contact_atoms_extend2residue(self):
""""verify get_contact_atoms(extend_to_residue=True)"""
contact_atoms = self.db.get_contact_atoms(extend_to_residue=True)
self.assertIsInstance(contact_atoms, dict)
self.assertEqual(len(contact_atoms), 2)
self.assertEqual(list(contact_atoms.keys()), ['A', 'B'])
# in pymol `select natoms, byres(chain A within 8.5 of chain B)`
# to get the number of contact atoms
self.assertEqual(len(contact_atoms['A']), 405)
self.assertEqual(len(contact_atoms['B']), 409)
def test_get_contact_atoms_onlybackbone_NA(self):
""""verify get_contact_atoms(extend_to_residue=True) for nuclear
acids."""
with self.assertWarns(UserWarning) as ex:
contact_atoms = self.db.get_contact_atoms(only_backbone_atoms=True)
self.assertEqual(len(ex.warnings), 1)
self.assertEqual(ex.warning.args[0],
'No contact atoms detected in pdb2sql')
self.assertIsInstance(contact_atoms, dict)
self.assertEqual(len(contact_atoms), 2)
self.assertEqual(list(contact_atoms.keys()), ['A', 'B'])
self.assertEqual(len(contact_atoms['A']), 0)
self.assertEqual(len(contact_atoms['B']), 0)
def test_get_contact_atoms_onlybackbone_protein(self):
""""verify get_contact_atoms(extend_to_residue=True) for proteins."""
contact_atoms = self.db.get_contact_atoms(
only_backbone_atoms=True,
chain1='L',
chain2='R'
)
self.assertIsInstance(contact_atoms, dict)
self.assertEqual(len(contact_atoms), 2)
self.assertEqual(list(contact_atoms.keys()), ['L', 'R'])
# pymol `select catoms, (chain L and name CA+C+N+O)
# within 8.5 of (chain R and name CA+C+N+O)`
self.assertEqual(len(contact_atoms['L']), 22)
self.assertEqual(len(contact_atoms['R']), 20)
def test_get_contact_atoms_exludeH(self):
""""verify get_contact_atoms(excludeH=True)"""
pdb = Path(pdb_folder, '3CRO_H.pdb')
db = interface(pdb)
contact_atoms = db.get_contact_atoms(excludeH=True)
self.assertIsInstance(contact_atoms, dict)
self.assertEqual(len(contact_atoms), 2)
self.assertEqual(list(contact_atoms.keys()), ['A', 'B'])
self.assertEqual(len(contact_atoms['A']), 341)
self.assertEqual(len(contact_atoms['B']), 333)
def test_get_contact_atoms_contactpairs(self):
""""verify get_contact_atoms(return_conact_pairs=True)"""
contact_atoms = self.db.get_contact_atoms(
return_contact_pairs=True
)
self.assertIsInstance(contact_atoms, dict)
self.assertEqual(len(contact_atoms), 341)
for i in contact_atoms.keys():
with self.subTest(i=i):
self.assertIsInstance(contact_atoms[i], list)
self.assertNotEqual(len(contact_atoms[i]), 0)
self.assertEqual(len(contact_atoms[6]), 1)
self.assertEqual(len(contact_atoms[404]), 19)
def test_get_contact_atoms_alltrue(self):
""""verify get_contact_atoms(True)"""
pdb = Path(pdb_folder, '3CRO_H.pdb')
db = interface(pdb)
contact_atoms = db.get_contact_atoms(
allchains=True,
extend_to_residue=True,
only_backbone_atoms=True,
excludeH=True)
self.assertIsInstance(contact_atoms, dict)
self.assertEqual(len(contact_atoms), 4)
self.assertEqual(list(contact_atoms.keys()), ['A', 'B', 'L', 'R'])
# pymol `select catoms, name CA+C+N+O and byres((chain L and name CA+C+N+O )
# within 8.5 of (chain R and name CA+C+N+O))`
self.assertEqual(len(contact_atoms['A']), 0)
self.assertEqual(len(contact_atoms['B']), 0)
self.assertEqual(len(contact_atoms['L']), 36)
self.assertEqual(len(contact_atoms['R']), 32)
class Test_2_ContactResidues(unittest.TestCase):
"""test get_contact_residues function."""
def setUp(self):
self.pdb = Path(pdb_folder, '3CRO.pdb')
self.db = interface(self.pdb)
def test_get_contact_residues_default(self):
""""verify get_contact_residues default."""
contact_residues = self.db.get_contact_residues()
self.assertIsInstance(contact_residues, dict)
self.assertEqual(len(contact_residues), 2)
self.assertEqual(list(contact_residues.keys()), ['A', 'B'])
# in pymol:
# select natoms, chain A within 8.5 of chain B
# stored.nres = set()
# iterate (natoms), stored.nres.add((chain, resi, resn))
# print(len(stored.nres))
self.assertEqual(len(contact_residues['A']), 20)
self.assertEqual(len(contact_residues['B']), 20)
def test_get_contact_residues_cutoff(self):
""""verify get_contact_residues(cutoff=5.5)"""
cutoff = 5.5
contact_residues = self.db.get_contact_residues(cutoff=cutoff)
self.assertIsInstance(contact_residues, dict)
self.assertEqual(len(contact_residues), 2)
self.assertEqual(list(contact_residues.keys()), ['A', 'B'])
self.assertEqual(len(contact_residues['A']), 20)
self.assertEqual(len(contact_residues['B']), 20)
def test_get_contact_residues_allchains(self):
""""verify get_contact_residues(allchains=True)"""
contact_residues = self.db.get_contact_residues(allchains=True)
self.assertIsInstance(contact_residues, dict)
self.assertEqual(len(contact_residues), 4)
self.assertEqual(list(contact_residues.keys()), ['A', 'B', 'L', 'R'])
self.assertEqual(len(contact_residues['A']), 20)
self.assertEqual(len(contact_residues['B']), 20)
self.assertEqual(len(contact_residues['L']), 47)
self.assertEqual(len(contact_residues['R']), 48)
def test_get_contact_residues_chain1chain2(self):
""""verify get_contact_residues(chain1='L', chain2='R')"""
contact_residues = self.db.get_contact_residues(chain1='L', chain2='R')
self.assertIsInstance(contact_residues, dict)
self.assertEqual(len(contact_residues), 2)
self.assertEqual(list(contact_residues.keys()), ['L', 'R'])
self.assertEqual(len(contact_residues['L']), 20)
self.assertEqual(len(contact_residues['R']), 23)
def test_get_contact_residues_exludeH(self):
""""verify get_contact_residues(excludeH=True)"""
pdb = Path(pdb_folder, '3CRO_H.pdb')
db = interface(pdb)
contact_residues = db.get_contact_residues(
allchains=True, excludeH=True)
self.assertIsInstance(contact_residues, dict)
self.assertEqual(len(contact_residues), 4)
self.assertEqual(list(contact_residues.keys()), ['A', 'B', 'L', 'R'])
self.assertEqual(len(contact_residues['A']), 20)
self.assertEqual(len(contact_residues['B']), 20)
self.assertEqual(len(contact_residues['L']), 47)
self.assertEqual(len(contact_residues['R']), 48)
def test_get_contact_residues_onlybackbone_NA(self):
""""verify get_contact_residues(only_backbone_atoms=True) for NA."""
with self.assertWarns(UserWarning) as ex:
contact_residues = self.db.get_contact_residues(
only_backbone_atoms=True)
self.assertEqual(len(ex.warnings), 1)
self.assertEqual(ex.warning.args[0],
'No contact atoms detected in pdb2sql')
self.assertIsInstance(contact_residues, dict)
self.assertEqual(len(contact_residues), 2)
self.assertEqual(list(contact_residues.keys()), ['A', 'B'])
# pymol `select catoms, (chain L and name CA+C+N+O)
# within 8.5 of (chain R and name CA+C+N+O)`
self.assertEqual(len(contact_residues['A']), 0)
self.assertEqual(len(contact_residues['B']), 0)
def test_get_contact_residues_onlybackbone_protein(self):
""""verify get_contact_residues(only_backbone_atoms=True) for
proteins."""
contact_residues = self.db.get_contact_residues(
only_backbone_atoms=True,
chain1='L',
chain2='R'
)
self.assertIsInstance(contact_residues, dict)
self.assertEqual(len(contact_residues), 2)
self.assertEqual(list(contact_residues.keys()), ['L', 'R'])
# pymol `select catoms, (chain L and name CA+C+N+O)
# within 8.5 of (chain R and name CA+C+N+O)`
self.assertEqual(len(contact_residues['L']), 9)
self.assertEqual(len(contact_residues['R']), 8)
def test_get_contact_residues_contactpairs(self):
""""verify get_contact_residues(return_conact_pairs=True)"""
contact_residues = self.db.get_contact_residues(
chain1='L', chain2='R', return_contact_pairs=True)
self.assertIsInstance(contact_residues, dict)
self.assertEqual(len(contact_residues), 20)
for i in contact_residues.keys():
with self.subTest(i=i):
self.assertIsInstance(contact_residues[i], list)
self.assertNotEqual(len(contact_residues[i]), 0)
# in pymol:
# select natoms, (chain R) within 8.5 of (chain L and resi 60)
self.assertEqual(len(contact_residues[('L', 60, 'GLN')]), 3)
def test_get_contact_residues_alltrue(self):
""""verify get_contact_residues(True)"""
pdb = Path(pdb_folder, '3CRO_H.pdb')
db = interface(pdb)
contact_residues = db.get_contact_residues(
allchains=True, only_backbone_atoms=True, excludeH=True)
self.assertIsInstance(contact_residues, dict)
self.assertEqual(len(contact_residues), 4)
self.assertEqual(list(contact_residues.keys()), ['A', 'B', 'L', 'R'])
self.assertEqual(len(contact_residues['A']), 0)
self.assertEqual(len(contact_residues['B']), 0)
self.assertEqual(len(contact_residues['L']), 9)
self.assertEqual(len(contact_residues['R']), 8)
class Test_3_PDB2SQLInstanceInput(unittest.TestCase):
"""test using pdb2sql instance as input"""
def setUp(self):
self.pdb = Path(pdb_folder, '3CRO.pdb')
def test_get_contact_residues_default(self):
""""verify get_contact_residues default."""
pdb_db = pdb2sql(self.pdb)
self.db = interface(pdb_db)
contact_residues = self.db.get_contact_residues()
self.assertIsInstance(contact_residues, dict)
self.assertEqual(len(contact_residues), 2)
self.assertEqual(list(contact_residues.keys()), ['A', 'B'])
self.assertEqual(len(contact_residues['A']), 20)
self.assertEqual(len(contact_residues['B']), 20)
def test_database_consistency(self):
""""verify initilizing interface with updated pdb2sql database"""
pdb_db = pdb2sql(self.pdb)
pdb_db.update_column('temp', [99]*10)
target = pdb_db.get('*')
self.db = interface(pdb_db)
result = self.db.get('*')
self.assertEqual(target, result)
if __name__ == '__main__':
runner = unittest.TextTestRunner(verbosity=2)
unittest.main(testRunner=runner)
|
py | 7dff754e629cb18421ee61fefee6c51fb004ffd2 | if __name__ == '__main__':
n = int(input())
num = 1
while num <= n:
print(num, end='')
num += 1
|
py | 7dff75c5399c3c9e160985fd2bf7943882a1d067 | from django.db import models
from django.conf import settings
class Activity(models.Model):
user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
device_id = models.PositiveIntegerField()
activity_type = models.CharField(max_length=50, default="standard")
active = models.BooleanField(default=True)
time_started = models.DateTimeField(auto_now=False, auto_now_add=True)
time_ended = models.DateTimeField(auto_now=False, auto_now_add=False, blank=True, null=True)
class GitRepo(models.Model):
name = models.TextField()
activity = models.ForeignKey("Activity", on_delete=models.CASCADE)
time_added = models.DateTimeField(auto_now=False, auto_now_add=True)
class GitCommit(models.Model):
name = models.TextField()
activity = models.ForeignKey("Activity", on_delete=models.CASCADE)
time_added = models.DateTimeField(auto_now=False, auto_now_add=True)
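# --- Illustrative usage (not part of the original models) -------------------
# A hedged sketch of how an activity session might be closed elsewhere in the
# project (a view or management command, say); the filter values are made up.
#
#   from django.utils import timezone
#   activity = Activity.objects.filter(device_id=42, active=True).first()
#   if activity is not None:
#       activity.active = False
#       activity.time_ended = timezone.now()
#       activity.save(update_fields=["active", "time_ended"])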
|
py | 7dff76cc01672c3446eb9c2e376b17b4a80ebd1f | import tensorflow as tf
from tensorflow.python import pywrap_tensorflow
coref_op_library = tf.load_op_library("./coref_kernels.so")
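# Each custom op below is pulled out of the compiled shared library and marked
# as non-differentiable, since no gradient is registered for it.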
spans = coref_op_library.spans
tf.NotDifferentiable("Spans")
antecedents = coref_op_library.antecedents
tf.NotDifferentiable("Antecedents")
extract_mentions = coref_op_library.extract_mentions
tf.NotDifferentiable("ExtractMentions")
distance_bins = coref_op_library.distance_bins
tf.NotDifferentiable("DistanceBins")
|
py | 7dff774b17bde31998d9915c9ab647bbf945d368 | #!/usr/bin/env python3
#
# Copyright 2021 Graviti. Licensed under MIT License.
#
"""Method check_catalog.
:meth:`check_catalog` checks the catalog of :class:`~tensorbay.dataset.dataset.Dataset`
or :class:`~tensorbay.dataset.dataset.FusionDataset`,
including subcatalog, categories and attributes.
For :class:`~tensorbay.label.attributes.AttributeInfo`,
it finds errors in fields such as 'type', 'enum', 'range' and 'parent categories'.
"""
from typing import Iterator, Optional, Tuple
from ..label import AttributeInfo, Catalog, CategoryInfo
from ..utility import NameList
from .pipeline import PipelineForIterable
from .report import Error
class AttributeInfoError(Error):
"""This class defines :class:`AttributeInfoError`.
Arguments:
        name: The name of the attribute which has the error.
"""
def __init__(self, name: str) -> None:
self._name = name
ATTRIBUTE_INFO_PIPELINE: PipelineForIterable[
AttributeInfo, AttributeInfoError
] = PipelineForIterable()
def check_catalog(catalog: Catalog) -> Iterator[Tuple[str, AttributeInfoError]]:
"""The health check method for :class:`~tensorbay.label.catalog.Catalog`.
Arguments:
catalog: The :class:`~tensorbay.label.catalog.Catalog` needs to be checked.
Yields:
The label type and :class:`AttributeInfoError` indicating that
:class:`~tensorbay.label.attributes.AttributeInfo` has invalid 'type', 'enum', 'range'
or 'parent categories' field.
"""
for key in catalog._attrs_fields: # pylint: disable=protected-access
subcatalog = getattr(catalog, key, None)
if not subcatalog:
continue
categories = getattr(subcatalog, "categories", None)
if hasattr(subcatalog, "attributes"):
attribute_info_pipeline = ATTRIBUTE_INFO_PIPELINE.copy()
attribute_info_pipeline.register(CheckParentCategories(categories))
for error in attribute_info_pipeline(subcatalog.attributes.values()):
yield key, error
class InvalidTypeError(AttributeInfoError):
"""The health check class for invalid.
This error is raised to indicate that
:class:`~tensorbay.label.attributes.AttributeInfo` has invalid 'type' field.
"""
def __str__(self) -> str:
return f'AttributeInfo "{self._name}": "type" field is invalid'
@ATTRIBUTE_INFO_PIPELINE.register
def check_invalid_type(attribute_info: AttributeInfo) -> Iterator[InvalidTypeError]:
"""The health check method for invalid type.
:class:`~tensorbay.label.attributes.AttributeInfo` 'type' field.
Arguments:
attribute_info: The :class:`~tensorbay.label.attributes.AttributeInfo` needs to be checked.
Yields:
:class:`InvalidTypeError` indicating that
:class:`~tensorbay.label.attributes.AttributeInfo` has invalid 'type' field.
"""
if not hasattr(attribute_info, "type"):
return
type_ = attribute_info.type
if type_ == "null":
yield InvalidTypeError(attribute_info.name)
return
if not isinstance(type_, list):
return
length = len(type_)
if length in (0, 1):
yield InvalidTypeError(attribute_info.name)
return
if len(set(type_)) != length:
yield InvalidTypeError(attribute_info.name)
class InvalidEnumError(AttributeInfoError):
"""The health check class for invalid enum.
This error is raised to indicate that
:class:`~tensorbay.label.attributes.AttributeInfo` has invalid 'enum' field.
"""
def __str__(self) -> str:
return f'AttributeInfo "{self._name}": "enum" field is invalid'
@ATTRIBUTE_INFO_PIPELINE.register
def check_invalid_enum(attribute_info: AttributeInfo) -> Iterator[InvalidEnumError]:
"""The health check method for invalid enum.
:class:`~tensorbay.label.attributes.AttributeInfo` 'enum' field.
Arguments:
attribute_info: The :class:`~tensorbay.label.attributes.AttributeInfo` needs to be checked.
Yields:
:class:`InvalidEnumError` indicating that
:class:`~tensorbay.label.attributes.AttributeInfo` has invalid 'enum' field.
"""
if not hasattr(attribute_info, "enum"):
return
enum = attribute_info.enum
length = len(enum)
if length in (0, 1):
yield InvalidEnumError(attribute_info.name)
return
if len(set(enum)) != length:
yield InvalidEnumError(attribute_info.name)
class NeitherTypeNorEnumError(AttributeInfoError):
"""The health check class for either type enum.
This error is raised to indicate
:class:`~tensorbay.label.attributes.AttributeInfo` has neither 'enum' nor 'type'.
"""
def __str__(self) -> str:
return f'AttributeInfo "{self._name}": Neither "type" nor "enum" field exists'
@ATTRIBUTE_INFO_PIPELINE.register
def check_neither_type_nor_enum(attribute_info: AttributeInfo) -> Iterator[NeitherTypeNorEnumError]:
"""The health check method for :class:`~tensorbay.label.attributes.AttributeInfo`.
which has neither 'enum' nor 'type' field.
Arguments:
attribute_info: The :class:`~tensorbay.label.attributes.AttributeInfo` needs to be checked.
Yields:
:class:`NeitherTypeNorEnumError` indicating that
:class:`~tensorbay.label.attributes.AttributeInfo` has neither 'enum' nor 'type' field.
"""
if not hasattr(attribute_info, "enum") and not hasattr(attribute_info, "type"):
yield NeitherTypeNorEnumError(attribute_info.name)
class RedundantTypeError(AttributeInfoError):
"""The health check class for redundant type error.
This error is raised to indicate that
:class:`~tensorbay.label.attributes.AttributeInfo` has both 'enum' and 'type'.
"""
def __str__(self) -> str:
return f'AttributeInfo "{self._name}": "type" field is redundant when "enum" field exists'
@ATTRIBUTE_INFO_PIPELINE.register
def check_redundant_type(attribute_info: AttributeInfo) -> Iterator[RedundantTypeError]:
"""The health check method for redundant type.
:class:`~tensorbay.label.attributes.AttributeInfo`
which has both 'enum' and 'type' field.
Arguments:
attribute_info: The :class:`~tensorbay.label.attributes.AttributeInfo` needs to be checked.
Yields:
:class:`RedundantTypeError` indicating that
:class:`~tensorbay.label.attributes.AttributeInfo` has both 'enum' and 'type' field.
"""
if hasattr(attribute_info, "enum") and hasattr(attribute_info, "type"):
yield RedundantTypeError(attribute_info.name)
class RangeNotSupportError(AttributeInfoError):
"""The health check class for range not support error.
This error is raised to indicate :class:`~tensorbay.label.attributes.AttributeInfo`
has range for non number type.
"""
def __str__(self) -> str:
return f'AttributeInfo "{self._name}": Only "number" and "integer" type supports range'
@ATTRIBUTE_INFO_PIPELINE.register
def check_range_not_support(attribute_info: AttributeInfo) -> Iterator[RangeNotSupportError]:
"""The health check method for range not support.
:class:`~tensorbay.label.attributes.AttributeInfo` which has range for non number type.
Arguments:
attribute_info: The :class:`~tensorbay.label.attributes.AttributeInfo` needs to be checked.
Yields:
:class:`RangeNotSupportError` indicating that
:class:`~tensorbay.label.attributes.AttributeInfo` has range for non number type.
"""
if not hasattr(attribute_info, "maximum") and not hasattr(attribute_info, "minimum"):
return
type_ = getattr(attribute_info, "type", None)
if isinstance(type_, list):
if "number" in type_:
return
if "integer" in type_:
return
elif type_ in ("number", "integer"):
return
yield RangeNotSupportError(attribute_info.name)
class InvalidRangeError(AttributeInfoError):
"""The health check class for invalid range error.
This error is raised to indicate that
:class:`~tensorbay.label.attributes.AttributeInfo` has invalid range.
"""
def __str__(self) -> str:
return f'AttributeInfo "{self._name}": Maximum is not larger than minimum'
@ATTRIBUTE_INFO_PIPELINE.register
def check_invalid_range(attribute_info: AttributeInfo) -> Iterator[InvalidRangeError]:
"""The health check method for invalid range.
:class:`~tensorbay.label.attributes.AttributeInfo`
which has invalid range.
Arguments:
attribute_info: The :class:`~tensorbay.label.attributes.AttributeInfo` needs to be checked.
Yields:
:class:`InvalidRangeError` indicating that
:class:`~tensorbay.label.attributes.AttributeInfo` has invalid range.
"""
if attribute_info.maximum is None or attribute_info.minimum is None:
return
if attribute_info.maximum > attribute_info.minimum:
return
yield InvalidRangeError(attribute_info.name)
class InvalidParentCategories(AttributeInfoError):
"""The health check class for invalid parent categories.
This error is raised to indicate that :class:`~tensorbay.label.attributes.AttributeInfo`
    has invalid parent categories. This means the category in parent_categories
cannot be found in Subcatalog.categories.
Arguments:
name: The name of the incorrect attribute.
invalid_parent_category: The name of the incorrect parent_category.
"""
def __init__(self, name: str, invalid_parent_category: str) -> None:
super().__init__(name)
self._invalid_parent_category = invalid_parent_category
def __str__(self) -> str:
return (
f'AttributeInfo "{self._name}":'
f'parent category "{self._invalid_parent_category}" is invalid'
)
class CheckParentCategories:
"""The health check class for parent categories.
    It checks whether every parent category of :class:`~tensorbay.label.attributes.AttributeInfo`
    exists in the subcatalog categories.
Arguments:
categories: The dictionary of :class:`~tensorbay.label.supports.CategoryInfo`
which indicates all valid parent categories.
"""
def __init__(self, categories: Optional[NameList[CategoryInfo]]) -> None:
self._keys = set(categories.keys()) if categories else set()
def __call__(self, attribute_info: AttributeInfo) -> Iterator[InvalidParentCategories]:
"""The health check method for parent categories.
:class:`~tensorbay.label.attributes.AttributeInfo`
which has invalid parent categories.
Arguments:
attribute_info: :class:`~tensorbay.label.attributes.AttributeInfo` needs to be checked.
Yields:
:class:`InvalidParentCategories` indicating that
:class:`~tensorbay.label.attributes.AttributeInfo` has invalid parent categories.
"""
for parent_category in attribute_info.parent_categories:
if parent_category not in self._keys:
yield InvalidParentCategories(attribute_info.name, parent_category)
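# --- Illustrative usage (not part of the original module) -------------------
# A minimal sketch of how the generator above is typically consumed; only
# `check_catalog` and `Catalog` come from this module, the helper name and the
# print format are assumptions.
def _report_catalog_errors(catalog: Catalog) -> int:
    """Print every (label type, error) pair and return the number of errors."""
    count = 0
    for label_type, error in check_catalog(catalog):
        print(f"[{label_type}] {error}")
        count += 1
    return count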
|
py | 7dff77a986c76c9d7b4482a12f6205434db85dbb | import time
from collections import deque
from math import ceil
start = time.time()
with open("18.txt") as f:
rawInput = f.read().splitlines()
# part 1
class Pair:
def __init__(self, nestLevel, parent):
self.nestLevel = nestLevel
self.parent = parent
self.left = None
self.right = None
def setLeft(self, left):
self.left = left
def setRight(self, right):
self.right = right
def incrementLevel(self):
self.nestLevel += 1
if type(self.left) is Pair:
self.left.incrementLevel()
if type(self.right) is Pair:
self.right.incrementLevel()
def checkDeepestLevel(self):
if type(self.left) is Pair:
if type(self.right) is Pair:
return max(self.left.checkDeepestLevel(), self.right.checkDeepestLevel())
else:
return self.left.checkDeepestLevel()
elif type(self.right) is Pair:
return self.right.checkDeepestLevel()
else:
return self.nestLevel
def setParent(self, parent):
self.parent = parent
def incrementLeft(self, value):
if type(self.left) is Pair:
self.left.incrementRightRecursive(value)
else:
self.left += value
def incrementRightRecursive(self, value):
if type(self.right) is Pair:
self.right.incrementRightRecursive(value)
else:
self.right += value
def incrementRight(self, value):
if type(self.right) is Pair:
self.right.incrementLeftRecursive(value)
else:
self.right += value
def incrementLeftRecursive(self, value):
if type(self.left) is Pair:
self.left.incrementLeftRecursive(value)
else:
self.left += value
def parsePair(q, nestLevel, parent):
pair = Pair(nestLevel, parent)
q.popleft()
nextChar = q[0]
if (nextChar == '['):
pair.setLeft(parsePair(q, nestLevel + 1, pair))
elif (nextChar.isdigit()):
q.popleft()
pair.setLeft(int(nextChar))
else:
print("ERROR")
q.popleft()
nextChar = q[0]
if (nextChar == '['):
pair.setRight(parsePair(q, nestLevel + 1, pair))
elif (nextChar.isdigit()):
q.popleft()
pair.setRight(int(nextChar))
else:
print("ERROR")
q.popleft()
return pair
def addPair(num1, num2):
addedPair = Pair(0, None)
num1.incrementLevel()
num2.incrementLevel()
num1.setParent(addedPair)
num2.setParent(addedPair)
addedPair.setLeft(num1)
addedPair.setRight(num2)
return addedPair
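# Explode step (AoC 2021 day 18): for a pair nested four levels deep, its left
# value is added to the nearest regular number to its left, its right value to
# the nearest regular number to its right, and the pair itself is replaced by 0.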
def explodePairLeft(pair):
parent = pair.parent
num = pair.left
found = False
while (parent is not None) and not found:
if pair == parent.right:
found = True
parent.incrementLeft(num)
else:
pair = parent
parent = pair.parent
def explodePairRight(pair):
parent = pair.parent
num = pair.right
found = False
while (parent is not None) and not found:
if pair == parent.left:
found = True
parent.incrementRight(num)
else:
pair = parent
parent = pair.parent
def explodePair(pair):
explodePairLeft(pair)
explodePairRight(pair)
if pair.parent.right == pair:
pair.parent.right = 0
else:
pair.parent.left = 0
def findLevel4Pair(pair):
if pair.nestLevel == 4:
return pair
else:
if type(pair.left) is Pair:
leftPair = findLevel4Pair(pair.left)
if leftPair:
return leftPair
else:
if type(pair.right) is Pair:
return findLevel4Pair(pair.right)
else:
return None
else:
if type(pair.right) is Pair:
return findLevel4Pair(pair.right)
else:
return None
def findPairToSplit(pair):
if type(pair.left) is Pair:
leftPair = findPairToSplit(pair.left)
if leftPair:
return leftPair
if type(pair.right) is Pair:
return findPairToSplit(pair.right)
if pair.right >= 10:
return pair
return None
else:
if pair.left >= 10:
return pair
if type(pair.right) is Pair:
return findPairToSplit(pair.right)
else:
if pair.right >= 10:
return pair
else:
return None
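# Split step: a regular number >= 10 is replaced by a pair of its halves,
# [n // 2, ceil(n / 2)].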
def splitPair(pair):
if type(pair.left) is not Pair and pair.left >= 10:
newPair = Pair(pair.nestLevel + 1, pair)
newPair.setLeft(pair.left // 2)
newPair.setRight(ceil(pair.left / 2))
pair.setLeft(newPair)
return
else:
newPair = Pair(pair.nestLevel + 1, pair)
newPair.setLeft(pair.right // 2)
newPair.setRight(ceil(pair.right / 2))
pair.setRight(newPair)
def reducePair(pair):
pairtoExplode = findLevel4Pair(pair)
while (pairtoExplode):
explodePair(pairtoExplode)
pairtoExplode = findLevel4Pair(pair)
pairToSplit = findPairToSplit(pair)
if pairToSplit:
splitPair(pairToSplit)
reducePair(pair)
def calcMag(pair):
if type(pair.left) is int:
leftMag = pair.left
else:
leftMag = calcMag(pair.left)
if type(pair.right) is int:
rightMag = pair.right
else:
rightMag = calcMag(pair.right)
return (3 * leftMag) + (2 * rightMag)
line1 = deque(rawInput[0])
addedNum = parsePair(line1, 0, None)
for line in rawInput[1:]:
lineQ = deque(line)
lineNum = parsePair(lineQ, 0, None)
addedNum = addPair(addedNum, lineNum)
reducePair(addedNum)
print(calcMag(addedNum))
# part 2
largestMagSoFar = -1
def MagFrom2NumSum(str1, str2):
num1 = parsePair(deque(str1), 0, None)
num2 = parsePair(deque(str2), 0, None)
addedNum = addPair(num1, num2)
reducePair(addedNum)
return calcMag(addedNum)
for i in range(len(rawInput)):
for j in range(i + 1, len(rawInput)):
curMag1 = MagFrom2NumSum(rawInput[i], rawInput[j])
curMag2 = MagFrom2NumSum(rawInput[j], rawInput[i])
largestMagSoFar = max(largestMagSoFar, curMag1, curMag2)
print(largestMagSoFar)
end = time.time()
print(end - start)
|
py | 7dff79f55796846e952a567e50531a7e2550ba59 | #time_plotter.py
import numpy as np
import pandas as pd
import glob
import os
import sys
import matplotlib.pyplot as plt
def get_data(file, column):
df = pd.read_csv(file, header=None)
return df.groupby(column).mean()
def save_plot(df, file):
plt.plot(range(len(df.iloc[:,0])), df.iloc[:,0], linewidth=2.0)
plt.savefig(os.path.splitext(file)[0] + ".png")
def main():
input_length = len(sys.argv) #saves length of command line input
if input_length <= 2:
print ("please input filename and column to divide with") #gives error message for lack of input
else:
regex = sys.argv[1] #saves filename regex
file_list = glob.glob(regex) #saves list of filenames
i = 1 #variable for saving current position in list
column = int(sys.argv[2])
for file in file_list:
output = get_data(file, column)
save_plot(output, file)
if __name__ == "__main__":
main()
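# Illustrative invocation (the file names are made up):
#   python time_plotter.py "timings_*.csv" 0
# Every CSV matching the pattern is grouped by the given column and averaged,
# then the first column of the result is plotted and saved alongside it as a PNG.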
|
py | 7dff7a7e089399799ba8fb1d7e60d50b5a0ad250 |
# Generates variables for the Ansible inventory from the IP plan.
import re
import csv
from ansbl_inv_templates import template_common_router
with open('ip-plan-vpn.csv', 'r', encoding='utf-8') as f_plan:
reader = csv.reader(f_plan, delimiter = ";")
for row in reader:
point_name = 'other_entry'
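        # NB: the string literals below intentionally match the Russian
        # connection-type labels in the CSV ("VPN офис" = VPN office,
        # "VPN НРКП" = VPN NRKP).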
if row[8] == 'VPN офис':
point_name = 'office_border_r'
if row[8] == 'VPN НРКП':
point_name = 'nrkp_border_r'
with open(point_name + row[2], 'w') as f_var:
f_var.write(template_common_router.render({'ip_conn':row[6]}))
# The End
|
py | 7dff7ad5b4da4a8729f9e5fa90e72a53a5a78ef2 | # Copyright 2021 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base Configuration."""
import ml_collections
NUM_EPOCHS = 200
TRAIN_EXAMPLES = 160000
VALID_EXAMPLES = 20000
def get_config():
"""Get the default hyperparameter configuration."""
config = ml_collections.ConfigDict()
config.batch_size = 64
config.eval_frequency = TRAIN_EXAMPLES // config.batch_size
config.num_train_steps = (TRAIN_EXAMPLES // config.batch_size) * NUM_EPOCHS
config.num_eval_steps = VALID_EXAMPLES // config.batch_size
config.weight_decay = 0.
config.grad_clip_norm = 1.
config.save_checkpoints = True
config.restore_checkpoints = True
config.checkpoint_freq = (TRAIN_EXAMPLES //
config.batch_size) * NUM_EPOCHS // 2
config.random_seed = 0
config.learning_rate = .001
config.factors = 'constant * linear_warmup * cosine_decay'
config.warmup = (TRAIN_EXAMPLES // config.batch_size) * 1
config.steps_per_cycle = (TRAIN_EXAMPLES // config.batch_size) * NUM_EPOCHS
# model params
config.model = ml_collections.ConfigDict()
config.model.num_layers = 1
config.model.num_heads = 2
config.model.emb_dim = 32
config.model.dropout_rate = 0.1
config.model.qkv_dim = config.model.emb_dim // 2
config.model.mlp_dim = config.model.qkv_dim * 2
config.model.attention_dropout_rate = 0.1
config.model.classifier_pool = 'MEAN'
config.model.learn_pos_emb = True
config.trial = 0 # dummy for repeated runs.
return config
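# Illustrative only (not part of the original config): a quick way to inspect
# the materialized ConfigDict; the printed fields are arbitrary picks.
if __name__ == "__main__":
  cfg = get_config()
  print(cfg.num_train_steps, cfg.model.emb_dim)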
|
py | 7dff7bf2a09067ffe7e59c87de9858c63ae32d37 | # https://docker-py.readthedocs.io/en/stable/volumes.html
from client_config import client
vol = client.volumes
def remove_volumes(obj, lst, force=False):
for i in lst:
r = obj.get(i.id)
s = r.remove(force=force)
print(s)
if __name__ == "__main__":
# volumes
lst = vol.list()
print(lst)
vol.prune()
remove_volumes(vol, lst, force=True)
|
py | 7dff7c13131d7609f2f26e3b3d6d0a3adb43bddf | # For code completion in VIM and Youcompleteme include necessary COIN-OR
# headers in relative paths based on the original ycm_extra_conf.py from
# YouCompleteMe
# Haroldo, 2019
import os
import ycm_core
flags = [
"-Wall",
"-Wextra",
"-Werror",
"-fopenmp",
"-Wno-long-long",
"-Wno-variadic-macros",
"-Wno-unused-variable",
"-I../../CoinUtils/src/",
"-I../../Osi/src/Osi/",
"-I../../Clp/src/",
"-I../../Clp/src/OsiClp/",
"-I../../ThirdParty/",
"-I../../Cgl/src/",
"-I../../Cgl/src/CglPreProcess/",
"-I../../Cgl/src/CglGomory/",
"-I../../Cgl/src/CglProbing/",
"-I../../Cgl/src/CglKnapsackCover/",
"-I../../Cgl/src/CglRedSplit/",
"-I../../Cgl/src/CglRedSplit2/",
"-I../../Cgl/src/CglGMI/",
"-I../../Cgl/src/CglClique/",
"-I../../Cgl/src/CglFlowCover/",
"-I../../Cgl/src/CglMixedIntegerRounding2/",
"-I../../Cgl/src/CglTwomir/",
"-I../../Cgl/src/CglDuplicateRow/",
"-I../../Cgl/src/CglStored/",
"-I../../Cgl/src/CglLandP/",
"-I../../Cgl/src/CglResidualCapacity/",
"-I../../Cgl/src/CglZeroHalf/",
"-I../../Cgl/src/CglCliqueStrengthening/",
"-I../../Cgl/src/CglOddWheel/",
"-I../../Cgl/src/CglCommon/",
"-I../../Cgl/src/CglBKClique/",
"-std=c++11",
"-x",
"c++",
]
compilation_database_folder = ""
if os.path.exists(compilation_database_folder):
database = ycm_core.CompilationDatabase(compilation_database_folder)
else:
database = None
SOURCE_EXTENSIONS = [".cpp", ".cxx", ".cc", ".c", ".m", ".mm"]
def DirectoryOfThisScript():
return os.path.dirname(os.path.abspath(__file__))
def MakeRelativePathsInFlagsAbsolute(flags, working_directory):
if not working_directory:
return list(flags)
new_flags = []
make_next_absolute = False
path_flags = ["-isystem", "-I", "-iquote", "--sysroot="]
for flag in flags:
new_flag = flag
if make_next_absolute:
make_next_absolute = False
if not flag.startswith("/"):
new_flag = os.path.join(working_directory, flag)
for path_flag in path_flags:
if flag == path_flag:
make_next_absolute = True
break
if flag.startswith(path_flag):
path = flag[len(path_flag) :]
new_flag = path_flag + os.path.join(working_directory, path)
break
if new_flag:
new_flags.append(new_flag)
return new_flags
def IsHeaderFile(filename):
extension = os.path.splitext(filename)[1]
return extension in [".h", ".hxx", ".hpp", ".hh"]
def GetCompilationInfoForFile(filename):
# The compilation_commands.json file generated by CMake does not have entries
# for header files. So we do our best by asking the db for flags for a
# corresponding source file, if any. If one exists, the flags for that file
# should be good enough.
if IsHeaderFile(filename):
basename = os.path.splitext(filename)[0]
for extension in SOURCE_EXTENSIONS:
replacement_file = basename + extension
if os.path.exists(replacement_file):
compilation_info = database.GetCompilationInfoForFile(replacement_file)
if compilation_info.compiler_flags_:
return compilation_info
return None
return database.GetCompilationInfoForFile(filename)
def FlagsForFile(filename, **kwargs):
if database:
# Bear in mind that compilation_info.compiler_flags_ does NOT return a
# python list, but a "list-like" StringVec object
compilation_info = GetCompilationInfoForFile(filename)
if not compilation_info:
return None
final_flags = MakeRelativePathsInFlagsAbsolute(
compilation_info.compiler_flags_, compilation_info.compiler_working_dir_
)
# NOTE: This is just for YouCompleteMe; it's highly likely that your project
# does NOT need to remove the stdlib flag. DO NOT USE THIS IN YOUR
# ycm_extra_conf IF YOU'RE NOT 100% SURE YOU NEED IT.
try:
final_flags.remove("-stdlib=libc++")
except ValueError:
pass
else:
relative_to = DirectoryOfThisScript()
final_flags = MakeRelativePathsInFlagsAbsolute(flags, relative_to)
return {"flags": final_flags}
|
py | 7dff7d5980b818ebdddda82896260a1b9b52ca88 | from __future__ import absolute_import
# Copyright (c) 2010-2015 openpyxl
from openpyxl.descriptors.serialisable import Serialisable
from openpyxl.descriptors import (
Alias,
Typed,
String,
Float,
Integer,
Bool,
NoneSet,
Set,
)
from openpyxl.descriptors.excel import (
ExtensionList,
HexBinary,
Guid,
Relation,
Base64Binary,
)
class WorkbookProtection(Serialisable):
tagname = "workbookPr"
workbookPassword = HexBinary(allow_none=True)
workbook_password = Alias("workbookPassword")
workbookPasswordCharacterSet = String(allow_none=True)
revisionsPassword = HexBinary(allow_none=True)
revision_password = Alias("revisionsPassword")
revisionsPasswordCharacterSet = String(allow_none=True)
lockStructure = Bool(allow_none=True)
lock_structure = Alias("lockStructure")
lockWindows = Bool(allow_none=True)
lock_windows = Alias("lockWindows")
lockRevision = Bool(allow_none=True)
lock_revision = Alias("lockRevision")
revisionsAlgorithmName = String(allow_none=True)
revisionsHashValue = Base64Binary(allow_none=True)
revisionsSaltValue = Base64Binary(allow_none=True)
revisionsSpinCount = Integer(allow_none=True)
workbookAlgorithmName = String(allow_none=True)
workbookHashValue = Base64Binary(allow_none=True)
workbookSaltValue = Base64Binary(allow_none=True)
workbookSpinCount = Integer(allow_none=True)
def __init__(self,
workbookPassword=None,
workbookPasswordCharacterSet=None,
revisionsPassword=None,
revisionsPasswordCharacterSet=None,
lockStructure=None,
lockWindows=None,
lockRevision=None,
revisionsAlgorithmName=None,
revisionsHashValue=None,
revisionsSaltValue=None,
revisionsSpinCount=None,
workbookAlgorithmName=None,
workbookHashValue=None,
workbookSaltValue=None,
workbookSpinCount=None,
):
self.workbookPassword = workbookPassword
self.workbookPasswordCharacterSet = workbookPasswordCharacterSet
self.revisionsPassword = revisionsPassword
self.revisionsPasswordCharacterSet = revisionsPasswordCharacterSet
self.lockStructure = lockStructure
self.lockWindows = lockWindows
self.lockRevision = lockRevision
self.revisionsAlgorithmName = revisionsAlgorithmName
self.revisionsHashValue = revisionsHashValue
self.revisionsSaltValue = revisionsSaltValue
self.revisionsSpinCount = revisionsSpinCount
self.workbookAlgorithmName = workbookAlgorithmName
self.workbookHashValue = workbookHashValue
self.workbookSaltValue = workbookSaltValue
self.workbookSpinCount = workbookSpinCount
# Backwards compatibility
DocumentSecurity = WorkbookProtection
class FileSharing(Serialisable):
tagname = "fileSharing"
readOnlyRecommended = Bool(allow_none=True)
userName = String(allow_none=True)
reservationPassword = HexBinary(allow_none=True)
algorithmName = String(allow_none=True)
hashValue = HexBinary(allow_none=True)
saltValue = HexBinary(allow_none=True)
spinCount = Integer(allow_none=True)
def __init__(self,
readOnlyRecommended=None,
userName=None,
reservationPassword=None,
algorithmName=None,
hashValue=None,
saltValue=None,
spinCount=None,
):
self.readOnlyRecommended = readOnlyRecommended
self.userName = userName
self.reservationPassword = reservationPassword
self.algorithmName = algorithmName
self.hashValue = hashValue
self.saltValue = saltValue
self.spinCount = spinCount
|
py | 7dff7dcfc1263e5234ec0e553526faa7191010d3 | # Generated by Django 2.1.7 on 2019-03-19 14:09
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('ui', '0101_name_as_text'),
]
operations = [
migrations.AddField(
model_name='contract',
name='is_identity',
field=models.BooleanField(default=False, verbose_name='is identity?'),
),
]
|
py | 7dff7e4de0daf5e860a33134fcd5fd89e2cd4bd0 | import time
import network
import machine
import font_title, font_body
from inkplate10 import Inkplate
from secrets import ssid, password
import urequests
import writer
START_TIME = time.time()
# The number of seconds to wait between refreshing the page
DELAY_TIME_S = 300
def log(message):
"""Utility function adds elapsed time to the start of `message` before printing
it
"""
elapsed = time.time() - START_TIME
print("{elapsed}: {message}".format(elapsed=elapsed, message=message))
def connect_network():
"""Connect to the wifi network specified by ssid and password"""
sta_if = network.WLAN(network.STA_IF)
if not sta_if.isconnected():
log("connecting to network...")
sta_if.active(True)
sta_if.connect(ssid, password)
while not sta_if.isconnected():
pass
log("connected to network with IP %s" % sta_if.ifconfig()[0])
class Poem:
"""Class represents a poem and provides a few useful constants related to it"""
def __init__(self, title: str, author: str, content: str) -> None:
self.title = title
self.author = author
self.content = content
self.lines = self.content.split("\n")
self.num_lines = len(self.lines)
self.max_line_length = max([len(l) for l in self.lines])
log("Instantiated poem %s by %s" % (title, author))
def render(self, display, page: int) -> None:
# A little hacky, but the following geometric constants place things on the
# page
left_margin = 40
top_margin = 40
header_height = 120
line_spacing = 15
# x-offset for all lines on this page
xoff = int(page * display.width()/2 + left_margin)
# Display an approximately 2/3 width line under the title
display.drawLine(xoff, header_height,
xoff + 400, header_height,
display.BLACK)
# Calculate how many blank lines to leave to get the poem vertically centered
max_lines = (display.height()-header_height) / line_spacing
skip_lines = int((max_lines - self.num_lines)/2)
title_writer = writer.Writer(display, font_title, verbose=False)
body_writer = writer.Writer(display, font_body, verbose=False)
# Write the header
title_writer.set_textpos(display, top_margin, xoff)
title_writer.printstring(self.title)
body_writer.set_textpos(display, top_margin+50, xoff)
body_writer.printstring(self.author)
# Write the poem
yoff = header_height + skip_lines * line_spacing
for x in self.lines:
body_writer.set_textpos(display, yoff, xoff)
body_writer.printstring(x)
yoff += line_spacing
class Poet:
"""The Poet class has a "speak" generator function, which creates an infinite
iterator over poems on the poemist API
"""
# Endpoint to GET poems from
url = "https://www.poemist.com/api/v1/randompoems"
def __init__(self):
"""Instantiating a Poet will cause it to `speak` two random poems, write
them to the book, and then exit
"""
connect_network()
self.display = Inkplate(Inkplate.INKPLATE_1BIT)
self.display.begin()
self.display.clearDisplay()
poem = next(self.speak())
poem.render(self.display, 0)
poem = next(self.speak())
poem.render(self.display, 1)
self.display.display()
time.sleep(10)
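        # machine.deepsleep() expects milliseconds, hence the factor of 1000 below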
machine.deepsleep(1000*DELAY_TIME_S)
def speak(self): # -> Generator[Poem] (no typing module on micropython)
"""yield a random Poem object"""
while True:
log("Making get request to %s" % self.url)
response = urequests.get(self.url)
try:
result = response.json()
except ValueError:
# If things fail, print the full response and wait 5 minutes
print(response)
print(response.content)
time.sleep(300)
continue
for poem_payload in result:
poem = Poem(poem_payload["title"],
poem_payload["poet"]["name"],
poem_payload["content"])
# The following is a little hacky, but filter out poems that wont display
# well on the epaper display due to number of lines, line length,
# long title or presence of unicode characters.
if poem.content is None:
continue
                if not all(ord(c) < 128 for c in poem.content):
                    log("Skipping poem {title}, contains unicode".format(
                        title=poem.title))
                    continue
if poem.num_lines > 45:
log("Skipping poem {title}, too many lines ({num_lines})".format(
title=poem.title, num_lines=poem.num_lines))
continue
if poem.max_line_length > 75:
log("Skipping poem {title}, long lines ({max_line_length})".format(
title=poem.title, max_line_length=poem.max_line_length))
continue
if len(poem.title) > 40:
log("Skipping poem {title}, long title".format(
title=poem.title))
continue
yield poem
# Additional sleep here. In the rare case that we reject 5 consecutive poems,
# ensure that there is some space before making another API request
time.sleep(10)
Poet()
|
py | 7dff7eb8e3ff566434792e41b29be9c992e50637 | from unittest import TestCase
from zmodulo.plot.line.arrow import Arrow
__author__ = 'aruff'
class TestArrow(TestCase):
"""
Tester for the Z-Tree PlotLine Arrow class
"""
def setUp(self):
self.arrow = Arrow()
def test_default_initialization(self):
"""
Tests the default initialization of the Arrow class
"""
self.assertEqual(self.arrow.to_str(), "\tarrowclosed = FALSE;\n")
if __name__ == '__main__':
    unittest.main()
|
py | 7dff7ed3d8421987917158d18c9063983ac082d4 | '''
Created on 13 Aug 2017
Utilities module to provide a linked list for the exersises. IThere is no out of the box
collection in the python collections modules. In real life, one would usually use python lists
(they are not linked lists), or deques.
Disclaimer: The implementation here won't be very efficient, is not pythonic etc.
if one would be serious with making a linked list, one would have a look to a c impl.
Also, gpes without saying, this is not thread safe
@author: igoroya
'''
import string
class SinglyLinkedList(object):
class Node(object):
def __init__(self, cargo, next_node = None):
self.cargo = cargo
self.next_node = next_node
def __eq__(self, other):
return self.cargo == other.cargo and self.next_node and other.next_node
def __repr__(self):
return self.cargo.__repr__()
def __init__(self):
'''
create an empty linked list
'''
self.head_node = None
self.__current = self.head_node
self._length = 0
def __len__(self):
return self._length
def __contains__(self, value):
current = self.head_node
while current:
if current.cargo == value:
return True
current = current.next_node
return False
def __iter__(self):
while self.__current:
current = self.__current
self.__current = self.__current.next_node
yield current
self.__current = self.head_node
def __repr__(self):
node = self.head_node
reps = []
while node is not None:
reps.append(node.__repr__())
node = node.next_node
return reps.__repr__()
def insert(self, value, position = 0):
if value is None:
raise ValueError('cannot add None values')
if position < 0:
raise ValueError('cannot add values at negative position')
if position > self._length:
raise ValueError('cannot add value at position {} when list size is {}'.format(position, self._length))
if position == 0:
self.node = self.Node(value, self.head_node)
self.head_node = self.node
self.__current = self.head_node
self._length += 1
else:
current = self.head_node
count = 0
while current and ((count + 1) <= position):
if count + 1 == position:
self.node = self.Node(value, current.next_node)
current.next_node = self.node
self._length += 1
return
else:
current = current.next_node
count += 1
def pop(self, value):
'''
Remove a node based on the cargo data and pop the node object.
None is returned if value does not exist.
Update head if needed
'''
node = self.head_node
if node.cargo == value:
self.head_node = node.next_node
self._length -= 1
return node
        while node.next_node is not None:
            if node.next_node.cargo == value:
node.next_node = node.next_node.next_node
self._length -= 1
return node
node = node.next_node
return None
def search(self, value):
'''
Look for the 1st node that carries value and return it,
or None if it does not exist
'''
current = self.head_node
while current:
if current.cargo == value:
return current
current = current.next_node
return None
def add_in_front(self, value):
self.insert(value, 0)
def append(self, value):
self.insert(value, self._length)
def make_sample_list():
'''
    Helper function to create a sample list for the exercises
'''
my_list = SinglyLinkedList()
value = 'Head'
my_list.append(value)
my_string = string.ascii_lowercase[:5]
for c in my_string:
my_list.append(c)
#do twice to have dups
for c in my_string:
my_list.append(c)
value = 'Tail'
my_list.append(value)
return my_list
def make_sample_list_nums():
'''
    Helper function to create a sample list for the exercises
'''
my_list = SinglyLinkedList()
#do twice to have dups
for n in range(6):
my_list.append(2*n)
for n in range(6):
my_list.append(n)
return my_list
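# --- Illustrative usage (not part of the original module) -------------------
# A quick smoke test of the API above, driven by the existing sample-list
# helper; the commented values follow from the insert/append/pop semantics.
if __name__ == '__main__':
    my_list = make_sample_list()
    print(len(my_list))       # 12 nodes: 'Head' + 2 x 'abcde' + 'Tail'
    print('c' in my_list)     # True
    my_list.pop('Head')       # the head node is removed and returned
    print(my_list.head_node)  # 'a'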
|
py | 7dff7f2e05f612d2852d129dadb12d2deca8b6ce | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for trackable object SavedModel save."""
import os
from absl.testing import parameterized
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
from tensorflow.core.protobuf import graph_debug_info_pb2
from tensorflow.python.client import session as session_lib
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import mirrored_strategy
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import meta_graph
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import test_util
from tensorflow.python.framework import versions
from tensorflow.python.lib.io import file_io
from tensorflow.python.module import module
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.saved_model import load
from tensorflow.python.saved_model import loader
from tensorflow.python.saved_model import loader_impl
from tensorflow.python.saved_model import save
from tensorflow.python.saved_model import save_options
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.training import saver
from tensorflow.python.training.tracking import tracking
from tensorflow.python.training.tracking import util
from tensorflow.python.util import compat
def _run_signature(session, meta_graph_def, inputs, signature_key):
signature = meta_graph_def.signature_def[signature_key]
assert set(inputs.keys()) == set(signature.inputs.keys())
feed_dict = {}
for arg_name in inputs.keys():
input_tensor = session.graph.get_tensor_by_name(
signature.inputs[arg_name].name)
feed_dict[input_tensor] = inputs[arg_name]
output_dict = {}
for output_name, output_tensor_info in signature.outputs.items():
output_dict[output_name] = session.graph.get_tensor_by_name(
output_tensor_info.name)
return session.run(output_dict, feed_dict=feed_dict)
def _import_and_infer(
save_dir,
inputs,
signature_key=signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY):
"""Import a SavedModel into a TF 1.x-style graph and run `signature_key`."""
graph = ops.Graph()
with graph.as_default(), session_lib.Session() as session:
model = loader.load(session, [tag_constants.SERVING], save_dir)
return _run_signature(session, model, inputs, signature_key)
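def _example_save_and_import(tmp_dir):
  """Illustrative sketch, not part of the original test suite: the basic round
  trip the helpers above support -- save a trackable object with a signature,
  then load it into a TF 1.x-style graph and run it via _import_and_infer. The
  directory name is arbitrary."""
  root = tracking.AutoTrackable()
  root.f = def_function.function(
      lambda x: x + 1.,
      input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])
  example_dir = os.path.join(tmp_dir, "example_saved_model")
  save.save(root, example_dir, root.f)
  return _import_and_infer(example_dir, {"x": 1.})  # == {"output_0": 2.}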
class SaveTest(test.TestCase, parameterized.TestCase):
def test_method_save_signature(self):
root = tracking.AutoTrackable()
root.f = def_function.function(
lambda x: 2. * x,
input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])
root.f(constant_op.constant(1.))
save_dir = os.path.join(self.get_temp_dir(), "saved_model")
save.save(root, save_dir, root.f)
self.assertEqual({"output_0": 2.}, _import_and_infer(save_dir, {"x": 1.}))
def test_method_save_list_func(self):
root = tracking.AutoTrackable()
@def_function.function
def case_fn(x):
branch_index = constant_op.constant(1)
branches = [lambda: x, lambda: x + 1]
case_out = control_flow_ops.switch_case(branch_index, branches)
return case_out
root.f = def_function.function(
lambda x: 2. * case_fn(x),
input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])
root.f(constant_op.constant(1.))
save_dir = os.path.join(self.get_temp_dir(), "saved_model")
save.save(root, save_dir, root.f)
self.assertEqual({"output_0": 4.}, _import_and_infer(save_dir, {"x": 1.}))
def test_method_save_concrete(self):
root = tracking.AutoTrackable()
root.f = def_function.function(lambda z: {"out": 2. * z})
root.f(constant_op.constant(1.))
save_dir = os.path.join(self.get_temp_dir(), "saved_model")
save.save(
root, save_dir, {
"non_default_key":
root.f.get_concrete_function(
tensor_spec.TensorSpec(None, dtypes.float32))
})
self.assertEqual({"out": 2.},
_import_and_infer(
save_dir, {"z": 1.}, signature_key="non_default_key"))
def test_method_save_annotated_function(self):
# This test is only meaningful with Python 3 because Python 2's
# inspect.getargspec doesn't save annotations.
root = tracking.AutoTrackable()
class UnknownType(object): # pylint: disable=unused-variable
pass
def annotated_function(z):
return {"out": 2. * z}
# Same effect as annotating function like the following.
# def annotated_function("z": UnknownType) -> UnknownType:
# This is a workaround since Python 2 does not support annotations and
# our presubmit linter catches it.
annotated_function.__annotations__ = {
"z": UnknownType,
"return": UnknownType
}
root.f = def_function.function(annotated_function)
root.f(constant_op.constant(1.))
save_dir = os.path.join(self.get_temp_dir(), "saved_model")
save.save(
root, save_dir, {
"non_default_key":
root.f.get_concrete_function(
tensor_spec.TensorSpec(None, dtypes.float32))
})
self.assertEqual({"out": 2.},
_import_and_infer(
save_dir, {"z": 1.}, signature_key="non_default_key"))
def test_unsaveable_func_graph(self):
root = module.Module()
@def_function.function(input_signature=[])
def nested_f():
ops.get_default_graph().mark_as_unsaveable("ERROR MSG")
return 1
@def_function.function(input_signature=[])
def f():
return nested_f()
root.f = f
with self.assertRaisesRegex(ValueError, "ERROR MSG"):
save.save(root, os.path.join(self.get_temp_dir(), "saved_model"))
def test_untracked_variable_useful_message(self):
root = module.Module()
v = variables.Variable(1., name="some_unique_name")
@def_function.function(input_signature=[])
def f():
return v.read_value()
root.f = f
with self.assertRaisesRegex(
AssertionError, "Trackable referencing this tensor.*some_unique_name"):
save.save(root, os.path.join(self.get_temp_dir(), "saved_model"))
def test_version_information_included(self):
root = tracking.AutoTrackable()
save_dir = os.path.join(self.get_temp_dir(), "saved_model")
save.save(root, save_dir)
saved_model_proto = loader_impl.parse_saved_model(save_dir)
self.assertEqual(
versions.__version__,
saved_model_proto.meta_graphs[0].meta_info_def.tensorflow_version)
self.assertEqual(
versions.__git_version__,
saved_model_proto.meta_graphs[0].meta_info_def.tensorflow_git_version)
def test_non_concrete_error(self):
root = tracking.AutoTrackable()
root.f = def_function.function(lambda x: 2. * x)
root.f(constant_op.constant(1.))
save_dir = os.path.join(self.get_temp_dir(), "saved_model")
with self.assertRaisesRegex(ValueError, "Expected a TensorFlow function"):
save.save(root, save_dir, root.f)
def test_captures_unreachable_variable(self):
root = tracking.AutoTrackable()
unreachable_variable = variables.Variable([5.0, 2.0])
root.reachable_variable = variables.Variable([1.0, 3.0])
@def_function.function
def increase_variable(x):
return 2 * unreachable_variable * x + root.reachable_variable
root.f = increase_variable
self.assertAllEqual([101.0, 83.0],
root.f(constant_op.constant([10.0, 20.0])).numpy())
save_dir = os.path.join(self.get_temp_dir(), "saved_model")
with self.assertRaisesRegex(KeyError, "not reachable from root"):
save.save(root, save_dir)
def test_nested_inputs(self):
root = tracking.AutoTrackable()
root.f = def_function.function(
lambda x: 2. * x[0],
input_signature=([
tensor_spec.TensorSpec(None, dtypes.float32),
tensor_spec.TensorSpec(None, dtypes.float32)
],))
root.f([constant_op.constant(1.), constant_op.constant(1.)])
def test_nested_outputs(self):
root = tracking.AutoTrackable()
root.f = def_function.function(lambda x: (2. * x, (3. * x, 4. * x)))
root.f(constant_op.constant(1.))
to_save = root.f.get_concrete_function(constant_op.constant(1.))
save_dir = os.path.join(self.get_temp_dir(), "saved_model")
with self.assertRaisesRegex(ValueError, "non-Tensor value"):
save.save(root, save_dir, to_save)
def test_nested_dict_outputs(self):
root = util.Checkpoint(
f=def_function.function(lambda x: { # pylint: disable=g-long-lambda
"a": 2. * x,
"b": (3. * x, 4. * x)
}))
root.f(constant_op.constant(1.))
to_save = root.f.get_concrete_function(constant_op.constant(1.))
save_dir = os.path.join(self.get_temp_dir(), "saved_model")
with self.assertRaisesRegex(ValueError, "non-Tensor value"):
save.save(root, save_dir, to_save)
def test_variable(self):
root = tracking.AutoTrackable()
root.v1 = variables.Variable(3.)
root.v2 = variables.Variable(2.)
root.f = def_function.function(lambda x: root.v1 * root.v2 * x)
root.f(constant_op.constant(1.))
to_save = root.f.get_concrete_function(constant_op.constant(1.))
save_dir = os.path.join(self.get_temp_dir(), "saved_model")
save.save(root, save_dir, to_save)
self.assertAllEqual({"output_0": 12.},
_import_and_infer(save_dir, {"x": 2.}))
def test_single_function_default_signature(self):
model = tracking.AutoTrackable()
model.f = def_function.function(lambda: 3., input_signature=())
model.f()
save_dir = os.path.join(self.get_temp_dir(), "saved_model")
save.save(model, save_dir)
self.assertAllClose({"output_0": 3.}, _import_and_infer(save_dir, {}))
def test_single_function_no_signature(self):
model = tracking.AutoTrackable()
model.f = def_function.function(lambda: 3.)
save_dir = os.path.join(self.get_temp_dir(), "saved_model")
save.save(model, save_dir)
def test_save_function_no_trace(self):
class ObjWithFunction(module.Module):
@def_function.function
def foo(self, a):
return a
@def_function.function
def bar(self, a):
return a + 1
root = ObjWithFunction()
root.bar(1)
save_dir = os.path.join(self.get_temp_dir(), "saved_model")
with self.assertLogs(level="WARNING") as logs:
save.save(root, save_dir)
expected_message = (
"WARNING:absl:Found untraced functions such as foo while saving "
"(showing 1 of 1). These functions will not be directly callable after "
"loading.")
self.assertIn(expected_message, logs.output)
def test_find_default_save_function(self):
class ObjWithDefaultSignature(util.Checkpoint):
@def_function.function(input_signature=[
tensor_spec.TensorSpec(shape=None, dtype=dtypes.float32)
])
def _default_save_signature(self, x):
return x + x + 1
obj = ObjWithDefaultSignature()
save_dir = os.path.join(self.get_temp_dir(), "saved_model")
save.save(obj, save_dir)
self.assertAllClose({"output_0": 7.},
_import_and_infer(save_dir, {"x": 3.}))
def test_docstring(self):
class Adder(module.Module):
@def_function.function(input_signature=[
tensor_spec.TensorSpec(shape=None, dtype=dtypes.float32)
])
def add(self, x):
return x + x + 1.
to_save = Adder()
to_save.add(constant_op.constant(1.))
save_dir = os.path.join(self.get_temp_dir(), "saved_model")
save.save(to_save, save_dir)
self.assertAllClose({"output_0": 7.},
_import_and_infer(save_dir, {"x": 3.}))
def test_datastructures(self):
class HasDatastructures(util.Checkpoint):
def __init__(self):
self.a = [1.]
self.a.append(variables.Variable(2.))
self.b = {"a": variables.Variable(3.)}
@def_function.function(input_signature=[
tensor_spec.TensorSpec(shape=None, dtype=dtypes.float32)
])
def add(self, x):
return x + math_ops.add_n(self.a) + self.b["a"]
to_save = HasDatastructures()
to_save.add(constant_op.constant(1.))
save_dir = os.path.join(self.get_temp_dir(), "saved_model")
save.save(to_save, save_dir)
self.assertAllClose({"output_0": 10.},
_import_and_infer(save_dir, {"x": 4.}))
def test_default_attr_stripping(self):
class Complex(util.Checkpoint):
@def_function.function(input_signature=[])
def __call__(self):
return math_ops.complex(
constant_op.constant(1.), constant_op.constant(2.), name="complex")
to_save = Complex()
to_save()
save_dir = os.path.join(self.get_temp_dir(), "saved_model")
save.save(to_save, save_dir)
graph = ops.Graph()
with graph.as_default(), self.session(graph) as session:
loader.load(session, [tag_constants.SERVING], save_dir)
func, = [f for name, f in graph._functions.items() if "call" in name]
complex_node, = [
node for node in func.definition.node_def if node.op == "Complex"
]
self.assertNotIn("T", complex_node.attr)
self.assertNotIn("Tout", complex_node.attr)
def test_signature_attribute_reserved(self):
root = util.Checkpoint(signatures=variables.Variable(1.))
save_dir = os.path.join(self.get_temp_dir(), "saved_model")
with self.assertRaisesRegex(ValueError, "del obj.signatures"):
save.save(root, save_dir)
del root.signatures
save.save(root, save_dir)
def test_function_with_captured_dataset(self):
if test_util.is_gpu_available():
self.skipTest("Currently broken when a GPU is available.")
class HasDataset(module.Module):
def __init__(self):
super(HasDataset, self).__init__()
self.dataset = (dataset_ops.Dataset.range(5).map(lambda x: x**2))
@def_function.function
def __call__(self, x):
current_sum = array_ops.zeros([], dtype=dtypes.int64)
for element in self.dataset:
current_sum += x * element
return current_sum
root = HasDataset()
save_dir = os.path.join(self.get_temp_dir(), "saved_model")
save.save(
root,
save_dir,
signatures=root.__call__.get_concrete_function(
tensor_spec.TensorSpec(None, dtypes.int64)))
self.assertAllClose({"output_0": 3 * (1 + 4 + 9 + 16)},
_import_and_infer(save_dir, {"x": 3}))
def test_variable_args_cannot_be_used_as_signature(self):
@def_function.function(input_signature=[
resource_variable_ops.VariableSpec(shape=[], dtype=dtypes.int32)
])
def f(unused_v):
return 1
root = tracking.AutoTrackable()
root.f = f.get_concrete_function()
with self.assertRaisesRegex(ValueError,
"tf.Variable inputs cannot be exported"):
save.save(
root,
os.path.join(self.get_temp_dir(), "saved_model"),
signatures=root.f)
def test_export_correct_output_shapes(self):
"""Asserts that nodes are exported with the correct number of output shapes.
After backpropagation rewrite, functions are rewritten with additional
outputs. When exporting to SavedModel, the shapes of the additional outputs
were incorrectly added to the FunctionDef proto (b/133666530).
"""
obj = tracking.AutoTrackable()
obj.v = variables.Variable(2.)
@def_function.function(
input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])
def f(x):
return (math_ops.multiply(obj.v, x), math_ops.multiply(obj.v,
(x + 1)), None)
obj.f = f
@def_function.function(
input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])
def g(x):
return obj.f(x)[1]
obj.g = g
# After the following lines, the concrete functions of obj.g and obj.f are
# rewritten with many extra outputs.
with backprop.GradientTape():
obj.g(constant_op.constant(3.0))
save_dir = os.path.join(self.get_temp_dir(), "saved_model")
save.save(obj, save_dir, signatures={"g": obj.g})
graph_def = loader_impl.parse_saved_model(save_dir).meta_graphs[0].graph_def
def assert_correct_number_of_output_shapes(node):
if node.op == "StatefulPartitionedCall":
fn_name = node.attr["f"].func.name
if fn_name.startswith("__inference_f"):
self.assertLen(node.attr["_output_shapes"].list.shape, 2)
if fn_name.startswith("__inference_g"):
self.assertLen(node.attr["_output_shapes"].list.shape, 1)
for f in graph_def.library.function:
if (f.signature.name.startswith("__inference_f") or
f.signature.name.startswith("__inference_g")):
for node in f.node_def:
assert_correct_number_of_output_shapes(node)
def test_save_cached_variable(self):
with ops.Graph().as_default(), session_lib.Session() as session:
obj = tracking.AutoTrackable()
obj.v = variables.Variable(2., caching_device=lambda op: op.device)
obj.w = variables.Variable(3.)
session.run([obj.v.initializer, obj.w.initializer])
@def_function.function(input_signature=[])
def f():
return obj.v + obj.w
obj.f = f
save_dir = os.path.join(self.get_temp_dir(), "saved_model")
save.save(obj, save_dir, signatures=obj.f)
self.assertAllClose({"output_0": 5}, _import_and_infer(save_dir, {}))
@parameterized.named_parameters(
("_SaveDevices_ExportMetaGraph",
save_options.VariablePolicy.SAVE_VARIABLE_DEVICES, True),
("_DiscardDevices_ExportMetaGraph", save_options.VariablePolicy.NONE,
True), ("_SaveDevices_Save",
save_options.VariablePolicy.SAVE_VARIABLE_DEVICES, False),
("_DiscardDevices_Save", save_options.VariablePolicy.NONE, False))
def test_save_variable_devices(self, save_devices, meta_graph_only):
context._reset_context()
cpus = context.context().list_physical_devices("CPU")
if len(cpus) == 1:
context.context().set_logical_device_configuration(
cpus[0], [
context.LogicalDeviceConfiguration(),
context.LogicalDeviceConfiguration()
])
context.ensure_initialized()
root = tracking.AutoTrackable()
with ops.device("CPU:0"):
root.v0 = variables.Variable(1., name="v0")
with ops.device("CPU:1"):
root.v1 = variables.Variable(1., name="v1")
options = save_options.SaveOptions(
experimental_variable_policy=save_devices)
file_name = os.path.join(self.get_temp_dir(), "saved_model")
if meta_graph_only:
save.export_meta_graph(obj=root, filename=file_name, options=options)
else:
save.save(obj=root, export_dir=file_name, options=options)
meta = None
if meta_graph_only:
meta = meta_graph.read_meta_graph_file(file_name)
else:
meta = loader_impl.parse_saved_model(file_name).meta_graphs[0]
# Check devices in meta graph nodes.
graph_def = meta.graph_def
v0 = next((n for n in graph_def.node if n.name == "v0"), None)
v1 = next((n for n in graph_def.node if n.name == "v1"), None)
self.assertIsNotNone(v0)
self.assertIsNotNone(v1)
if save_devices == save_options.VariablePolicy.SAVE_VARIABLE_DEVICES:
self.assertIn("CPU:0", v0.device)
self.assertIn("CPU:1", v1.device)
else:
self.assertEmpty(v0.device)
self.assertEmpty(v1.device)
# Check devices in object graph nodes.
object_graph_def = meta.object_graph_def
v0 = next((n.variable
for n in object_graph_def.nodes
if n.HasField("variable") and n.variable.name == "v0"), None)
v1 = next((n.variable
for n in object_graph_def.nodes
if n.HasField("variable") and n.variable.name == "v1"), None)
self.assertIsNotNone(v0)
self.assertIsNotNone(v1)
if save_devices == save_options.VariablePolicy.SAVE_VARIABLE_DEVICES:
self.assertIn("CPU:0", v0.device)
self.assertIn("CPU:1", v1.device)
else:
self.assertEmpty(v0.device)
self.assertEmpty(v1.device)
@parameterized.named_parameters(
("_ExpandDistributedVariablesWithPolicy",
save_options.VariablePolicy.EXPAND_DISTRIBUTED_VARIABLES, True),
("_ExpandDistributedVariablesWithoutPolicy",
save_options.VariablePolicy.EXPAND_DISTRIBUTED_VARIABLES, False),
("_DiscardDistributedVariablesWithPolicy",
save_options.VariablePolicy.NONE, True),
("_DiscardDistributedVariablesWithoutPolicy",
save_options.VariablePolicy.NONE, False))
def test_expand_distributed_variables(self, expand_strategy, policy):
# 1. Create a context with both CPU:0 and CPU:1.
context._reset_context()
cpus = context.context().list_physical_devices("CPU")
if len(cpus) == 1:
context.context().set_logical_device_configuration(
cpus[0], [
context.LogicalDeviceConfiguration(),
context.LogicalDeviceConfiguration()
])
context.ensure_initialized()
# 2. Create and save a model under a mirrored strategy.
file_name = os.path.join(self.get_temp_dir(), "saved_model.pb")
strategy = mirrored_strategy.MirroredStrategy(["CPU:0", "CPU:1"])
strategy.extended._use_var_policy = policy
with strategy.scope():
root = tracking.AutoTrackable()
root.v = variables.Variable([1., 1.], name="v")
@def_function.function(input_signature=[])
def f():
root.v.assign([2., 2.])
root.f = f
save.export_meta_graph(
obj=root,
filename=file_name,
options=save_options.SaveOptions(
experimental_variable_policy=expand_strategy))
# 3. Read the output file and test behavior.
meta_graph_def = meta_graph.read_meta_graph_file(file_name)
object_graph = meta_graph_def.object_graph_def
graph_def = meta_graph_def.graph_def
v = next((n.variable
for n in object_graph.nodes
if n.HasField("variable") and n.variable.name == "v"), None)
saved_function = next((f for f in graph_def.library.function
if "inference_f_" in f.signature.name), None)
self.assertIsNotNone(saved_function)
if (expand_strategy ==
save_options.VariablePolicy.EXPAND_DISTRIBUTED_VARIABLES):
# experimental_save_variable_devices should have been automatically set.
self.assertIn("CPU:0", v.device)
components = v.experimental_distributed_variable_components
self.assertLen(components, 2)
v0 = next((x for x in components if x.name == "v"), None)
v1 = next((x for x in components if x.name == "v/replica_1"), None)
self.assertIsNotNone(v0)
self.assertIsNotNone(v1)
self.assertIn("CPU:0", v0.device)
self.assertIn("CPU:1", v1.device)
self.assertLen(saved_function.signature.input_arg, 2)
else:
self.assertEmpty(v.device)
self.assertEmpty(v.experimental_distributed_variable_components)
self.assertLen(saved_function.signature.input_arg, 1)
def test_save_uninitialized_variable(self):
root = tracking.AutoTrackable()
root.uninitialized_variable = resource_variable_ops.UninitializedVariable(
name="uninitialized_variable", dtype=dtypes.float32)
root.initialized_variable = variables.Variable(
1.0, name="initialized_variable")
# TODO(b/149594077): Python loading does not work now partly because it
# shouldn't, as the public API and semantics of uninitialized variables
# are not properly defined, and officially supporting loading would end up
# defining semantics "by usage." We should only allow loading once the API
# is made official.
export_dir = os.path.join(self.get_temp_dir(), "saved_model")
save.save(root, export_dir)
with self.assertRaisesRegex(FileNotFoundError,
"Key uninitialized_variable"):
load.load(export_dir)
with ops.Graph().as_default(), session_lib.Session() as session:
# The final ValueError here (with "no variables to save") is confusing,
# but errors upstream give the user the correct information (a
      # NotFoundError stating that the uninitialized_variable was not found in
# the checkpoint).
with self.assertRaises(ValueError):
loader.load(session, [tag_constants.SERVING], export_dir)
def test_concrete_function_with_set_shape(self,):
# Serialized concrete function should retain the shape from the TensorSpec,
# instead of using the shape of the inputs (which are changed by set_shape).
@def_function.function
def f(x):
x.set_shape((5, 1))
return x
root = tracking.AutoTrackable()
path = os.path.join(self.get_temp_dir(), "saved_model")
concrete = f.get_concrete_function(
tensor_spec.TensorSpec((None, 1), name="name"))
save.save(root, path, signatures={"key": concrete})
imported = load.load(path)
self.assertEqual(imported.signatures["key"].structured_input_signature[1],
{"name": tensor_spec.TensorSpec((None, 1), name="name")})
def test_save_composite_tensor_signature(self):
@def_function.function(
input_signature=[ragged_tensor.RaggedTensorSpec(ragged_rank=2)])
def f(x):
return {"output_key": x}
root = tracking.AutoTrackable()
path = os.path.join(self.get_temp_dir(), "saved_model")
inp = ragged_factory_ops.constant([[[1.0, 2.0], [3.0]], [[5.]]])
flat_inp = {
"x": constant_op.constant([1., 2., 3., 5]),
"x_1": constant_op.constant([0, 2, 3], dtype=dtypes.int64),
"x_2": constant_op.constant([0, 2, 3, 4], dtype=dtypes.int64)
}
save.save(root, path, signatures={"key": f.get_concrete_function()})
# Test that the ragged signature can be loaded back into Python with V2 APIs
imported = load.load(path)
self.assertAllEqual(inp,
imported.signatures["key"](**flat_inp)["output_key"])
graph = ops.Graph()
# Try running the signature with V1 APIs.
with graph.as_default(), session_lib.Session() as session:
meta_graph_def = loader.load(session, [tag_constants.SERVING], path)
signature = meta_graph_def.signature_def["key"]
feed_dict = {}
for arg_name in flat_inp:
input_tensor = session.graph.get_tensor_by_name(
signature.inputs[arg_name].name)
feed_dict[input_tensor] = flat_inp[arg_name].numpy()
# Get composite tensor components
output_components = (
signature.outputs["output_key"].composite_tensor.components)
fetches = {}
components_keys = ["x", "x_1", "x_2"]
for k, output_tensor_info in zip(components_keys, output_components):
fetches[k] = session.graph.get_tensor_by_name(output_tensor_info.name)
outputs = session.run(fetches, feed_dict)
self.assertAllClose(flat_inp, outputs)
def test_save_uses_sanitized_signature_name(self):
@def_function.function(
input_signature=[ragged_tensor.RaggedTensorSpec(ragged_rank=2)])
def f(x):
return {"output_key": x}
# Colons are not usable as name scopes.
unsanitized_name = "foo:bar"
root = tracking.AutoTrackable()
path = os.path.join(self.get_temp_dir(), "saved_model")
save.save(
root, path, signatures={unsanitized_name: f.get_concrete_function()})
graph = ops.Graph()
with graph.as_default(), session_lib.Session() as session:
meta_graph_def = loader.load(session, [tag_constants.SERVING], path)
signature = meta_graph_def.signature_def[unsanitized_name]
tensor_names = [
session.graph.get_tensor_by_name(signature.inputs[key].name).name
for key in signature.inputs
]
# The placeholder names will have the sanitized version.
self.assertCountEqual(tensor_names,
["foo_bar_x:0", "foo_bar_x_1:0", "foo_bar_x_2:0"])
def test_save_returns_none(self):
# Test that `tf.saved_model.save` API returns None to user.
root = tracking.AutoTrackable()
save_dir = os.path.join(self.get_temp_dir(), "saved_model")
result = save.save(root, save_dir)
self.assertIsNone(result)
class DependencyTest(test.TestCase):
"""Tests for deserialization dependencies (saving-related only)."""
def test_validate_dependencies(self):
class Valid(tracking.AutoTrackable):
def _deserialization_dependencies(self, children):
return children
root = Valid()
root.f = variables.Variable(1.0)
save_dir = os.path.join(self.get_temp_dir(), "saved_model")
save.save(root, save_dir)
def test_validate_dependencies_error_untracked(self):
untracked = variables.Variable(1.0)
class Invalid(tracking.AutoTrackable):
def _deserialization_dependencies(self, children):
del children # Unused.
return {"untracked": untracked}
invalid_deps = Invalid()
save_dir = os.path.join(self.get_temp_dir(), "saved_model")
with self.assertRaisesRegex(ValueError, "Found an untracked dependency"):
save.save(invalid_deps, save_dir)
def test_validate_dependencies_error_cyclic(self):
class Invalid(tracking.AutoTrackable):
def __init__(self):
self.cycle_ref = None
def _deserialization_dependencies(self, children):
del children # Unused.
return {"cycle_ref": self.cycle_ref}
cycle1 = Invalid()
cycle2 = Invalid()
cycle1.cycle_ref = cycle2
cycle2.cycle_ref = cycle1
save_dir = os.path.join(self.get_temp_dir(), "saved_model")
with self.assertRaisesRegex(ValueError,
"dependency cycle in the saved Trackable"):
save.save(cycle1, save_dir)
class VariablePolicyEnumTest(test.TestCase):
def testFromObj(self):
self.assertEqual(save_options.VariablePolicy.NONE,
save_options.VariablePolicy.from_obj(None))
self.assertEqual(
save_options.VariablePolicy.SAVE_VARIABLE_DEVICES,
save_options.VariablePolicy.from_obj(
save_options.VariablePolicy.SAVE_VARIABLE_DEVICES))
self.assertEqual(
save_options.VariablePolicy.EXPAND_DISTRIBUTED_VARIABLES,
save_options.VariablePolicy.from_obj(
save_options.VariablePolicy.EXPAND_DISTRIBUTED_VARIABLES))
self.assertEqual(
save_options.VariablePolicy.SAVE_VARIABLE_DEVICES,
save_options.VariablePolicy.from_obj("save_variable_devices"))
self.assertEqual(
save_options.VariablePolicy.SAVE_VARIABLE_DEVICES,
save_options.VariablePolicy.from_obj("SaVe_VaRiAbLe_DeViCeS"))
self.assertEqual(
save_options.VariablePolicy.EXPAND_DISTRIBUTED_VARIABLES,
save_options.VariablePolicy.from_obj("expand_distributed_variables"))
self.assertEqual(
save_options.VariablePolicy.EXPAND_DISTRIBUTED_VARIABLES,
save_options.VariablePolicy.from_obj("eXpAnD_dIsTrIbUtEd_VaRiAbLeS"))
for invalid in ["not_a_valid_value", 2.0, []]:
with self.assertRaisesRegex(ValueError, "invalid VariablePolicy value"):
save_options.VariablePolicy.from_obj(invalid)
def testNamingConvention(self):
"""Enforces names are uppercase versions of values."""
for policy in save_options.VariablePolicy:
if policy == save_options.VariablePolicy.NONE:
self.assertIsNone(policy.value)
else:
self.assertEqual(policy.name, policy.name.upper())
self.assertEqual(policy.value, policy.value.lower())
self.assertEqual(policy.name, policy.value.upper())
class SavingOptionsTest(test.TestCase):
def testOpNameSpace(self):
# TODO(kathywu): Add test that saves out SavedModel with a custom op when
# the ">" character is allowed in op names.
graph_def = graph_pb2.GraphDef()
text_format.Parse("node { name: 'A' op: 'Test>CustomOp' }", graph_def)
with self.assertRaisesRegex(
ValueError, "Attempted to save ops from non-whitelisted namespaces"):
save._verify_ops(graph_def, [])
save._verify_ops(graph_def, ["Test"])
    # Test with multiple '>' characters in the op name.
text_format.Parse("node { name: 'A' op: 'Test>>A>CustomOp' }", graph_def)
with self.assertRaisesRegex(
ValueError, "Attempted to save ops from non-whitelisted namespaces"):
save._verify_ops(graph_def, [])
save._verify_ops(graph_def, ["Test"])
def test_save_custom_op_with_no_whitelist_specified(self):
# Test that we are able to save a model that contains a custom op with a
# custom namespace when the user has not explicitly specified a namespace
# whitelist (i.e. that we default to allowing all custom ops when saving
# and no whitelist is specified, rather than throwing an exception).
graph_def = graph_pb2.GraphDef()
text_format.Parse("node { name: 'A' op: 'Test>CustomOp' }", graph_def)
save._verify_ops(graph_def, namespace_whitelist=None)
# If the user passes an empty list for the namespace whitelist rather than
# nothing, we should then throw an exception if a custom op is used.
with self.assertRaisesRegex(
ValueError, "Attempted to save ops from non-whitelisted namespaces"):
save._verify_ops(graph_def, [])
def test_save_debug_info_enabled(self):
root = tracking.AutoTrackable()
root.f = def_function.function(
lambda x: math_ops.mul(2., x, name="DEBUG_INFO_OP"),
input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])
save_dir = os.path.join(self.get_temp_dir(), "saved_model")
save.save(
root,
save_dir,
root.f,
options=save_options.SaveOptions(save_debug_info=True))
debug_info_file_name = os.path.join(save_dir, "debug",
"saved_model_debug_info.pb")
self.assertTrue(os.path.exists(debug_info_file_name))
debug_info = graph_debug_info_pb2.GraphDebugInfo()
with open(debug_info_file_name, "rb") as f:
debug_info.ParseFromString(f.read())
# Verify that there is a trace for DEBUG_INFO_OP just to ensure that
# function debug info tracing is nominally functioning.
found_op = False
for key in debug_info.traces.keys():
if key.startswith("DEBUG_INFO_OP@"):
found_op = True
break
self.assertTrue(found_op, "Did not find DEBUG_INFO_OP in trace")
def test_save_debug_info_disabled(self):
root = tracking.AutoTrackable()
root.f = def_function.function(
lambda x: math_ops.mul(2., x, name="DEBUG_INFO_OP"),
input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])
save_dir = os.path.join(self.get_temp_dir(), "saved_model")
save.save(
root,
save_dir,
root.f,
options=save_options.SaveOptions(save_debug_info=False))
debug_info_file_name = os.path.join(save_dir, "debug",
"saved_model_debug_info.pb")
self.assertFalse(os.path.exists(debug_info_file_name))
def test_function_aliases(self):
root = tracking.AutoTrackable()
root.f = def_function.function(
lambda x: 2. * x,
input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])
save_dir = os.path.join(self.get_temp_dir(), "saved_model")
options = save_options.SaveOptions(function_aliases={
"my_func": root.f,
})
save.save(root, save_dir, root.f, options=options)
function_cache = root.f._stateful_fn._list_all_concrete_functions()
function_aliases = loader_impl.parse_saved_model(
save_dir).meta_graphs[0].meta_info_def.function_aliases
self.assertLen(function_cache, 1)
self.assertEqual(function_cache[0].name.decode("utf-8"),
list(function_aliases.keys())[0])
def test_accepts_io_device(self):
options = save_options.SaveOptions()
self.assertIsNone(options.experimental_io_device)
options = save_options.SaveOptions(experimental_io_device="/job:localhost")
self.assertEqual("/job:localhost", options.experimental_io_device)
def test_accepts_variable_policy(self):
options = save_options.SaveOptions()
self.assertEqual(save_options.VariablePolicy.NONE,
options.experimental_variable_policy)
# VariablePolicy instances.
options = save_options.SaveOptions(experimental_variable_policy=save_options
.VariablePolicy.SAVE_VARIABLE_DEVICES)
self.assertEqual(save_options.VariablePolicy.SAVE_VARIABLE_DEVICES,
options.experimental_variable_policy)
options = save_options.SaveOptions(
experimental_variable_policy=save_options.VariablePolicy
.EXPAND_DISTRIBUTED_VARIABLES)
self.assertEqual(save_options.VariablePolicy.EXPAND_DISTRIBUTED_VARIABLES,
options.experimental_variable_policy)
# String conversions.
options = save_options.SaveOptions(
experimental_variable_policy="save_variable_devices")
self.assertEqual(save_options.VariablePolicy.SAVE_VARIABLE_DEVICES,
options.experimental_variable_policy)
options = save_options.SaveOptions(
experimental_variable_policy="expand_distributed_variables")
self.assertEqual(save_options.VariablePolicy.EXPAND_DISTRIBUTED_VARIABLES,
options.experimental_variable_policy)
with self.assertRaisesRegex(ValueError, "invalid VariablePolicy value"):
options = save_options.SaveOptions(
experimental_variable_policy="not_a_valid_value")
class AssetTests(test.TestCase):
def setUp(self):
super(AssetTests, self).setUp()
self._vocab_path = os.path.join(self.get_temp_dir(), "vocab.txt")
with open(self._vocab_path, "w") as f:
f.write("alpha\nbeta\ngamma\n")
def test_asset_path_returned(self):
root = tracking.AutoTrackable()
root.path = tracking.Asset(self._vocab_path)
save_dir = os.path.join(self.get_temp_dir(), "saved_model")
root.get_asset = def_function.function(lambda: root.path.asset_path)
save.save(root, save_dir, signatures=root.get_asset.get_concrete_function())
second_dir = os.path.join(self.get_temp_dir(), "second_dir")
file_io.rename(save_dir, second_dir)
imported_path = _import_and_infer(second_dir, {})["output_0"]
self.assertIn(
compat.as_str_any(second_dir), compat.as_str_any(imported_path))
def test_table(self):
initializer = lookup_ops.TextFileInitializer(
self._vocab_path,
key_dtype=dtypes.string,
key_index=lookup_ops.TextFileIndex.WHOLE_LINE,
value_dtype=dtypes.int64,
value_index=lookup_ops.TextFileIndex.LINE_NUMBER)
root = util.Checkpoint(
table=lookup_ops.HashTable(initializer, default_value=-1))
root.table_user = def_function.function(
root.table.lookup,
input_signature=[tensor_spec.TensorSpec(None, dtypes.string)])
self.assertEqual(
2, self.evaluate(root.table_user(constant_op.constant("gamma"))))
save_dir = os.path.join(self.get_temp_dir(), "saved_model")
save.save(root, save_dir)
file_io.delete_file(self._vocab_path)
self.assertAllClose({"output_0": [2, 0]},
_import_and_infer(save_dir,
{"keys": ["gamma", "alpha"]}))
second_dir = os.path.join(self.get_temp_dir(), "second_dir")
# Asset paths should track the location the SavedModel is loaded from.
file_io.rename(save_dir, second_dir)
self.assertAllClose({"output_0": [2, 1]},
_import_and_infer(second_dir,
{"keys": ["gamma", "beta"]}))
def test_untracked_table_useful_message(self):
root = module.Module()
initializer = lookup_ops.TextFileInitializer(
self._vocab_path,
key_dtype=dtypes.string,
key_index=lookup_ops.TextFileIndex.WHOLE_LINE,
value_dtype=dtypes.int64,
value_index=lookup_ops.TextFileIndex.LINE_NUMBER)
table = lookup_ops.HashTable(initializer, default_value=-1)
root.table_user = def_function.function(
table.lookup,
input_signature=[tensor_spec.TensorSpec(None, dtypes.string)])
root.table_user(constant_op.constant("gamma"))
save_dir = os.path.join(self.get_temp_dir(), "saved_model")
    with self.assertRaisesRegex(AssertionError, "HashTable"):
save.save(root, save_dir)
def test_unused_asset(self):
root = tracking.AutoTrackable()
root.f = def_function.function(
lambda x: 2. * x,
input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])
root.asset = tracking.Asset(self._vocab_path)
export_dir = os.path.join(self.get_temp_dir(), "save_dir")
save.save(root, export_dir)
self.assertAllClose({"output_0": [0.2]},
_import_and_infer(export_dir, {"x": [0.1]}))
def test_sensible_function_building_exception(self):
root = util.Checkpoint(v=variables.Variable(2.))
root.f = def_function.function(
lambda x: 2. * root.v,
input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])
export_dir = os.path.join(self.get_temp_dir(), "save_dir")
@def_function.function
def _calls_save():
save.save(root, export_dir)
with self.assertRaisesRegex(AssertionError, "tf.function"):
_calls_save()
def test_rewrite_asset_to_same_destination(self):
save_dir = os.path.join(self.get_temp_dir(), "saved_model")
asset_path = os.path.join(self.get_temp_dir(), "asset")
model = tracking.AutoTrackable()
with open(asset_path, "w") as f:
f.write("first")
save.save(model, save_dir)
load.load(save_dir)
self.assertEqual("first", file_io.read_file_to_string(asset_path))
with open(asset_path, "w") as f:
f.write("second")
save.save(model, save_dir)
load.load(save_dir)
self.assertEqual("second", file_io.read_file_to_string(asset_path))
class ExportMetaGraphTests(test.TestCase):
def test_export_meta_graph(self):
root = tracking.AutoTrackable()
root.variable = resource_variable_ops.UninitializedVariable(
name="some_variable", dtype=dtypes.float32)
@def_function.function(input_signature=[tensor_spec.TensorSpec(None)])
def multiply_var(x):
return root.variable * x
@def_function.function(input_signature=[tensor_spec.TensorSpec([])])
def update(y):
root.variable.assign_add(y)
# TODO(b/150393409): All functions exported as signatures must have at
# least one output.
return 0
@def_function.function(input_signature=[])
def initialize():
root.variable.assign(1.0)
# TODO(b/150393409): All functions exported as signatures must have at
# least one output.
return 0
save_path = os.path.join(self.get_temp_dir(), "meta_graph.pb")
save.export_meta_graph(
root,
save_path,
signatures={
"multiply_var": multiply_var,
"initialize": initialize,
"update": update
})
with ops.Graph().as_default(), session_lib.Session() as session:
saver.import_meta_graph(save_path)
meta_graph_def = meta_graph.read_meta_graph_file(save_path)
# Initialize variable to 1
_run_signature(session, meta_graph_def, {}, "initialize")
out = _run_signature(session, meta_graph_def, {"x": 3}, "multiply_var")
self.assertAllEqual(out, {"output_0": 3})
# Adds 2 to the variable. Variable is now 3
_run_signature(session, meta_graph_def, {"y": 2}, "update")
out = _run_signature(session, meta_graph_def, {"x": 4}, "multiply_var")
self.assertAllEqual(out, {"output_0": 12})
if __name__ == "__main__":
test.main()
|
py | 7dff8000b0531c8c1603ffe33cbd0bfdf61ba02b | # -*- coding: utf-8 -*-
#
# This software is in the public domain because it contains materials
# that originally came from the United States Geological Survey,
# an agency of the United States Department of Interior.
# For more information, see the official USGS copyright policy at
# http://www.usgs.gov/visual-id/credit_usgs.html#copyright
#
# <author> Rian Bogle </author>
from ais.lib.task import Task
from ais.ui.models import Config,Plugin
from wtforms import Form, StringField, HiddenField, TextAreaField
from flask.ext.admin import expose
import datetime, ast
class RunArgsForm(Form):
id = HiddenField()
name = StringField("Set Name")
arg1 = StringField('Arg 1')
arg2 = StringField('Arg 2')
class InitArgsForm(Form):
id = HiddenField()
arg1 = StringField('Arg 1')
arg2 = StringField('Arg 2')
class Test_Task(Task):
def run(self, **kwargs):
self.last_run = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
self.logger.info(self._print_keyword_args("Test_Task Run called", **kwargs))
import random
if random.random()>.5:
raise Exception("Ugh We Failed our Task")
else:
return "Test Task run is happy as a clam!"
def configure(self, **kwargs):
self.logger.debug(self._print_keyword_args("Init called", **kwargs))
self.initialized = True
def __init__(self, **kwargs):
super(Test_Task,self).__init__(**kwargs)
self.widgetized = True
self.viewable = True
self.logger.debug(self._print_keyword_args("Init called", **kwargs))
def _print_keyword_args(self, msg, **kwargs):
# kwargs is a dict of the keyword args passed to the function
        result = msg + '\n'
        for key, value in kwargs.iteritems():
            result += "%s = %s\n" % (key, value)
        return result
@expose('/', methods=('GET','POST'))
def plugin_view(self):
from flask.ext.admin import helpers as h
from flask import flash,request
active_tab = 'main'
if h.is_form_submitted():
form_data = request.form
form_type = form_data.get('id')
if form_type == 'init':
#load init config stored
icfg = Config.query.join(Plugin).filter(Plugin.name==self.name).filter(Config.role=="Initalize").first()
if icfg is None:
plg = Plugin.query.filter_by(name=self.name).first()
icfg = Config(plugin=plg, role="Initalize", args={}, name="Test_Task Init")
#update config obj with formdata
new_args = icfg.args.copy()
#icfg.args = new_ar
new_args['arg1']= form_data.get('arg1') #this one is a string
new_args['arg2']= form_data.get('arg2') #this one should be a dict
icfg.args = new_args
try:
self.app.db.session.add(icfg)
self.app.db.session.commit()
except:
flash("Init form submission failed, bad data in form", "danger")
active_tab = 'init'
else:
flash("Init Form submitted", "message")
elif form_type == 'run':
#load init config stored
plg = Plugin.query.filter_by(name=self.name).first()
icfg = Config(
name=form_data.get('name'),
role="Runtime",
plugin=plg
)
#update config obj with formdata
icfg.args = dict()
icfg.args['arg1']=form_data.get('arg1')
icfg.args['arg2']=form_data.get('arg2')
try:
self.app.db.session.add(icfg)
self.app.db.session.commit()
except ValueError:
flash("Run form submission failed, bad data in arg2", "danger")
active_tab = 'run'
except:
flash("Run form submission failed", "danger")
active_tab = 'run'
else:
flash("Run Form submitted", "message")
#load init config stored
icfg = Config.query.join(Plugin).filter(Plugin.name==self.name).filter(Config.role=="Initalize").first()
if icfg is None:
plg = Plugin.query.filter_by(name=self.name).first()
icfg = Config(plugin=plg, role="Initalize", args={}, name="Test_Task Init")
init_form = InitArgsForm(id="init",name=icfg.name, arg1 = icfg.args.get('arg1', ""), arg2 =icfg.args.get('arg2', ""))
run_form = RunArgsForm(id="run")
##render page
return self.render(
self.view_template,
init_form = init_form,
active_tab = active_tab,
return_url = "/test_task/",
run_form=run_form
) |
py | 7dff80f6bb2a87ca6c53535deb5ef88e4c2ef15e | import logging
import profile
def fun():
for i in range(10000):
logging.debug("This is a debug log.")
logging.info("This is a info log.")
logging.warning("This is a warning log.")
logging.error("This is a error log.")
logging.critical("This is a critical log.")
if __name__ == '__main__':
profile.run('fun()') |
py | 7dff811453c413726f62b81e89a7b5599d801b46 | import collections
import argparse
from configVar import config_vars
class OptionToConfigVar:
""" when attribute of CommandLineOptions is get or set
OptionToConfigVar will read/write it to configVar
if default is provided to __init__ the configVar is a priori set with this value
if set_value is provided to __init__ when __set__ is called, the configVar is set
to this value regardless of the value provided as param to __set__, useful
for example when boolean "yes" value is required to mark the value was set.
"""
def __init__(self, default=None, set_value=None):
self.set_value = set_value
self.default = default
def __set_name__(self, owner, name):
self.var_name = name
if self.default is not None:
config_vars[self.var_name] = self.default
def __get__(self, instance, owner):
retVal = None
if self.var_name in config_vars:
retVal = str(config_vars[self.var_name])
return retVal
def __set__(self, instance, value):
if value is not None:
if self.set_value is not None:
config_vars[self.var_name] = self.set_value
else:
if isinstance(value, collections.Sequence):
config_vars[self.var_name] = value
else:
config_vars[self.var_name] = str(value)
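def _option_to_config_var_example():
    """Illustrative sketch only (not part of the original module): the round trip
    an OptionToConfigVar attribute makes through config_vars. The class and option
    names below are hypothetical."""
    class _ExampleOptions:
        EXAMPLE_PATH = OptionToConfigVar()                 # plain read/write through config_vars
        EXAMPLE_FLAG = OptionToConfigVar(set_value="yes")  # any assignment stores "yes"
    opts = _ExampleOptions()
    opts.EXAMPLE_PATH = "some/input.yaml"  # __set__ writes config_vars["EXAMPLE_PATH"]
    opts.EXAMPLE_FLAG = True               # stored as "yes" because set_value was given
    return opts.EXAMPLE_PATH, opts.EXAMPLE_FLAG  # read back via __get__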
class CommandLineOptions(object):
""" namespace object to give to parse_args
holds command line options
"""
__BASE_URL__ = OptionToConfigVar()
__CONFIG_FILE__ = OptionToConfigVar()
__CREDENTIALS__ = OptionToConfigVar()
__MAIN_DB_FILE__ = OptionToConfigVar()
__DOCK_ITEM_LABEL__ = OptionToConfigVar()
__DOCK_ITEM_PATH__ = OptionToConfigVar()
__FAIL_EXIT_CODE__ = OptionToConfigVar()
__FAIL_SLEEP_TIME__ = OptionToConfigVar()
__FILE_SIZES_FILE__ = OptionToConfigVar()
__JUST_WITH_NUMBER__ = OptionToConfigVar(default="0")
__LIMIT_COMMAND_TO__ = OptionToConfigVar()
__MAIN_COMMAND__ = OptionToConfigVar()
__MAIN_INPUT_FILE__ = OptionToConfigVar()
__MAIN_OUT_FILE__ = OptionToConfigVar()
__NO_NUMBERS_PROGRESS__ = OptionToConfigVar()
__NO_WTAR_ARTIFACTS__ = OptionToConfigVar()
__OUTPUT_FORMAT__ = OptionToConfigVar()
__PROPS_FILE__ = OptionToConfigVar()
__REMOVE_FROM_DOCK__ = OptionToConfigVar()
__REPORT_ONLY_INSTALLED__ = OptionToConfigVar()
__RESTART_THE_DOCK__ = OptionToConfigVar()
__RUN_AS_ADMIN__ = OptionToConfigVar()
__RUN_BATCH__ = OptionToConfigVar()
__RUN_COMMAND_LIST_IN_PARALLEL__ = OptionToConfigVar()
__SHA1_CHECKSUM__ = OptionToConfigVar()
__SHORTCUT_PATH__ = OptionToConfigVar()
__SHORTCUT_TARGET_PATH__ = OptionToConfigVar()
__START_DYNAMIC_PROGRESS__ = OptionToConfigVar()
__TOTAL_DYNAMIC_PROGRESS__ = OptionToConfigVar()
BASE_REPO_REV = OptionToConfigVar()
LS_FORMAT = OptionToConfigVar()
TARGET_REPO_REV = OptionToConfigVar()
ABORT_FILE = OptionToConfigVar()
SHELL = OptionToConfigVar()
RUN_PROCESS_ARGUMENTS = OptionToConfigVar()
__SILENT__ = OptionToConfigVar()
def __init__(self) -> None:
self.mode = None
self.which_revision = None
self.define = None
def __str__(self):
return "\n".join([''.join((n, ": ", str(v))) for n, v in sorted(vars(self).items())])
def prepare_args_parser(in_command):
"""
Prepare the parser for command line arguments
"""
all_command_details = dict()
# client commands
all_command_details.update({
'copy': {'mode': 'client', 'options': ('in', 'out', 'run', 'cred'), 'help': 'copy files to target paths'},
        'read-yaml': {'mode': 'client', 'options': ('in', 'out', 'db'), 'help': "reads a yaml file to verify its contents"},
'remove': {'mode': 'client', 'options': ('in', 'out', 'run',), 'help': 'remove items installed by copy'},
'report-versions': {'mode': 'client', 'options': ('in', 'out', 'output_format', 'only_installed'), 'help': 'report what is installed and what needs update'},
'sync': {'mode': 'client', 'options': ('in', 'out', 'run', 'cred'), 'help': 'sync files to be installed from server to local disk'},
'synccopy': {'mode': 'client', 'options': ('in', 'out', 'run', 'cred'), 'help': 'sync files to be installed from server to local disk and copy files to target paths'},
'uninstall': {'mode': 'client', 'options': ('in', 'out', 'run',), 'help': 'uninstall previously copied files, considering dependencies'},
'short-index': {'mode': 'client', 'options': {'in', 'out'}, 'help': 'create short version of the index'},
})
if in_command not in all_command_details:
# do_something commands
all_command_details.update({
'check-checksum': {'mode': 'do_something', 'options': ('in', 'prog',), 'help': 'check checksum for a list of files from info_map file'},
'checksum': {'mode': 'do_something', 'options': ('in',), 'help': 'calculate checksum for a file or folder'},
'command-list': {'mode': 'do_something', 'options': ('conf', 'prog', 'parallel'), 'help': 'do a list of commands from a file'},
            'exec': {'mode': 'do_something', 'options': ('in', 'out', 'conf_opt'), 'help': 'Execute a python script'},
'fail': {'mode': 'do_something', 'options': (), 'help': "fail and return exit code"},
'help': {'mode': 'do_something', 'options': (), 'help': 'help'},
'ls': {'mode': 'do_something', 'options': ('in', 'out', 'limit'), 'help': 'create a directory listing'},
'parallel-run': {'mode': 'do_something', 'options': ('in', ), 'help': 'Run processes in parallel'},
            'resolve': {'mode': 'do_something', 'options': ('in', 'out', 'conf'), 'help': 'read the --in file, resolve $() style variables and write the result to --out; definitions are given in --config-file'},
            'run-process': {'mode': 'do_something', 'options': ('in_opt',), 'help': 'Run a process with an optional abort file'},
'test-import': {'mode': 'do_something', 'options': (), 'help': 'test the import of required modules'},
'translate_url': {'mode': 'do_something', 'options': ('in', 'cred'), 'help': 'translate a url to be compatible with current connection'},
'unwtar': {'mode': 'do_something', 'options': ('in_opt', 'prog', 'out'), 'help': 'uncompress .wtar files in current (or in the --out) folder'},
'version': {'mode': 'do_something', 'options': (), 'help': 'display instl version'},
'wtar': {'mode': 'do_something', 'options': ('in', 'out'), 'help': 'create .wtar files from specified files and folders'},
'wzip': {'mode': 'do_something', 'options': ('in', 'out'), 'help': 'create .wzip file from specified file'},
})
if in_command not in all_command_details:
# admin commands
all_command_details.update({
# converted to instl 2 style
'activate-repo-rev': {'mode': 'admin', 'options': ('out', 'run', 'conf',), 'help': 'upload repository revision file to admin folder'},
            'depend': {'mode': 'admin', 'options': ('in', 'out',), 'help': 'output a dependency map for an index file'},
'file-sizes': {'mode': 'admin', 'options': ('in', 'out'), 'help': 'Create a list of files and their sizes'},
'fix-perm': {'mode': 'admin', 'options': ('out', 'run', 'conf', 'limit'), 'help': 'Fix Mac OS permissions'},
'fix-props': {'mode': 'admin', 'options': ('out', 'run', 'conf'), 'help': 'create svn commands to remove redundant properties such as executable bit from files that should not be marked executable'},
'fix-symlinks': {'mode': 'admin', 'options': ('out', 'run', 'conf', 'limit'), 'help': 'replace symlinks with .symlinks files'},
'stage2svn': {'mode': 'admin', 'options': ('out', 'run', 'conf', 'limit'), 'help': 'add/remove files in staging to svn sync repository'},
'svn2stage': {'mode': 'admin', 'options': ('out', 'run', 'conf', 'limit'), 'help': 'svn sync repository and copy to staging folder'},
'verify-repo': {'mode': 'admin', 'options': ('conf',), 'help': 'Verify a local repository against its index'},
'up2s3': {'mode': 'admin', 'options': ('conf', 'out', 'run'), 'help': 'upload revision to s3'},
'wait-on-action-trigger': {'mode': 'admin', 'options': ('conf',), 'help': 'wait for svn commit and upload revision to s3'},
'check-instl-folder-integrity': {'mode': 'admin', 'options': ('in',), 'help': 'check that index and info_maps have correct checksums, and other attributes'},
            'read-info-map': {'mode': 'admin', 'options': ('in+', 'db'), 'help': "reads an info-map file to verify its contents"},
'translate-guids': {'mode': 'admin', 'options': ('in', 'conf', 'out'), 'help': 'translate guids to iids'},
'verify-index': {'mode': 'admin', 'options': ('in', 'cred'), 'help': 'Verify that index and info map are compatible'},
'wtar-staging-folder': {'mode': 'admin', 'options': ('out', 'run', 'conf', 'limit'), 'help': 'create .wtar files inside staging folder'},
})
if in_command not in all_command_details:
# misc commands, gui, doit
all_command_details.update({
'doit': {'mode': 'doit', 'options': ('in', 'out', 'run'), 'help': 'Do something'},
'gui': {'mode': 'gui', 'options': (), 'help': 'graphical user interface'}
})
command_names = sorted(all_command_details.keys())
# if in_command is None - just return the command names
if in_command is None:
return None, command_names
def decent_convert_arg_line_to_args(self, arg_line):
""" parse a file with options so that we do not have to write one sub-option
per line. Remove empty lines, comment lines, and end of line comments.
ToDo: handle quotes
"""
line_no_whitespace = arg_line.strip()
if line_no_whitespace and line_no_whitespace[0] != '#':
for arg in line_no_whitespace.split():
if not arg:
continue
elif arg[0] == '#':
break
yield arg
parser = argparse.ArgumentParser(description='instl: cross platform installer',
prefix_chars='-+',
fromfile_prefix_chars='@',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
argparse.ArgumentParser.convert_arg_line_to_args = decent_convert_arg_line_to_args
subparsers = parser.add_subparsers(dest='__MAIN_COMMAND__', help='sub-command help')
command_details = all_command_details[in_command]
command_parser = subparsers.add_parser(in_command, help=command_details['help'])
command_parser.set_defaults(mode=command_details['mode'])
# optional --in
if 'in_opt' in command_details['options']:
input_options = command_parser.add_argument_group(description='input arguments:')
input_options.add_argument('--in', '-i',
required=False,
nargs=1,
metavar='path-to-input-file',
dest='__MAIN_INPUT_FILE__',
help="file to act upon")
# required --in
if 'in' in command_details['options']:
input_options = command_parser.add_argument_group(description='input arguments:')
input_options.add_argument('--in', '-i',
required=True,
nargs=1,
metavar='path-to-input-folder',
dest='__MAIN_INPUT_FILE__',
help="file or folder to act upon")
# required multi --in
if 'in+' in command_details['options']:
input_options = command_parser.add_argument_group(description='input arguments:')
input_options.add_argument('--in', '-i',
required=True,
nargs='+',
metavar='path-to-input-folder',
dest='__MAIN_INPUT_FILE__',
help="files or folders to act upon")
# optional --out
if 'out' in command_details['options']:
output_options = command_parser.add_argument_group(description='output arguments:')
output_options.add_argument('--out', '-o',
required=False,
nargs=1,
metavar='path-to-output-file',
dest='__MAIN_OUT_FILE__',
help="output file")
if 'run' in command_details['options']:
run_option = command_parser.add_argument_group(description='run arguments:')
run_option.add_argument('--run', '-r',
required=False,
default=False,
action='store_true',
dest='__RUN_BATCH__',
help="run the installation instructions script")
if 'output_format' in command_details['options']:
output_format_option = command_parser.add_argument_group(description='output_format arguments:')
output_format_option.add_argument('--output-format',
required=False,
nargs=1,
dest='__OUTPUT_FORMAT__',
help="specify output format")
if 'cred' in command_details['options']:
credentials_option = command_parser.add_argument_group(description='credentials:')
credentials_option.add_argument('--credentials',
required=False,
nargs=1,
metavar='credentials',
dest='__CREDENTIALS__',
help="credentials to file server")
if ('conf' in command_details['options']) or ('conf_opt' in command_details['options']):
config_file_options = command_parser.add_argument_group(description='admin arguments:')
is_required = 'conf' in command_details['options']
config_file_options.add_argument('--config-file', '-s',
required=is_required,
nargs='+',
metavar='path-to-config-file',
dest='__CONFIG_FILE__',
help="path to config-file")
if 'prog' in command_details['options']:
progress_options = command_parser.add_argument_group(description='dynamic progress report')
progress_options.add_argument('--start-progress',
required=False,
nargs=1,
metavar='start-progress-number',
dest='__START_DYNAMIC_PROGRESS__',
help="num progress items to begin with")
progress_options.add_argument('--total-progress',
required=False,
nargs=1,
metavar='total-progress-number',
dest='__TOTAL_DYNAMIC_PROGRESS__',
help="num total progress items")
progress_options.add_argument('--no-numbers-progress',
required=False,
default=False,
action='store_true',
dest='__NO_NUMBERS_PROGRESS__',
help="display progress but without specific numbers")
if 'limit' in command_details['options']:
limit_options = command_parser.add_argument_group(description='limit command to specific folder')
limit_options.add_argument('--limit',
required=False,
nargs='+',
metavar='limit-command-to',
dest='__LIMIT_COMMAND_TO__',
help="list of command to limit the action to")
if 'parallel' in command_details['options']:
parallel_option = command_parser.add_argument_group(description='parallel execution')
parallel_option.add_argument('--parallel', '-p',
required=False,
default=False,
action='store_true',
dest='__RUN_COMMAND_LIST_IN_PARALLEL__',
help="run the command-list in parallel")
# optional --db
if 'db' in command_details['options']:
db_options = command_parser.add_argument_group(description='database path:')
db_options.add_argument('--db', '-d',
required=False,
nargs=1,
metavar='path-to-db-file',
dest='__MAIN_DB_FILE__',
help="database file")
if 'rev' in command_details['options']:
rev_options = command_parser.add_argument_group(description='revision:')
rev_options.add_argument('--rev',
required=True,
nargs=1,
metavar='revision',
dest='TARGET_REPO_REV',
help="revision to create work on")
# the following option groups each belong only to a single command
if 'read-yaml' == in_command:#__SILENT__
read_yaml_options = command_parser.add_argument_group(description=in_command+' arguments:')
read_yaml_options.add_argument('--silent',
required=False,
default=False,
action='store_true',
dest='__SILENT__',
help="minimal output")
elif 'activate-repo-rev' == in_command:
up_repo_rev_options = command_parser.add_argument_group(description=in_command+' arguments:')
up_repo_rev_options.add_argument('--just-with-number', '-j',
required=False,
nargs=1,
metavar='just-with-number',
dest='__JUST_WITH_NUMBER__',
help="up load just the repo-rev file that ends with a specific number, not the general one")
elif 'unwtar' == in_command:
unwtar_options = command_parser.add_argument_group(description=in_command+' arguments:')
unwtar_options.add_argument('--no-artifacts',
required=False,
default=False,
action='store_true',
dest='__NO_WTAR_ARTIFACTS__',
help="remove all .wtar files and .done files")
elif in_command in ('up2s3',):
which_revision_options = command_parser.add_argument_group(description=in_command+' arguments:')
which_revision_options.add_argument('--revision',
required=False,
nargs=1,
default=False,
dest='which_revision',
help="all==work on all revisions even if above repo-rev, num=work on specific revision")
elif 'ls' == in_command:
ls_options = command_parser.add_argument_group(description='output_format arguments:')
ls_options.add_argument('--output-format',
required=False,
nargs=1,
dest='LS_FORMAT',
help="specify output format")
elif 'fail' == in_command:
fail_options = command_parser.add_argument_group(description=in_command+' arguments:')
fail_options.add_argument('--exit-code',
required=False,
nargs=1,
metavar='exit-code-to-return',
dest='__FAIL_EXIT_CODE__',
help="exit code to return")
fail_options.add_argument('--sleep',
required=False,
nargs=1,
metavar='time-to-sleep',
dest='__FAIL_SLEEP_TIME__',
help="time to sleep")
elif 'report-versions' == in_command:
report_versions_options = command_parser.add_argument_group(description=in_command+' arguments:')
report_versions_options.add_argument('--only-installed',
required=False,
default=False,
action='store_true',
dest='__REPORT_ONLY_INSTALLED__',
help="report only installed products")
elif 'help' == in_command:
help_options = command_parser.add_argument_group(description='help subject:')
help_options.add_argument('subject', nargs='?')
elif 'run-process' == in_command:
run_process_options = command_parser.add_argument_group(description='run-process:')
run_process_options.add_argument('--abort-file',
required=False,
default=None,
nargs=1,
metavar='abort_file',
dest='ABORT_FILE',
help="run a process with optional abort file")
run_process_options.add_argument('--shell',
required=False,
default=False,
action='store_true',
dest='SHELL',
help="run a process in shell")
run_process_options.add_argument(dest='RUN_PROCESS_ARGUMENTS',
nargs='...',
)
general_options = command_parser.add_argument_group(description='general:')
general_options.add_argument('--define',
required=False,
default=False,
nargs=1,
metavar='define',
dest='define',
help="define variable(s) format: X=y,A=b")
general_options.add_argument('--no-stdout',
required=False,
action='store_const',
metavar='no_stdout',
const='__NO_STDOUT__',
help="do not output to stdout")
general_options.add_argument('--no-system-log',
required=False,
action='store_const',
metavar='no_system_log',
const='__NO_SYSLOG__',
help="do not output to system log")
general_options.add_argument('--log',
required=False,
nargs='+',
metavar='log_file',
dest='__LOG_FILE__',
help="log to file(s)")
return parser, command_names
def read_command_line_options(name_space_obj, arg_list=None):
""" parse command line options """
command_name = arg_list[0] if arg_list else None
parser, command_names = prepare_args_parser(command_name)
if parser:
# Command line options were given or auto run file was found
parser.parse_args(arg_list, namespace=name_space_obj)
else:
# No command line options were given
name_space_obj.mode = "interactive"
return command_names
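# Illustrative usage sketch (not part of the original module; the command name,
# option values and namespace object below are assumptions for demonstration):
#
#   import argparse
#   ns = argparse.Namespace()
#   known_commands = read_command_line_options(ns, ['sync', '--db', 'instl.db'])
#   # ns then carries the dest attributes declared above for that command,
#   # e.g. ns.__MAIN_DB_FILE__ when the command enables the 'db' option.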
|
py | 7dff81160e0f831061d5c2264aca260e551f3cb2 | """Support for MQTT fans."""
import logging
import voluptuous as vol
from homeassistant.components import fan, mqtt
from homeassistant.components.fan import (
ATTR_SPEED, SPEED_HIGH, SPEED_LOW, SPEED_MEDIUM, SPEED_OFF,
SUPPORT_OSCILLATE, SUPPORT_SET_SPEED, FanEntity)
from homeassistant.const import (
CONF_DEVICE, CONF_NAME, CONF_OPTIMISTIC, CONF_PAYLOAD_OFF, CONF_PAYLOAD_ON,
CONF_STATE, STATE_OFF, STATE_ON)
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.typing import ConfigType, HomeAssistantType
from . import (
ATTR_DISCOVERY_HASH, CONF_COMMAND_TOPIC, CONF_QOS, CONF_RETAIN,
CONF_STATE_TOPIC, CONF_UNIQUE_ID, MqttAttributes, MqttAvailability,
MqttDiscoveryUpdate, MqttEntityDeviceInfo, subscription)
from .discovery import MQTT_DISCOVERY_NEW, clear_discovery_hash
_LOGGER = logging.getLogger(__name__)
DEPENDENCIES = ['mqtt']
CONF_STATE_VALUE_TEMPLATE = 'state_value_template'
CONF_SPEED_STATE_TOPIC = 'speed_state_topic'
CONF_SPEED_COMMAND_TOPIC = 'speed_command_topic'
CONF_SPEED_VALUE_TEMPLATE = 'speed_value_template'
CONF_OSCILLATION_STATE_TOPIC = 'oscillation_state_topic'
CONF_OSCILLATION_COMMAND_TOPIC = 'oscillation_command_topic'
CONF_OSCILLATION_VALUE_TEMPLATE = 'oscillation_value_template'
CONF_PAYLOAD_OSCILLATION_ON = 'payload_oscillation_on'
CONF_PAYLOAD_OSCILLATION_OFF = 'payload_oscillation_off'
CONF_PAYLOAD_LOW_SPEED = 'payload_low_speed'
CONF_PAYLOAD_MEDIUM_SPEED = 'payload_medium_speed'
CONF_PAYLOAD_HIGH_SPEED = 'payload_high_speed'
CONF_SPEED_LIST = 'speeds'
DEFAULT_NAME = 'MQTT Fan'
DEFAULT_PAYLOAD_ON = 'ON'
DEFAULT_PAYLOAD_OFF = 'OFF'
DEFAULT_OPTIMISTIC = False
OSCILLATE_ON_PAYLOAD = 'oscillate_on'
OSCILLATE_OFF_PAYLOAD = 'oscillate_off'
OSCILLATION = 'oscillation'
PLATFORM_SCHEMA = mqtt.MQTT_RW_PLATFORM_SCHEMA.extend({
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_STATE_VALUE_TEMPLATE): cv.template,
vol.Optional(CONF_SPEED_STATE_TOPIC): mqtt.valid_subscribe_topic,
vol.Optional(CONF_SPEED_COMMAND_TOPIC): mqtt.valid_publish_topic,
vol.Optional(CONF_SPEED_VALUE_TEMPLATE): cv.template,
vol.Optional(CONF_OSCILLATION_STATE_TOPIC): mqtt.valid_subscribe_topic,
vol.Optional(CONF_OSCILLATION_COMMAND_TOPIC): mqtt.valid_publish_topic,
vol.Optional(CONF_OSCILLATION_VALUE_TEMPLATE): cv.template,
vol.Optional(CONF_PAYLOAD_ON, default=DEFAULT_PAYLOAD_ON): cv.string,
vol.Optional(CONF_PAYLOAD_OFF, default=DEFAULT_PAYLOAD_OFF): cv.string,
vol.Optional(CONF_PAYLOAD_OSCILLATION_ON,
default=DEFAULT_PAYLOAD_ON): cv.string,
vol.Optional(CONF_PAYLOAD_OSCILLATION_OFF,
default=DEFAULT_PAYLOAD_OFF): cv.string,
vol.Optional(CONF_PAYLOAD_LOW_SPEED, default=SPEED_LOW): cv.string,
vol.Optional(CONF_PAYLOAD_MEDIUM_SPEED, default=SPEED_MEDIUM): cv.string,
vol.Optional(CONF_PAYLOAD_HIGH_SPEED, default=SPEED_HIGH): cv.string,
vol.Optional(CONF_SPEED_LIST,
default=[SPEED_OFF, SPEED_LOW,
SPEED_MEDIUM, SPEED_HIGH]): cv.ensure_list,
vol.Optional(CONF_OPTIMISTIC, default=DEFAULT_OPTIMISTIC): cv.boolean,
vol.Optional(CONF_UNIQUE_ID): cv.string,
vol.Optional(CONF_DEVICE): mqtt.MQTT_ENTITY_DEVICE_INFO_SCHEMA,
}).extend(mqtt.MQTT_AVAILABILITY_SCHEMA.schema).extend(
mqtt.MQTT_JSON_ATTRS_SCHEMA.schema)
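# Example configuration.yaml entry this schema would accept (topic names and
# payloads are illustrative placeholders, not values required by the platform):
#
#   fan:
#     - platform: mqtt
#       name: "Bedroom Fan"
#       state_topic: "bedroom/fan/on/state"
#       command_topic: "bedroom/fan/on/set"
#       speed_state_topic: "bedroom/fan/speed/state"
#       speed_command_topic: "bedroom/fan/speed/set"
#       payload_on: "true"
#       payload_off: "false"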
async def async_setup_platform(hass: HomeAssistantType, config: ConfigType,
async_add_entities, discovery_info=None):
"""Set up MQTT fan through configuration.yaml."""
await _async_setup_entity(config, async_add_entities)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up MQTT fan dynamically through MQTT discovery."""
async def async_discover(discovery_payload):
"""Discover and add a MQTT fan."""
try:
discovery_hash = discovery_payload.pop(ATTR_DISCOVERY_HASH)
config = PLATFORM_SCHEMA(discovery_payload)
await _async_setup_entity(config, async_add_entities, config_entry,
discovery_hash)
except Exception:
if discovery_hash:
clear_discovery_hash(hass, discovery_hash)
raise
async_dispatcher_connect(
hass, MQTT_DISCOVERY_NEW.format(fan.DOMAIN, 'mqtt'),
async_discover)
async def _async_setup_entity(config, async_add_entities, config_entry=None,
discovery_hash=None):
"""Set up the MQTT fan."""
async_add_entities([MqttFan(config, config_entry, discovery_hash)])
# pylint: disable=too-many-ancestors
class MqttFan(MqttAttributes, MqttAvailability, MqttDiscoveryUpdate,
MqttEntityDeviceInfo, FanEntity):
"""A MQTT fan component."""
def __init__(self, config, config_entry, discovery_hash):
"""Initialize the MQTT fan."""
self._unique_id = config.get(CONF_UNIQUE_ID)
self._state = False
self._speed = None
self._oscillation = None
self._supported_features = 0
self._sub_state = None
self._topic = None
self._payload = None
self._templates = None
self._optimistic = None
self._optimistic_oscillation = None
self._optimistic_speed = None
# Load config
self._setup_from_config(config)
device_config = config.get(CONF_DEVICE)
MqttAttributes.__init__(self, config)
MqttAvailability.__init__(self, config)
MqttDiscoveryUpdate.__init__(self, discovery_hash,
self.discovery_update)
MqttEntityDeviceInfo.__init__(self, device_config, config_entry)
async def async_added_to_hass(self):
"""Subscribe to MQTT events."""
await super().async_added_to_hass()
await self._subscribe_topics()
async def discovery_update(self, discovery_payload):
"""Handle updated discovery message."""
config = PLATFORM_SCHEMA(discovery_payload)
self._setup_from_config(config)
await self.attributes_discovery_update(config)
await self.availability_discovery_update(config)
await self.device_info_discovery_update(config)
await self._subscribe_topics()
self.async_write_ha_state()
def _setup_from_config(self, config):
"""(Re)Setup the entity."""
self._config = config
self._topic = {
key: config.get(key) for key in (
CONF_STATE_TOPIC,
CONF_COMMAND_TOPIC,
CONF_SPEED_STATE_TOPIC,
CONF_SPEED_COMMAND_TOPIC,
CONF_OSCILLATION_STATE_TOPIC,
CONF_OSCILLATION_COMMAND_TOPIC,
)
}
self._templates = {
CONF_STATE: config.get(CONF_STATE_VALUE_TEMPLATE),
ATTR_SPEED: config.get(CONF_SPEED_VALUE_TEMPLATE),
OSCILLATION: config.get(CONF_OSCILLATION_VALUE_TEMPLATE)
}
self._payload = {
STATE_ON: config.get(CONF_PAYLOAD_ON),
STATE_OFF: config.get(CONF_PAYLOAD_OFF),
OSCILLATE_ON_PAYLOAD: config.get(CONF_PAYLOAD_OSCILLATION_ON),
OSCILLATE_OFF_PAYLOAD: config.get(CONF_PAYLOAD_OSCILLATION_OFF),
SPEED_LOW: config.get(CONF_PAYLOAD_LOW_SPEED),
SPEED_MEDIUM: config.get(CONF_PAYLOAD_MEDIUM_SPEED),
SPEED_HIGH: config.get(CONF_PAYLOAD_HIGH_SPEED),
}
optimistic = config.get(CONF_OPTIMISTIC)
self._optimistic = optimistic or self._topic[CONF_STATE_TOPIC] is None
self._optimistic_oscillation = (
optimistic or self._topic[CONF_OSCILLATION_STATE_TOPIC] is None)
self._optimistic_speed = (
optimistic or self._topic[CONF_SPEED_STATE_TOPIC] is None)
self._supported_features = 0
self._supported_features |= (self._topic[CONF_OSCILLATION_STATE_TOPIC]
is not None and SUPPORT_OSCILLATE)
self._supported_features |= (self._topic[CONF_SPEED_STATE_TOPIC]
is not None and SUPPORT_SET_SPEED)
async def _subscribe_topics(self):
"""(Re)Subscribe to topics."""
topics = {}
templates = {}
for key, tpl in list(self._templates.items()):
if tpl is None:
templates[key] = lambda value: value
else:
tpl.hass = self.hass
templates[key] = tpl.async_render_with_possible_json_value
@callback
def state_received(msg):
"""Handle new received MQTT message."""
payload = templates[CONF_STATE](msg.payload)
if payload == self._payload[STATE_ON]:
self._state = True
elif payload == self._payload[STATE_OFF]:
self._state = False
self.async_write_ha_state()
if self._topic[CONF_STATE_TOPIC] is not None:
topics[CONF_STATE_TOPIC] = {
'topic': self._topic[CONF_STATE_TOPIC],
'msg_callback': state_received,
'qos': self._config.get(CONF_QOS)}
@callback
def speed_received(msg):
"""Handle new received MQTT message for the speed."""
payload = templates[ATTR_SPEED](msg.payload)
if payload == self._payload[SPEED_LOW]:
self._speed = SPEED_LOW
elif payload == self._payload[SPEED_MEDIUM]:
self._speed = SPEED_MEDIUM
elif payload == self._payload[SPEED_HIGH]:
self._speed = SPEED_HIGH
self.async_write_ha_state()
if self._topic[CONF_SPEED_STATE_TOPIC] is not None:
topics[CONF_SPEED_STATE_TOPIC] = {
'topic': self._topic[CONF_SPEED_STATE_TOPIC],
'msg_callback': speed_received,
'qos': self._config.get(CONF_QOS)}
self._speed = SPEED_OFF
@callback
def oscillation_received(msg):
"""Handle new received MQTT message for the oscillation."""
payload = templates[OSCILLATION](msg.payload)
if payload == self._payload[OSCILLATE_ON_PAYLOAD]:
self._oscillation = True
elif payload == self._payload[OSCILLATE_OFF_PAYLOAD]:
self._oscillation = False
self.async_write_ha_state()
if self._topic[CONF_OSCILLATION_STATE_TOPIC] is not None:
topics[CONF_OSCILLATION_STATE_TOPIC] = {
'topic': self._topic[CONF_OSCILLATION_STATE_TOPIC],
'msg_callback': oscillation_received,
'qos': self._config.get(CONF_QOS)}
self._oscillation = False
self._sub_state = await subscription.async_subscribe_topics(
self.hass, self._sub_state,
topics)
async def async_will_remove_from_hass(self):
"""Unsubscribe when removed."""
self._sub_state = await subscription.async_unsubscribe_topics(
self.hass, self._sub_state)
await MqttAttributes.async_will_remove_from_hass(self)
await MqttAvailability.async_will_remove_from_hass(self)
@property
def should_poll(self):
"""No polling needed for a MQTT fan."""
return False
@property
def assumed_state(self):
"""Return true if we do optimistic updates."""
return self._optimistic
@property
def is_on(self):
"""Return true if device is on."""
return self._state
@property
def name(self) -> str:
"""Get entity name."""
return self._config.get(CONF_NAME)
@property
def speed_list(self) -> list:
"""Get the list of available speeds."""
return self._config.get(CONF_SPEED_LIST)
@property
def supported_features(self) -> int:
"""Flag supported features."""
return self._supported_features
@property
def speed(self):
"""Return the current speed."""
return self._speed
@property
def oscillating(self):
"""Return the oscillation state."""
return self._oscillation
async def async_turn_on(self, speed: str = None, **kwargs) -> None:
"""Turn on the entity.
This method is a coroutine.
"""
mqtt.async_publish(
self.hass, self._topic[CONF_COMMAND_TOPIC],
self._payload[STATE_ON], self._config.get(CONF_QOS),
self._config.get(CONF_RETAIN))
if speed:
await self.async_set_speed(speed)
async def async_turn_off(self, **kwargs) -> None:
"""Turn off the entity.
This method is a coroutine.
"""
mqtt.async_publish(
self.hass, self._topic[CONF_COMMAND_TOPIC],
self._payload[STATE_OFF], self._config.get(CONF_QOS),
self._config.get(CONF_RETAIN))
async def async_set_speed(self, speed: str) -> None:
"""Set the speed of the fan.
This method is a coroutine.
"""
if self._topic[CONF_SPEED_COMMAND_TOPIC] is None:
return
if speed == SPEED_LOW:
mqtt_payload = self._payload[SPEED_LOW]
elif speed == SPEED_MEDIUM:
mqtt_payload = self._payload[SPEED_MEDIUM]
elif speed == SPEED_HIGH:
mqtt_payload = self._payload[SPEED_HIGH]
else:
mqtt_payload = speed
mqtt.async_publish(
self.hass, self._topic[CONF_SPEED_COMMAND_TOPIC],
mqtt_payload, self._config.get(CONF_QOS),
self._config.get(CONF_RETAIN))
if self._optimistic_speed:
self._speed = speed
self.async_write_ha_state()
async def async_oscillate(self, oscillating: bool) -> None:
"""Set oscillation.
This method is a coroutine.
"""
if self._topic[CONF_OSCILLATION_COMMAND_TOPIC] is None:
return
if oscillating is False:
payload = self._payload[OSCILLATE_OFF_PAYLOAD]
else:
payload = self._payload[OSCILLATE_ON_PAYLOAD]
mqtt.async_publish(
self.hass, self._topic[CONF_OSCILLATION_COMMAND_TOPIC],
payload, self._config.get(CONF_QOS), self._config.get(CONF_RETAIN))
if self._optimistic_oscillation:
self._oscillation = oscillating
self.async_write_ha_state()
@property
def unique_id(self):
"""Return a unique ID."""
return self._unique_id
|
py | 7dff814c4f432afa061cba57bb8f04d5d2e4327d | import numpy as np
import tensorflow as tf
#slim = tf.distribute.slim
import tf_slim as slim
DEFAULT_PADDING = 'SAME'
def layer(op):
'''Decorator for composable network layers.'''
def layer_decorated(self, *args, **kwargs):
# Automatically set a name if not provided.
name = kwargs.setdefault('name', self.get_unique_name(op.__name__))
# Figure out the layer inputs.
if len(self.terminals) == 0:
raise RuntimeError('No input variables found for layer %s.' % name)
elif len(self.terminals) == 1:
layer_input = self.terminals[0]
else:
layer_input = list(self.terminals)
# Perform the operation and get the output.
layer_output = op(self, layer_input, *args, **kwargs)
# Add to layer LUT.
self.layers[name] = layer_output
# This output is now the input for the next layer.
self.feed(layer_output)
# Return self for chained calls.
return self
return layer_decorated
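# Sketch of the fluent style the decorator enables ('data' is an assumed input
# name; conv/max_pool are the decorated ops defined further down):
#
#   (self.feed('data')
#        .conv(3, 3, 64, 1, 1, name='conv1')
#        .max_pool(2, 2, 2, 2, name='pool1'))
#
# Each decorated op stores its output in self.layers and re-feeds it, which is
# what allows the chained calls above.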
class Network(object):
def __init__(self, inputs, trainable=True, is_training=False, num_classes=21):
# The input nodes for this network
self.inputs = inputs
# The current list of terminal nodes
self.terminals = []
# Mapping from layer names to layers
self.layers = dict(inputs)
# If true, the resulting variables are set as trainable
self.trainable = trainable
# Switch variable for dropout
self.use_dropout = tf.compat.v1.placeholder_with_default(tf.constant(1.0),
shape=[],
name='use_dropout')
self.setup(is_training, num_classes)
    def setup(self, is_training, num_classes):
'''Construct the network. '''
raise NotImplementedError('Must be implemented by the subclass.')
def load(self, data_path, session, ignore_missing=False):
'''Load network weights.
data_path: The path to the numpy-serialized network weights
session: The current TensorFlow session
ignore_missing: If true, serialized weights for missing layers are ignored.
'''
        data_dict = np.load(data_path, allow_pickle=True).item()
for op_name in data_dict:
with tf.compat.v1.variable_scope(op_name, reuse=True):
for param_name, data in data_dict[op_name].items():
try:
var = tf.compat.v1.get_variable(param_name)
session.run(var.assign(data))
except ValueError:
if not ignore_missing:
raise
def feed(self, *args):
'''Set the input(s) for the next operation by replacing the terminal nodes.
The arguments can be either layer names or the actual layers.
'''
assert len(args) != 0
self.terminals = []
for fed_layer in args:
if isinstance(fed_layer, str):
try:
fed_layer = self.layers[fed_layer]
except KeyError:
raise KeyError('Unknown layer name fed: %s' % fed_layer)
self.terminals.append(fed_layer)
return self
def get_output(self):
'''Returns the current network output.'''
return self.terminals[-1]
def get_unique_name(self, prefix):
'''Returns an index-suffixed unique name for the given prefix.
This is used for auto-generating layer names based on the type-prefix.
'''
ident = sum(t.startswith(prefix) for t, _ in list(self.layers.items())) + 1
return '%s_%d' % (prefix, ident)
def make_var(self, name, shape):
'''Creates a new TensorFlow variable.'''
return tf.compat.v1.get_variable(name, shape, trainable=self.trainable)
def validate_padding(self, padding):
'''Verifies that the padding is one of the supported ones.'''
assert padding in ('SAME', 'VALID')
@layer
def conv(self,
input,
k_h,
k_w,
c_o,
s_h,
s_w,
name,
relu=True,
padding=DEFAULT_PADDING,
group=1,
biased=True):
# Verify that the padding is acceptable
self.validate_padding(padding)
# Get the number of channels in the input
c_i = input.get_shape()[-1]
# Verify that the grouping parameter is valid
assert c_i % group == 0
assert c_o % group == 0
# Convolution for a given input and kernel
convolve = lambda i, k: tf.nn.conv2d(input=i, filters=k, strides=[1, s_h, s_w, 1], padding=padding)
with tf.compat.v1.variable_scope(name) as scope:
kernel = self.make_var('weights', shape=[k_h, k_w, c_i//group, c_o])
if group == 1:
# This is the common-case. Convolve the input without any further complications.
output = convolve(input, kernel)
else:
# Split the input into groups and then convolve each of them independently
                input_groups = tf.split(input, group, axis=3)
                kernel_groups = tf.split(kernel, group, axis=3)
                output_groups = [convolve(i, k) for i, k in zip(input_groups, kernel_groups)]
                # Concatenate the groups
                output = tf.concat(output_groups, axis=3)
# Add the biases
if biased:
biases = self.make_var('biases', [c_o])
output = tf.nn.bias_add(output, biases)
if relu:
# ReLU non-linearity
output = tf.nn.relu(output, name=scope.name)
return output
@layer
def atrous_conv(self,
input,
k_h,
k_w,
c_o,
dilation,
name,
relu=True,
padding=DEFAULT_PADDING,
group=1,
biased=True):
# Verify that the padding is acceptable
self.validate_padding(padding)
# Get the number of channels in the input
c_i = input.get_shape()[-1]
# Verify that the grouping parameter is valid
assert c_i % group == 0
assert c_o % group == 0
# Convolution for a given input and kernel
convolve = lambda i, k: tf.nn.atrous_conv2d(i, k, dilation, padding=padding)
with tf.compat.v1.variable_scope(name) as scope:
kernel = self.make_var('weights', shape=[k_h, k_w, c_i//group, c_o])
if group == 1:
# This is the common-case. Convolve the input without any further complications.
output = convolve(input, kernel)
else:
# Split the input into groups and then convolve each of them independently
                input_groups = tf.split(input, group, axis=3)
                kernel_groups = tf.split(kernel, group, axis=3)
                output_groups = [convolve(i, k) for i, k in zip(input_groups, kernel_groups)]
                # Concatenate the groups
                output = tf.concat(output_groups, axis=3)
# Add the biases
if biased:
biases = self.make_var('biases', [c_o])
output = tf.nn.bias_add(output, biases)
if relu:
# ReLU non-linearity
output = tf.nn.relu(output, name=scope.name)
return output
@layer
def relu(self, input, name):
return tf.nn.relu(input, name=name)
@layer
def max_pool(self, input, k_h, k_w, s_h, s_w, name, padding=DEFAULT_PADDING):
self.validate_padding(padding)
return tf.nn.max_pool2d(input=input,
ksize=[1, k_h, k_w, 1],
strides=[1, s_h, s_w, 1],
padding=padding,
name=name)
@layer
def avg_pool(self, input, k_h, k_w, s_h, s_w, name, padding=DEFAULT_PADDING):
self.validate_padding(padding)
return tf.nn.avg_pool2d(input=input,
ksize=[1, k_h, k_w, 1],
strides=[1, s_h, s_w, 1],
padding=padding,
name=name)
@layer
def lrn(self, input, radius, alpha, beta, name, bias=1.0):
return tf.nn.local_response_normalization(input,
depth_radius=radius,
alpha=alpha,
beta=beta,
bias=bias,
name=name)
@layer
def concat(self, inputs, axis, name):
        return tf.concat(values=inputs, axis=axis, name=name)
@layer
def add(self, inputs, name):
return tf.add_n(inputs, name=name)
@layer
def fc(self, input, num_out, name, relu=True):
with tf.compat.v1.variable_scope(name) as scope:
input_shape = input.get_shape()
if input_shape.ndims == 4:
# The input is spatial. Vectorize it first.
dim = 1
for d in input_shape[1:].as_list():
dim *= d
feed_in = tf.reshape(input, [-1, dim])
else:
                feed_in, dim = (input, input_shape.as_list()[-1])
weights = self.make_var('weights', shape=[dim, num_out])
biases = self.make_var('biases', [num_out])
op = tf.compat.v1.nn.relu_layer if relu else tf.compat.v1.nn.xw_plus_b
fc = op(feed_in, weights, biases, name=scope.name)
return fc
@layer
def softmax(self, input, name):
        input_shape = input.get_shape().as_list()
if len(input_shape) > 2:
# For certain models (like NiN), the singleton spatial dimensions
# need to be explicitly squeezed, since they're not broadcast-able
# in TensorFlow's NHWC ordering (unlike Caffe's NCHW).
if input_shape[1] == 1 and input_shape[2] == 1:
input = tf.squeeze(input, axis=[1, 2])
else:
raise ValueError('Rank 2 tensor input expected for softmax!')
return tf.nn.softmax(input, name)
@layer
def batch_normalization(self, input, name, is_training, activation_fn=None, scale=True):
with tf.compat.v1.variable_scope(name) as scope:
output = slim.batch_norm(
input,
activation_fn=activation_fn,
is_training=is_training,
updates_collections=None,
scale=scale,
scope=scope)
return output
@layer
def dropout(self, input, keep_prob, name):
keep = 1 - self.use_dropout + (self.use_dropout * keep_prob)
return tf.nn.dropout(input, rate=1 - (keep), name=name)
|
py | 7dff81caae6a249568cbdd0a840a6dfd6701a5c1 |
def print_sum(a, b):
print(a+b)
print(__name__) |
py | 7dff820e2912ee6b8e1000f13d9731606a9cd31c | # -*- coding: utf-8 -*-
import sys
import warnings
from pathlib import Path
PROJECT_DIR = Path(__file__).resolve().parent
if str(PROJECT_DIR.parent) not in sys.path:
sys.path.insert(0, str(PROJECT_DIR.parent))
warnings.filterwarnings(
"ignore", category=FutureWarning, module="sklearn.utils.deprecation"
)
from common import *
warnings.filterwarnings(
"always", category=FutureWarning, module="sklearn.utils.deprecation"
)
figure_saver = PaperFigureSaver(
directories=Path("~") / "tmp" / PROJECT_DIR.parent.name / PROJECT_DIR.name,
debug=False,
)
map_figure_saver = figure_saver(**map_figure_saver_kwargs)
for fig_saver in (figure_saver, map_figure_saver):
fig_saver.experiment = PROJECT_DIR.name
memory = get_memory("__".join((PROJECT_DIR.parent.name, PROJECT_DIR.name)), verbose=100)
CACHE_DIR = Path(DATA_DIR) / ".pickle" / PROJECT_DIR.parent.name / PROJECT_DIR.name
data_split_cache = SimpleCache("data_split", cache_dir=CACHE_DIR)
save_ale_2d_and_get_importance = partial(
save_ale_2d_and_get_importance, figure_saver=figure_saver
)
save_pdp_plot_2d = partial(save_pdp_plot_2d, figure_saver=figure_saver)
save_ale_plot_1d_with_ptp = partial(
save_ale_plot_1d_with_ptp, figure_saver=figure_saver
)
save_pdp_plot_1d = partial(
save_pdp_plot_1d, CACHE_DIR=CACHE_DIR, figure_saver=figure_saver
)
multi_ale_plot_1d = partial(multi_ale_plot_1d, figure_saver=figure_saver)
# Number of SHAP jobs.
try:
X_train, X_test, y_train, y_test = data_split_cache.load()
# Maximum job array index (inclusive).
shap_params["max_index"] = math.floor(X_train.shape[0] / shap_params["job_samples"])
# Upper bound only.
shap_params["total_samples"] = (shap_params["max_index"] + 1) * shap_params[
"job_samples"
]
except NoCachedDataError:
warnings.warn(
"Processed data not found, not calculating 'max_index' or 'total_samples'."
)
# Upper bound only.
shap_interact_params["total_samples"] = (
shap_interact_params["max_index"] + 1
) * shap_interact_params["job_samples"]
# SHAP cache.
shap_cache = SimpleCache("shap_cache", cache_dir=CACHE_DIR / Path("shap"))
shap_interact_cache = SimpleCache(
"shap_interact_cache", cache_dir=CACHE_DIR / Path("shap_interaction")
)
interact_data_cache = SimpleCache("SHAP_interact_data", cache_dir=CACHE_DIR)
# Redefine the common functionality for our use-case - no shifted variables.
_common_get_data = get_data
_common_get_offset_data = get_offset_data
selected_features = (
"Dry Day Period",
"Dry Day Period -1 Month",
"Dry Day Period -3 Month",
"Dry Day Period -6 Month",
"Dry Day Period -9 Month",
"Max Temp",
"pftCrop",
"popd",
"pftHerb",
"AGB Tree",
"SIF 50P 4k",
"SIF 50P 4k -1 Month",
"SIF 50P 4k -3 Month",
"SIF 50P 4k -6 Month",
"SIF 50P 4k -9 Month",
)
offset_selected_features = []
for column in selected_features:
match = re.search(r"-\d{1,2}", column)
if match:
span = match.span()
# Change the string to reflect the shift.
original_offset = int(column[slice(*span)])
if original_offset > -12:
# Only shift months that are 12 or more months before the current month.
offset_selected_features.append(column)
continue
comp = -(-original_offset % 12)
new_column = " ".join(
(
column[: span[0] - 1],
f"{original_offset} - {comp}",
column[span[1] + 1 :],
)
)
offset_selected_features.append(new_column)
else:
offset_selected_features.append(column)
@wraps(_common_get_data)
def get_data(*args, **kwargs):
(
endog_data,
exog_data,
master_mask,
filled_datasets,
masked_datasets,
land_mask,
) = _common_get_data(*args, **kwargs)
# We need to subset exog_data, filled_datasets, and masked_datasets.
exog_data = exog_data[list(selected_features)]
filled_datasets = filled_datasets.select_variables(selected_features)
masked_datasets = masked_datasets.select_variables(selected_features)
return (
endog_data,
exog_data,
master_mask,
filled_datasets,
masked_datasets,
land_mask,
)
@wraps(_common_get_offset_data)
def get_offset_data(*args, **kwargs):
(
endog_data,
exog_data,
master_mask,
filled_datasets,
masked_datasets,
land_mask,
) = _common_get_offset_data(*args, **kwargs)
# We need to subset exog_data, filled_datasets, and masked_datasets.
exog_data = exog_data[list(offset_selected_features)]
filled_datasets = filled_datasets.select_variables(selected_features)
masked_datasets = masked_datasets.select_variables(selected_features)
return (
endog_data,
exog_data,
master_mask,
filled_datasets,
masked_datasets,
land_mask,
)
def get_model(X_train=None, y_train=None):
return common_get_model(cache_dir=CACHE_DIR, X_train=X_train, y_train=y_train)
model_score_cache = SimpleCache("model_scores", cache_dir=CACHE_DIR)
@model_score_cache
def get_model_scores(rf=None, X_test=None, X_train=None, y_test=None, y_train=None):
return common_get_model_scores(rf, X_test, X_train, y_test, y_train)
|
py | 7dff825d0548deed3dcc1ea01438a361076caa2d | import re
import pickle
import subprocess
import logging
from pathlib import Path
from urllib import request, parse
import synquiz.util as util
log = logging.getLogger('synquiz')
content_types = {
'.opus': 'audio/ogg',
'.m4a': 'audio/mpeg',
'.mp3': 'audio/mpeg',
'.wav': 'audio/wav',
'.mp4': 'video/mp4',
'.mkv': 'video/mp4',
'.gif': 'video/mp4',
'.webm': 'video/webm',
'.ogg': 'video/ogg',
}
def content_type(path):
return content_types.get(Path(path).suffix, 'unknown/unknown')
def is_local_media(data):
return not data['url'].lower().startswith('http')
def video_metadata(data):
keys = ['start', 'end', 'len']
# No length specified, whole media should be considered
if not any(map(lambda x: x in data, keys)):
return True, (data['url'], 0, 1000)
start = data.get('start', 0)
end = data.get('end', None)
if end is None:
length = data.get('len', 1000)
else:
assert end > start
length = end - start
return False, (data['url'], start, length)
def media_cache_key(data):
if data.get('type') == 'image':
return data['url']
if data.get('type') in ('audio', 'video'):
return video_metadata(data)[1]
return None
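# Worked example (hypothetical quiz entry): a trimmed audio clip collapses to a
# (url, start, length) tuple, so the same trim is only downloaded once.
#
#   data = {'type': 'audio', 'url': 'https://example.com/watch?v=abc',
#           'start': 30, 'end': 45}
#   media_cache_key(data)   # -> ('https://example.com/watch?v=abc', 30, 15)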
class Cache:
def __init__(self, home):
self.home = home
self.db_file = self.home / 'db.pickle'
self._prepare()
def _prepare(self):
if self.db_file.exists():
self.cache = pickle.load(open(self.db_file, 'rb'))
else:
self.cache = {}
def write(self):
with open(self.db_file, 'wb') as f:
pickle.dump(self.cache, f)
def contains(self, key, data):
if key in self.cache and (self.home / self.cache[key]).exists():
log.debug('Media found in cache')
data['file'] = self.cache[key]
data['content_type'] = content_type(self.cache[key])
return True
return False
def add(self, path, key, data):
file = Path(path).relative_to(self.home)
log.debug(f'Filename: {file}')
data['file'] = file
data['content_type'] = content_type(file)
self.cache[key] = file
def keys(self):
return self.cache.keys()
def remove(self, key):
if key in self.cache:
del self.cache[key]
def __getitem__(self, key):
return self.cache[key]
def get(self, key):
return self.cache.get(key)
class MediaManager:
def __init__(self, home):
self.home = home
self.cache = Cache(self.home)
def _handle_local_media(self, data):
url = data['url']
data['file'] = url
data['content_type'] = content_type(url)
def media_file(self, data):
if 'url' not in data:
return None
if is_local_media(data):
return self.home / data['url']
key = media_cache_key(data)
cached = self.cache.get(key)
if cached:
return self.home / cached
return None
def needs_downloading(self, data):
if is_local_media(data):
log.debug('Media determined to be local, no downloading necessary')
self._handle_local_media(data)
return False
key = media_cache_key(data)
if self.cache.contains(key, data):
return False
return True
def handle_image(self, data):
if not self.needs_downloading(data):
return
url = data['url']
parsed = parse.urlparse(url)
parts = parsed.path.split('.')
        if not parts:
            ext = 'img'
        else:
            ext = parts[-1].split('/')[0]
dest = self.home / 'data' / f'{util.randstr()}.{ext}'
log.info('Downloading media...')
try:
req = request.Request(url)
req.add_header('User-Agent', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.1.5) Gecko/20091102 Firefox/3.5.5')
resp = request.urlopen(req)
except request.URLError:
log.warning(f"Could not download image '{url}' for {data.get('title')}")
return
log.info('Image successfully downloaded')
dest.write_bytes(resp.read())
self.cache.add(dest, url, data)
def handle_media(self, data):
if not self.needs_downloading(data):
return
url = data['url']
audio_only = data['type'] == 'audio'
get_all, key = video_metadata(data)
_, start, length = key
name = util.randstr()
file_format = f'{self.home}/data/{name}.%(ext)s'
log.info('Downloading media...')
extra_options = []
if audio_only:
extra_options = [
'-x',
]
postprocessor = []
if not get_all:
postprocessor = [
'--postprocessor-args',
f'-ss {util.to_hms(start)} -t {util.to_hms(length)}',
]
result = subprocess.run(
[
'youtube-dl',
'-o',
file_format,
*postprocessor,
*extra_options,
url,
],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
encoding='utf-8',
)
if result.returncode != 0:
log.warning(f"Error downloading '{url}' for {data.get('title')}")
log.warning(result.stdout)
return
log.info('Media download done')
file_re = re.compile(rf'\[(?:download|ffmpeg)\].*({self.home}/data/{name}\.[a-z0-9]+)')
log.debug(result.stdout)
match = file_re.findall(result.stdout)
if not match:
            log.warning(f'No file name found for download {url}')
log.warning(result.stdout)
return
full_path = match[-1]
self.cache.add(full_path, key, data)
def media_items(self, questions):
res = []
for q in questions:
if util.is_media_type(q):
res.append(q)
if util.is_media_type(q.get('answer')):
res.append(q['answer'])
if q.get('type') == 'super':
res.extend(self.media_items(q['questions']))
return res
def clean(self, quiz_data, remove_all=False):
log.info('Cleaning up cached unused files')
media = set(map(media_cache_key, self.media_items(quiz_data['quiz'])))
keys = set(self.cache.keys())
keys = keys - media
if not keys:
log.info('Nothing to do')
else:
log.info(f'{len(keys)} files to delete')
for k in keys:
path = self.home / self.cache[k]
if path.is_file():
log.debug(f'Deleting {path}')
path.unlink()
self.cache.remove(k)
self.cache.write()
if not remove_all:
return
log.info('Cleaning up all files not used in quiz')
files = set(map(lambda x: self.media_file(x), self.media_items(quiz_data['quiz'])))
data_files = set([p for p in (self.home / 'data').glob('*') if p.is_file()])
to_delete = data_files - files
if not to_delete:
log.info('Nothing to do')
return
log.info(f'{len(to_delete)} files to delete')
for p in to_delete:
log.debug(f'Deleting {p}')
p.unlink()
def save_cache(self):
self.cache.write()
|
py | 7dff832c059530f87d6a5d0abd8e9a5f53e4b50a | import logging
from gateway import START_SIGNAL, STOP_SIGNAL, NEW_DATA_SIGNAL
from pydispatch import dispatcher
__author__ = 'edzard'
logger = logging.getLogger(__name__)
path = None
_file = None
_separator = ';'
def _start_handler():
dispatcher.connect(_stop_handler, signal=STOP_SIGNAL, sender='gateway')
global _file
_file = open(path, 'a', encoding='utf-8')
logger.debug("CSV output handler appending to {}".format(_file.name))
dispatcher.connect(_start_handler, signal=START_SIGNAL, sender='gateway')
def _data_handler(sender, timestamp, orientation, linear_acceleration):
    line = "{};{};{};{};{};{};{};{}".format(
        timestamp,
        linear_acceleration['x'], linear_acceleration['y'], linear_acceleration['z'],
        orientation['w'], orientation['x'], orientation['y'], orientation['z'])
    line = line.replace('.', ',')  # use comma as decimal separator in the CSV output
    logger.debug("Writing data: \"{}\"".format(line))
    _file.write(line + '\n')
dispatcher.connect(_data_handler, signal=NEW_DATA_SIGNAL)
def _stop_handler():
# TODO: Graceful disconnect: finish writes before closing file
_file.close()
logger.debug("CSV output handler finished appending to {}".format(_file.name))
|
py | 7dff8390696276f2771b77c60cfa043d461b588e | # -* encoding: utf-8 *-
import enum
__all__ = [
"NOT_FOUND",
"NOT_SET",
"Priority",
"DEFAULT_PRIORITY"
]
RP_LOG_LEVELS = {
60000: "UNKNOWN",
50000: "FATAL",
40000: "ERROR",
30000: "WARN",
20000: "INFO",
10000: "DEBUG",
5000: "TRACE"
}
class Priority(enum.IntEnum):
"""
Generic enum for various operations prioritization.
"""
PRIORITY_IMMEDIATE = 0x0
PRIORITY_HIGH = 0x1
PRIORITY_MEDIUM = 0x2
PRIORITY_LOW = 0x3
class ItemStartType(str, enum.Enum):
BEFORE_CLASS = "before_class"
BEFORE_GROUPS = "before_groups"
BEFORE_METHOD = "before_method"
BEFORE_SUITE = "before_suite"
BEFORE_TEST = "before_test"
SUITE = "class"
STORY = "groups"
TEST = "method"
SCENARIO = "suite"
STEP = "test"
AFTER_CLASS = "after_class"
AFTER_GROUPS = "after_groups"
AFTER_METHOD = "after_method"
AFTER_SUITE = "after_suite"
AFTER_TEST = "after_test"
DEFAULT_PRIORITY = Priority.PRIORITY_MEDIUM
class _PresenceSentinel(object):
def __nonzero__(self):
"""
Added to handle a conditional clause on attributes that are this __class__ objects:
>>> if not response.error:
where response.error can be NOT_FOUND or NOT_SET
The constant must represent False state in bool context.
:return: bool
"""
return False
__bool__ = __nonzero__ # Python3 support
NOT_FOUND = _PresenceSentinel()
NOT_SET = _PresenceSentinel()
NoneType = type(None)
|
py | 7dff83f6a471dc2be07b451c1b183e659f915eda | # coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v1.23.6
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import kubernetes_asyncio.client
from kubernetes_asyncio.client.api.flowcontrol_apiserver_v1beta2_api import FlowcontrolApiserverV1beta2Api # noqa: E501
from kubernetes_asyncio.client.rest import ApiException
class TestFlowcontrolApiserverV1beta2Api(unittest.TestCase):
"""FlowcontrolApiserverV1beta2Api unit test stubs"""
def setUp(self):
self.api = kubernetes_asyncio.client.api.flowcontrol_apiserver_v1beta2_api.FlowcontrolApiserverV1beta2Api() # noqa: E501
def tearDown(self):
pass
def test_create_flow_schema(self):
"""Test case for create_flow_schema
"""
pass
def test_create_priority_level_configuration(self):
"""Test case for create_priority_level_configuration
"""
pass
def test_delete_collection_flow_schema(self):
"""Test case for delete_collection_flow_schema
"""
pass
def test_delete_collection_priority_level_configuration(self):
"""Test case for delete_collection_priority_level_configuration
"""
pass
def test_delete_flow_schema(self):
"""Test case for delete_flow_schema
"""
pass
def test_delete_priority_level_configuration(self):
"""Test case for delete_priority_level_configuration
"""
pass
def test_get_api_resources(self):
"""Test case for get_api_resources
"""
pass
def test_list_flow_schema(self):
"""Test case for list_flow_schema
"""
pass
def test_list_priority_level_configuration(self):
"""Test case for list_priority_level_configuration
"""
pass
def test_patch_flow_schema(self):
"""Test case for patch_flow_schema
"""
pass
def test_patch_flow_schema_status(self):
"""Test case for patch_flow_schema_status
"""
pass
def test_patch_priority_level_configuration(self):
"""Test case for patch_priority_level_configuration
"""
pass
def test_patch_priority_level_configuration_status(self):
"""Test case for patch_priority_level_configuration_status
"""
pass
def test_read_flow_schema(self):
"""Test case for read_flow_schema
"""
pass
def test_read_flow_schema_status(self):
"""Test case for read_flow_schema_status
"""
pass
def test_read_priority_level_configuration(self):
"""Test case for read_priority_level_configuration
"""
pass
def test_read_priority_level_configuration_status(self):
"""Test case for read_priority_level_configuration_status
"""
pass
def test_replace_flow_schema(self):
"""Test case for replace_flow_schema
"""
pass
def test_replace_flow_schema_status(self):
"""Test case for replace_flow_schema_status
"""
pass
def test_replace_priority_level_configuration(self):
"""Test case for replace_priority_level_configuration
"""
pass
def test_replace_priority_level_configuration_status(self):
"""Test case for replace_priority_level_configuration_status
"""
pass
if __name__ == '__main__':
unittest.main()
|
py | 7dff841b69d08eb7b4ab6e4609355386669a3f94 | class Solution(object):
def generate(self, numRows):
"""
:type numRows: int
:rtype: List[List[int]]
"""
if numRows == 0:
return []
result = [[1]]
for n in range(1, numRows):
row = [1]
last_row = result[n-1]
for i in range(1, n):
row.append(last_row[i-1]+last_row[i])
row.append(1)
result.append(row)
return result
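# Illustrative check: Solution().generate(4) returns
# [[1], [1, 1], [1, 2, 1], [1, 3, 3, 1]].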
|
py | 7dff849c17cafc4060b5721d235d9fa0e27fbe84 | import psycopg2, psycopg2.extras
import os
import glob
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import patches
from matplotlib.pyplot import figure
from IPython.display import SVG
from sklearn.metrics import classification_report
# an import error occurs when importing keras directly instead of via tensorflow.python
from tensorflow.python.keras import layers, Input, regularizers
from tensorflow.python.keras.models import Model, load_model
from tensorflow.python.keras.utils import to_categorical, model_to_dot, plot_model
from tensorflow.python.keras.optimizers import Adam
from tensorflow.python.keras.callbacks import ModelCheckpoint, Callback, EarlyStopping
# Import data:
# 1. read the text file line by line;
# 2. format the data in DataFrame.
def read_data(path):
data_list = []
with open(path, 'rb') as f:
while True:
line = f.readline()
if not line:
break
d_str = line.split()
try:
d_tem = [float(d) for d in d_str]
            except ValueError:
                # Skip non-numeric lines instead of re-appending the previous row.
                continue
data_list.append(d_tem)
data = pd.DataFrame(data_list)
return data.T
# Prepare data for training:
# 1. sample_size=100: the most recent 100 updates
# 2. feature_num=40: 40 features per time stamp
# 3. target_num=5: relative changes for the next 1,2,3,5 and 10 events(5 in total),
# using equation 3 in the paper to calculate average future midprice and label the price movements as 0,1,2
def get_model_data(data, sample_size=100, feature_num=40, target_num=5):
data = data.values
shape = data.shape
X = np.zeros((shape[0]-sample_size, sample_size, feature_num))
Y = np.zeros((shape[0]-sample_size, target_num))
#e.g. take feature from 0~99 row to predict target label on 99th row, take feature from 31837~31936 row to predict target label on 31936th row
for i in range(shape[0]-sample_size):#range = 0~31837
X[i] = data[i:i+sample_size,0:feature_num]# [every 100 events from 31937 rows, take the first 40 columns as features]
Y[i] = data[i+sample_size-1,-target_num:]# [from 99~31936 rows, take the last 5 columns as labels]
X = X.reshape(X.shape[0], sample_size, feature_num, 1)# add the 4th dimension: 1 channel
# "Benchmark dataset for mid-price forecasting of limit order book data with machine learning"
# Y=Y-1 relabels as 0,1,2
# labels 0: equal to or greater than 0.002
# labels 1: -0.00199 to 0.00199
# labels 2: smaller or equal to -0.002
Y = Y-1
return X,Y
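# Shape sketch (indicative figures taken from the comments elsewhere in this
# script for the FI-2010 files it expects): a (31937, 149) day of data yields
#   X: (31837, 100, 40, 1) rolling windows and Y: (31837, 5) horizon labels.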
# transform array to rectangle shape
# def trans2rect(arr):
# tarr = []
# trend = arr[0]
# width = 1
# day = 0
# for elm in arr[1:]:
# if elm == trend:
# width += 1
# else:
# tarr.append((trend, day, width))
# trend = elm
# day += width
# width = 1
# tarr.append((trend, day, width))
# return tarr
# # callback for evaluating on each epoch
# class EvaluateCallback(Callback):
# def __init__(self, test_x, test_y, list_loss, list_acc):
# self.test_x = test_x
# self.test_y = test_y
# self.list_loss = list_loss
# self.list_acc = list_acc
# def on_epoch_end(self, epoch, logs={}):
# loss, acc = self.model.evaluate(self.test_x, self.test_y, verbose=0)
# self.list_loss.append(loss)
# self.list_acc.append(acc)
# # the size of a single input is (100,40)
input_tensor = Input(shape=(100,40,1))
# convolutional filter is (1,2) with stride of (1,2)
layer_x = layers.Conv2D(16, (1,2), strides=(1,2))(input_tensor)
layer_x = layers.LeakyReLU(alpha=0.01)(layer_x)
layer_x = layers.Conv2D(16, (4,1), padding='same')(layer_x)
layer_x = layers.LeakyReLU(alpha=0.01)(layer_x)
layer_x = layers.Conv2D(16, (4,1), padding='same')(layer_x)
layer_x = layers.LeakyReLU(alpha=0.01)(layer_x)
layer_x = layers.Conv2D(16, (1,2), strides=(1,2))(layer_x)
layer_x = layers.LeakyReLU(alpha=0.01)(layer_x)
layer_x = layers.Conv2D(16, (4,1), padding='same')(layer_x)
layer_x = layers.LeakyReLU(alpha=0.01)(layer_x)
layer_x = layers.Conv2D(16, (4,1), padding='same')(layer_x)
layer_x = layers.LeakyReLU(alpha=0.01)(layer_x)
layer_x = layers.Conv2D(16, (1,10))(layer_x)
layer_x = layers.LeakyReLU(alpha=0.01)(layer_x)
layer_x = layers.Conv2D(16, (4,1), padding='same')(layer_x)
layer_x = layers.LeakyReLU(alpha=0.01)(layer_x)
layer_x = layers.Conv2D(16, (4,1), padding='same')(layer_x)
layer_x = layers.LeakyReLU(alpha=0.01)(layer_x)
# Inception Module
tower_1 = layers.Conv2D(32, (1,1), padding='same')(layer_x)
tower_1 = layers.LeakyReLU(alpha=0.01)(tower_1)
tower_1 = layers.Conv2D(32, (3,1), padding='same')(tower_1)
tower_1 = layers.LeakyReLU(alpha=0.01)(tower_1)
tower_2 = layers.Conv2D(32, (1,1), padding='same')(layer_x)
tower_2 = layers.LeakyReLU(alpha=0.01)(tower_2)
tower_2 = layers.Conv2D(32, (5,1), padding='same')(tower_2)
tower_2 = layers.LeakyReLU(alpha=0.01)(tower_2)
tower_3 = layers.MaxPooling2D((3,1), padding='same', strides=(1,1))(layer_x)
tower_3 = layers.Conv2D(32, (1,1), padding='same')(tower_3)
tower_3 = layers.LeakyReLU(alpha=0.01)(tower_3)
layer_x = layers.concatenate([tower_1, tower_2, tower_3], axis=-1)
# concatenate features of tower_1, tower_2, tower_3
layer_x = layers.Reshape((100,96))(layer_x)
# 64 LSTM units
#CPU version
layer_x = layers.LSTM(64)(layer_x)
#GPU version, cannot run on CPU
#layer_x = layers.CuDNNLSTM(64)(layer_x)
# The last output layer uses a softmax activation function
output = layers.Dense(3, activation='softmax')(layer_x)
model = Model(input_tensor, output)
opt = Adam(lr=0.01, epsilon=1)# learning rate and epsilon are the same as paper DeepLOB
model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])
print(model.summary())
data_path = os.path.join(os.getcwd(), 'Train_Dst_NoAuction_ZScore_CF_9.txt')
data = read_data(data_path)
train_X, train_Y = get_model_data(data)
train_Y = train_Y.astype(int)
# #separate 5 target variables(next 1,2,3,5 and 10 events)
# train_y = to_categorical(train_Y[:,0])# y1 is the next event's mid price (k=1)
# train_y = to_categorical(train_Y[:,1])# k=2
# train_y = to_categorical(train_Y[:,2])# k=3
# train_y = to_categorical(train_Y[:,3])# k=5
train_y = to_categorical(train_Y[:,4])# k=10
# #test_data.to_csv('FI2010_test.csv')
# test_path = os.path.join(os.getcwd(), 'BenchmarkDatasets/NoAuction/1.NoAuction_Zscore/NoAuction_Zscore_Testing/Test_Dst_NoAuction_ZScore_CF_9.txt')
# #test_data.shape = 31937x149 (31937 events, 149 features)
# test_data = read_data(test_path)
# #test_X.shape = 31837x100x40x1 (31837 time frames, each with 100 events, each event with 40 features, 1 channel)
# test_X, test_Y = get_model_data(test_data)
# #test_Y.shape = 31837x5(5 target variables)
# test_Y = test_Y.astype(int)
# data = test_data.values
# midprice = data[:, 41]
# midprice = midprice[:200]
#test_y.shape = 31837x3(one hot encoding: 1,0,0; 0,1,0; 0,0,1)
# test_y = to_categorical(test_Y[:,0])
# test_y = to_categorical(test_Y[:,1])
# test_y = to_categorical(test_Y[:,2])
# test_y = to_categorical(test_Y[:,3])
# test_y = to_categorical(test_Y[:,4])
save_dir = os.path.join(os.getcwd(), 'saved_models')
if not os.path.isdir(save_dir):
os.makedirs(save_dir)
filepath="model_{epoch:02d}-{acc:.2f}.hdf5"
#save model for each epoch
checkpoint = ModelCheckpoint(os.path.join(save_dir, filepath), monitor='acc',verbose=1)
# test_loss = []
# test_acc = []
# #evaluate on each epoch
# evaluate_callback = EvaluateCallback(test_X, test_y1, test_loss, test_acc)
#no difference between h5 & hdf5
# model = load_model('model1-with-70epochs.h5')
history1 = model.fit(train_X, train_y, epochs=1, batch_size=32, callbacks=[checkpoint], validation_split=0.2)
#print(history1.history.keys())#['loss', 'acc']
# results = model.evaluate(test_X, test_y1)
# print(model.metrics_names)
# # print(results)
# y_pred1 = model.predict(test_X, verbose=1)
# #y_pred2 = model2.predict(test_X, batch_size=32, verbose=1)
# #y_pred3 = model3.predict(test_X, batch_size=32, verbose=1)
# #y_pred5 = model5.predict(test_X, batch_size=32, verbose=1)
# #y_pred10 = model10.predict(test_X, batch_size=32, verbose=1)
# y_pred1 = np.argmax(y_pred1, axis=1)
# y_pred1.tolist()
#test_y1 = [np.where(r==1)[0][0] for r in test_y1]
# target_names = ['rise', 'stable', 'fall']
# print(classification_report(test_y1, y_pred1, target_names=target_names))
#plot train and validation accuracy
# plt.plot(history1.history['acc'])
# plt.plot(history1.history['val_acc'])
# # plt.plot(test_acc)
# plt.title('Accuracy')
# plt.ylabel('Accuracy')
# plt.xlabel('Epoch')
# plt.legend(['Train', 'Valid'])
# plt.savefig('accuracy_k=10.png')
# plt.clf()
# #plot train and validation loss
# plt.plot(history1.history['loss'])
# plt.plot(history1.history['val_loss'])
# # plt.plot(test_loss)
# plt.title('Loss')
# plt.ylabel('Loss')
# plt.xlabel('Epoch')
# plt.legend(['Train', 'Valid'])
# plt.savefig('loss_k=10.png')
# #plot rectangle graph
# figure(num=None, figsize=(16, 12), dpi=80, facecolor='w', edgecolor='k')
# ax = plt.subplot(111)
# #plt.xlim(0, 100)
# # y_pred1 = y_pred1[:200]
# test_y1 = test_y1[:200]
# tans = trans2rect(test_y1)
# # tpred = trans2rect(y_pred1)
# #ans at top, pred at bottom
# #label 0:rise, color=red
# #label 1:stable, color=white
# #label 2:fall, color=green
# for a in tans:
# if a[0] == 0:
# col = (1,.6,.6)
# elif a[0] == 1:
# col = 'w'
# elif a[0] == 2:
# col = (.6,1,.6)
# ax.add_patch(patches.Rectangle((a[1],0), a[2],3, color=col))
# # for a in tpred:
# # if a[0] == 0:
# # col = (1,.6,.6)
# # elif a[0] == 1:
# # col = 'w'
# # elif a[0] == 2:
# # col = (.6,1,.6)
# # ax.add_patch(patches.Rectangle((a[1],0), a[2],1.5, color=col))
# plt.plot(midprice)
# #save before show, otherwise can't save after closing show window
# plt.savefig('label_1.png')
#plt.show()
|
py | 7dff85dc55b5beabde2c338be71d269e0589bcd3 | # Generated by Django 4.0.1 on 2022-01-21 10:45
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('dashboard', '0004_alter_profile_user'),
]
operations = [
migrations.AlterField(
model_name='profile',
name='user',
field=models.OneToOneField(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
|
py | 7dff88682b426fe64223a212f401bef425931f3b | from __future__ import annotations
from ...layers.utils.layer_utils import register_layer_action
from ...utils.translations import trans
from ._image_constants import Mode
from .image import Image
@Image.bind_key('Space')
def hold_to_pan_zoom(layer):
"""Hold to pan and zoom in the viewer."""
if layer._mode != Mode.PAN_ZOOM:
# on key press
prev_mode = layer.mode
layer.mode = Mode.PAN_ZOOM
yield
# on key release
layer.mode = prev_mode
def register_image_action(description):
return register_layer_action(Image, description)
@register_image_action(trans._('Transform'))
def activate_image_select_mode(layer):
layer.mode = Mode.TRANSFORM
@register_image_action(trans._('Pan/zoom'))
def activate_image_pan_zoom_mode(layer):
layer.mode = Mode.PAN_ZOOM
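# Hypothetical extra action, to illustrate how register_image_action is meant
# to be used as a decorator factory (the description and behaviour below are
# examples, not part of napari):
#
#   @register_image_action(trans._('Log layer name'))
#   def log_layer_name(layer):
#       print(layer.name)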
|
py | 7dff886a4baf1bf9a9c6b25b5eb5b3e1c0773d30 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields,osv
class report_workcenter_load(osv.osv):
_name="report.workcenter.load"
_description="Work Center Load"
_auto = False
_log_access = False
_columns = {
'name': fields.char('Week', size=64, required=True),
'workcenter_id': fields.many2one('mrp.workcenter', 'Work Center', required=True),
'cycle': fields.float('Number of Cycles'),
'hour': fields.float('Number of Hours'),
}
def init(self, cr):
cr.execute("""
create or replace view report_workcenter_load as (
SELECT
min(wl.id) as id,
to_char(p.date_planned,'YYYY:mm:dd') as name,
SUM(wl.hour) AS hour,
SUM(wl.cycle) AS cycle,
wl.workcenter_id as workcenter_id
FROM
mrp_production_workcenter_line wl
LEFT JOIN mrp_production p
ON p.id = wl.production_id
GROUP BY
wl.workcenter_id,
to_char(p.date_planned,'YYYY:mm:dd')
)""")
report_workcenter_load()
class report_mrp_inout(osv.osv):
_name="report.mrp.inout"
_description="Stock value variation"
_auto = False
_log_access = False
_rec_name = 'date'
_columns = {
'date': fields.char('Week', size=64, required=True),
'value': fields.float('Stock value', required=True, digits=(16,2)),
}
def init(self, cr):
cr.execute("""
create or replace view report_mrp_inout as (
select
min(sm.id) as id,
to_char(sm.date,'YYYY:IW') as date,
sum(case when (sl.usage='internal') then
pt.standard_price * sm.product_qty
else
0.0
end - case when (sl2.usage='internal') then
pt.standard_price * sm.product_qty
else
0.0
end) as value
from
stock_move sm
left join product_product pp
on (pp.id = sm.product_id)
left join product_template pt
on (pt.id = pp.product_tmpl_id)
left join stock_location sl
on ( sl.id = sm.location_id)
left join stock_location sl2
on ( sl2.id = sm.location_dest_id)
where
sm.state in ('waiting','confirmed','assigned')
group by
to_char(sm.date,'YYYY:IW')
)""")
report_mrp_inout()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
py | 7dff88b00a8c213dedd4e4463a9f4e95e7092c32 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ======================================
"""Library of Cloud TPU helper functions for data loading."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.ops import batching
from tensorflow.python.data.experimental.ops import interleave_ops
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.data.ops import readers
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.ops import functional_ops
def _TextLineDataset(filename):
buffer_size = 8 * 1024 * 1024 # 8 MiB per file
dataset = readers.TextLineDataset(filename, buffer_size=buffer_size)
return dataset
def _TFRecordDataset(filename):
buffer_size = 8 * 1024 * 1024 # 8 MiB per file
dataset = readers.TFRecordDataset(filename, buffer_size=buffer_size)
return dataset
_FILETYPE_MAP = {
'tfrecord': _TFRecordDataset,
'textline': _TextLineDataset,
'text': _TextLineDataset,
}
def StreamingFilesDataset(files,
filetype=None,
file_reader_job=None,
worker_job=None,
num_epochs=None,
filename_shuffle_buffer_size=None,
num_parallel_reads=None,
batch_transfer_size=None,
sloppy=None):
"""StreamingFilesDataset constructs a dataset to stream from workers (GCE VM).
Because Cloud TPUs are allocated over the network, a Cloud TPU cannot read
files local to your GCE VM. In order to train using files stored on your local
VM (e.g. on local SSD for extreme performance), use the StreamingFilesDataset
helper to generate a dataset to feed your Cloud TPU with files from your GCE
VM.
The resulting dataset may return an OutOfRangeError if there are no files
found as a result of the fileglob expansion.
Note: StreamingFilesDataset assumes that the session is using a
TPUClusterResolver and has therefore a worker and a coordinator job. File
loading will be done on the coordinator job.
Args:
files: A string glob to match files, or a `tf.data.Dataset` generating file
names.
filetype: A string (one of 'tfrecord', or 'textline') or a single-argument
TensorFlow function that when given a filename returns a dataset.
file_reader_job: An optional string that corresponds to the job that should
perform the file reads.
worker_job: An optional string that corresponds to the job that should
process the tensors (i.e. your GPU or TPU worker).
num_epochs: The number of epochs through the training set that should be
generated. By default, it will repeat infinitely.
filename_shuffle_buffer_size: An optional integer whose value controls the
shuffling of the file names. If you would like to read from the files in
the same order, set to 0 or False.
num_parallel_reads: An optional integer controlling the number of files to
read from concurrently. (Set to 1 for no parallelism.)
batch_transfer_size: An optional integer controlling the batching used to
amortize the remote function invocation overhead. Set to a very large
number to increase throughput. Set to a very small number to reduce memory
consumption. Set to False to skip batching.
sloppy: (Optional.) If `False`, read input data while maintaining a
deterministic order. (This may have significant performance impacts.)
sloppy defaults to: True.
Returns:
A `tf.data.Dataset` with an infinite stream of elements generated by a
parallel interleaving of the set of files matched (or generated) by `files`
with a type is the output of the dataset specified by `filetype`.
Raises:
ValueError: if any argument is not of the expected type.
"""
if filetype is None:
filetype = 'tfrecord'
if isinstance(filetype, str):
if filetype not in _FILETYPE_MAP:
raise ValueError('Unexpected filetype: %s' % filetype)
reader_fn = _FILETYPE_MAP[filetype]
elif callable(filetype):
reader_fn = filetype
else:
raise ValueError('filetype should be a string or a callable')
file_reader_job = file_reader_job or 'coordinator'
worker_job = worker_job or 'worker'
if filename_shuffle_buffer_size is None:
filename_shuffle_buffer_size = 4096
num_parallel_reads = num_parallel_reads or 8
if batch_transfer_size is None:
batch_transfer_size = 256
if sloppy is None:
sloppy = True
with ops.device('/job:%s' % file_reader_job):
if isinstance(files, str):
source_dataset = dataset_ops.Dataset.list_files(files)
elif isinstance(files, dataset_ops.Dataset):
source_dataset = files
else:
raise ValueError('files was not a string or a dataset: %s' % files)
if filename_shuffle_buffer_size:
source_dataset = source_dataset.shuffle(
buffer_size=filename_shuffle_buffer_size)
# NOTE: We perform the `repeat` on the source dataset, because the output
# dataset does not currently have enough information to recreate an iterator
# over the source dataset when it reaches the end.
source_dataset = source_dataset.repeat(num_epochs)
source_dataset = source_dataset.apply(
interleave_ops.parallel_interleave(
reader_fn, cycle_length=num_parallel_reads, sloppy=sloppy))
if batch_transfer_size:
source_dataset = source_dataset.batch(batch_transfer_size)
source_dataset = source_dataset.prefetch(1)
source_iterator = source_dataset.make_one_shot_iterator()
source_handle = source_iterator.string_handle()
@function.Defun(dtypes.string)
def LoadingFunc(h):
remote_iterator = iterator_ops.Iterator.from_string_handle(
h, source_dataset.output_types, source_dataset.output_shapes)
return remote_iterator.get_next()
def MapFn(unused_input):
if isinstance(source_dataset.output_types, dtypes.DType):
output_types = [source_dataset.output_types]
elif isinstance(source_dataset.output_types, (list, tuple)):
output_types = source_dataset.output_types
else:
raise ValueError('source dataset has invalid output types')
remote_calls = functional_ops.remote_call(
args=[source_handle],
Tout=output_types,
f=LoadingFunc,
target='/job:%s/replica:0/task:0/cpu:0' % file_reader_job)
if len(remote_calls) == 1:
return remote_calls[0]
else:
return remote_calls
with ops.device('/job:%s' % worker_job):
output_dataset = dataset_ops.Dataset.range(2).repeat().map(
MapFn, num_parallel_calls=4 if sloppy else None)
output_dataset = output_dataset.prefetch(1)
if batch_transfer_size:
# Undo the batching used during the transfer.
output_dataset = output_dataset.apply(batching.unbatch()).prefetch(1)
return output_dataset
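# Illustrative usage sketch (an assumption, not part of the original module, and
# never called): how StreamingFilesDataset might be wired into a TF 1.x input
# pipeline. The file glob and job names below are placeholders.
def _example_streaming_files_usage():
  dataset = StreamingFilesDataset(
      '/mnt/disks/ssd/train-*.tfrecord',  # hypothetical glob on the local VM
      filetype='tfrecord',
      file_reader_job='coordinator',
      worker_job='worker',
      num_parallel_reads=8)
  iterator = dataset.make_one_shot_iterator()
  return iterator.get_next()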
|
py | 7dff890577ab924f5e36534a7a4f5e6b82c83ad7 | def gamma(value,gamma=2.5,offset=0.5):
assert 0 <= value <= 255
    return int(pow(float(value) / 255.0, gamma) * 255.0 + offset)
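# Illustrative usage sketch (an assumption about typical use, e.g. LED/display
# brightness correction; not part of the original snippet): precompute a lookup
# table once so per-pixel code avoids repeated pow() calls.
if __name__ == "__main__":
    GAMMA_TABLE = [gamma(i) for i in range(256)]
    print(GAMMA_TABLE[:8], "...", GAMMA_TABLE[248:])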
|
py | 7dff89b08d8b991ff15afdfcecc5dec1dff62cc3 | import bezier
import networkx as nx
import numpy as np
def curved_edges(G, pos, dist_ratio=0.2, bezier_precision=20, polarity='random'):
# Get nodes into np array
edges = np.array(G.edges())
l = edges.shape[0]
if polarity == 'random':
# Random polarity of curve
rnd = np.where(np.random.randint(2, size=l)==0, -1, 1)
else:
# Create a fixed (hashed) polarity column in the case we use fixed polarity
# This is useful, e.g., for animations
rnd = np.where(np.mod(np.vectorize(hash)(edges[:,0])+np.vectorize(hash)(edges[:,1]),2)==0,-1,1)
# Coordinates (x,y) of both nodes for each edge
# e.g., https://stackoverflow.com/questions/16992713/translate-every-element-in-numpy-array-according-to-key
# Note the np.vectorize method doesn't work for all node position dictionaries for some reason
u, inv = np.unique(edges, return_inverse = True)
coords = np.array([pos[x] for x in u])[inv].reshape([edges.shape[0], 2, edges.shape[1]])
coords_node1 = coords[:,0,:]
coords_node2 = coords[:,1,:]
# Swap node1/node2 allocations to make sure the directionality works correctly
should_swap = coords_node1[:,0] > coords_node2[:,0]
coords_node1[should_swap], coords_node2[should_swap] = coords_node2[should_swap], coords_node1[should_swap]
# Distance for control points
dist = dist_ratio * np.sqrt(np.sum((coords_node1-coords_node2)**2, axis=1))
# Gradients of line connecting node & perpendicular
m1 = (coords_node2[:,1]-coords_node1[:,1])/(coords_node2[:,0]-coords_node1[:,0])
m2 = -1/m1
# Temporary points along the line which connects two nodes
# e.g., https://math.stackexchange.com/questions/656500/given-a-point-slope-and-a-distance-along-that-slope-easily-find-a-second-p
t1 = dist/np.sqrt(1+m1**2)
v1 = np.array([np.ones(l),m1])
coords_node1_displace = coords_node1 + (v1*t1).T
coords_node2_displace = coords_node2 - (v1*t1).T
# Control points, same distance but along perpendicular line
# rnd gives the 'polarity' to determine which side of the line the curve should arc
t2 = dist/np.sqrt(1+m2**2)
v2 = np.array([np.ones(len(edges)),m2])
coords_node1_ctrl = coords_node1_displace + (rnd*v2*t2).T
coords_node2_ctrl = coords_node2_displace + (rnd*v2*t2).T
# Combine all these four (x,y) columns into a 'node matrix'
node_matrix = np.array([coords_node1, coords_node1_ctrl, coords_node2_ctrl, coords_node2])
# Create the Bezier curves and store them in a list
curveplots = []
for i in range(l):
nodes = node_matrix[:,i,:].T
curveplots.append(bezier.Curve(nodes, degree=2).evaluate_multi(np.linspace(0,1,bezier_precision)).T)
# Return an array of these curves
curves = np.array(curveplots)
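    # Downstream sketch (illustrative only, not executed here): the returned array
    # has shape (n_edges, bezier_precision, 2) and is typically drawn with
    # matplotlib, e.g.
    #   from matplotlib.collections import LineCollection
    #   ax.add_collection(LineCollection(curves, color='k', alpha=0.3, linewidth=0.5))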
return curves |
py | 7dff8b32adc4563db5fcecf93bcec490985ff86b | import logging
import os
import psycopg2
import shlex
import shutil
import subprocess
import tempfile
import time
from patroni.exceptions import PostgresConnectionException, PostgresException
from patroni.utils import compare_values, parse_bool, parse_int, Retry, RetryFailedError
from six import string_types
from threading import Lock
logger = logging.getLogger(__name__)
ACTION_ON_START = "on_start"
ACTION_ON_STOP = "on_stop"
ACTION_ON_RESTART = "on_restart"
ACTION_ON_RELOAD = "on_reload"
ACTION_ON_ROLE_CHANGE = "on_role_change"
class Postgresql(object):
# List of parameters which must be always passed to postmaster as command line options
# to make it not possible to change them with 'ALTER SYSTEM'.
    # Some of these parameters have a sane default value assigned and Patroni doesn't
    # allow decreasing it. E.g. 'wal_level' can't be lower than 'hot_standby', and so on.
# These parameters could be changed only globally, i.e. via DCS.
# P.S. 'listen_addresses' and 'port' are added here just for convenience, to mark them
# as a parameters which should always be passed through command line.
#
# Format:
# key - parameter name
# value - tuple(default_value, check_function, min_version)
# default_value -- some sane default value
# check_function -- if the new value is not correct must return `!False`
# min_version -- major version of PostgreSQL when parameter was introduced
CMDLINE_OPTIONS = {
'listen_addresses': (None, lambda _: False, 9.1),
'port': (None, lambda _: False, 9.1),
'cluster_name': (None, lambda _: False, 9.5),
'wal_level': ('hot_standby', lambda v: v.lower() in ('hot_standby', 'logical'), 9.1),
'hot_standby': ('on', lambda _: False, 9.1),
'max_connections': (100, lambda v: int(v) >= 100, 9.1),
'max_wal_senders': (5, lambda v: int(v) >= 5, 9.1),
'wal_keep_segments': (8, lambda v: int(v) >= 8, 9.1),
'max_prepared_transactions': (0, lambda v: int(v) >= 0, 9.1),
'max_locks_per_transaction': (64, lambda v: int(v) >= 64, 9.1),
'track_commit_timestamp': ('off', lambda v: parse_bool(v) is not None, 9.5),
'max_replication_slots': (5, lambda v: int(v) >= 5, 9.4),
'max_worker_processes': (8, lambda v: int(v) >= 8, 9.4),
'wal_log_hints': ('on', lambda _: False, 9.4)
}
def __init__(self, config):
self.config = config
self.name = config['name']
self.scope = config['scope']
self._database = config.get('database', 'postgres')
self._data_dir = config['data_dir']
self._pending_restart = False
self._server_parameters = self.get_server_parameters(config)
self._connect_address = config.get('connect_address')
self._superuser = config['authentication'].get('superuser', {})
self._replication = config['authentication']['replication']
self.resolve_connection_addresses()
self._need_rewind = False
self._use_slots = config.get('use_slots', True)
self._version_file = os.path.join(self._data_dir, 'PG_VERSION')
self._major_version = self.get_major_version()
self._schedule_load_slots = self.use_slots
self._pgpass = config.get('pgpass') or os.path.join(os.path.expanduser('~'), 'pgpass')
self.callback = config.get('callbacks') or {}
config_base_name = config.get('config_base_name', 'postgresql')
self._postgresql_conf = os.path.join(self._data_dir, config_base_name + '.conf')
self._postgresql_base_conf_name = config_base_name + '.base.conf'
self._postgresql_base_conf = os.path.join(self._data_dir, self._postgresql_base_conf_name)
self._recovery_conf = os.path.join(self._data_dir, 'recovery.conf')
self._configuration_to_save = (self._postgresql_conf, self._postgresql_base_conf,
os.path.join(self._data_dir, 'pg_hba.conf'))
self._postmaster_pid = os.path.join(self._data_dir, 'postmaster.pid')
self._trigger_file = config.get('recovery_conf', {}).get('trigger_file') or 'promote'
self._trigger_file = os.path.abspath(os.path.join(self._data_dir, self._trigger_file))
self._connection = None
self._cursor_holder = None
self._sysid = None
self._replication_slots = [] # list of already existing replication slots
self.retry = Retry(max_tries=-1, deadline=config['retry_timeout']/2.0, max_delay=1,
retry_exceptions=PostgresConnectionException)
self._state_lock = Lock()
self.set_state('stopped')
self._role_lock = Lock()
self.set_role(self.get_postgres_role_from_data_directory())
if self.is_running():
self.set_state('running')
self.set_role('master' if self.is_leader() else 'replica')
self._write_postgresql_conf() # we are "joining" already running postgres
@property
def use_slots(self):
return self._use_slots and self._major_version >= 9.4
def _version_file_exists(self):
return not self.data_directory_empty() and os.path.isfile(self._version_file)
def get_major_version(self):
if self._version_file_exists():
try:
with open(self._version_file) as f:
return float(f.read())
except Exception:
logger.exception('Failed to read PG_VERSION from %s', self._data_dir)
return 0.0
def get_server_parameters(self, config):
parameters = config['parameters'].copy()
listen_addresses, port = (config['listen'] + ':5432').split(':')[:2]
parameters.update({'cluster_name': self.scope, 'listen_addresses': listen_addresses, 'port': port})
return parameters
def resolve_connection_addresses(self):
self._local_address = self.get_local_address()
self.connection_string = 'postgres://{0}/{1}'.format(
self._connect_address or self._local_address['host'] + ':' + self._local_address['port'], self._database)
def pg_ctl(self, cmd, *args, **kwargs):
"""Builds and executes pg_ctl command
:returns: `!True` when return_code == 0, otherwise `!False`"""
pg_ctl = ['pg_ctl', cmd]
if cmd in ('start', 'stop', 'restart'):
pg_ctl += ['-w']
timeout = self.config.get('pg_ctl_timeout')
if timeout:
try:
pg_ctl += ['-t', str(int(timeout))]
except Exception:
logger.error('Bad value of pg_ctl_timeout: %s', timeout)
return subprocess.call(pg_ctl + ['-D', self._data_dir] + list(args), **kwargs) == 0
def reload_config(self, config):
server_parameters = self.get_server_parameters(config)
listen_address_changed = pending_reload = pending_restart = False
if self.is_healthy():
changes = {p: v for p, v in server_parameters.items() if '.' not in p}
changes.update({p: None for p, v in self._server_parameters.items() if not ('.' in p or p in changes)})
if changes:
if 'wal_segment_size' not in changes:
changes['wal_segment_size'] = '16384kB'
# XXX: query can raise an exception
for r in self.query("""SELECT name, setting, unit, vartype, context
FROM pg_settings
WHERE name IN (""" + ', '.join(['%s'] * len(changes)) + """)
ORDER BY 1 DESC""", *(list(changes.keys()))):
if r[4] == 'internal':
if r[0] == 'wal_segment_size':
server_parameters.pop(r[0], None)
wal_segment_size = parse_int(r[2], 'kB')
if wal_segment_size is not None:
changes['wal_segment_size'] = '{0}kB'.format(int(r[1]) * wal_segment_size)
elif r[0] in changes:
unit = changes['wal_segment_size'] if r[0] in ('min_wal_size', 'max_wal_size') else r[2]
new_value = changes.pop(r[0])
if new_value is None or not compare_values(r[3], unit, r[1], new_value):
if r[4] == 'postmaster':
pending_restart = True
if r[0] in ('listen_addresses', 'port'):
listen_address_changed = True
else:
pending_reload = True
for param in changes:
if param in server_parameters:
logger.warning('Removing invalid parameter `%s` from postgresql.parameters', param)
server_parameters.pop(param)
            # Check whether user-defined parameters (parameters with a period in the name) have changed
if not pending_reload:
for p, v in server_parameters.items():
if '.' in p and (p not in self._server_parameters or str(v) != str(self._server_parameters[p])):
pending_reload = True
break
if not pending_reload:
for p, v in self._server_parameters.items():
if '.' in p and (p not in server_parameters or str(v) != str(server_parameters[p])):
pending_reload = True
break
self.config = config
self._pending_restart = pending_restart
self._server_parameters = server_parameters
self._connect_address = config.get('connect_address')
if not listen_address_changed:
self.resolve_connection_addresses()
if pending_reload:
self._write_postgresql_conf()
self.reload()
self.retry.deadline = config['retry_timeout']/2.0
@property
def pending_restart(self):
return self._pending_restart
@property
def can_rewind(self):
""" check if pg_rewind executable is there and that pg_controldata indicates
we have either wal_log_hints or checksums turned on
"""
# low-hanging fruit: check if pg_rewind configuration is there
if not (self.config.get('use_pg_rewind') and all(self._superuser.get(n) for n in ('username', 'password'))):
return False
cmd = ['pg_rewind', '--help']
try:
ret = subprocess.call(cmd, stdout=open(os.devnull, 'w'), stderr=subprocess.STDOUT)
if ret != 0: # pg_rewind is not there, close up the shop and go home
return False
except OSError:
return False
# check if the cluster's configuration permits pg_rewind
data = self.controldata()
return data.get('wal_log_hints setting', 'off') == 'on' or data.get('Data page checksum version', '0') != '0'
@property
def sysid(self):
if not self._sysid:
data = self.controldata()
self._sysid = data.get('Database system identifier', "")
return self._sysid
def get_local_address(self):
listen_addresses = self._server_parameters['listen_addresses'].split(',')
local_address = listen_addresses[0].strip() # take first address from listen_addresses
for la in listen_addresses:
if la.strip().lower() in ('*', '0.0.0.0', '127.0.0.1', 'localhost'): # we are listening on '*' or localhost
local_address = 'localhost' # connection via localhost is preferred
break
return {'host': local_address, 'port': self._server_parameters['port']}
def get_postgres_role_from_data_directory(self):
if self.data_directory_empty():
return 'uninitialized'
elif os.path.exists(self._recovery_conf):
return 'replica'
else:
return 'master'
@property
def _local_connect_kwargs(self):
ret = self._local_address.copy()
ret.update({'database': self._database,
'fallback_application_name': 'Patroni',
'connect_timeout': 3,
'options': '-c statement_timeout=2000'})
if 'username' in self._superuser:
ret['user'] = self._superuser['username']
if 'password' in self._superuser:
ret['password'] = self._superuser['password']
return ret
def connection(self):
if not self._connection or self._connection.closed != 0:
self._connection = psycopg2.connect(**self._local_connect_kwargs)
self._connection.autocommit = True
self.server_version = self._connection.server_version
return self._connection
def _cursor(self):
if not self._cursor_holder or self._cursor_holder.closed or self._cursor_holder.connection.closed != 0:
logger.info("establishing a new patroni connection to the postgres cluster")
self._cursor_holder = self.connection().cursor()
return self._cursor_holder
def close_connection(self):
if self._cursor_holder and self._cursor_holder.connection and self._cursor_holder.connection.closed == 0:
self._cursor_holder.connection.close()
logger.info("closed patroni connection to the postgresql cluster")
def _query(self, sql, *params):
cursor = None
try:
cursor = self._cursor()
cursor.execute(sql, params)
return cursor
except psycopg2.Error as e:
if cursor and cursor.connection.closed == 0:
raise e
if self.state == 'restarting':
raise RetryFailedError('cluster is being restarted')
raise PostgresConnectionException('connection problems')
def query(self, sql, *params):
try:
return self.retry(self._query, sql, *params)
except RetryFailedError as e:
raise PostgresConnectionException(str(e))
def data_directory_empty(self):
return not os.path.exists(self._data_dir) or os.listdir(self._data_dir) == []
@staticmethod
def initdb_allowed_option(name):
if name in ['pgdata', 'nosync', 'pwfile', 'sync-only']:
raise Exception('{0} option for initdb is not allowed'.format(name))
return True
def get_initdb_options(self, config):
options = []
for o in config:
if isinstance(o, string_types) and self.initdb_allowed_option(o):
options.append('--{0}'.format(o))
elif isinstance(o, dict):
keys = list(o.keys())
if len(keys) != 1 or not isinstance(keys[0], string_types) or not self.initdb_allowed_option(keys[0]):
raise Exception('Invalid option: {0}'.format(o))
options.append('--{0}={1}'.format(keys[0], o[keys[0]]))
else:
raise Exception('Unknown type of initdb option: {0}'.format(o))
return options
def _initialize(self, config):
        self.set_state('initializing new cluster')
options = self.get_initdb_options(config.get('initdb') or [])
pwfile = None
if self._superuser:
if 'username' in self._superuser:
options.append('--username={0}'.format(self._superuser['username']))
if 'password' in self._superuser:
(fd, pwfile) = tempfile.mkstemp()
os.write(fd, self._superuser['password'].encode('utf-8'))
os.close(fd)
options.append('--pwfile={0}'.format(pwfile))
options = ['-o', ' '.join(options)] if options else []
ret = self.pg_ctl('initdb', *options)
if pwfile:
os.remove(pwfile)
if ret:
self.write_pg_hba(config.get('pg_hba', []))
self._major_version = self.get_major_version()
else:
self.set_state('initdb failed')
return ret
def delete_trigger_file(self):
if os.path.exists(self._trigger_file):
os.unlink(self._trigger_file)
def write_pgpass(self, record):
with open(self._pgpass, 'w') as f:
os.fchmod(f.fileno(), 0o600)
f.write('{host}:{port}:*:{user}:{password}\n'.format(**record))
env = os.environ.copy()
env['PGPASSFILE'] = self._pgpass
return env
def replica_method_can_work_without_replication_connection(self, method):
return method != 'basebackup' and self.config and self.config.get(method, {}).get('no_master')
def can_create_replica_without_replication_connection(self):
""" go through the replication methods to see if there are ones
            that do not require a working replication connection.
"""
replica_methods = self.config.get('create_replica_method', [])
return any(self.replica_method_can_work_without_replication_connection(method) for method in replica_methods)
def create_replica(self, clone_member):
"""
create the replica according to the replica_method
defined by the user. this is a list, so we need to
loop through all methods the user supplies
"""
self.set_state('creating replica')
self._sysid = None
# get list of replica methods from config.
# If there is no configuration key, or no value is specified, use basebackup
replica_methods = self.config.get('create_replica_method') or ['basebackup']
if clone_member:
r = clone_member.conn_kwargs(self._replication)
connstring = 'postgres://{user}@{host}:{port}/{database}'.format(**r)
# add the credentials to connect to the replica origin to pgpass.
env = self.write_pgpass(r)
else:
connstring = ''
env = os.environ.copy()
# if we don't have any source, leave only replica methods that work without it
replica_methods = \
[r for r in replica_methods if self.replica_method_can_work_without_replication_connection(r)]
# go through them in priority order
ret = 1
for replica_method in replica_methods:
# if the method is basebackup, then use the built-in
if replica_method == "basebackup":
ret = self.basebackup(connstring, env)
if ret == 0:
logger.info("replica has been created using basebackup")
# if basebackup succeeds, exit with success
break
else:
cmd = replica_method
method_config = {}
# user-defined method; check for configuration
# not required, actually
if replica_method in self.config:
method_config = self.config[replica_method].copy()
# look to see if the user has supplied a full command path
# if not, use the method name as the command
cmd = method_config.pop('command', cmd)
# add the default parameters
try:
method_config.update({"scope": self.scope,
"role": "replica",
"datadir": self._data_dir,
"connstring": connstring})
params = ["--{0}={1}".format(arg, val) for arg, val in method_config.items()]
# call script with the full set of parameters
ret = subprocess.call(shlex.split(cmd) + params, env=env)
# if we succeeded, stop
if ret == 0:
logger.info('replica has been created using %s', replica_method)
break
except Exception:
logger.exception('Error creating replica using method %s', replica_method)
ret = 1
self.set_state('stopped')
return ret
def is_leader(self):
return not self.query('SELECT pg_is_in_recovery()').fetchone()[0]
def is_running(self):
if not (self._version_file_exists() and os.path.isfile(self._postmaster_pid)):
return False
try:
with open(self._postmaster_pid) as f:
pid = int(f.readline())
if pid < 0:
pid = -pid
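                # signal 0 sends nothing; os.kill only raises if the pid does not
                # exist (or cannot be signalled), so postmaster liveness is implied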
return pid > 0 and pid != os.getpid() and pid != os.getppid() and (os.kill(pid, 0) or True)
except Exception:
return False
def call_nowait(self, cb_name):
""" pick a callback command and call it without waiting for it to finish """
if not self.callback or cb_name not in self.callback:
return False
cmd = self.callback[cb_name]
try:
subprocess.Popen(shlex.split(cmd) + [cb_name, self.role, self.scope])
except OSError:
logger.exception('callback %s %s %s %s failed', cmd, cb_name, self.role, self.scope)
return False
return True
@property
def role(self):
with self._role_lock:
return self._role
def set_role(self, value):
with self._role_lock:
self._role = value
@property
def state(self):
with self._state_lock:
return self._state
def set_state(self, value):
with self._state_lock:
self._state = value
def start(self, block_callbacks=False):
if self.is_running():
logger.error('Cannot start PostgreSQL because one is already running.')
return True
self.set_role(self.get_postgres_role_from_data_directory())
if os.path.exists(self._postmaster_pid):
os.remove(self._postmaster_pid)
logger.info('Removed %s', self._postmaster_pid)
if not block_callbacks:
self.set_state('starting')
env = {'PATH': os.environ.get('PATH')}
# pg_ctl will write a FATAL if the username is incorrect. exporting PGUSER if necessary
if 'username' in self._superuser and self._superuser['username'] != os.environ.get('USER'):
env['PGUSER'] = self._superuser['username']
self._write_postgresql_conf()
self.resolve_connection_addresses()
options = ' '.join("--{0}='{1}'".format(p, self._server_parameters[p]) for p, v in self.CMDLINE_OPTIONS.items()
if self._major_version >= v[2])
ret = self.pg_ctl('start', '-o', options, env=env, preexec_fn=os.setsid)
self._pending_restart = False
self.set_state('running' if ret else 'start failed')
self._schedule_load_slots = ret and self.use_slots
self.save_configuration_files()
# block_callbacks is used during restart to avoid
# running start/stop callbacks in addition to restart ones
if ret and not block_callbacks:
self.call_nowait(ACTION_ON_START)
return ret
def checkpoint(self, connect_kwargs=None):
check_not_is_in_recovery = connect_kwargs is not None
connect_kwargs = connect_kwargs or self._local_connect_kwargs
for p in ['connect_timeout', 'options']:
connect_kwargs.pop(p, None)
try:
with psycopg2.connect(**connect_kwargs) as conn:
conn.autocommit = True
with conn.cursor() as cur:
cur.execute("SET statement_timeout = 0")
if check_not_is_in_recovery:
cur.execute('SELECT pg_is_in_recovery()')
if cur.fetchone()[0]:
return 'is_in_recovery=true'
return cur.execute('CHECKPOINT')
except psycopg2.Error:
            logger.exception('Exception during CHECKPOINT')
            return 'not accessible or not healthy'
def stop(self, mode='fast', block_callbacks=False, checkpoint=True):
# make sure we close all connections established against
# the former node, otherwise, we might get a stalled one
# after kill -9, which would report incorrect data to
# patroni.
self.close_connection()
if not self.is_running():
if not block_callbacks:
self.set_state('stopped')
return True
if checkpoint:
self.checkpoint()
if not block_callbacks:
self.set_state('stopping')
ret = self.pg_ctl('stop', '-m', mode)
# block_callbacks is used during restart to avoid
# running start/stop callbacks in addition to restart ones
if not ret:
logger.warning('pg_ctl stop failed')
self.set_state('stop failed')
elif not block_callbacks:
self.set_state('stopped')
self.call_nowait(ACTION_ON_STOP)
return ret
def reload(self):
ret = self.pg_ctl('reload')
if ret:
self.call_nowait(ACTION_ON_RELOAD)
return ret
def restart(self):
self.set_state('restarting')
ret = self.stop(block_callbacks=True) and self.start(block_callbacks=True)
if ret:
self.call_nowait(ACTION_ON_RESTART)
else:
self.set_state('restart failed ({0})'.format(self.state))
return ret
def _write_postgresql_conf(self):
# rename the original configuration if it is necessary
if not os.path.exists(self._postgresql_base_conf):
os.rename(self._postgresql_conf, self._postgresql_base_conf)
with open(self._postgresql_conf, 'w') as f:
f.write('# Do not edit this file manually!\n# It will be overwritten by Patroni!\n')
f.write("include '{0}'\n\n".format(self._postgresql_base_conf_name))
for name, value in sorted(self._server_parameters.items()):
if name not in self.CMDLINE_OPTIONS:
f.write("{0} = '{1}'\n".format(name, value))
def is_healthy(self):
if not self.is_running():
logger.warning('Postgresql is not running.')
return False
return True
def check_replication_lag(self, last_leader_operation):
return (last_leader_operation or 0) - self.xlog_position() <= self.config.get('maximum_lag_on_failover', 0)
def write_pg_hba(self, config):
with open(os.path.join(self._data_dir, 'pg_hba.conf'), 'a') as f:
f.write('\n{}\n'.format('\n'.join(config)))
def primary_conninfo(self, member):
if not (member and member.conn_url):
return None
r = member.conn_kwargs(self._replication)
r.update({'application_name': self.name, 'sslmode': 'prefer', 'sslcompression': '1'})
keywords = 'user password host port sslmode sslcompression application_name'.split()
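        # renders a libpq keyword/value string such as (illustrative values only):
        #   user=replicator password=... host=10.0.0.1 port=5432 sslmode=prefer
        #   sslcompression=1 application_name=node1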
return ' '.join('{0}={{{0}}}'.format(kw) for kw in keywords).format(**r)
def check_recovery_conf(self, primary_conninfo):
if not os.path.isfile(self._recovery_conf):
return False
with open(self._recovery_conf, 'r') as f:
for line in f:
if line.startswith('primary_conninfo'):
return primary_conninfo and (primary_conninfo in line)
return not primary_conninfo
def write_recovery_conf(self, primary_conninfo):
with open(self._recovery_conf, 'w') as f:
f.write("standby_mode = 'on'\nrecovery_target_timeline = 'latest'\n")
if primary_conninfo:
f.write("primary_conninfo = '{0}'\n".format(primary_conninfo))
if self.use_slots:
f.write("primary_slot_name = '{0}'\n".format(self.name))
for name, value in self.config.get('recovery_conf', {}).items():
if name not in ('standby_mode', 'recovery_target_timeline', 'primary_conninfo', 'primary_slot_name'):
f.write("{0} = '{1}'\n".format(name, value))
def rewind(self, r):
# prepare pg_rewind connection
env = self.write_pgpass(r)
dsn = 'user={user} host={host} port={port} dbname={database} sslmode=prefer sslcompression=1'.format(**r)
logger.info('running pg_rewind from %s', dsn)
try:
return subprocess.call(['pg_rewind', '-D', self._data_dir, '--source-server', dsn], env=env) == 0
except OSError:
return False
def controldata(self):
""" return the contents of pg_controldata, or non-True value if pg_controldata call failed """
result = {}
# Don't try to call pg_controldata during backup restore
if self._version_file_exists() and self.state != 'creating replica':
try:
data = subprocess.check_output(['pg_controldata', self._data_dir])
if data:
data = data.decode('utf-8').splitlines()
result = {l.split(':')[0].replace('Current ', '', 1): l.split(':')[1].strip() for l in data if l}
except subprocess.CalledProcessError:
logger.exception("Error when calling pg_controldata")
return result
def read_postmaster_opts(self):
""" returns the list of option names/values from postgres.opts, Empty dict if read failed or no file """
result = {}
try:
with open(os.path.join(self._data_dir, "postmaster.opts")) as f:
data = f.read()
opts = [opt.strip('"\n') for opt in data.split(' "')]
for opt in opts:
if '=' in opt and opt.startswith('--'):
name, val = opt.split('=', 1)
name = name.strip('-')
result[name] = val
except IOError:
logger.exception('Error when reading postmaster.opts')
return result
def single_user_mode(self, command=None, options=None):
""" run a given command in a single-user mode. If the command is empty - then just start and stop """
cmd = ['postgres', '--single', '-D', self._data_dir]
for opt, val in sorted((options or {}).items()):
cmd.extend(['-c', '{0}={1}'.format(opt, val)])
# need a database name to connect
cmd.append(self._database)
p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=open(os.devnull, 'w'), stderr=subprocess.STDOUT)
if p:
if command:
p.communicate('{0}\n'.format(command))
p.stdin.close()
return p.wait()
return 1
def cleanup_archive_status(self):
status_dir = os.path.join(self._data_dir, 'pg_xlog', 'archive_status')
try:
for f in os.listdir(status_dir):
path = os.path.join(status_dir, f)
try:
if os.path.islink(path):
os.unlink(path)
elif os.path.isfile(path):
os.remove(path)
except OSError:
logger.exception("Unable to remove %s", path)
except OSError:
logger.exception("Unable to list %s", status_dir)
def follow(self, member, leader, recovery=False, async_executor=None):
primary_conninfo = self.primary_conninfo(member)
if self.check_recovery_conf(primary_conninfo) and not recovery:
return True
if async_executor:
async_executor.schedule('changing primary_conninfo and restarting')
async_executor.run_async(self._do_follow, (primary_conninfo, leader, recovery))
else:
self._do_follow(primary_conninfo, leader, recovery)
def _do_follow(self, primary_conninfo, leader, recovery=False):
change_role = self.role == 'master'
if change_role:
if leader:
if leader.name == self.name:
self._need_rewind = False
primary_conninfo = None
if self.is_running():
return
else:
self._need_rewind = bool(leader.conn_url) and self.can_rewind
else:
self._need_rewind = False
primary_conninfo = None
if self._need_rewind:
logger.info("set the rewind flag after demote")
self.set_role('unknown')
if self.is_running() and not self.stop():
return logger.warning('Can not run pg_rewind because postgres is still running')
if not (leader and leader.conn_url):
return logger.info('Leader unknown, can not rewind')
# prepare pg_rewind connection
r = leader.conn_kwargs(self._superuser)
# first make sure that we are really trying to rewind
            # from the master and run a checkpoint on it in order to
# make it store the new timeline ([email protected])
leader_status = self.checkpoint(r)
if leader_status:
return logger.warning('Can not use %s for rewind: %s', leader.name, leader_status)
# at present, pg_rewind only runs when the cluster is shut down cleanly
# and not shutdown in recovery. We have to remove the recovery.conf if present
# and start/shutdown in a single user mode to emulate this.
# XXX: if recovery.conf is linked, it will be written anew as a normal file.
if os.path.isfile(self._recovery_conf) or os.path.islink(self._recovery_conf):
os.unlink(self._recovery_conf)
# Archived segments might be useful to pg_rewind,
            # clear the flags that say we should remove them.
self.cleanup_archive_status()
# Start in a single user mode and stop to produce a clean shutdown
opts = self.read_postmaster_opts()
opts.update({'archive_mode': 'on', 'archive_command': 'false'})
self.single_user_mode(options=opts)
if self.rewind(r) or not self.config.get('remove_data_directory_on_rewind_failure', False):
self.write_recovery_conf(primary_conninfo)
ret = self.start()
else:
logger.error('unable to rewind the former master')
self.remove_data_directory()
self.set_role('uninitialized')
ret = True
self._need_rewind = False
else:
self.write_recovery_conf(primary_conninfo)
ret = self.restart()
self.set_role('replica')
if change_role:
self.call_nowait(ACTION_ON_ROLE_CHANGE)
return ret
def save_configuration_files(self):
"""
        copy postgresql.conf to postgresql.conf.backup to be able to retrieve configuration files
- originally stored as symlinks, those are normally skipped by pg_basebackup
- in case of WAL-E basebackup (see http://comments.gmane.org/gmane.comp.db.postgresql.wal-e/239)
"""
try:
for f in self._configuration_to_save:
if os.path.isfile(f):
shutil.copy(f, f + '.backup')
except IOError:
logger.exception('unable to create backup copies of configuration files')
def restore_configuration_files(self):
""" restore a previously saved postgresql.conf """
try:
for f in self._configuration_to_save:
if not os.path.isfile(f) and os.path.isfile(f + '.backup'):
shutil.copy(f + '.backup', f)
except IOError:
logger.exception('unable to restore configuration files from backup')
def promote(self):
if self.role == 'master':
return True
ret = self.pg_ctl('promote')
if ret:
self.set_role('master')
logger.info("cleared rewind flag after becoming the leader")
self._need_rewind = False
self.call_nowait(ACTION_ON_ROLE_CHANGE)
return ret
def create_or_update_role(self, name, password, options):
options = list(map(str.upper, options))
if 'NOLOGIN' not in options and 'LOGIN' not in options:
options.append('LOGIN')
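        # The DO block pins synchronous_commit to 'local' so the role change does not
        # block waiting for synchronous standbys (an inference about intent; the
        # setting only applies to this transaction).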
self.query("""DO $$
BEGIN
SET local synchronous_commit = 'local';
PERFORM * FROM pg_authid WHERE rolname = %s;
IF FOUND THEN
ALTER ROLE "{0}" WITH {1} PASSWORD %s;
ELSE
CREATE ROLE "{0}" WITH {1} PASSWORD %s;
END IF;
END;
$$""".format(name, ' '.join(options)), name, password, password)
def xlog_position(self):
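        # WAL position as an absolute byte offset (diff against '0/0'), so values
        # are directly comparable between cluster members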
return self.query("""SELECT pg_xlog_location_diff(CASE WHEN pg_is_in_recovery()
THEN pg_last_xlog_replay_location()
ELSE pg_current_xlog_location()
END, '0/0')::bigint""").fetchone()[0]
def load_replication_slots(self):
if self.use_slots and self._schedule_load_slots:
cursor = self.query("SELECT slot_name FROM pg_replication_slots WHERE slot_type='physical'")
self._replication_slots = [r[0] for r in cursor]
self._schedule_load_slots = False
def postmaster_start_time(self):
try:
cursor = self.query("""SELECT to_char(pg_postmaster_start_time(), 'YYYY-MM-DD HH24:MI:SS.MS TZ')""")
return cursor.fetchone()[0]
except psycopg2.Error:
return None
def sync_replication_slots(self, cluster):
if self.use_slots:
try:
self.load_replication_slots()
# if the replicatefrom tag is set on the member - we should not create the replication slot for it on
# the current master, because that member would replicate from elsewhere. We still create the slot if
# the replicatefrom destination member is currently not a member of the cluster (fallback to the
# master), or if replicatefrom destination member happens to be the current master
if self.role == 'master':
slots = [m.name for m in cluster.members if m.name != self.name and
(m.replicatefrom is None or m.replicatefrom == self.name or
not cluster.has_member(m.replicatefrom))]
else:
# only manage slots for replicas that replicate from this one, except for the leader among them
slots = [m.name for m in cluster.members if m.replicatefrom == self.name and
m.name != cluster.leader.name]
# drop unused slots
for slot in set(self._replication_slots) - set(slots):
self.query("""SELECT pg_drop_replication_slot(%s)
WHERE EXISTS(SELECT 1 FROM pg_replication_slots
WHERE slot_name = %s)""", slot, slot)
# create new slots
for slot in set(slots) - set(self._replication_slots):
self.query("""SELECT pg_create_physical_replication_slot(%s)
WHERE NOT EXISTS (SELECT 1 FROM pg_replication_slots
WHERE slot_name = %s)""", slot, slot)
self._replication_slots = slots
except psycopg2.Error:
logger.exception('Exception when changing replication slots')
def last_operation(self):
return str(self.xlog_position())
def clone(self, clone_member):
"""
- initialize the replica from an existing member (master or replica)
- initialize the replica using the replica creation method that
works without the replication connection (i.e. restore from on-disk
base backup)
"""
ret = self.create_replica(clone_member) == 0
if ret:
self._major_version = self.get_major_version()
self.delete_trigger_file()
self.restore_configuration_files()
return ret
def bootstrap(self, config):
""" Initialize a new node from scratch and start it. """
if self._initialize(config) and self.start():
for name, value in config['users'].items():
if name not in (self._superuser.get('username'), self._replication['username']):
self.create_or_update_role(name, value['password'], value.get('options', []))
self.create_or_update_role(self._replication['username'], self._replication['password'], ['REPLICATION'])
else:
raise PostgresException("Could not bootstrap master PostgreSQL")
def move_data_directory(self):
if os.path.isdir(self._data_dir) and not self.is_running():
try:
new_name = '{0}_{1}'.format(self._data_dir, time.strftime('%Y-%m-%d-%H-%M-%S'))
logger.info('renaming data directory to %s', new_name)
os.rename(self._data_dir, new_name)
except OSError:
logger.exception("Could not rename data directory %s", self._data_dir)
def remove_data_directory(self):
logger.info('Removing data directory: %s', self._data_dir)
try:
if os.path.islink(self._data_dir):
os.unlink(self._data_dir)
elif not os.path.exists(self._data_dir):
return
elif os.path.isfile(self._data_dir):
os.remove(self._data_dir)
elif os.path.isdir(self._data_dir):
shutil.rmtree(self._data_dir)
except (IOError, OSError):
logger.exception('Could not remove data directory %s', self._data_dir)
self.move_data_directory()
def basebackup(self, conn_url, env):
# creates a replica data dir using pg_basebackup.
# this is the default, built-in create_replica_method
# tries twice, then returns failure (as 1)
# uses "stream" as the xlog-method to avoid sync issues
maxfailures = 2
ret = 1
for bbfailures in range(0, maxfailures):
try:
ret = subprocess.call(['pg_basebackup', '--pgdata=' + self._data_dir,
'--xlog-method=stream', "--dbname=" + conn_url], env=env)
if ret == 0:
break
except Exception as e:
logger.error('Error when fetching backup with pg_basebackup: {0}'.format(e))
if bbfailures < maxfailures - 1:
logger.error('Trying again in 5 seconds')
time.sleep(5)
return ret
@staticmethod
def postgres_version_to_int(pg_version):
""" Convert the server_version to integer
>>> Postgresql.postgres_version_to_int('9.5.3')
90503
>>> Postgresql.postgres_version_to_int('9.3.13')
90313
>>> Postgresql.postgres_version_to_int('10.1')
100001
>>> Postgresql.postgres_version_to_int('10')
Traceback (most recent call last):
...
Exception: Invalid PostgreSQL format: X.Y or X.Y.Z is accepted: 10
>>> Postgresql.postgres_version_to_int('a.b.c')
Traceback (most recent call last):
...
Exception: Invalid PostgreSQL version: a.b.c
"""
components = pg_version.split('.')
result = []
if len(components) < 2 or len(components) > 3:
raise Exception("Invalid PostgreSQL format: X.Y or X.Y.Z is accepted: {0}".format(pg_version))
if len(components) == 2:
            # new style version numbers, i.e. 10.1 becomes 100001
components.insert(1, '0')
try:
result = [c if int(c) > 10 else '0{0}'.format(c) for c in components]
result = int(''.join(result))
except ValueError:
raise Exception("Invalid PostgreSQL version: {0}".format(pg_version))
return result
|
py | 7dff8b584adc3302f254b13cdf41a9d2a58fa131 |
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APIClient
import json
import uuid
from datetime import datetime, timedelta
from .models import Reservation
from atlas.comparer import APITestCaseExtend, APITestClient
from backend.common_test import CommonSetup
# Create your tests here.
class ReservationModuleTest(APITestCaseExtend):
def setUp(self):
self.client = APITestClient()
self.maxDiff = None
dummy = self.dummy = CommonSetup(hospital=1, disease=1, customer=1, patient=1)
self.hospital_id = dummy.hospital[0]
self.disease_id = dummy.disease[0]
payload = [
{
"hospital_id": self.hospital_id,
"diseases": [
{
"disease_id": self.disease_id,
"date_slots": [
{
"date": datetime(2018, 1, 1) + timedelta(days=dt*7),
"quantity": 1,
"type": "add"
}
for dt in range(2)
]
}
]
}
]
resp_info = self.client.json(method="POST", call_name="slot_publish_batch", data=payload)
self.timeslot_ids = list(map(uuid.UUID, resp_info['created']))
def test_all_workflows(self):
resv_init_sample = {
'user_id': self.dummy.customer[0],
'patient_id': self.dummy.patient[0],
'hospital_id': self.hospital_id,
'disease_id': self.disease_id,
'timeslot': self.timeslot_ids[0],
}
# Test create
resobj = self.client.json(method="PUT", call_name="reservation_init", data=resv_init_sample)
self.assertEqual(Reservation.objects.count(), 1)
# Test get and create result
resvid = resobj['rid']
get_resv_url = reverse("reservation_get", kwargs={'resid': resvid})
response = self.client.get(get_resv_url)
self.assertEqual(response.status_code, 200)
response_obj = json.loads(response.content)
self.assertJSONIntersectEqual(resv_init_sample, response_obj)
self.assertIsNotNone(response_obj['ctime'])
self.assertIsNone(response_obj['commit_at'])
self.assertJSONIntersectEqual(response_obj, resv_init_sample)
commit_resv_url = reverse("reservation_commit", kwargs={'resid': resvid})
update_resv_url = reverse("reservation_update", kwargs={'resid': resvid})
# test empty commit failure
self.assertEqual(self.client.post(commit_resv_url).status_code, 400)
# test update field
extra_fields_sample = {
"first_hospital": "Beijing Hexie Hospital",
"first_doctor_name": "Crab River",
"first_doctor_contact": "+86 13802332333",
}
response = self.client.post(update_resv_url, data=extra_fields_sample)
self.assertEqual(response.status_code, 200)
self.assertEqual(set(json.loads(response.content)['updated_fields']), set(extra_fields_sample))
response_obj = json.loads(self.client.get(get_resv_url).content)
self.assertJSONIntersectEqual(response_obj, dict(resv_init_sample, **extra_fields_sample))
# test empty commit success
response = self.client.post(commit_resv_url)
self.assertEqual(response.status_code, 204)
response_obj = json.loads(self.client.get(get_resv_url).content)
self.assertIsNotNone(response_obj['commit_at'])
overwrite_sample = {
"first_doctor_name": "Dr. He, Xie",
"timeslot": self.timeslot_ids[1],
}
# test update failure
self.assertEqual(self.client.post(update_resv_url, data=overwrite_sample).status_code, 400)
# test update success
del overwrite_sample['timeslot']
response = self.client.post(update_resv_url, data=overwrite_sample)
self.assertEqual(response.status_code, 200)
response_obj = json.loads(self.client.get(get_resv_url).content)
self.assertJSONIntersectEqual(response_obj, dict(extra_fields_sample, **overwrite_sample))
def test_insufficient_slot(self):
place_taker_1 = {
'user_id': self.dummy.customer[0],
'patient_id': self.dummy.patient[0],
'hospital_id': self.hospital_id,
'disease_id': self.disease_id,
'timeslot': self.timeslot_ids[0],
}
place_taker_2 = {
'user_id': self.dummy.customer[0],
'patient_id': self.dummy.patient[0],
'hospital_id': self.hospital_id,
'disease_id': self.disease_id,
'timeslot': self.timeslot_ids[1],
}
# setup
create_resv_url = reverse("reservation_init")
response = self.client.put(create_resv_url, place_taker_1, format='json')
self.assertEqual(response.status_code, 200)
response = self.client.put(create_resv_url, place_taker_2, format='json')
self.assertEqual(response.status_code, 200)
self.assertEqual(Reservation.objects.count(), 2)
rid2 = json.loads(response.content)['rid']
# add one more
response = self.client.put(create_resv_url, place_taker_1, format='json')
self.assertEqual(response.status_code, 400)
self.assertEqual(json.loads(response.content)['error'], "InsufficientSpaceException")
update_resv_url = reverse("reservation_update", kwargs={'resid': rid2})
# update to full time slot should fail and keep current slot
response = self.client.post(update_resv_url, data={'timeslot': self.timeslot_ids[0]})
self.assertEqual(response.status_code, 400)
self.assertEqual(json.loads(response.content)['error'], "InsufficientSpaceException")
self.assertEqual(Reservation.objects.get(res_id=rid2).timeslot_id, self.timeslot_ids[1])
self.client.post(
reverse("slot_publish_batch"),
[
{
"hospital_id": self.hospital_id,
"diseases": [
{
"disease_id": self.disease_id,
"date_slots": [
{
"date": datetime(2018, 1, 1),
"quantity": 1,
"type": "add"
}
for dt in range(2)
]
}
]
}
],
format='json'
)
response = self.client.post(update_resv_url, data={'timeslot': self.timeslot_ids[0]})
self.assertEqual(response.status_code, 200)
self.assertEqual(Reservation.objects.get(res_id=rid2).timeslot_id, self.timeslot_ids[0])
|
py | 7dff8bc8a0a43f096e4013f3a141632ee65b15c8 | from django.contrib import admin
from clinic.patients.models import Patient
admin.site.register(Patient)
|
py | 7dff8c77693c9e4006537512282d9542e50341d6 | from setuptools import setup, find_packages
install_requires = [
'botocore>=1.12.54',
'python-dateutil>=2.1,<3.0.0',
'amazon-dax-client>=1.1.7'
]
setup(
name='pynamodb-dax',
version=__import__('pynamodb').__version__,
packages=find_packages(),
url='https://github.com/thanakijwanavit/PynamoDB',
author='Nic Wanavit (fork)',
author_email='[email protected]',
description='fork to pynamodb for supporting dax',
long_description=open('README.rst').read(),
zip_safe=False,
license='MIT',
keywords='python dynamodb amazon dax',
install_requires=install_requires,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Programming Language :: Python',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.8',
'License :: OSI Approved :: MIT License',
],
extras_require={
'signals': ['blinker>=1.3,<2.0']
},
package_data={'pynamodb': ['py.typed']},
)
|
py | 7dff8d29e6f804ecfa16dc9ebc7c7cd9e8e275a3 | import torch as th
import torch.nn as nn
from torch.autograd import Variable
from latent_dialog.base_models import BaseModel
from latent_dialog.corpora import SYS, EOS, PAD
from latent_dialog.utils import INT, FLOAT, LONG, Pack
from latent_dialog.enc2dec.encoders import EncoderRNN, RnnUttEncoder, MlpGoalEncoder
from latent_dialog.nn_lib import IdentityConnector, Bi2UniConnector
from latent_dialog.enc2dec.decoders import DecoderRNN, GEN, GEN_VALID, TEACH_FORCE
from latent_dialog.criterions import NLLEntropy, NLLEntropy4CLF, CombinedNLLEntropy4CLF
import latent_dialog.utils as utils
import latent_dialog.nn_lib as nn_lib
import latent_dialog.criterions as criterions
import numpy as np
class HRED(BaseModel):
def __init__(self, corpus, config):
super(HRED, self).__init__(config)
self.vocab = corpus.vocab
self.vocab_dict = corpus.vocab_dict
self.vocab_size = len(self.vocab)
self.movie_vocab = corpus.movie_vocab
self.movie_vocab_dict = corpus.movie_vocab_dict
self.movie_vocab_size = len(self.movie_vocab)
# self.outcome_vocab = corpus.outcome_vocab
# self.outcome_vocab_dict = corpus.outcome_vocab_dict
# self.outcome_vocab_size = len(self.outcome_vocab)
self.sys_id = self.vocab_dict[SYS]
self.eos_id = self.vocab_dict[EOS]
self.pad_id = self.vocab_dict[PAD]
self.movie_encoder = MlpGoalEncoder(movie_vocab_size=self.movie_vocab_size,
k=config.k,
nembed=config.movie_embed_size,
nhid=config.movie_nhid,
init_range=config.init_range)
self.embedding = nn.Embedding(self.vocab_size, config.embed_size, padding_idx=self.pad_id)
self.utt_encoder = RnnUttEncoder(vocab_size=self.vocab_size,
embedding_dim=config.embed_size,
feat_size=1,
movie_nhid=config.movie_nhid,
rnn_cell=config.utt_rnn_cell,
utt_cell_size=config.utt_cell_size,
num_layers=config.num_layers,
input_dropout_p=config.dropout,
output_dropout_p=config.dropout,
bidirectional=config.bi_utt_cell,
variable_lengths=False,
use_attn=config.enc_use_attn,
embedding=self.embedding)
self.ctx_encoder = EncoderRNN(input_dropout_p=0.0,
rnn_cell=config.ctx_rnn_cell,
# input_size=self.utt_encoder.output_size+config.movie_nhid,
input_size=self.utt_encoder.output_size,
hidden_size=config.ctx_cell_size,
num_layers=config.num_layers,
output_dropout_p=config.dropout,
bidirectional=config.bi_ctx_cell,
variable_lengths=False)
# TODO connector
if config.bi_ctx_cell:
self.connector = Bi2UniConnector(rnn_cell=config.ctx_rnn_cell,
num_layer=1,
hidden_size=config.ctx_cell_size,
output_size=config.dec_cell_size)
else:
self.connector = IdentityConnector()
self.decoder = DecoderRNN(input_dropout_p=config.dropout,
rnn_cell=config.dec_rnn_cell,
input_size=config.embed_size + config.movie_nhid,
hidden_size=config.dec_cell_size,
num_layers=config.num_layers,
output_dropout_p=config.dropout,
bidirectional=False,
vocab_size=self.vocab_size,
use_attn=config.dec_use_attn,
ctx_cell_size=self.ctx_encoder.output_size,
attn_mode=config.dec_attn_mode,
sys_id=self.sys_id,
eos_id=self.eos_id,
use_gpu=config.use_gpu,
max_dec_len=config.max_dec_len,
embedding=self.embedding)
self.nll = NLLEntropy(self.pad_id, config.avg_type)
def forward(self, data_feed, mode, clf=False, gen_type='greedy', use_py=None, return_latent=False):
clf = False
if not clf:
ctx_lens = data_feed['context_lens'] # (batch_size, )
ctx_utts = self.np2var(data_feed['contexts'], LONG) # (batch_size, max_ctx_len, max_utt_len)
ctx_confs = self.np2var(data_feed['context_confs'], FLOAT) # (batch_size, max_ctx_len)
out_utts = self.np2var(data_feed['outputs'], LONG) # (batch_size, max_out_len)
movies = self.np2var(data_feed['movies'], LONG) # (batch_size, movie_len)
batch_size = len(ctx_lens)
# encode movie info (EDITED, not used for now)
# movies_h = self.movie_encoder(movies) # (batch_size, movie_nhid)
movies_h = None
enc_inputs, _, _ = self.utt_encoder(ctx_utts, feats=ctx_confs,
movies=movies_h) # (batch_size, max_ctx_len, num_directions*utt_cell_size)
# enc_outs: (batch_size, max_ctx_len, ctx_cell_size)
# enc_last: tuple, (h_n, c_n)
# h_n: (num_layers*num_directions, batch_size, ctx_cell_size)
# c_n: (num_layers*num_directions, batch_size, ctx_cell_size)
enc_outs, enc_last = self.ctx_encoder(enc_inputs, input_lengths=ctx_lens, movies=None)
# get decoder inputs
dec_inputs = out_utts[:, :-1]
labels = out_utts[:, 1:].contiguous()
# pack attention context
if self.config.dec_use_attn:
attn_context = enc_outs
else:
attn_context = None
# create decoder initial states
dec_init_state = self.connector(enc_last)
# decode
dec_outputs, dec_hidden_state, ret_dict = self.decoder(batch_size=batch_size,
dec_inputs=dec_inputs,
# (batch_size, movie_size-1)
dec_init_state=dec_init_state, # tuple: (h, c)
attn_context=attn_context,
# (batch_size, max_ctx_len, ctx_cell_size)
mode=mode,
gen_type=gen_type,
beam_size=self.config.beam_size,
movie_hid=None) # (batch_size, movie_nhid)
if mode == GEN:
return ret_dict, labels
if return_latent:
return Pack(nll=self.nll(dec_outputs, labels),
latent_action=dec_init_state)
else:
return Pack(nll=self.nll(dec_outputs, labels))
class GaussHRED(BaseModel):
def __init__(self, corpus, config):
super(GaussHRED, self).__init__(config)
self.vocab = corpus.vocab
self.vocab_dict = corpus.vocab_dict
self.vocab_size = len(self.vocab)
self.movie_vocab = corpus.movie_vocab
self.movie_vocab_dict = corpus.movie_vocab_dict
self.movie_vocab_size = len(self.movie_vocab)
# self.outcome_vocab = corpus.outcome_vocab
# self.outcome_vocab_dict = corpus.outcome_vocab_dict
# self.outcome_vocab_size = len(self.outcome_vocab)
self.sys_id = self.vocab_dict[SYS]
self.eos_id = self.vocab_dict[EOS]
self.pad_id = self.vocab_dict[PAD]
self.simple_posterior = config.simple_posterior
self.movie_encoder = MlpGoalEncoder(movie_vocab_size=self.movie_vocab_size,
k=config.k,
nembed=config.movie_embed_size,
nhid=config.movie_nhid,
init_range=config.init_range)
self.embedding = nn.Embedding(self.vocab_size, config.embed_size, padding_idx=self.pad_id)
self.utt_encoder = RnnUttEncoder(vocab_size=self.vocab_size,
embedding_dim=config.embed_size,
feat_size=0,
movie_nhid=config.movie_nhid,
rnn_cell=config.utt_rnn_cell,
utt_cell_size=config.utt_cell_size,
num_layers=config.num_layers,
input_dropout_p=config.dropout,
output_dropout_p=config.dropout,
bidirectional=config.bi_utt_cell,
variable_lengths=False,
use_attn=config.enc_use_attn,
embedding=self.embedding)
self.ctx_encoder = EncoderRNN(input_dropout_p=0.0,
rnn_cell=config.ctx_rnn_cell,
# input_size=self.utt_encoder.output_size+config.movie_nhid,
input_size=self.utt_encoder.output_size,
hidden_size=config.ctx_cell_size,
num_layers=config.num_layers,
output_dropout_p=config.dropout,
bidirectional=config.bi_ctx_cell,
variable_lengths=False)
# mu and logvar projector
self.c2z = nn_lib.Hidden2Gaussian(self.utt_encoder.output_size, config.y_size, is_lstm=False)
self.gauss_connector = nn_lib.GaussianConnector(self.use_gpu)
self.z_embedding = nn.Linear(config.y_size, config.dec_cell_size)
if not self.simple_posterior:
self.xc2z = nn_lib.Hidden2Gaussian(self.utt_encoder.output_size+self.ctx_encoder.output_size, config.y_size, is_lstm=False)
self.decoder = DecoderRNN(input_dropout_p=config.dropout,
rnn_cell=config.dec_rnn_cell,
input_size=config.embed_size + config.movie_nhid,
hidden_size=config.dec_cell_size,
num_layers=config.num_layers,
output_dropout_p=config.dropout,
bidirectional=False,
vocab_size=self.vocab_size,
use_attn=config.dec_use_attn,
ctx_cell_size=self.ctx_encoder.output_size,
attn_mode=config.dec_attn_mode,
sys_id=self.sys_id,
eos_id=self.eos_id,
use_gpu=config.use_gpu,
max_dec_len=config.max_dec_len,
embedding=self.embedding)
self.nll = NLLEntropy(self.pad_id, config.avg_type)
self.gauss_kl = criterions.NormKLLoss(unit_average=True)
self.zero = utils.cast_type(th.zeros(1), FLOAT, self.use_gpu)
def valid_loss(self, loss, batch_cnt=None):
if self.simple_posterior:
total_loss = loss.nll
if self.config.use_pr > 0.0:
total_loss += self.config.beta * loss.pi_kl
else:
total_loss = loss.nll + loss.pi_kl
return total_loss
def gaussian_logprob(self, mu, logvar, sample_z):
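        # elementwise log-density of a diagonal Gaussian:
        # log N(z; mu, var) = -0.5*log(2*pi) - 0.5*logvar - (z - mu)^2 / (2*var)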
var = th.exp(logvar)
constant = float(-0.5 * np.log(2*np.pi))
logprob = constant - 0.5 * logvar - th.pow((mu-sample_z), 2) / (2.0*var)
return logprob
def z2dec(self, last_h, requires_grad):
p_mu, p_logvar = self.c2z(last_h)
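        # with gradients required: draw a reparameterized (differentiable) sample;
        # otherwise: sample, detach, and keep the summed log-prob of the draw, which
        # policy-gradient style fine-tuning can use (an assumption about downstream use)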
if requires_grad:
sample_z = self.gauss_connector(p_mu, p_logvar)
joint_logpz = None
else:
sample_z = th.normal(p_mu, th.sqrt(th.exp(p_logvar))).detach()
logprob_sample_z = self.gaussian_logprob(p_mu, p_logvar, sample_z)
joint_logpz = th.sum(logprob_sample_z.squeeze(0), dim=1)
dec_init_state = self.z_embedding(sample_z)
attn_context = None
if self.config.dec_rnn_cell == 'lstm':
dec_init_state = tuple([dec_init_state, dec_init_state])
return dec_init_state, attn_context, joint_logpz
def forward(self, data_feed, mode, clf=False, gen_type='greedy', use_py=None, return_latent=False):
ctx_lens = data_feed['context_lens'] # (batch_size, )
ctx_utts = self.np2var(data_feed['contexts'], LONG) # (batch_size, max_ctx_len, max_utt_len)
out_utts = self.np2var(data_feed['outputs'], LONG) # (batch_size, max_out_len)
movies = self.np2var(data_feed['movies'], LONG) # (batch_size, movie_len)
batch_size = len(ctx_lens)
# encode movie info
# movies_h = self.movie_encoder(movies) # (batch_size, movie_nhid)
movies_h = None
enc_inputs, _, _ = self.utt_encoder(ctx_utts, movies=movies_h)
# (batch_size, max_ctx_len, num_directions*utt_cell_size)
# enc_outs: (batch_size, max_ctx_len, ctx_cell_size)
# enc_last: tuple, (h_n, c_n)
# h_n: (num_layers*num_directions, batch_size, ctx_cell_size)
# c_n: (num_layers*num_directions, batch_size, ctx_cell_size)
enc_outs, enc_last = self.ctx_encoder(enc_inputs, input_lengths=ctx_lens, movies=None)
# get decoder inputs
dec_inputs = out_utts[:, :-1]
labels = out_utts[:, 1:].contiguous()
# create decoder initial states
if self.simple_posterior:
q_mu, q_logvar = self.c2z(enc_last)
sample_z = self.gauss_connector(q_mu, q_logvar)
p_mu, p_logvar = self.zero, self.zero
else:
p_mu, p_logvar = self.c2z(enc_last)
# encode movie and use posterior to find q(z|x, c)
x_h, _, _ = self.utt_encoder(out_utts.unsqueeze(1), movies=movies_h)
q_mu, q_logvar = self.xc2z(th.cat([enc_last, x_h.squeeze(1).unsqueeze(0)], dim=2))
# use prior at inference time, otherwise use posterior
if mode == GEN or use_py:
sample_z = self.gauss_connector(p_mu, p_logvar)
else:
sample_z = self.gauss_connector(q_mu, q_logvar)
# pack attention context
dec_init_state = self.z_embedding(sample_z)
attn_context = None
        # an LSTM decoder expects its initial state as an (h, c) tuple
if self.config.dec_rnn_cell == 'lstm':
dec_init_state = tuple([dec_init_state, dec_init_state])
# decode
dec_outputs, dec_hidden_state, ret_dict = self.decoder(batch_size=batch_size,
dec_inputs=dec_inputs,
# (batch_size, movie_size-1)
dec_init_state=dec_init_state, # tuple: (h, c)
attn_context=attn_context,
# (batch_size, max_ctx_len, ctx_cell_size)
mode=mode,
gen_type=gen_type,
beam_size=self.config.beam_size,
movie_hid=movies_h) # (batch_size, movie_nhid)
if mode == GEN:
ret_dict['sample_z'] = sample_z
return ret_dict, labels
else:
result = Pack(nll=self.nll(dec_outputs, labels))
pi_kl = self.gauss_kl(q_mu, q_logvar, p_mu, p_logvar)
result['pi_kl'] = pi_kl
return result
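# Note added for orientation (not part of the original source): the class above keeps the
# latent action z continuous -- its c2z head returns a Gaussian mean/log-variance, sampling
# goes through gauss_connector and the KL term uses NormKLLoss -- whereas CatHRED below uses
# discrete latents sampled with a Gumbel-Softmax connector (Hidden2Discrete, CatKLLoss).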
class CatHRED(BaseModel):
def __init__(self, corpus, config):
super(CatHRED, self).__init__(config)
self.vocab = corpus.vocab
self.vocab_dict = corpus.vocab_dict
self.vocab_size = len(self.vocab)
self.movie_vocab = corpus.movie_vocab
self.movie_vocab_dict = corpus.movie_vocab_dict
self.movie_vocab_size = len(self.movie_vocab)
# self.outcome_vocab = corpus.outcome_vocab
# self.outcome_vocab_dict = corpus.outcome_vocab_dict
# self.outcome_vocab_size = len(self.outcome_vocab)
self.sys_id = self.vocab_dict[SYS]
self.eos_id = self.vocab_dict[EOS]
self.pad_id = self.vocab_dict[PAD]
self.simple_posterior = config.simple_posterior
self.movie_encoder = MlpGoalEncoder(movie_vocab_size=self.movie_vocab_size,
k=config.k,
nembed=config.movie_embed_size,
nhid=config.movie_nhid,
init_range=config.init_range)
self.embedding = nn.Embedding(self.vocab_size, config.embed_size, padding_idx=self.pad_id)
self.utt_encoder = RnnUttEncoder(vocab_size=self.vocab_size,
embedding_dim=config.embed_size,
feat_size=0,
movie_nhid=config.movie_nhid,
rnn_cell=config.utt_rnn_cell,
utt_cell_size=config.utt_cell_size,
num_layers=config.num_layers,
input_dropout_p=config.dropout,
output_dropout_p=config.dropout,
bidirectional=config.bi_utt_cell,
variable_lengths=False,
use_attn=config.enc_use_attn,
embedding=self.embedding)
self.ctx_encoder = EncoderRNN(input_dropout_p=0.0,
rnn_cell=config.ctx_rnn_cell,
# input_size=self.utt_encoder.output_size+config.movie_nhid,
input_size=self.utt_encoder.output_size,
hidden_size=config.ctx_cell_size,
num_layers=config.num_layers,
output_dropout_p=config.dropout,
bidirectional=config.bi_ctx_cell,
variable_lengths=False)
# mu and logvar projector
self.c2z = nn_lib.Hidden2Discrete(self.ctx_encoder.output_size, config.y_size, config.k_size,
is_lstm=config.ctx_rnn_cell == 'lstm')
if not self.simple_posterior:
self.xc2z = nn_lib.Hidden2Discrete(self.ctx_encoder.output_size + self.utt_encoder.output_size,
config.y_size, config.k_size, is_lstm=False)
self.gumbel_connector = nn_lib.GumbelConnector(config.use_gpu)
self.z_embedding = nn.Linear(config.y_size * config.k_size, config.dec_cell_size, bias=False)
self.decoder = DecoderRNN(input_dropout_p=config.dropout,
rnn_cell=config.dec_rnn_cell,
input_size=config.embed_size + config.movie_nhid,
hidden_size=config.dec_cell_size,
num_layers=config.num_layers,
output_dropout_p=config.dropout,
bidirectional=False,
vocab_size=self.vocab_size,
use_attn=config.dec_use_attn,
ctx_cell_size=self.ctx_encoder.output_size,
attn_mode=config.dec_attn_mode,
sys_id=self.sys_id,
eos_id=self.eos_id,
use_gpu=config.use_gpu,
max_dec_len=config.max_dec_len,
embedding=self.embedding)
self.nll = NLLEntropy(self.pad_id, config.avg_type)
self.cat_kl_loss = criterions.CatKLLoss()
self.entropy_loss = criterions.Entropy()
self.log_uniform_y = Variable(th.log(th.ones(1) / config.k_size))
if self.use_gpu:
self.log_uniform_y = self.log_uniform_y.cuda()
def valid_loss(self, loss, batch_cnt=None):
if self.simple_posterior:
total_loss = loss.nll
if self.config.use_pr > 0.0:
total_loss -= self.config.beta * loss.pi_kl
else:
total_loss = loss.nll + loss.pi_kl
return total_loss
def z2dec(self, last_h, requires_grad):
logits, log_qy = self.c2z(last_h)
if requires_grad:
sample_y = self.gumbel_connector(logits)
logprob_z = None
else:
idx = th.multinomial(th.exp(log_qy), 1).detach()
logprob_z = th.sum(log_qy.gather(1, idx))
sample_y = utils.cast_type(Variable(th.zeros(log_qy.size())), FLOAT, self.use_gpu)
sample_y.scatter_(1, idx, 1.0)
if self.config.dec_use_attn:
z_embeddings = th.t(self.z_embedding.weight).split(self.config.k_size, dim=0)
attn_context = []
temp_sample_y = sample_y.view(-1, self.config.y_size, self.config.k_size)
for z_id in range(self.config.y_size):
attn_context.append(th.mm(temp_sample_y[:, z_id], z_embeddings[z_id]).unsqueeze(1))
attn_context = th.cat(attn_context, dim=1)
dec_init_state = th.sum(attn_context, dim=1).unsqueeze(0)
else:
attn_context = None
dec_init_state = self.z_embedding(sample_y.view(1, -1, self.config.y_size * self.config.k_size))
return dec_init_state, attn_context, logprob_z
def forward(self, data_feed, mode, clf=False, gen_type='greedy', use_py=None, return_latent=False):
ctx_lens = data_feed['context_lens'] # (batch_size, )
ctx_utts = self.np2var(data_feed['contexts'], LONG) # (batch_size, max_ctx_len, max_utt_len)
out_utts = self.np2var(data_feed['outputs'], LONG) # (batch_size, max_out_len)
movies = self.np2var(data_feed['movies'], LONG) # (batch_size, movie_len)
batch_size = len(ctx_lens)
# encode movie info
# movies_h = self.movie_encoder(movies) # (batch_size, movie_nhid)
movies_h = None
enc_inputs, _, _ = self.utt_encoder(ctx_utts, movies=movies_h)
# (batch_size, max_ctx_len, num_directions*utt_cell_size)
# enc_outs: (batch_size, max_ctx_len, ctx_cell_size)
# enc_last: tuple, (h_n, c_n)
# h_n: (num_layers*num_directions, batch_size, ctx_cell_size)
# c_n: (num_layers*num_directions, batch_size, ctx_cell_size)
enc_outs, enc_last = self.ctx_encoder(enc_inputs, input_lengths=ctx_lens, movies=None)
# get decoder inputs
dec_inputs = out_utts[:, :-1]
labels = out_utts[:, 1:].contiguous()
# create decoder initial states
if self.simple_posterior:
logits_qy, log_qy = self.c2z(enc_last)
sample_y = self.gumbel_connector(logits_qy)
log_py = self.log_uniform_y
else:
logits_py, log_py = self.c2z(enc_last)
# encode movie and use posterior to find q(z|x, c)
x_h, _, _ = self.utt_encoder(out_utts.unsqueeze(1), movies=movies_h)
logits_qy, log_qy = self.xc2z(th.cat([enc_last, x_h.squeeze(1).unsqueeze(0)], dim=2))
# use prior at inference time, otherwise use posterior
if mode == GEN or use_py:
sample_y = self.gumbel_connector(logits_py)
else:
sample_y = self.gumbel_connector(logits_qy)
# pack attention context
if self.config.dec_use_attn:
z_embeddings = th.t(self.z_embedding.weight).split(self.config.k_size, dim=0)
attn_context = []
temp_sample_y = sample_y.view(-1, self.config.y_size, self.config.k_size)
for z_id in range(self.config.y_size):
attn_context.append(th.mm(temp_sample_y[:, z_id], z_embeddings[z_id]).unsqueeze(1))
attn_context = th.cat(attn_context, dim=1)
dec_init_state = th.sum(attn_context, dim=1).unsqueeze(0)
else:
attn_context = None
dec_init_state = self.z_embedding(sample_y.view(1, -1, self.config.y_size * self.config.k_size))
# decode
dec_outputs, dec_hidden_state, ret_dict = self.decoder(batch_size=batch_size,
dec_inputs=dec_inputs,
# (batch_size, movie_size-1)
dec_init_state=dec_init_state, # tuple: (h, c)
attn_context=attn_context,
# (batch_size, max_ctx_len, ctx_cell_size)
mode=mode,
gen_type=gen_type,
beam_size=self.config.beam_size,
movie_hid=movies_h) # (batch_size, movie_nhid)
if mode == GEN:
return ret_dict, labels
else:
# regularization qy to be uniform
avg_log_qy = th.exp(log_qy.view(-1, self.config.y_size, self.config.k_size))
avg_log_qy = th.log(th.mean(avg_log_qy, dim=0) + 1e-15)
mi = self.entropy_loss(avg_log_qy, unit_average=True) - self.entropy_loss(log_qy, unit_average=True)
pi_kl = self.cat_kl_loss(log_qy, log_py, batch_size, unit_average=True)
pi_h = self.entropy_loss(log_qy, unit_average=True)
results = Pack(nll=self.nll(dec_outputs, labels), mi=mi, pi_kl=pi_kl, pi_h=pi_h)
if return_latent:
results['latent_action'] = dec_init_state
return results
|
py | 7dff8d4aa28cefaf6d5dd4576dfa185664b93c01 | """
This file contains implementations of several neural network models
"""
import sys
import tensorflow as tf
from keras.models import Model
from keras.layers import Input, Conv2D, Flatten, Dense, Concatenate, Lambda, Subtract, Add
from keras import backend as K
from keras import optimizers, losses
from keras.utils import plot_model
from tools import get_name
def err_print(*args, **kwargs):
"""
method for printing to stderr
"""
print(*args, file=sys.stderr, **kwargs)
def visualize_model(model, plot_mdl=[True, False]):
"""
method prints model to stdout and pdf
"""
if plot_mdl[0]:
model.summary()
if plot_mdl[1]:
name = get_name("model")
plot_model(model, to_file=name, show_shapes=True, show_layer_names=False)
def mse_mae(y_true, y_pred):
"""
loss function, which combines MSE and MAE
"""
error = y_true - y_pred
cond = K.abs(error) < 1.0
MSE = K.pow(error, 2)
MAE = K.abs(error)
loss = tf.where(cond, MSE, MAE)
return K.mean(loss)
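# Worked example (illustrative comment, not part of the original file): for a prediction
# error of 0.5 the |error| < 1.0 branch applies and the contribution is 0.5**2 = 0.25,
# while an error of 3.0 falls through to the MAE branch and contributes 3.0, so large
# errors grow linearly instead of quadratically -- the same idea as the Huber loss option.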
class Network:
"""
class implements several neural network models
"""
def __init__(self, state_size, action_size, learning_rate, loss, plot_model=[True, False]):
self.state_size = state_size
self.action_size = action_size
self.learning_rate = learning_rate
if loss == "MSE":
self.loss = losses.mean_squared_error
elif loss == "MSE_MAE":
self.loss = mse_mae
elif loss == "HUBER":
self.loss = tf.losses.huber_loss
else:
err_print("Unknown loss function: {}".format(loss))
sys.exit(-1)
self.plot_model = plot_model
def make_2layer_mdl(self, units):
"""
method returns 2 layer neural network model
"""
network_input = Input(shape=(self.state_size,))
net = Dense(units=units[0], activation="relu", kernel_initializer="he_uniform")(network_input)
net = Dense(units=units[1], activation="relu", kernel_initializer="he_uniform")(net)
net = Dense(units=self.action_size, activation="linear", kernel_initializer="he_uniform")(net)
model = Model(inputs=network_input, outputs=net)
visualize_model(model, self.plot_model)
model.compile(loss=self.loss, optimizer=optimizers.Adam(lr=self.learning_rate), metrics=['accuracy'])
return model
def make_4layer_mdl(self, units):
"""
method returns 4 layer neural network model
"""
network_input = Input(shape=(self.state_size,))
net = Dense(units=units[0], activation="relu", kernel_initializer="he_uniform")(network_input)
net = Dense(units=units[1], activation="relu", kernel_initializer="he_uniform")(net)
net = Dense(units=units[2], activation="relu", kernel_initializer="he_uniform")(net)
net = Dense(units=units[3], activation="relu", kernel_initializer="he_uniform")(net)
net = Dense(units=self.action_size, activation="linear", kernel_initializer="he_uniform")(net)
model = Model(inputs=network_input, outputs=net)
visualize_model(model, self.plot_model)
model.compile(loss=self.loss, optimizer=optimizers.Adam(lr=self.learning_rate), metrics=['accuracy'])
return model
def make_2048_experm_mdl(self, units):
"""
method returns complicated neural network model for playing 2048
"""
collumn1 = Input(shape=(4,))
d_collumn1 = Dense(units=6, activation="relu", kernel_initializer="he_uniform")(collumn1)
d_collumn1 = Dense(units=2, activation="relu", kernel_initializer="he_uniform")(d_collumn1)
collumn2 = Input(shape=(4,))
d_collumn2 = Dense(units=6, activation="relu", kernel_initializer="he_uniform")(collumn2)
d_collumn2 = Dense(units=2, activation="relu", kernel_initializer="he_uniform")(d_collumn2)
collumn3 = Input(shape=(4,))
d_collumn3 = Dense(units=6, activation="relu", kernel_initializer="he_uniform")(collumn3)
d_collumn3 = Dense(units=2, activation="relu", kernel_initializer="he_uniform")(d_collumn3)
collumn4 = Input(shape=(4,))
d_collumn4 = Dense(units=6, activation="relu", kernel_initializer="he_uniform")(collumn4)
d_collumn4 = Dense(units=2, activation="relu", kernel_initializer="he_uniform")(d_collumn4)
row1 = Input(shape=(4,))
d_row1 = Dense(units=6, activation="relu", kernel_initializer="he_uniform")(row1)
d_row1 = Dense(units=2, activation="relu", kernel_initializer="he_uniform")(d_row1)
row2 = Input(shape=(4,))
d_row2 = Dense(units=6, activation="relu", kernel_initializer="he_uniform")(row2)
d_row2 = Dense(units=2, activation="relu", kernel_initializer="he_uniform")(d_row2)
row3 = Input(shape=(4,))
d_row3 = Dense(units=6, activation="relu", kernel_initializer="he_uniform")(row3)
d_row3 = Dense(units=2, activation="relu", kernel_initializer="he_uniform")(d_row3)
row4 = Input(shape=(4,))
d_row4 = Dense(units=6, activation="relu", kernel_initializer="he_uniform")(row4)
d_row4 = Dense(units=2, activation="relu", kernel_initializer="he_uniform")(d_row4)
c_merge = Concatenate(axis=-1)([d_collumn1, d_collumn2, d_collumn3, d_collumn4])
r_merge = Concatenate(axis=-1)([d_row1, d_row2, d_row3, d_row4])
merge = Concatenate(axis=-1)([c_merge, r_merge])
net = Dense(units=units[0], activation="relu", kernel_initializer="he_uniform")(merge)
net = Dense(units=units[1], activation="relu", kernel_initializer="he_uniform")(net)
net = Dense(units=self.action_size, activation="linear", kernel_initializer="he_uniform")(net)
model = Model(inputs=[collumn1, collumn2, collumn3, collumn4, row1, row2, row3, row4], outputs=net)
visualize_model(model, self.plot_model)
model.compile(loss=self.loss, optimizer=optimizers.Adam(lr=self.learning_rate), metrics=['accuracy'])
return model
def make_2layer_duel_mdl(self, units):
"""
method returns 2 layer dueling neural network model
"""
network_input = Input(shape=(self.state_size,))
net = Dense(units=units[0], activation="relu", kernel_initializer="he_uniform")(network_input)
net = Dense(units=units[1], activation="relu", kernel_initializer="he_uniform")(net)
state_value = Dense(units=1, activation="linear", kernel_initializer="he_uniform")(net)
value_function = Concatenate(axis=-1)([state_value, state_value])
action_values = Dense(units=self.action_size, activation="linear", kernel_initializer="he_uniform")(net)
avg_action = Lambda(lambda x: K.mean(x, axis=1, keepdims=True))(action_values)
concat_avg_action = Concatenate(axis=-1)([avg_action, avg_action])
for _ in range(self.action_size-2):
value_function = Concatenate(axis=-1)([value_function, state_value])
concat_avg_action = Concatenate(axis=-1)([concat_avg_action, avg_action])
advantage_function = Subtract()([action_values, concat_avg_action])
net = Add()([value_function, advantage_function])
model = Model(inputs=network_input, outputs=net)
visualize_model(model, self.plot_model)
model.compile(loss=self.loss, optimizer=optimizers.Adam(lr=self.learning_rate), metrics=["accuracy"])
return model
def make_bsc_img_mdl(self):
"""
method returns DeepMind's neural network model
"""
network_input = Input(shape=(self.state_size))
net = Conv2D(filters=32, kernel_size=(8, 8), strides=(4, 4), activation="relu",
kernel_initializer="he_uniform", data_format="channels_first")(network_input)
net = Conv2D(filters=64, kernel_size=(4, 4), strides=(2, 2), activation="relu",
kernel_initializer="he_uniform")(net)
net = Conv2D(filters=64, kernel_size=(3, 3), strides=(1, 1), activation="relu",
kernel_initializer="he_uniform")(net)
net = Flatten()(net)
net = Dense(units=512, activation="relu", kernel_initializer="he_uniform")(net)
net = Dense(units=self.action_size, activation="linear", kernel_initializer="he_uniform")(net)
model = Model(inputs=network_input, outputs=net)
visualize_model(model, self.plot_model)
model.compile(loss=self.loss, optimizer=optimizers.Adam(lr=self.learning_rate), metrics=['accuracy'])
return model
def make_duel_img_mdl(self):
"""
method returns DeepMind's dueling neural network model
"""
network_input = Input(shape=(self.state_size))
net = Conv2D(filters=32, kernel_size=(8, 8), strides=(4, 4), activation="relu",
kernel_initializer="he_uniform", data_format="channels_first")(network_input)
net = Conv2D(filters=64, kernel_size=(4, 4), strides=(2, 2), activation="relu",
kernel_initializer="he_uniform")(net)
net = Conv2D(filters=64, kernel_size=(3, 3), strides=(1, 1), activation="relu",
kernel_initializer="he_uniform")(net)
net = Flatten()(net)
net = Dense(units=512, activation="relu", kernel_initializer="he_uniform")(net)
state_value = Dense(units=1, activation="linear", kernel_initializer="he_uniform")(net)
value_function = Concatenate(axis=-1)([state_value, state_value])
action_values = Dense(units=self.action_size, activation="linear", kernel_initializer="he_uniform")(net)
avg_action = Lambda(lambda x: K.mean(x, axis=1, keepdims=True))(action_values)
concat_avg_action = Concatenate(axis=-1)([avg_action, avg_action])
for _ in range(self.action_size-2):
value_function = Concatenate(axis=-1)([value_function, state_value])
concat_avg_action = Concatenate(axis=-1)([concat_avg_action, avg_action])
advantage_function = Subtract()([action_values, concat_avg_action])
net = Add()([value_function, advantage_function])
model = Model(inputs=network_input, outputs=net)
visualize_model(model, self.plot_model)
model.compile(loss=self.loss, optimizer=optimizers.Adam(lr=self.learning_rate), metrics=['accuracy'])
return model
def make_1layer_mdl(self, units):
"""
method returns 1 layer neural network model
"""
network_input = Input(shape=(self.state_size,))
net = Dense(units=units[0], activation="relu", kernel_initializer="he_uniform")(network_input)
net = Dense(units=self.action_size, activation="linear", kernel_initializer="he_uniform")(net)
model = Model(inputs=network_input, outputs=net)
visualize_model(model, self.plot_model)
model.compile(loss=self.loss, optimizer=optimizers.Adam(lr=self.learning_rate), metrics=['accuracy'])
return model
"""
def split_2048(vector):
method splits 2048 gameboard into several vectors
tensor = []
for x in range(8):
tensor.append(np.zeros((1,4)))
for i in range(4):
for e in range(4):
tensor[i][0][e] = vector[i*4+e]
for i in range(4):
for e in range(4):
tensor[e+4][0][i] = vector[i*4+e]
return tensor
"""
if __name__ == "__main__":
pass
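# Hypothetical usage sketch (assumption only -- none of this is executed by the module):
#
#   builder = Network(state_size=8, action_size=4, learning_rate=1e-3,
#                     loss="HUBER", plot_model=[False, False])
#   model = builder.make_2layer_mdl(units=[64, 64])
#   q_values = model.predict(np.zeros((1, 8)))   # would also need `import numpy as np`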
|
py | 7dff8da6f162bae9c774dd3378fe263f9a544862 | #
# Copyright (c) 2017-2018 LabKey Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
import unittest.mock as mock
from labkey.security import (
create_user,
reset_password,
activate_users,
deactivate_users,
delete_users,
add_to_group,
remove_from_group,
remove_from_role,
add_to_role,
get_roles,
list_groups,
)
from labkey.exceptions import (
RequestError,
QueryNotFoundError,
ServerNotFoundError,
RequestAuthorizationError,
)
from .utilities import MockLabKey, mock_server_context, success_test, throws_error_test
class MockSecurityController(MockLabKey):
default_action = "security"
default_success_body = {"success": True}
use_ssl = False
class MockUserController(MockLabKey):
default_action = "user"
default_success_body = {"success": True, "status_code": 200}
use_ssl = False
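# The test cases below all follow one pattern: each wraps a single security/user API endpoint
# in a Mock*Controller subclass, builds the expected request kwargs in setUp, and then checks
# the success path plus the four error responses through the shared success_test and
# throws_error_test helpers imported from .utilities.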
class TestCreateUser(unittest.TestCase):
__email = "[email protected]"
class MockCreateUser(MockSecurityController):
api = "createNewUser.api"
def setUp(self):
self.service = self.MockCreateUser()
self.expected_kwargs = {
"expected_args": [self.service.get_server_url()],
"data": {"email": TestCreateUser.__email, "sendEmail": False},
"headers": None,
"timeout": 300,
}
self.args = [mock_server_context(self.service), self.__email]
def test_success(self):
test = self
success_test(
test,
self.service.get_successful_response(),
create_user,
True,
*self.args,
**self.expected_kwargs
)
def test_unauthorized(self):
test = self
throws_error_test(
test,
RequestAuthorizationError,
self.service.get_unauthorized_response(),
create_user,
*self.args,
**self.expected_kwargs
)
def test_query_not_found(self):
test = self
throws_error_test(
test,
QueryNotFoundError,
self.service.get_query_not_found_response(),
create_user,
*self.args,
**self.expected_kwargs
)
def test_server_not_found(self):
test = self
throws_error_test(
test,
ServerNotFoundError,
self.service.get_server_not_found_response(),
create_user,
*self.args,
**self.expected_kwargs
)
def test_general_error(self):
test = self
throws_error_test(
test,
RequestError,
self.service.get_general_error_response(),
create_user,
*self.args,
**self.expected_kwargs
)
class TestResetPassword(unittest.TestCase):
__email = "[email protected]"
class MockResetPassword(MockSecurityController):
api = "adminRotatePassword.api"
def setUp(self):
self.service = self.MockResetPassword()
self.expected_kwargs = {
"expected_args": [self.service.get_server_url()],
"data": {"email": TestResetPassword.__email},
"headers": None,
"timeout": 300,
}
self.args = [mock_server_context(self.service), self.__email]
def test_success(self):
test = self
success_test(
test,
self.service.get_successful_response(),
reset_password,
True,
*self.args,
**self.expected_kwargs
)
def test_unauthorized(self):
test = self
throws_error_test(
test,
RequestAuthorizationError,
self.service.get_unauthorized_response(),
reset_password,
*self.args,
**self.expected_kwargs
)
def test_query_not_found(self):
test = self
throws_error_test(
test,
QueryNotFoundError,
self.service.get_query_not_found_response(),
reset_password,
*self.args,
**self.expected_kwargs
)
def test_server_not_found(self):
test = self
throws_error_test(
test,
ServerNotFoundError,
self.service.get_server_not_found_response(),
reset_password,
*self.args,
**self.expected_kwargs
)
def test_general_error(self):
test = self
throws_error_test(
test,
RequestError,
self.service.get_general_error_response(),
reset_password,
*self.args,
**self.expected_kwargs
)
class TestActivateUsers(unittest.TestCase):
__user_id = [123]
class MockActivateUser(MockUserController):
api = "activateUsers.api"
def setUp(self):
self.service = self.MockActivateUser()
self.expected_kwargs = {
"expected_args": [self.service.get_server_url()],
"data": {"userId": [123]},
"headers": None,
"timeout": 300,
}
self.args = [mock_server_context(self.service), self.__user_id]
def test_success(self):
test = self
success_test(
test,
self.service.get_successful_response(),
activate_users,
True,
*self.args,
**self.expected_kwargs
)
def test_unauthorized(self):
test = self
throws_error_test(
test,
RequestAuthorizationError,
self.service.get_unauthorized_response(),
activate_users,
*self.args,
**self.expected_kwargs
)
def test_query_not_found(self):
test = self
throws_error_test(
test,
QueryNotFoundError,
self.service.get_query_not_found_response(),
activate_users,
*self.args,
**self.expected_kwargs
)
def test_server_not_found(self):
test = self
throws_error_test(
test,
ServerNotFoundError,
self.service.get_server_not_found_response(),
activate_users,
*self.args,
**self.expected_kwargs
)
def test_general_error(self):
test = self
throws_error_test(
test,
RequestError,
self.service.get_general_error_response(),
activate_users,
*self.args,
**self.expected_kwargs
)
class TestDeactivateUsers(unittest.TestCase):
__user_id = [123]
class MockDeactivateUser(MockUserController):
api = "deactivateUsers.view"
def setUp(self):
self.service = self.MockDeactivateUser()
self.expected_kwargs = {
"expected_args": [self.service.get_server_url()],
"data": {"userId": [123]},
"headers": None,
"timeout": 300,
}
self.args = [mock_server_context(self.service), self.__user_id]
def test_success(self):
test = self
success_test(
test,
self.service.get_successful_response(),
deactivate_users,
False,
*self.args,
**self.expected_kwargs
)
def test_unauthorized(self):
test = self
throws_error_test(
test,
RequestAuthorizationError,
self.service.get_unauthorized_response(),
deactivate_users,
*self.args,
**self.expected_kwargs
)
def test_query_not_found(self):
test = self
throws_error_test(
test,
QueryNotFoundError,
self.service.get_query_not_found_response(),
deactivate_users,
*self.args,
**self.expected_kwargs
)
def test_server_not_found(self):
test = self
throws_error_test(
test,
ServerNotFoundError,
self.service.get_server_not_found_response(),
deactivate_users,
*self.args,
**self.expected_kwargs
)
def test_general_error(self):
test = self
throws_error_test(
test,
RequestError,
self.service.get_general_error_response(),
deactivate_users,
*self.args,
**self.expected_kwargs
)
class TestDeleteUsers(unittest.TestCase):
__user_id = [123]
class MockDeleteUser(MockUserController):
api = "deleteUsers.view"
def setUp(self):
self.service = self.MockDeleteUser()
self.expected_kwargs = {
"expected_args": [self.service.get_server_url()],
"data": {"userId": [123]},
"headers": None,
"timeout": 300,
}
self.args = [mock_server_context(self.service), self.__user_id]
def test_success(self):
test = self
success_test(
test,
self.service.get_successful_response(),
delete_users,
False,
*self.args,
**self.expected_kwargs
)
def test_unauthorized(self):
test = self
throws_error_test(
test,
RequestAuthorizationError,
self.service.get_unauthorized_response(),
delete_users,
*self.args,
**self.expected_kwargs
)
def test_query_not_found(self):
test = self
throws_error_test(
test,
QueryNotFoundError,
self.service.get_query_not_found_response(),
delete_users,
*self.args,
**self.expected_kwargs
)
def test_server_not_found(self):
test = self
throws_error_test(
test,
ServerNotFoundError,
self.service.get_server_not_found_response(),
delete_users,
*self.args,
**self.expected_kwargs
)
def test_general_error(self):
test = self
throws_error_test(
test,
RequestError,
self.service.get_general_error_response(),
delete_users,
*self.args,
**self.expected_kwargs
)
class TestAddToGroup(unittest.TestCase):
__user_id = 321
__group_id = 123
class MockAddGroupMember(MockSecurityController):
api = "addGroupMember.api"
def setUp(self):
self.service = self.MockAddGroupMember()
self.expected_kwargs = {
"expected_args": [self.service.get_server_url()],
"data": {"groupId": 123, "principalIds": [321]},
"headers": None,
"timeout": 300,
}
self.args = [mock_server_context(self.service), self.__user_id, self.__group_id]
def test_success(self):
test = self
success_test(
test,
self.service.get_successful_response(),
add_to_group,
False,
*self.args,
**self.expected_kwargs
)
def test_unauthorized(self):
test = self
throws_error_test(
test,
RequestAuthorizationError,
self.service.get_unauthorized_response(),
add_to_group,
*self.args,
**self.expected_kwargs
)
def test_query_not_found(self):
test = self
throws_error_test(
test,
QueryNotFoundError,
self.service.get_query_not_found_response(),
add_to_group,
*self.args,
**self.expected_kwargs
)
def test_server_not_found(self):
test = self
throws_error_test(
test,
ServerNotFoundError,
self.service.get_server_not_found_response(),
add_to_group,
*self.args,
**self.expected_kwargs
)
def test_general_error(self):
test = self
throws_error_test(
test,
RequestError,
self.service.get_general_error_response(),
add_to_group,
*self.args,
**self.expected_kwargs
)
class TestRemoveFromGroup(unittest.TestCase):
__user_id = 321
__group_id = 123
class MockRemoveGroupMember(MockSecurityController):
api = "removeGroupMember.api"
def setUp(self):
self.service = self.MockRemoveGroupMember()
self.expected_kwargs = {
"expected_args": [self.service.get_server_url()],
"data": {"groupId": 123, "principalIds": [321]},
"headers": None,
"timeout": 300,
}
self.args = [mock_server_context(self.service), self.__user_id, self.__group_id]
def test_success(self):
test = self
success_test(
test,
self.service.get_successful_response(),
remove_from_group,
False,
*self.args,
**self.expected_kwargs
)
def test_unauthorized(self):
test = self
throws_error_test(
test,
RequestAuthorizationError,
self.service.get_unauthorized_response(),
remove_from_group,
*self.args,
**self.expected_kwargs
)
def test_query_not_found(self):
test = self
throws_error_test(
test,
QueryNotFoundError,
self.service.get_query_not_found_response(),
remove_from_group,
*self.args,
**self.expected_kwargs
)
def test_server_not_found(self):
test = self
throws_error_test(
test,
ServerNotFoundError,
self.service.get_server_not_found_response(),
remove_from_group,
*self.args,
**self.expected_kwargs
)
def test_general_error(self):
test = self
throws_error_test(
test,
RequestError,
self.service.get_general_error_response(),
remove_from_group,
*self.args,
**self.expected_kwargs
)
class TestRemoveFromRole(unittest.TestCase):
__user_id = 321
__email = "[email protected]"
__role = {"uniqueName": "TestRole"}
class MockRemoveRole(MockSecurityController):
api = "removeAssignment.api"
def setUp(self):
self.service = self.MockRemoveRole()
self.expected_kwargs = {
"expected_args": [self.service.get_server_url()],
"data": {
"roleClassName": "TestRole",
"principalId": 321,
"email": "[email protected]",
},
"headers": None,
"timeout": 300,
}
self.args = [
mock_server_context(self.service),
self.__role,
self.__user_id,
self.__email,
]
def test_success(self):
test = self
success_test(
test,
self.service.get_successful_response(),
remove_from_role,
False,
*self.args,
**self.expected_kwargs
)
def test_unauthorized(self):
test = self
throws_error_test(
test,
RequestAuthorizationError,
self.service.get_unauthorized_response(),
remove_from_role,
*self.args,
**self.expected_kwargs
)
def test_query_not_found(self):
test = self
throws_error_test(
test,
QueryNotFoundError,
self.service.get_query_not_found_response(),
remove_from_role,
*self.args,
**self.expected_kwargs
)
def test_server_not_found(self):
test = self
throws_error_test(
test,
ServerNotFoundError,
self.service.get_server_not_found_response(),
remove_from_role,
*self.args,
**self.expected_kwargs
)
def test_general_error(self):
test = self
throws_error_test(
test,
RequestError,
self.service.get_general_error_response(),
remove_from_role,
*self.args,
**self.expected_kwargs
)
class TestAddToRole(unittest.TestCase):
__user_id = 321
__email = "[email protected]"
__role = {"uniqueName": "TestRole"}
class MockAddRole(MockSecurityController):
api = "addAssignment.api"
def setUp(self):
self.service = self.MockAddRole()
self.expected_kwargs = {
"expected_args": [self.service.get_server_url()],
"data": {
"roleClassName": "TestRole",
"principalId": 321,
"email": "[email protected]",
},
"headers": None,
"timeout": 300,
}
self.args = [
mock_server_context(self.service),
self.__role,
self.__user_id,
self.__email,
]
def test_success(self):
test = self
success_test(
test,
self.service.get_successful_response(),
add_to_role,
False,
*self.args,
**self.expected_kwargs
)
def test_unauthorized(self):
test = self
throws_error_test(
test,
RequestAuthorizationError,
self.service.get_unauthorized_response(),
add_to_role,
*self.args,
**self.expected_kwargs
)
def test_query_not_found(self):
test = self
throws_error_test(
test,
QueryNotFoundError,
self.service.get_query_not_found_response(),
add_to_role,
*self.args,
**self.expected_kwargs
)
def test_server_not_found(self):
test = self
throws_error_test(
test,
ServerNotFoundError,
self.service.get_server_not_found_response(),
add_to_role,
*self.args,
**self.expected_kwargs
)
def test_general_error(self):
test = self
throws_error_test(
test,
RequestError,
self.service.get_general_error_response(),
add_to_role,
*self.args,
**self.expected_kwargs
)
class TestGetRoles(unittest.TestCase):
class MockGetRoles(MockSecurityController):
api = "getRoles.api"
def setUp(self):
self.service = self.MockGetRoles()
self.expected_kwargs = {
"expected_args": [self.service.get_server_url()],
"data": None,
"headers": None,
"timeout": 300,
}
self.args = [mock_server_context(self.service)]
def test_success(self):
test = self
success_test(
test,
self.service.get_successful_response(),
get_roles,
False,
*self.args,
**self.expected_kwargs
)
def test_unauthorized(self):
test = self
throws_error_test(
test,
RequestAuthorizationError,
self.service.get_unauthorized_response(),
get_roles,
*self.args,
**self.expected_kwargs
)
def test_query_not_found(self):
test = self
throws_error_test(
test,
QueryNotFoundError,
self.service.get_query_not_found_response(),
get_roles,
*self.args,
**self.expected_kwargs
)
def test_server_not_found(self):
test = self
throws_error_test(
test,
ServerNotFoundError,
self.service.get_server_not_found_response(),
get_roles,
*self.args,
**self.expected_kwargs
)
def test_general_error(self):
test = self
throws_error_test(
test,
RequestError,
self.service.get_general_error_response(),
get_roles,
*self.args,
**self.expected_kwargs
)
class TestListGroups(unittest.TestCase):
class MockListGroups(MockSecurityController):
api = "listProjectGroups.api"
def setUp(self):
self.service = self.MockListGroups()
self.expected_kwargs = {
"expected_args": [self.service.get_server_url()],
"data": {"includeSiteGroups": True},
"headers": None,
"timeout": 300,
}
self.args = [mock_server_context(self.service), True]
def test_success(self):
test = self
success_test(
test,
self.service.get_successful_response(),
list_groups,
False,
*self.args,
**self.expected_kwargs
)
def test_unauthorized(self):
test = self
throws_error_test(
test,
RequestAuthorizationError,
self.service.get_unauthorized_response(),
list_groups,
*self.args,
**self.expected_kwargs
)
def test_query_not_found(self):
test = self
throws_error_test(
test,
QueryNotFoundError,
self.service.get_query_not_found_response(),
list_groups,
*self.args,
**self.expected_kwargs
)
def test_server_not_found(self):
test = self
throws_error_test(
test,
ServerNotFoundError,
self.service.get_server_not_found_response(),
list_groups,
*self.args,
**self.expected_kwargs
)
def test_general_error(self):
test = self
throws_error_test(
test,
RequestError,
self.service.get_general_error_response(),
list_groups,
*self.args,
**self.expected_kwargs
)
def suite():
load_tests = unittest.TestLoader().loadTestsFromTestCase
return unittest.TestSuite(
[
load_tests(TestCreateUser),
load_tests(TestActivateUsers),
load_tests(TestDeactivateUsers),
load_tests(TestDeleteUsers),
load_tests(TestRemoveFromGroup),
load_tests(TestAddToGroup),
load_tests(TestRemoveFromRole),
]
)
if __name__ == "__main__":
unittest.main()
|
py | 7dff8f5cf5f12a0ad3f58900f53e5da2d8cda50f | from functools import partial
import torch
from torch import nn
from deepqmc.torchext import SSP, get_log_dnn
from .distbasis import DistanceBasis
from .schnet import ElectronicSchNet, SubnetFactory
__version__ = '0.3.0'
__all__ = ['OmniSchNet']
class Jastrow(nn.Module):
r"""Jastrow network :math:`\eta_{\boldsymbol \theta}`.
The Jastrow factor consists of a vanilla neural network with logarithmically
progressing layer widths that maps the electronic embeddings to the final
Jastrow factor,
.. math::
J :=
\begin{cases}
\eta_{\boldsymbol \theta}\big(\textstyle\sum_i \mathbf x_i^
{(L)}\big) & \text{if }\texttt{sum\_first}\\
\textstyle\sum_i\eta_{\boldsymbol \theta}\big(\mathbf x_i^
{(L)}\big) & \text{otherwise}
\end{cases}
Args:
embedding_dim (int): :math:`\dim(\mathbf x_i^{(L)})`,
dimension of electronic embedding input
activation_factory (callable): creates activation functions between
layers
n_layers (int): number of neural network layers
sum_first (bool): whether embeddings are summed before passed to the
network
Shape:
- Input, :math:`\mathbf x_i^{(L)}`: :math:`(*,N,D)`
- Output, :math:`J`: :math:`(*)`
Attributes:
net: :class:`torch.nn.Sequential` representing vanilla neural network
"""
def __init__(
self, embedding_dim, activation_factory=SSP, *, n_layers=3, sum_first=True
):
super().__init__()
self.net = get_log_dnn(embedding_dim, 1, activation_factory, n_layers=n_layers)
self.sum_first = sum_first
def forward(self, xs):
if self.sum_first:
xs = self.net(xs.sum(dim=-2))
else:
xs = self.net(xs).sum(dim=-2)
return xs.squeeze(dim=-1)
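# Shape sketch (illustrative, not part of the original module): with embedding_dim=64 and an
# input xs of shape (8, 10, 64) -- 8 samples, 10 electrons -- Jastrow(64)(xs) sums over the
# electron axis first (sum_first=True) and returns a tensor of shape (8,), i.e. one scalar
# Jastrow factor per sample, matching the Shape section of the docstring above.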
class Backflow(nn.Module):
r"""Represents backflow networks :math:`\boldsymbol\kappa_{\boldsymbol\theta,q}`.
The backflow transformation consists of :math:`N_\text{bf}` vanilla neural
networks with logarithmically progressing layer width maping the electronic
embeddings to the backflow transformations,
.. math::
\mathbf f_i := \mathbf \kappa_{\boldsymbol \theta}\big(\mathbf x_i^{(L,
\text{mf/bf})}\big)
Args:
embedding_dim (int): :math:`\dim(\mathbf x_i^{(L)})`,
dimension of electronic embedding input
n_orbitals (int): :math:`N_\text{orb}` number of orbitals
n_backflows (int): :math:`N_\text{bf}` number of backflows
activation_factory (callable): creates activation functions between
layers
n_layers (int): number of neural network layers
Shape:
- Input, :math:`\mathbf x_i^{(L)}`: :math:`(*,N,D)`
- Output, :math:`f_{q\mu i}`: :math:`(*,N_\text{bf},N,N_\text{orb})`
Attributes:
nets: :class:`torch.nn.ModuleList` containing :math:`N_\text{bf}`
vanilla neural networks
"""
def __init__(
self,
embedding_dim,
n_orbitals,
n_backflows,
activation_factory=SSP,
*,
n_layers=3,
):
super().__init__()
nets = [
get_log_dnn(
embedding_dim,
n_orbitals,
activation_factory,
n_layers=n_layers,
last_bias=True,
)
for _ in range(n_backflows)
]
self.nets = nn.ModuleList(nets)
def forward(self, xs):
return torch.stack([net(xs) for net in self.nets], dim=1)
class SchNetMeanFieldLayer(nn.Module):
def __init__(self, factory, n_up):
super().__init__()
self.w = factory.w_subnet()
self.g = factory.g_subnet()
def forward(self, x, Y, edges_elec, edges_nuc):
z_nuc = (self.w(edges_nuc) * Y[..., None, :, :]).sum(dim=-2)
return self.g(z_nuc)
class MeanFieldElectronicSchNet(ElectronicSchNet):
r"""Mean-field variant of :class:`ElectronicSchNet`.
This mean-field variant of the graph neural network :class:`ElectronicSchNet`
uses :class:`SchNetMeanFieldLayer` by default, removing electronic
interactions and returning mean-field electronic embeddings. In contrast
to :class:`ElectronicSchNet` the :meth:`forward` only uses nuclear edges.
"""
LAYER_FACTORIES = {'mean-field': SchNetMeanFieldLayer}
def __init__(self, *args, **kwargs):
super().__init__(*args, version='mean-field', **kwargs)
def forward(self, edges_nuc):
*batch_dims, n_elec = edges_nuc.shape[:-2]
edges_elec_dummy = edges_nuc.new_empty(*batch_dims, n_elec, n_elec, 0)
return super().forward(edges_elec_dummy, edges_nuc)
class OmniSchNet(nn.Module):
r"""Combined Jastrow/backflow neural network based on SchNet.
This module uses an instance of :class:`ElectronicSchNet` to build a
many-body or a mean-field feature representation of electrons, which are
subsequently passed as an input into additional trainable functions to
obtain many-body or mean-field Jastrow factor and backflow transformations.
The mean-field embeddings are obtained with a variant of
:class:`ElectronicSchNet` with the electron--electron message passing omitted.
The type of embeddings used for Jastrow and backflow can be chosen
individually and equivalent embeddings are shared. The module is used to
generate the Jastrow factor and backflow transformation within
:class:`~deepqmc.wf.PauliNet`.
The Jastrow factor and backflow are obtained as
.. math::
J:=\eta_{\boldsymbol\theta}\big(\textstyle\sum_i\mathbf
x_i^{(L)}\big),\qquad
f_{q\mu i}(\mathbf r)
:=\Big(\boldsymbol\kappa_{\boldsymbol\theta,q}\big(\mathbf
x_i^{(L)}\big)\Big)_\mu
where :math:`\eta_{\boldsymbol\theta}` and
:math:`\boldsymbol\kappa_{\boldsymbol\theta,q}` are vanilla deep
neural networks and :math:`\mathbf x_i^{(L)}` are either the many-body or
mean-field embedding.
Args:
n_atoms (int): :math:`M`, number of atoms
n_up (int): :math:`N^\uparrow`, number of spin-up electrons
n_down (int): :math:`N^\downarrow`, number of spin-down electrons
n_orbitals (int): :math:`N_\text{orb}`, number of molecular orbitals
n_backflows (int): :math:`N_\text{bf}`, number of backflow channels
dist_feat_dim (int): :math:`\dim(\mathbf e)`, number of distance features
dist_feat_cutoff (float, a.u.): distance at which distance features
go to zero
mb_embedding_dim (int): dimension of many-body SchNet embeddings
mf_embedding_dim (int): dimension of mean-field SchNet embeddings
jastrow (str): type of Jastrow -- :data:`None`, ``'mean-field'``, or
``'many-body'``
jastrow_kwargs (dict): extra arguments passed to :class:`Jastrow`
backflow (str): type of backflow -- :data:`None`, ``'mean-field'``, or
``'many-body'``
backflow_kwargs (dict): extra arguments passed to :class:`Backflow`
schnet_kwargs (dict): extra arguments passed to :class:`ElectronicSchNet`
subnet_kwargs (dict): extra arguments passed to :class:`SubnetFactory`
mf_schnet_kwargs (dict): extra arguments passed to the mean-field variant
of :class:`ElectronicSchNet`
mf_subnet_kwargs (dict): extra arguments passed to :class:`SubnetFactory`
Shape:
- Input1, :math:`\lvert\mathbf r_i-\mathbf r_j\rvert`: :math:`(*,N,N)`
- Input2, :math:`\lvert\mathbf r_i-\mathbf R_I\rvert`: :math:`(*,N,M)`
- Output1, :math:`J`: :math:`(*)`
- Output2, :math:`f_{q\mu i}`: :math:`(*,N_\text{bf},N,N_\text{orb})`
Attributes:
schnet: :class:`ElectronicSchNet` network
mf_schnet: mean-field variant of :class:`ElectronicSchNet` network
jastrow: :class:`Jastrow` network
backflow: :class:`Backflow` network
"""
def __init__(
self,
n_atoms,
n_up,
n_down,
n_orbitals,
n_backflows,
*,
dist_feat_dim=32,
dist_feat_cutoff=10.0,
mb_embedding_dim=128,
mf_embedding_dim=128,
jastrow='many-body',
jastrow_kwargs=None,
backflow='many-body',
backflow_kwargs=None,
schnet_kwargs=None,
subnet_kwargs=None,
mf_schnet_kwargs=None,
mf_subnet_kwargs=None,
):
assert jastrow in [None, 'mean-field', 'many-body']
assert backflow in [None, 'mean-field', 'many-body']
super().__init__()
self.dist_basis = DistanceBasis(
dist_feat_dim, cutoff=dist_feat_cutoff, envelope='nocusp'
)
self.schnet = (
ElectronicSchNet(
n_up,
n_down,
n_atoms,
dist_feat_dim=dist_feat_dim,
embedding_dim=mb_embedding_dim,
subnet_metafactory=partial(SubnetFactory, **(subnet_kwargs or {})),
**(schnet_kwargs or {}),
)
if 'many-body' in [jastrow, backflow]
else None
)
self.mf_schnet = (
MeanFieldElectronicSchNet(
n_up,
n_down,
n_atoms,
dist_feat_dim=dist_feat_dim,
embedding_dim=mf_embedding_dim,
subnet_metafactory=partial(SubnetFactory, **(mf_subnet_kwargs or {})),
**(mf_schnet_kwargs or {}),
)
if 'mean-field' in [jastrow, backflow]
else None
)
embedding_dim = {'mean-field': mf_embedding_dim, 'many-body': mb_embedding_dim}
self.jastrow_type = jastrow
if jastrow:
self.jastrow = Jastrow(embedding_dim[jastrow], **(jastrow_kwargs or {}))
self.backflow_type = backflow
if backflow:
self.backflow = Backflow(
embedding_dim[backflow],
n_orbitals,
n_backflows,
**(backflow_kwargs or {}),
)
def forward(self, dists_nuc, dists_elec):
edges_nuc = self.dist_basis(dists_nuc)
embeddings = {}
if self.mf_schnet:
embeddings['mean-field'] = self.mf_schnet(edges_nuc)
if self.schnet:
edges_elec = self.dist_basis(dists_elec)
embeddings['many-body'] = self.schnet(edges_elec, edges_nuc)
jastrow = (
self.jastrow(embeddings[self.jastrow_type]) if self.jastrow_type else None
)
backflow = (
self.backflow(embeddings[self.backflow_type])
if self.backflow_type
else None
)
return jastrow, backflow
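# Usage sketch (assumption, paraphrasing the class docstring rather than the original code):
#
#   omni = OmniSchNet(n_atoms=2, n_up=1, n_down=1, n_orbitals=2, n_backflows=1)
#   jastrow, backflow = omni(dists_nuc, dists_elec)
#   # dists_nuc: (*, N, M) electron--nucleus distances, dists_elec: (*, N, N)
#   # jastrow: shape (*,); backflow: shape (*, 1, N, 2) per the Shape section above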
|
py | 7dff8f671a20edfb756975160e85af8a92692638 | from csdl.core.standard_operation import StandardOperation
class expand(StandardOperation):
def __init__(self, *args, expand_indices=None, **kwargs):
self.nouts = 1
self.nargs = 1
super().__init__(*args, **kwargs)
self.literals['expand_indices'] = expand_indices
|
py | 7dff8f6df4d38792e611e09648795672151baa35 | from __future__ import absolute_import
import re, string, subprocess, signal
import lit.Test
from .base import FileBasedTest
def executeCommand(command, input):
p = subprocess.Popen(command,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
p.stdin.write(input)
out,err = p.communicate()
exitCode = p.wait()
# Detect Ctrl-C in subprocess.
if exitCode == -signal.SIGINT:
raise KeyboardInterrupt
# Ensure the resulting output is always of string type.
try:
out = str(out.decode('ascii'))
except:
out = str(out)
try:
err = str(err.decode('ascii'))
except:
err = str(err)
return out, err, exitCode
def readFile(path):
fd = open(path, 'r')
return fd.read()
class AliveTest(FileBasedTest):
def __init__(self):
self.regex = re.compile(r";\s*(ERROR:.*)")
self.regex_args = re.compile(r";\s*TEST-ARGS:(.*)")
def execute(self, test, litConfig):
test = test.getSourcePath()
cmd = ['python', 'alive.py']
input = readFile(test)
# add test-specific args
m = self.regex_args.search(input)
if m != None:
cmd += m.group(1).split()
out, err, exitCode = executeCommand(cmd, input)
m = self.regex.search(input)
if m == None:
if exitCode == 0 and string.find(out, 'Optimization is correct!') != -1:
return lit.Test.PASS, ''
return lit.Test.FAIL, out + err
if exitCode == 255 and string.find(out, m.group(1)) != -1:
return lit.Test.PASS, ''
return lit.Test.FAIL, out + err
|
py | 7dff92627b144eb2181af6a898b2b0e7632a5a0f | import os
from pathlib import Path
from unittest import mock
@mock.patch("click.get_app_dir", autospec=True)
def test_new_config(gad, tmp_path: Path, monkeypatch):
# TODO - is there a way to run this within the normal test framework, i.e. perhaps unloading/reloading datapane module so we can
# setup our mocks/patches first?
# patch the config file path and no_analytics
gad.return_value = str(tmp_path)
monkeypatch.chdir(tmp_path)
with mock.patch("datapane.client.analytics.posthog", autospec=True) as posthog, mock.patch(
"datapane.client.analytics._NO_ANALYTICS", False
), mock.patch("datapane.client.api.user.ping", autospec=True) as ping:
ping.return_value = "joebloggs"
from datapane.client import config as c
# check pre-invariants
assert c.config.version == 3
assert c.config.username == ""
assert not c.config.completed_action
assert posthog.identify.call_count == 0
assert posthog.capture.call_count == 0
# run login event
import datapane as dp
from datapane.client import config as c
username = dp.login(token="TOKEN")
assert username == "joebloggs"
# check config file
assert c.config.version == 3
assert c.config.username == "joebloggs"
assert c.config.completed_action
# check analytics
assert posthog.identify.call_count == 1
assert posthog.capture.call_count == 2
# load and check config file
_config = c.Config.load()
assert c.config.version == 3
assert _config.username == "joebloggs"
assert _config.completed_action
# run additional event
# depends on fe-components - only run locally
if "CI" not in os.environ:
from tests.client.local.api.test_reports import gen_report_simple
report = gen_report_simple()
report.save(path="test_out.html", name="My Wicked Report", author="Datapane Team")
assert posthog.identify.call_count == 1
assert posthog.capture.call_count == 3
|
py | 7dff94703a0637d1f9da68833717da88632d0a36 | import numpy as np
import sys
import os
import random
import tensorflow as tf
NET = 'bigger' # 'smaller'
LOSS = 'huber' # 'L2'
winit = tf.variance_scaling_initializer(scale=2) # tf.contrib.layers.xavier_initializer()
#--------------------------------------------------------------------------------------------------
class QNetwork():
def __init__(self, scope="QNet", VALID_ACTIONS=[0, 1, 2, 3]):
self.scope = scope
self.VALID_ACTIONS = VALID_ACTIONS
with tf.variable_scope(scope):
self._build_model()
def _build_model(self):
# input placeholders; input is 4 frames of shape 84x84
self.tf_X = tf.placeholder(shape=[None, 84, 84, 4], dtype=tf.uint8, name="X")
# TD
self.tf_y = tf.placeholder(shape=[None], dtype=tf.float32, name="y")
# action
self.tf_actions = tf.placeholder(shape=[None], dtype=tf.int32, name="actions")
# normalize input
X = tf.to_float(self.tf_X) / 255.0
batch_size = tf.shape(self.tf_X)[0]
#-------------
if (NET == 'bigger'):
# bigger net
# 3 conv layers
conv1 = tf.contrib.layers.conv2d(X, 32, 8, 4, padding='VALID', activation_fn=tf.nn.relu, weights_initializer=winit)
conv2 = tf.contrib.layers.conv2d(conv1, 64, 4, 2, padding='VALID', activation_fn=tf.nn.relu, weights_initializer=winit)
conv3 = tf.contrib.layers.conv2d(conv2, 64, 3, 1, padding='VALID', activation_fn=tf.nn.relu, weights_initializer=winit)
# fully connected layers
flattened = tf.contrib.layers.flatten(conv3)
fc1 = tf.contrib.layers.fully_connected(flattened, 512, activation_fn=tf.nn.relu, weights_initializer=winit)
elif (NET == 'smaller'):
# smaller net
# 2 conv layers
conv1 = tf.contrib.layers.conv2d(X, 16, 8, 4, padding='VALID', activation_fn=tf.nn.relu, weights_initializer=winit)
conv2 = tf.contrib.layers.conv2d(conv1, 32, 4, 2, padding='VALID', activation_fn=tf.nn.relu, weights_initializer=winit)
# fully connected layers
flattened = tf.contrib.layers.flatten(conv2)
fc1 = tf.contrib.layers.fully_connected(flattened, 256, activation_fn=tf.nn.relu, weights_initializer=winit)
#-------------
# Q(s,a)
self.predictions = tf.contrib.layers.fully_connected(fc1, len(self.VALID_ACTIONS), activation_fn=None, weights_initializer=winit)
action_one_hot = tf.one_hot(self.tf_actions, tf.shape(self.predictions)[1], 1.0, 0.0, name='action_one_hot')
self.action_predictions = tf.reduce_sum(self.predictions * action_one_hot, reduction_indices=1, name='act_pred')
if (LOSS == 'L2'):
# L2 loss
self.loss = tf.reduce_mean(tf.squared_difference(self.tf_y, self.action_predictions), name='loss')
elif (LOSS == 'huber'):
# Huber loss
self.loss = tf.reduce_mean(huber_loss(self.tf_y-self.action_predictions), name='loss')
# optimizer
#self.optimizer = tf.train.RMSPropOptimizer(learning_rate=0.00025, momentum=0.95, epsilon=0.01)
self.optimizer = tf.train.AdamOptimizer(learning_rate=2e-5)
self.train_op = self.optimizer.minimize(self.loss, global_step=tf.contrib.framework.get_global_step())
def predict(self, sess, s):
return sess.run(self.predictions, { self.tf_X: s})
def update(self, sess, s, a, y):
feed_dict = { self.tf_X: s, self.tf_y: y, self.tf_actions: a }
_, loss = sess.run([self.train_op, self.loss], feed_dict)
return loss
# huber loss
def huber_loss(x):
condition = tf.abs(x) < 1.0
output1 = 0.5 * tf.square(x)
output2 = tf.abs(x) - 0.5
return tf.where(condition, output1, output2)
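# Illustrative usage (assumption, not part of the original file):
#
#   q_net = QNetwork(scope="q_estimator", VALID_ACTIONS=[0, 1, 2, 3])
#   with tf.Session() as sess:
#       sess.run(tf.global_variables_initializer())
#       dummy_state = np.zeros((1, 84, 84, 4), dtype=np.uint8)
#       q_values = q_net.predict(sess, dummy_state)   # -> array of shape (1, 4)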
|
py | 7dff948e44940b51cf84016b261c4fd31fd095a3 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from raspiot.libs.internals.event import Event
class AlertSmsSendEvent(Event):
"""
alert.sms.send event
"""
EVENT_NAME = u'alert.sms.send'
EVENT_SYSTEM = False
def __init__(self, bus, formatters_broker, events_broker):
"""
Constructor
Args:
bus (MessageBus): message bus instance
formatters_broker (FormattersBroker): formatters broker instance
events_broker (EventsBroker): events broker instance
"""
Event.__init__(self, bus, formatters_broker, events_broker)
def _check_params(self, params):
"""
Check event parameters
Args:
params (dict): event parameters
Return:
bool: True if params are valid, False otherwise
"""
return all(key in [u'message'] for key in params.keys())
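# Example payload accepted by this event (illustrative; _check_params above only constrains
# the keys, the message text is made up):
#   {u'message': u'Intrusion detected, sending SMS alert'}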
|
py | 7dff94933f9ede25d55b185a33dbca3dcb81dc2e | #
import os
# Setup paths
BASE_CFG_PATH = os.path.abspath(os.path.dirname(__file__))
BASE_PATH = os.path.dirname(BASE_CFG_PATH)
# Configure logging
LOG_LEVEL = 'INFO'
DEBUG = APP_DEBUG
# Gather DB settings from a KMS encrypted user-data string. See documentation for more information
USE_USER_DATA = APP_USE_USER_DATA
KMS_ACCOUNT_NAME = 'APP_KMS_ACCOUNT_NAME'
USER_DATA_URL = 'APP_USER_DATA_URL'
# AWS API key. Only required if you are not using instance profiles to allow access to the AWS API's to assume roles
AWS_API_ACCESS_KEY = 'APP_AWS_API_ACCESS_KEY'
AWS_API_SECRET_KEY = 'APP_AWS_API_SECRET_KEY'
# Instance Profile ARN (only used if using static API keys)
AWS_API_INSTANCE_ROLE_ARN = 'APP_INSTANCE_ROLE_ARN'
# Flask secret key
SECRET_KEY = 'APP_SECRET_KEY'
# Database settings. if USE_USER_DATA is enabled, these will be overwritten with the
# information from the encrypted user data string, and can be left at default values
SQLALCHEMY_DATABASE_URI = 'APP_DB_URI'
SQLALCHEMY_POOL_SIZE = 50
SQLALCHEMY_MAX_OVERFLOW = 15
SQLALCHEMY_ECHO = False
SQLALCHEMY_TRACK_MODIFICATIONS = False
# Do not sort JSON output
JSON_SORT_KEYS = False
|
py | 7dff95505e1b20d78526b9e3588ce0adb0c35f6d | import abc
import base64
import hashlib
from typing import Optional
from bech32 import bech32_encode, convertbits
from terra_sdk.core import AccAddress, AccPubKey, ValAddress, ValPubKey
from terra_sdk.core.auth import StdSignature, StdSignMsg, StdTx
BECH32_PUBKEY_DATA_PREFIX = "eb5ae98721"
__all__ = ["Key"]
def get_bech(prefix: str, payload: str) -> str:
data = convertbits(bytes.fromhex(payload), 8, 5)
if data is None:
raise ValueError(f"could not parse data: prefix {prefix}, payload {payload}")
return bech32_encode(prefix, data)  # repack 8-bit bytes into 5-bit groups for Bech32
def address_from_public_key(public_key: bytes) -> bytes:
sha = hashlib.sha256()
rip = hashlib.new("ripemd160")
sha.update(public_key)
rip.update(sha.digest())
return rip.digest()
def pubkey_from_public_key(public_key: bytes) -> bytes:
arr = bytearray.fromhex(BECH32_PUBKEY_DATA_PREFIX)
arr += bytearray(public_key)
return bytes(arr)
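# Derivation sketch (comment added for clarity, not part of the original module): the raw
# account address is RIPEMD-160(SHA-256(compressed_public_key)) as computed above, and the
# printable form is get_bech("terra", raw_address.hex()); the pubkey variant simply prefixes
# the key bytes with BECH32_PUBKEY_DATA_PREFIX before the same Bech32 encoding step.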
class Key:
"""Abstract Key interface, representing an agent with transaction-signing capabilities.
Args:
public_key (Optional[bytes]): compressed public key bytes,
"""
public_key: Optional[bytes]
"""Compressed public key bytes, used to derive :data:`raw_address` and :data:`raw_pubkey`."""
raw_address: Optional[bytes]
"""Raw Bech32 words of address, used to derive associated account and validator
operator addresses.
"""
raw_pubkey: Optional[bytes]
"""Raw Bech32 words of pubkey, used to derive associated account and validator
pubkeys.
"""
def __init__(self, public_key: Optional[bytes] = None):
self.public_key = public_key
if public_key:
self.raw_address = address_from_public_key(public_key)
self.raw_pubkey = pubkey_from_public_key(public_key)
@abc.abstractmethod
def sign(self, payload: bytes) -> bytes:
"""Signs the data payload. An implementation of Key is expected to override this method.
Args:
payload (bytes): arbitrary data payload
Raises:
NotImplementedError: if not implemented
Returns:
bytes: signed payload
"""
raise NotImplementedError("an instance of Key must implement Key.sign")
@property
def acc_address(self) -> AccAddress:
"""Terra Bech32 account address. Default derivation via :data:`public_key` is provided.
Raises:
ValueError: if Key was not initialized with proper public key
Returns:
AccAddress: account address
"""
if not self.raw_address:
raise ValueError("could not compute acc_address: missing raw_address")
return AccAddress(get_bech("terra", self.raw_address.hex()))
@property
def val_address(self) -> ValAddress:
"""Terra Bech32 validator operator address. Default derivation via :data:`public_key` is provided.
Raises:
ValueError: if Key was not initialized with proper public key
Returns:
ValAddress: validator operator address
"""
if not self.raw_address:
raise ValueError("could not compute val_address: missing raw_address")
return ValAddress(get_bech("terravaloper", self.raw_address.hex()))
@property
def acc_pubkey(self) -> AccPubKey:
"""Terra Bech32 account pubkey. Default derivation via :data:`public_key` is provided.
Raises:
ValueError: if Key was not initialized with proper public key
Returns:
AccPubKey: account pubkey
"""
if not self.raw_pubkey:
raise ValueError("could not compute acc_pubkey: missing raw_pubkey")
return AccPubKey(get_bech("terrapub", self.raw_pubkey.hex()))
@property
def val_pubkey(self) -> ValPubKey:
"""Terra Bech32 validator pubkey. Default derivation via ``public_key`` is provided.
Raises:
ValueError: if Key was not initialized with proper public key
Returns:
ValPubKey: validator pubkey
"""
if not self.raw_pubkey:
raise ValueError("could not compute val_pubkey: missing raw_pubkey")
return ValPubKey(get_bech("terravaloperpub", self.raw_pubkey.hex()))
def create_signature(self, tx: StdSignMsg) -> StdSignature:
"""Signs the transaction with the signing algorithm provided by this Key implementation,
and outputs the signature. The signature is only returned, and must be manually added to
the ``signatures`` field of an :class:`StdTx`.
Args:
tx (StdSignMsg): unsigned transaction
Raises:
ValueError: if missing ``public_key``
Returns:
StdSignature: signature object
"""
if self.public_key is None:
raise ValueError(
"signature could not be created: Key instance missing public_key"
)
sig_buffer = self.sign(tx.to_json().strip().encode())
return StdSignature.from_data(
{
"signature": base64.b64encode(sig_buffer).decode(),
"pub_key": {
"type": "tendermint/PubKeySecp256k1",
"value": base64.b64encode(self.public_key).decode(),
},
}
)
def sign_tx(self, tx: StdSignMsg) -> StdTx:
"""Signs the transaction with the signing algorithm provided by this Key implementation,
and creates a ready-to-broadcast :class:`StdTx` object with the signature applied.
Args:
tx (StdSignMsg): unsigned transaction
Returns:
StdTx: ready-to-broadcast transaction object
"""
sig = self.create_signature(tx)
return StdTx(tx.msgs, tx.fee, [sig], tx.memo)
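# Hypothetical usage sketch (assumption -- a concrete subclass must supply `sign`, the rest
# is inherited from this abstract interface):
#
#   class MyKey(Key):
#       def sign(self, payload: bytes) -> bytes:
#           ...  # e.g. a secp256k1 signature over the payload
#
#   key = MyKey(public_key=compressed_pubkey_bytes)   # 33-byte compressed key assumed
#   signed_tx = key.sign_tx(unsigned_sign_msg)        # returns a broadcastable StdTx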
|
py | 7dff95c7e843803f33a0a5c1384b20285d93b89f | # coding=utf-8
# Copyright 2019-present, the HuggingFace Inc. team, The Google AI Language Team and Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" TF 2.0 DistilBERT model
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import json
import logging
import math
import copy
import sys
from io import open
import itertools
import numpy as np
import tensorflow as tf
from .configuration_distilbert import DistilBertConfig
from .modeling_tf_utils import TFPreTrainedModel, TFSharedEmbeddings, shape_list, get_initializer
from .file_utils import add_start_docstrings
logger = logging.getLogger(__name__)
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_MAP = {
'distilbert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/distilbert-base-uncased-tf_model.h5",
'distilbert-base-uncased-distilled-squad': "https://s3.amazonaws.com/models.huggingface.co/bert/distilbert-base-uncased-distilled-squad-tf_model.h5",
'distilbert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/distilbert-base-multilingual-cased-tf_model.h5",
}
### UTILS AND BUILDING BLOCKS OF THE ARCHITECTURE ###
def gelu(x):
""" Gaussian Error Linear Unit.
Original Implementation of the gelu activation function in Google Bert repo when initially created.
For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):
0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
Also see https://arxiv.org/abs/1606.08415
"""
cdf = 0.5 * (1.0 + tf.math.erf(x / tf.math.sqrt(2.0)))
return x * cdf
def gelu_new(x):
"""Gaussian Error Linear Unit.
This is a smoother version of the RELU.
Original paper: https://arxiv.org/abs/1606.08415
Args:
x: float Tensor to perform activation.
Returns:
`x` with the GELU activation applied.
"""
cdf = 0.5 * (1.0 + tf.tanh(
(np.sqrt(2 / np.pi) * (x + 0.044715 * tf.pow(x, 3)))))
return x * cdf
class TFEmbeddings(tf.keras.layers.Layer):
def __init__(self, config, **kwargs):
super(TFEmbeddings, self).__init__(**kwargs)
self.vocab_size = config.vocab_size
self.dim = config.dim
self.initializer_range = config.initializer_range
self.word_embeddings = TFSharedEmbeddings(config.vocab_size,
config.dim,
initializer_range=config.initializer_range,
name='word_embeddings') # padding_idx=0)
self.position_embeddings = tf.keras.layers.Embedding(config.max_position_embeddings,
config.dim,
embeddings_initializer=get_initializer(config.initializer_range),
name='position_embeddings')
if config.sinusoidal_pos_embds:
raise NotImplementedError
self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=1e-12, name="LayerNorm")
self.dropout = tf.keras.layers.Dropout(config.dropout)
def build(self, input_shape):
"""Build shared word embedding layer """
with tf.name_scope("word_embeddings"):
# Create and initialize weights. The random normal initializer was chosen
# arbitrarily, and works well.
self.word_embeddings = self.add_weight(
"weight",
shape=[self.vocab_size, self.dim],
initializer=get_initializer(self.initializer_range))
super(TFEmbeddings, self).build(input_shape)
def call(self, inputs, inputs_embeds=None, mode="embedding", training=False):
"""Get token embeddings of inputs.
Args:
inputs: list of three int64 tensors with shape [batch_size, length]: (input_ids, position_ids, token_type_ids)
mode: string, a valid value is one of "embedding" and "linear".
Returns:
outputs: (1) If mode == "embedding", output embedding tensor, float32 with
shape [batch_size, length, embedding_size]; (2) mode == "linear", output
linear tensor, float32 with shape [batch_size, length, vocab_size].
Raises:
ValueError: if mode is not valid.
Shared weights logic adapted from
https://github.com/tensorflow/models/blob/a009f4fb9d2fc4949e32192a944688925ef78659/official/transformer/v2/embedding_layer.py#L24
"""
if mode == "embedding":
return self._embedding(inputs, inputs_embeds=inputs_embeds, training=training)
elif mode == "linear":
return self._linear(inputs)
else:
raise ValueError("mode {} is not valid.".format(mode))
def _embedding(self, inputs, inputs_embeds=None, training=False):
"""
Parameters
----------
input_ids: tf.Tensor(bs, max_seq_length)
The token ids to embed.
Outputs
-------
embeddings: tf.Tensor(bs, max_seq_length, dim)
The embedded tokens (plus position embeddings, no token_type embeddings)
"""
if not isinstance(inputs, (tuple, list)):
input_ids = inputs
position_ids = None
else:
input_ids, position_ids = inputs
if input_ids is not None:
seq_length = shape_list(input_ids)[1]
else:
seq_length = shape_list(inputs_embeds)[1]
if position_ids is None:
position_ids = tf.range(seq_length, dtype=tf.int32)[tf.newaxis, :]
if inputs_embeds is None:
inputs_embeds = tf.gather(self.word_embeddings, input_ids)
position_embeddings = self.position_embeddings(position_ids) # (bs, max_seq_length, dim)
embeddings = inputs_embeds + position_embeddings # (bs, max_seq_length, dim)
embeddings = self.LayerNorm(embeddings) # (bs, max_seq_length, dim)
embeddings = self.dropout(embeddings, training=training) # (bs, max_seq_length, dim)
return embeddings
def _linear(self, inputs):
"""Computes logits by running inputs through a linear layer.
Args:
inputs: A float32 tensor with shape [batch_size, length, hidden_size]
Returns:
float32 tensor with shape [batch_size, length, vocab_size].
"""
batch_size = shape_list(inputs)[0]
length = shape_list(inputs)[1]
x = tf.reshape(inputs, [-1, self.dim])
logits = tf.matmul(x, self.word_embeddings, transpose_b=True)
return tf.reshape(logits, [batch_size, length, self.vocab_size])
class TFMultiHeadSelfAttention(tf.keras.layers.Layer):
def __init__(self, config, **kwargs):
super(TFMultiHeadSelfAttention, self).__init__(**kwargs)
self.n_heads = config.n_heads
self.dim = config.dim
self.dropout = tf.keras.layers.Dropout(config.attention_dropout)
self.output_attentions = config.output_attentions
assert self.dim % self.n_heads == 0
self.q_lin = tf.keras.layers.Dense(config.dim,
kernel_initializer=get_initializer(config.initializer_range),
name="q_lin")
self.k_lin = tf.keras.layers.Dense(config.dim,
kernel_initializer=get_initializer(config.initializer_range),
name="k_lin")
self.v_lin = tf.keras.layers.Dense(config.dim,
kernel_initializer=get_initializer(config.initializer_range),
name="v_lin")
self.out_lin = tf.keras.layers.Dense(config.dim,
kernel_initializer=get_initializer(config.initializer_range),
name="out_lin")
self.pruned_heads = set()
def prune_heads(self, heads):
raise NotImplementedError
def call(self, inputs, training=False):
"""
Parameters
----------
query: tf.Tensor(bs, seq_length, dim)
key: tf.Tensor(bs, seq_length, dim)
value: tf.Tensor(bs, seq_length, dim)
mask: tf.Tensor(bs, seq_length)
Outputs
-------
weights: tf.Tensor(bs, n_heads, seq_length, seq_length)
Attention weights
context: tf.Tensor(bs, seq_length, dim)
Contextualized layer. Optional: only if `output_attentions=True`
"""
query, key, value, mask, head_mask = inputs
bs, q_length, dim = shape_list(query)
k_length = shape_list(key)[1]
# assert dim == self.dim, 'Dimensions do not match: %s data vs %s configured' % (dim, self.dim)
# assert key.size() == value.size()
dim_per_head = self.dim // self.n_heads
mask_reshape = [bs, 1, 1, k_length]
def shape(x):
""" separate heads """
return tf.transpose(tf.reshape(x, (bs, -1, self.n_heads, dim_per_head)), perm=(0, 2, 1, 3))
def unshape(x):
""" group heads """
return tf.reshape(tf.transpose(x, perm=(0, 2, 1, 3)), (bs, -1, self.n_heads * dim_per_head))
q = shape(self.q_lin(query)) # (bs, n_heads, q_length, dim_per_head)
k = shape(self.k_lin(key)) # (bs, n_heads, k_length, dim_per_head)
v = shape(self.v_lin(value)) # (bs, n_heads, k_length, dim_per_head)
q = q / math.sqrt(dim_per_head) # (bs, n_heads, q_length, dim_per_head)
scores = tf.matmul(q, k, transpose_b=True) # (bs, n_heads, q_length, k_length)
mask = tf.reshape(mask, mask_reshape) # (bs, n_heads, qlen, klen)
# scores.masked_fill_(mask, -float('inf')) # (bs, n_heads, q_length, k_length)
scores = scores - 1e30 * (1.0 - mask)
weights = tf.nn.softmax(scores, axis=-1) # (bs, n_heads, qlen, klen)
weights = self.dropout(weights, training=training) # (bs, n_heads, qlen, klen)
# Mask heads if we want to
if head_mask is not None:
weights = weights * head_mask
context = tf.matmul(weights, v) # (bs, n_heads, qlen, dim_per_head)
context = unshape(context) # (bs, q_length, dim)
context = self.out_lin(context) # (bs, q_length, dim)
if self.output_attentions:
return (context, weights)
else:
return (context,)
class TFFFN(tf.keras.layers.Layer):
def __init__(self, config, **kwargs):
super(TFFFN, self).__init__(**kwargs)
self.dropout = tf.keras.layers.Dropout(config.dropout)
self.lin1 = tf.keras.layers.Dense(config.hidden_dim,
kernel_initializer=get_initializer(config.initializer_range),
name="lin1")
self.lin2 = tf.keras.layers.Dense(config.dim,
kernel_initializer=get_initializer(config.initializer_range),
name="lin2")
assert config.activation in ['relu', 'gelu'], "activation ({}) must be in ['relu', 'gelu']".format(config.activation)
self.activation = tf.keras.layers.Activation(gelu) if config.activation == 'gelu' else tf.keras.activations.relu
def call(self, input, training=False):
x = self.lin1(input)
x = self.activation(x)
x = self.lin2(x)
x = self.dropout(x, training=training)
return x
class TFTransformerBlock(tf.keras.layers.Layer):
def __init__(self, config, **kwargs):
super(TFTransformerBlock, self).__init__(**kwargs)
self.n_heads = config.n_heads
self.dim = config.dim
self.hidden_dim = config.hidden_dim
self.dropout = tf.keras.layers.Dropout(config.dropout)
self.activation = config.activation
self.output_attentions = config.output_attentions
assert config.dim % config.n_heads == 0
self.attention = TFMultiHeadSelfAttention(config, name="attention")
self.sa_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-12, name="sa_layer_norm")
self.ffn = TFFFN(config, name="ffn")
self.output_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-12, name="output_layer_norm")
def call(self, inputs, training=False): # removed: src_enc=None, src_len=None
"""
Parameters
----------
x: tf.Tensor(bs, seq_length, dim)
attn_mask: tf.Tensor(bs, seq_length)
Outputs
-------
sa_weights: tf.Tensor(bs, n_heads, seq_length, seq_length)
The attention weights
ffn_output: tf.Tensor(bs, seq_length, dim)
The output of the transformer block contextualization.
"""
x, attn_mask, head_mask = inputs
# Self-Attention
sa_output = self.attention([x, x, x, attn_mask, head_mask], training=training)
if self.output_attentions:
sa_output, sa_weights = sa_output # (bs, seq_length, dim), (bs, n_heads, seq_length, seq_length)
else: # To handle these `output_attention` or `output_hidden_states` cases returning tuples
# assert type(sa_output) == tuple
sa_output = sa_output[0]
sa_output = self.sa_layer_norm(sa_output + x) # (bs, seq_length, dim)
# Feed Forward Network
ffn_output = self.ffn(sa_output, training=training) # (bs, seq_length, dim)
ffn_output = self.output_layer_norm(ffn_output + sa_output) # (bs, seq_length, dim)
output = (ffn_output,)
if self.output_attentions:
output = (sa_weights,) + output
return output
class TFTransformer(tf.keras.layers.Layer):
def __init__(self, config, **kwargs):
super(TFTransformer, self).__init__(**kwargs)
self.n_layers = config.n_layers
self.output_attentions = config.output_attentions
self.output_hidden_states = config.output_hidden_states
self.layer = [TFTransformerBlock(config, name='layer_._{}'.format(i))
for i in range(config.n_layers)]
def call(self, inputs, training=False):
"""
Parameters
----------
x: tf.Tensor(bs, seq_length, dim)
Input sequence embedded.
attn_mask: tf.Tensor(bs, seq_length)
Attention mask on the sequence.
Outputs
-------
hidden_state: tf.Tensor(bs, seq_length, dim)
Sequence of hiddens states in the last (top) layer
all_hidden_states: Tuple[tf.Tensor(bs, seq_length, dim)]
Tuple of length n_layers with the hidden states from each layer.
Optional: only if output_hidden_states=True
all_attentions: Tuple[tf.Tensor(bs, n_heads, seq_length, seq_length)]
Tuple of length n_layers with the attention weights from each layer
Optional: only if output_attentions=True
"""
x, attn_mask, head_mask = inputs
all_hidden_states = ()
all_attentions = ()
hidden_state = x
for i, layer_module in enumerate(self.layer):
if self.output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_state,)
layer_outputs = layer_module([hidden_state, attn_mask, head_mask[i]], training=training)
hidden_state = layer_outputs[-1]
if self.output_attentions:
assert len(layer_outputs) == 2
attentions = layer_outputs[0]
all_attentions = all_attentions + (attentions,)
else:
assert len(layer_outputs) == 1
# Add last layer
if self.output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_state,)
outputs = (hidden_state,)
if self.output_hidden_states:
outputs = outputs + (all_hidden_states,)
if self.output_attentions:
outputs = outputs + (all_attentions,)
return outputs # last-layer hidden state, (all hidden states), (all attentions)
class TFDistilBertMainLayer(tf.keras.layers.Layer):
def __init__(self, config, **kwargs):
super(TFDistilBertMainLayer, self).__init__(**kwargs)
self.num_hidden_layers = config.num_hidden_layers
self.embeddings = TFEmbeddings(config, name="embeddings") # Embeddings
self.transformer = TFTransformer(config, name="transformer") # Encoder
def get_input_embeddings(self):
return self.embeddings
def _resize_token_embeddings(self, new_num_tokens):
raise NotImplementedError
def _prune_heads(self, heads_to_prune):
raise NotImplementedError
def call(self, inputs, attention_mask=None, head_mask=None, inputs_embeds=None, training=False):
if isinstance(inputs, (tuple, list)):
input_ids = inputs[0]
attention_mask = inputs[1] if len(inputs) > 1 else attention_mask
head_mask = inputs[2] if len(inputs) > 2 else head_mask
inputs_embeds = inputs[3] if len(inputs) > 3 else inputs_embeds
assert len(inputs) <= 4, "Too many inputs."
elif isinstance(inputs, dict):
input_ids = inputs.get('input_ids')
attention_mask = inputs.get('attention_mask', attention_mask)
head_mask = inputs.get('head_mask', head_mask)
inputs_embeds = inputs.get('inputs_embeds', inputs_embeds)
assert len(inputs) <= 4, "Too many inputs."
else:
input_ids = inputs
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = shape_list(input_ids)
elif inputs_embeds is not None:
input_shape = shape_list(inputs_embeds)[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
if attention_mask is None:
attention_mask = tf.ones(input_shape) # (bs, seq_length)
attention_mask = tf.cast(attention_mask, dtype=tf.float32)
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
if head_mask is not None:
raise NotImplementedError
else:
head_mask = [None] * self.num_hidden_layers
embedding_output = self.embeddings(input_ids, inputs_embeds=inputs_embeds) # (bs, seq_length, dim)
tfmr_output = self.transformer([embedding_output, attention_mask, head_mask], training=training)
return tfmr_output # last-layer hidden-state, (all hidden_states), (all attentions)
### INTERFACE FOR ENCODER AND TASK SPECIFIC MODEL ###
class TFDistilBertPreTrainedModel(TFPreTrainedModel):
""" An abstract class to handle weights initialization and
a simple interface for downloading and loading pretrained models.
"""
config_class = DistilBertConfig
pretrained_model_archive_map = TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_MAP
base_model_prefix = "distilbert"
DISTILBERT_START_DOCSTRING = r"""
DistilBERT is a small, fast, cheap and light Transformer model
trained by distilling Bert base. It has 40% fewer parameters than
`bert-base-uncased` and runs 60% faster, while preserving over 95% of
Bert's performance as measured on the GLUE language understanding benchmark.
Here are the differences between the interface of Bert and DistilBert:
- DistilBert doesn't have `token_type_ids`, you don't need to indicate which token belongs to which segment. Just separate your segments with the separation token `tokenizer.sep_token` (or `[SEP]`)
- DistilBert doesn't have options to select the input positions (`position_ids` input). This could be added if necessary though, just let us know if you need this option.
For more information on DistilBERT, please refer to our
`detailed blog post`_
This model is a tf.keras.Model `tf.keras.Model`_ sub-class. Use it as a regular TF 2.0 Keras Model and
refer to the TF 2.0 documentation for all matter related to general usage and behavior.
.. _`detailed blog post`:
https://medium.com/huggingface/distilbert-8cf3380435b5
.. _`tf.keras.Model`:
https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/keras/Model
Note on the model inputs:
TF 2.0 models accepts two formats as inputs:
- having all inputs as keyword arguments (like PyTorch models), or
- having all inputs as a list, tuple or dict in the first positional arguments.
This second option is useful when using the `tf.keras.Model.fit()` method, which currently requires having all the tensors in the first argument of the model call function: `model(inputs)`.
If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument:
- a single Tensor with input_ids only and nothing else: `model(input_ids)`
- a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
`model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
- a dictionary with one or several input Tensors associated to the input names given in the docstring:
`model({'input_ids': input_ids, 'token_type_ids': token_type_ids})`
Parameters:
config (:class:`~transformers.DistilBertConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the configuration.
Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.
"""
DISTILBERT_INPUTS_DOCSTRING = r"""
Inputs:
**input_ids** ``Numpy array`` or ``tf.Tensor`` of shape ``(batch_size, sequence_length)``:
Indices of input sequence tokens in the vocabulary.
The input sequences should start with `[CLS]` and end with `[SEP]` tokens.
For now, ONLY BertTokenizer(`bert-base-uncased`) is supported and you should use this tokenizer when using DistilBERT.
**attention_mask**: (`optional`) ``Numpy array`` or ``tf.Tensor`` of shape ``(batch_size, sequence_length)``:
Mask to avoid performing attention on padding token indices.
Mask values selected in ``[0, 1]``:
``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
**head_mask**: (`optional`) ``Numpy array`` or ``tf.Tensor`` of shape ``(num_heads,)`` or ``(num_layers, num_heads)``:
Mask to nullify selected heads of the self-attention modules.
Mask values selected in ``[0, 1]``:
``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**.
**inputs_embeds**: (`optional`) ``Numpy array`` or ``tf.Tensor`` of shape ``(batch_size, sequence_length, embedding_dim)``:
Optionally, instead of passing ``input_ids`` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
"""
@add_start_docstrings("The bare DistilBERT encoder/transformer outputing raw hidden-states without any specific head on top.",
DISTILBERT_START_DOCSTRING, DISTILBERT_INPUTS_DOCSTRING)
class TFDistilBertModel(TFDistilBertPreTrainedModel):
r"""
Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
**last_hidden_state**: ``tf.Tensor`` of shape ``(batch_size, sequence_length, hidden_size)``
Sequence of hidden-states at the output of the last layer of the model.
**hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
list of ``tf.Tensor`` (one for the output of each layer + the output of the embeddings)
of shape ``(batch_size, sequence_length, hidden_size)``:
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
**attentions**: (`optional`, returned when ``config.output_attentions=True``)
list of ``tf.Tensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
Examples::
import tensorflow as tf
from transformers import DistilBertTokenizer, TFDistilBertModel
tokenizer = DistilBertTokenizer.from_pretrained('distilbert-base-uncased')
model = TFDistilBertModel.from_pretrained('distilbert-base-uncased')
input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute"))[None, :] # Batch size 1
outputs = model(input_ids)
last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple
"""
def __init__(self, config, *inputs, **kwargs):
super(TFDistilBertModel, self).__init__(config, *inputs, **kwargs)
self.distilbert = TFDistilBertMainLayer(config, name="distilbert") # Embeddings
def call(self, inputs, **kwargs):
outputs = self.distilbert(inputs, **kwargs)
return outputs
class TFDistilBertLMHead(tf.keras.layers.Layer):
def __init__(self, config, input_embeddings, **kwargs):
super(TFDistilBertLMHead, self).__init__(**kwargs)
self.vocab_size = config.vocab_size
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.input_embeddings = input_embeddings
def build(self, input_shape):
self.bias = self.add_weight(shape=(self.vocab_size,),
initializer='zeros',
trainable=True,
name='bias')
super(TFDistilBertLMHead, self).build(input_shape)
def call(self, hidden_states):
hidden_states = self.input_embeddings(hidden_states, mode="linear")
hidden_states = hidden_states + self.bias
return hidden_states
@add_start_docstrings("""DistilBert Model with a `masked language modeling` head on top. """,
DISTILBERT_START_DOCSTRING, DISTILBERT_INPUTS_DOCSTRING)
class TFDistilBertForMaskedLM(TFDistilBertPreTrainedModel):
r"""
Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
**prediction_scores**: ``tf.Tensor`` of shape ``(batch_size, sequence_length, config.vocab_size)``
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
**hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
list of ``tf.Tensor`` (one for the output of each layer + the output of the embeddings)
of shape ``(batch_size, sequence_length, hidden_size)``:
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
**attentions**: (`optional`, returned when ``config.output_attentions=True``)
list of ``tf.Tensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
Examples::
import tensorflow as tf
from transformers import DistilBertTokenizer, TFDistilBertForMaskedLM
tokenizer = DistilBertTokenizer.from_pretrained('distilbert-base-uncased')
model = TFDistilBertForMaskedLM.from_pretrained('distilbert-base-uncased')
input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute"))[None, :] # Batch size 1
outputs = model(input_ids)
prediction_scores = outputs[0]
"""
def __init__(self, config, *inputs, **kwargs):
super(TFDistilBertForMaskedLM, self).__init__(config, *inputs, **kwargs)
self.output_attentions = config.output_attentions
self.output_hidden_states = config.output_hidden_states
self.vocab_size = config.vocab_size
self.distilbert = TFDistilBertMainLayer(config, name="distilbert")
self.vocab_transform = tf.keras.layers.Dense(config.dim,
kernel_initializer=get_initializer(config.initializer_range),
name="vocab_transform")
self.act = tf.keras.layers.Activation(gelu)
self.vocab_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-12, name="vocab_layer_norm")
self.vocab_projector = TFDistilBertLMHead(config, self.distilbert.embeddings, name="vocab_projector")
def get_output_embeddings(self):
return self.vocab_projector.input_embeddings
def call(self, inputs, **kwargs):
distilbert_output = self.distilbert(inputs, **kwargs)
hidden_states = distilbert_output[0] # (bs, seq_length, dim)
prediction_logits = self.vocab_transform(hidden_states) # (bs, seq_length, dim)
prediction_logits = self.act(prediction_logits) # (bs, seq_length, dim)
prediction_logits = self.vocab_layer_norm(prediction_logits) # (bs, seq_length, dim)
prediction_logits = self.vocab_projector(prediction_logits)
outputs = (prediction_logits,) + distilbert_output[1:]
return outputs # logits, (hidden_states), (attentions)
@add_start_docstrings("""DistilBert Model transformer with a sequence classification/regression head on top (a linear layer on top of
the pooled output) e.g. for GLUE tasks. """,
DISTILBERT_START_DOCSTRING, DISTILBERT_INPUTS_DOCSTRING)
class TFDistilBertForSequenceClassification(TFDistilBertPreTrainedModel):
r"""
Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
**logits**: ``tf.Tensor`` of shape ``(batch_size, config.num_labels)``
Classification (or regression if config.num_labels==1) scores (before SoftMax).
**hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
list of ``tf.Tensor`` (one for the output of each layer + the output of the embeddings)
of shape ``(batch_size, sequence_length, hidden_size)``:
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
**attentions**: (`optional`, returned when ``config.output_attentions=True``)
list of ``tf.Tensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
Examples::
import tensorflow as tf
from transformers import DistilBertTokenizer, TFDistilBertForSequenceClassification
tokenizer = DistilBertTokenizer.from_pretrained('distilbert-base-uncased')
model = TFDistilBertForSequenceClassification.from_pretrained('distilbert-base-uncased')
input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute"))[None, :] # Batch size 1
outputs = model(input_ids)
logits = outputs[0]
"""
def __init__(self, config, *inputs, **kwargs):
super(TFDistilBertForSequenceClassification, self).__init__(config, *inputs, **kwargs)
self.num_labels = config.num_labels
self.distilbert = TFDistilBertMainLayer(config, name="distilbert")
self.pre_classifier = tf.keras.layers.Dense(config.dim,
kernel_initializer=get_initializer(config.initializer_range),
activation='relu',
name="pre_classifier")
self.classifier = tf.keras.layers.Dense(config.num_labels,
kernel_initializer=get_initializer(config.initializer_range),
name="classifier")
self.dropout = tf.keras.layers.Dropout(config.seq_classif_dropout)
def call(self, inputs, **kwargs):
distilbert_output = self.distilbert(inputs, **kwargs)
hidden_state = distilbert_output[0] # (bs, seq_len, dim)
pooled_output = hidden_state[:, 0] # (bs, dim)
pooled_output = self.pre_classifier(pooled_output) # (bs, dim)
pooled_output = self.dropout(pooled_output, training=kwargs.get('training', False)) # (bs, dim)
logits = self.classifier(pooled_output) # (bs, num_labels)
outputs = (logits,) + distilbert_output[1:]
return outputs # logits, (hidden_states), (attentions)
@add_start_docstrings("""DistilBert Model with a token classification head on top (a linear layer on top of
the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. """,
DISTILBERT_START_DOCSTRING, DISTILBERT_INPUTS_DOCSTRING)
class TFDistilBertForTokenClassification(TFDistilBertPreTrainedModel):
r"""
Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
**scores**: ``Numpy array`` or ``tf.Tensor`` of shape ``(batch_size, sequence_length, config.num_labels)``
Classification scores (before SoftMax).
**hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
list of ``Numpy array`` or ``tf.Tensor`` (one for the output of each layer + the output of the embeddings)
of shape ``(batch_size, sequence_length, hidden_size)``:
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
**attentions**: (`optional`, returned when ``config.output_attentions=True``)
list of ``Numpy array`` or ``tf.Tensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
Examples::
import tensorflow as tf
from transformers import DistilBertTokenizer, TFDistilBertForTokenClassification
tokenizer = DistilBertTokenizer.from_pretrained('distilbert-base-uncased')
model = TFDistilBertForTokenClassification.from_pretrained('distilbert-base-uncased')
input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute"))[None, :] # Batch size 1
outputs = model(input_ids)
scores = outputs[0]
"""
def __init__(self, config, *inputs, **kwargs):
super(TFDistilBertForTokenClassification, self).__init__(config, *inputs, **kwargs)
self.num_labels = config.num_labels
self.distilbert = TFDistilBertMainLayer(config, name='distilbert')
self.dropout = tf.keras.layers.Dropout(config.dropout)
self.classifier = tf.keras.layers.Dense(config.num_labels,
kernel_initializer=get_initializer(config.initializer_range),
name='classifier')
def call(self, inputs, **kwargs):
outputs = self.distilbert(inputs, **kwargs)
sequence_output = outputs[0]
sequence_output = self.dropout(sequence_output, training=kwargs.get('training', False))
logits = self.classifier(sequence_output)
outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here
return outputs # scores, (hidden_states), (attentions)
@add_start_docstrings("""DistilBert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of
the hidden-states output to compute `span start logits` and `span end logits`). """,
DISTILBERT_START_DOCSTRING, DISTILBERT_INPUTS_DOCSTRING)
class TFDistilBertForQuestionAnswering(TFDistilBertPreTrainedModel):
r"""
Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
**start_scores**: ``tf.Tensor`` of shape ``(batch_size, sequence_length,)``
Span-start scores (before SoftMax).
**end_scores**: ``tf.Tensor`` of shape ``(batch_size, sequence_length,)``
Span-end scores (before SoftMax).
**hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
list of ``tf.Tensor`` (one for the output of each layer + the output of the embeddings)
of shape ``(batch_size, sequence_length, hidden_size)``:
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
**attentions**: (`optional`, returned when ``config.output_attentions=True``)
list of ``tf.Tensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
Examples::
import tensorflow as tf
from transformers import DistilBertTokenizer, TFDistilBertForQuestionAnswering
tokenizer = DistilBertTokenizer.from_pretrained('distilbert-base-uncased')
model = TFDistilBertForQuestionAnswering.from_pretrained('distilbert-base-uncased')
input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute"))[None, :] # Batch size 1
outputs = model(input_ids)
start_scores, end_scores = outputs[:2]
"""
def __init__(self, config, *inputs, **kwargs):
super(TFDistilBertForQuestionAnswering, self).__init__(config, *inputs, **kwargs)
self.distilbert = TFDistilBertMainLayer(config, name="distilbert")
self.qa_outputs = tf.keras.layers.Dense(config.num_labels,
kernel_initializer=get_initializer(config.initializer_range),
name='qa_outputs')
assert config.num_labels == 2
self.dropout = tf.keras.layers.Dropout(config.qa_dropout)
def call(self, inputs, **kwargs):
distilbert_output = self.distilbert(inputs, **kwargs)
hidden_states = distilbert_output[0] # (bs, max_query_len, dim)
hidden_states = self.dropout(hidden_states, training=kwargs.get('training', False)) # (bs, max_query_len, dim)
logits = self.qa_outputs(hidden_states) # (bs, max_query_len, 2)
start_logits, end_logits = tf.split(logits, 2, axis=-1)
start_logits = tf.squeeze(start_logits, axis=-1)
end_logits = tf.squeeze(end_logits, axis=-1)
outputs = (start_logits, end_logits,) + distilbert_output[1:]
return outputs # start_logits, end_logits, (hidden_states), (attentions)
|
py | 7dff9667152722256566ddaa5e18685ead28413f | '''
Created on Mar 27, 2015
@author: maxz
'''
from GPyNotebook.plotting.legend import Legend
from GPyNotebook.plotting.scatter import Scatter
from IPython.html.widgets.widget_box import Box, HBox, FlexBox
from GPyNotebook.controls.select import DimSelect
from IPython.html.widgets.widget_selection import Dropdown
class LatentView(Box):
def __init__(self, X, lab_dict, dim_names=None, colors=None, markers=None, figsize=None, figtype=None, *a, **kw):
self.scatter = Scatter(X, lab_dict, colors, markers, figsize, figtype)
self.legend = Legend(self.scatter)
self.input_dim = X.shape[1]
self.lab_dict = lab_dict
dx, dy, dl = cntrls = self._controls(dim_names)
kw['children'] = [
HBox(children=cntrls),
FlexBox(children = [self.scatter, self.legend], margin=0, orientation='horizontal')
]
kw['orientation'] = 'vertical'
dx.on_trait_change(self.scatter.change_x_dim, 'value')
dy.on_trait_change(self.scatter.change_y_dim, 'value')
dl.on_trait_change(self.scatter.change_labels, 'value')
#dl.on_trait_change(lab_up_html(html_show, labs, cs, ''), 'value')
dl.on_trait_change(self.legend.update_legend, 'value')
super(LatentView, self).__init__(*a, **kw)
def _controls(self, names=None):
if names is not None:
v1, v2 = names[:2]
else:
v1, v2 = 0, 1
return (DimSelect(self.input_dim, names, value=v1, description='x'),
DimSelect(self.input_dim, names, value=v2, description='y'),
Dropdown(options=self.lab_dict.keys(), description='labels')
)
|
py | 7dff96f3fd36f1d0033aeef5a1be63b583d098d1 | """
Just a script to display a download progress bar
for the dataset. Taken from:
https://stackoverflow.com/a/46825841
"""
import progressbar
pbar = None
def show_progress(block_num, block_size, total_size):
global pbar
if pbar is None:
pbar = progressbar.ProgressBar(maxval=total_size)
pbar.start()
downloaded = block_num * block_size
if downloaded < total_size:
pbar.update(downloaded)
else:
pbar.finish()
pbar = None
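# Usage sketch (hedged example; the URL and filename below are placeholders):
#
#   import urllib.request
#   urllib.request.urlretrieve(
#       "http://example.com/dataset.zip",    # hypothetical download URL
#       "dataset.zip",
#       reporthook=show_progress,            # called with (block_num, block_size, total_size)
#   )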
|
py | 7dff9767757a2f18a252291c7ecf68e26314deab | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import base64
import random
from logzero import logger
import XX.DB.RedisHelper as Rh
class IPPollingProxy(object):
# ping from all provinces, then collect the CDN IPs
proxy_cdn = []
# crawled every day, then checked before use
proxy_public = []
# your IP must be within the allowed range to use these
proxy_private = [
'proxy.scrapy.com:23088',
]
# all proxy pool
proxy = proxy_public + proxy_private
def process_request(self, request, spider):
proxy_ip = random.choice(self.proxy)
if proxy_ip:
request.meta['proxy'] = "http://" + proxy_ip
# Proxy username and password for authentication
# proxy_user_pass = "USERNAME:PASSWORD"
# setup basic authentication for the proxy
# encoded_user_pass = base64.encodestring(proxy_user_pass)
# request.headers['Proxy-Authorization'] = 'Basic ' + encoded_user_pass
# Decide which proxy pool to use based on whether the IP is domestic or foreign:
# send the IP to ping.chinaz.com to look up its location,
# then pick the proxy pool according to that location, e.g.
# if u'国内' in area:  # location reported as domestic
# request.meta['proxy'] = 'set a domestic HTTP proxy here'
# elif u'国外' in area:  # location reported as foreign
# request.meta['proxy'] = 'set a foreign HTTP proxy here'
# else:
# pass
return
class AbuyunProxy(object):
@classmethod
def from_crawler(cls, crawler):
cls.settings = crawler.settings
return cls()
def process_request(self, request, spider):
# Proxy server
proxy_url = "http://http-dyn.abuyun.com:9020"
code = bytes((self.settings.get("PROXY_UN") + ":" + self.settings.get("PROXY_PWD")), "ascii")
proxyAuth = "Basic " + base64.urlsafe_b64encode(code).decode("utf8")
request.meta["proxy"] = proxy_url
request.headers["Proxy-Authorization"] = proxyAuth
class IpProxy(object):
def __init__(self):
# 获取Redis连接
pass
def process_request(self, request, spider):
# Example IP list; to refresh it in real time, fetch it from the DB instead
ips = ["49.85.6.229:39896", "49.85.6.229:39896", "49.85.6.229:39896"]
ip = random.choice(ips)
request.meta["proxy"] = ip
# Goubanjia代理
class Goubanjia(object):
def __init__(self):
# 获取Redis连接
pass
def process_request(self, request, spider):
# 这是ip用例,如果想要实时更新,就可以从DB中获取
ips = ["49.85.6.229:39896"]
ip = random.choice(ips)
request.meta["proxy"] = ip
class MimvpProxy(object):
@classmethod
def from_crawler(cls, crawler):
cls.settings = crawler.settings
return cls()
def __init__(self):
self.conn_redis = Rh.RedisHelper.get_redis_connect(self.settings.get("REDIS_HOST"),
password=self.settings.get("REDIS_PWD"), db=9)
def process_request(self, request, spider):
ips = self.conn_redis.keys()
if ips:
request.meta["proxy"] = "http://c1259e45c76b:b7b9bd6b48@" + random.choice(ips)
logger.info("Proxy is " + str(request.meta["proxy"]))
else:
logger.info("No Proxy ip")
|
py | 7dff976c862966f9be9f0cea35b5e818f9fdea73 | import os
# replace "\\" in a filepath with "/" because somehow glob() inserts unconsistent slashes (?)
def fix_path(file_list):
return [path.replace("\\", "/") for path in file_list]
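# Usage sketch (illustrative; the glob pattern is a placeholder):
#
#   from glob import glob
#   files = fix_path(glob("data/**/*.png", recursive=True))
#   # any backslashes produced on Windows are normalised to forward slashes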
|
py | 7dff98bbb11583ccb0df83aca38bb8c460d83ea0 | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/component/vehicle/shared_proton_torpedo_array.iff"
result.attribute_template_id = -1
result.stfName("craft_item_ingredients_n","proton_torpedo_array")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result |
py | 7dff98c5fd2b8b88e767688191746c9a6508886c | #!/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import collections
import json
import os
import pickle
import shelve
from pathlib import Path
import gym
import ray
from ray.rllib.agents.registry import get_agent_class
from ray.rllib.env import MultiAgentEnv
from ray.rllib.env.base_env import _DUMMY_AGENT_ID
from ray.rllib.evaluation.episode import _flatten_action
from ray.rllib.policy.sample_batch import DEFAULT_POLICY_ID
from ray.tune.util import merge_dicts
EXAMPLE_USAGE = """
Example Usage via RLlib CLI:
rllib rollout /tmp/ray/checkpoint_dir/checkpoint-0 --run DQN
--env CartPole-v0 --steps 1000000 --out rollouts.pkl
Example Usage via executable:
./rollout.py /tmp/ray/checkpoint_dir/checkpoint-0 --run DQN
--env CartPole-v0 --steps 1000000 --out rollouts.pkl
"""
# Note: if you use any custom models or envs, register them here first, e.g.:
#
# ModelCatalog.register_custom_model("pa_model", ParametricActionsModel)
# register_env("pa_cartpole", lambda _: ParametricActionCartpole(10))
class RolloutSaver:
"""Utility class for storing rollouts.
Currently supports two behaviours: the original, which
simply dumps everything to a pickle file once complete,
and a mode which stores each rollout as an entry in a Python
shelf db file. The latter mode is more robust to memory problems
or crashes part-way through the rollout generation. Each rollout
is stored with a key based on the episode number (0-indexed),
and the number of episodes is stored with the key "num_episodes",
so to load the shelf file, use something like:
with shelve.open('rollouts.pkl') as rollouts:
for episode_index in range(rollouts["num_episodes"]):
rollout = rollouts[str(episode_index)]
If outfile is None, this class does nothing.
"""
def __init__(self,
outfile=None,
use_shelve=False,
write_update_file=False,
target_steps=None,
target_episodes=None,
save_info=False):
self._outfile = outfile
self._update_file = None
self._use_shelve = use_shelve
self._write_update_file = write_update_file
self._shelf = None
self._num_episodes = 0
self._rollouts = []
self._current_rollout = []
self._total_steps = 0
self._target_episodes = target_episodes
self._target_steps = target_steps
self._save_info = save_info
def _get_tmp_progress_filename(self):
outpath = Path(self._outfile)
return outpath.parent / ("__progress_" + outpath.name)
@property
def outfile(self):
return self._outfile
def __enter__(self):
if self._outfile:
if self._use_shelve:
# Open a shelf file to store each rollout as they come in
self._shelf = shelve.open(self._outfile)
else:
# Original behaviour - keep all rollouts in memory and save
# them all at the end.
# But check we can actually write to the outfile before going
# through the effort of generating the rollouts:
try:
with open(self._outfile, "wb") as _:
pass
except IOError as x:
print("Can not open {} for writing - cancelling rollouts.".
format(self._outfile))
raise x
if self._write_update_file:
# Open a file to track rollout progress:
self._update_file = self._get_tmp_progress_filename().open(
mode="w")
return self
def __exit__(self, type, value, traceback):
if self._shelf:
# Close the shelf file, and store the number of episodes for ease
self._shelf["num_episodes"] = self._num_episodes
self._shelf.close()
elif self._outfile and not self._use_shelve:
# Dump everything as one big pickle:
pickle.dump(self._rollouts, open(self._outfile, "wb"))
if self._update_file:
# Remove the temp progress file:
self._get_tmp_progress_filename().unlink()
self._update_file = None
def _get_progress(self):
if self._target_episodes:
return "{} / {} episodes completed".format(self._num_episodes,
self._target_episodes)
elif self._target_steps:
return "{} / {} steps completed".format(self._total_steps,
self._target_steps)
else:
return "{} episodes completed".format(self._num_episodes)
def begin_rollout(self):
self._current_rollout = []
def end_rollout(self):
if self._outfile:
if self._use_shelve:
# Save this episode as a new entry in the shelf database,
# using the episode number as the key.
self._shelf[str(self._num_episodes)] = self._current_rollout
else:
# Append this rollout to our list, to save later.
self._rollouts.append(self._current_rollout)
self._num_episodes += 1
if self._update_file:
self._update_file.seek(0)
self._update_file.write(self._get_progress() + "\n")
self._update_file.flush()
def append_step(self, obs, action, next_obs, reward, done, info):
"""Add a step to the current rollout, if we are saving them"""
if self._outfile:
if self._save_info:
self._current_rollout.append(
[obs, action, next_obs, reward, done, info])
else:
self._current_rollout.append(
[obs, action, next_obs, reward, done])
self._total_steps += 1
def create_parser(parser_creator=None):
parser_creator = parser_creator or argparse.ArgumentParser
parser = parser_creator(
formatter_class=argparse.RawDescriptionHelpFormatter,
description="Roll out a reinforcement learning agent "
"given a checkpoint.",
epilog=EXAMPLE_USAGE)
parser.add_argument(
"checkpoint", type=str, help="Checkpoint from which to roll out.")
required_named = parser.add_argument_group("required named arguments")
required_named.add_argument(
"--run",
type=str,
required=True,
help="The algorithm or model to train. This may refer to the name "
"of a built-on algorithm (e.g. RLLib's DQN or PPO), or a "
"user-defined trainable function or class registered in the "
"tune registry.")
required_named.add_argument(
"--env", type=str, help="The gym environment to use.")
parser.add_argument(
"--no-render",
default=False,
action="store_const",
const=True,
help="Surpress rendering of the environment.")
parser.add_argument(
"--monitor",
default=False,
action="store_const",
const=True,
help="Wrap environment in gym Monitor to record video.")
parser.add_argument(
"--steps", default=10000, help="Number of steps to roll out.")
parser.add_argument("--out", default=None, help="Output filename.")
parser.add_argument(
"--config",
default="{}",
type=json.loads,
help="Algorithm-specific configuration (e.g. env, hyperparams). "
"Surpresses loading of configuration from checkpoint.")
parser.add_argument(
"--episodes",
default=0,
help="Number of complete episodes to roll out. (Overrides --steps)")
parser.add_argument(
"--save-info",
default=False,
action="store_true",
help="Save the info field generated by the step() method, "
"as well as the action, observations, rewards and done fields.")
parser.add_argument(
"--use-shelve",
default=False,
action="store_true",
help="Save rollouts into a python shelf file (will save each episode "
"as it is generated). An output filename must be set using --out.")
parser.add_argument(
"--track-progress",
default=False,
action="store_true",
help="Write progress to a temporary file (updated "
"after each episode). An output filename must be set using --out; "
"the progress file will live in the same folder.")
return parser
def run(args, parser):
config = {}
# Load configuration from file
config_dir = os.path.dirname(args.checkpoint)
config_path = os.path.join(config_dir, "params.pkl")
if not os.path.exists(config_path):
config_path = os.path.join(config_dir, "../params.pkl")
if not os.path.exists(config_path):
if not args.config:
raise ValueError(
"Could not find params.pkl in either the checkpoint dir or "
"its parent directory.")
else:
with open(config_path, "rb") as f:
config = pickle.load(f)
if "num_workers" in config:
config["num_workers"] = min(2, config["num_workers"])
config = merge_dicts(config, args.config)
if not args.env:
if not config.get("env"):
parser.error("the following arguments are required: --env")
args.env = config.get("env")
ray.init()
cls = get_agent_class(args.run)
agent = cls(env=args.env, config=config)
agent.restore(args.checkpoint)
num_steps = int(args.steps)
num_episodes = int(args.episodes)
with RolloutSaver(
args.out,
args.use_shelve,
write_update_file=args.track_progress,
target_steps=num_steps,
target_episodes=num_episodes,
save_info=args.save_info) as saver:
rollout(agent, args.env, num_steps, num_episodes, saver,
args.no_render, args.monitor)
class DefaultMapping(collections.defaultdict):
"""default_factory now takes as an argument the missing key."""
def __missing__(self, key):
self[key] = value = self.default_factory(key)
return value
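# Sketch: unlike collections.defaultdict, the factory receives the missing key, e.g.
#   DefaultMapping(lambda agent_id: "policy_for_" + agent_id)["agent_0"]
#   # -> "policy_for_agent_0", which is then cached under "agent_0"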
def default_policy_agent_mapping(unused_agent_id):
return DEFAULT_POLICY_ID
def keep_going(steps, num_steps, episodes, num_episodes):
"""Determine whether we've collected enough data"""
# if num_episodes is set, this overrides num_steps
if num_episodes:
return episodes < num_episodes
# if num_steps is set, continue until we reach the limit
if num_steps:
return steps < num_steps
# otherwise keep going forever
return True
def rollout(agent,
env_name,
num_steps,
num_episodes=0,
saver=RolloutSaver(),
no_render=True,
monitor=False):
policy_agent_mapping = default_policy_agent_mapping
if hasattr(agent, "workers"):
env = agent.workers.local_worker().env
multiagent = isinstance(env, MultiAgentEnv)
if agent.workers.local_worker().multiagent:
policy_agent_mapping = agent.config["multiagent"][
"policy_mapping_fn"]
policy_map = agent.workers.local_worker().policy_map
state_init = {p: m.get_initial_state() for p, m in policy_map.items()}
use_lstm = {p: len(s) > 0 for p, s in state_init.items()}
action_init = {
p: _flatten_action(m.action_space.sample())
for p, m in policy_map.items()
}
else:
env = gym.make(env_name)
multiagent = False
use_lstm = {DEFAULT_POLICY_ID: False}
if monitor and not no_render and saver and saver.outfile is not None:
# If monitoring has been requested,
# manually wrap our environment with a gym monitor
# which is set to record every episode.
env = gym.wrappers.Monitor(
env, os.path.join(os.path.dirname(saver.outfile), "monitor"),
lambda x: True)
steps = 0
episodes = 0
while keep_going(steps, num_steps, episodes, num_episodes):
mapping_cache = {} # in case policy_agent_mapping is stochastic
saver.begin_rollout()
obs = env.reset()
agent_states = DefaultMapping(
lambda agent_id: state_init[mapping_cache[agent_id]])
prev_actions = DefaultMapping(
lambda agent_id: action_init[mapping_cache[agent_id]])
prev_rewards = collections.defaultdict(lambda: 0.)
done = False
reward_total = 0.0
while not done and keep_going(steps, num_steps, episodes,
num_episodes):
multi_obs = obs if multiagent else {_DUMMY_AGENT_ID: obs}
action_dict = {}
for agent_id, a_obs in multi_obs.items():
if a_obs is not None:
policy_id = mapping_cache.setdefault(
agent_id, policy_agent_mapping(agent_id))
p_use_lstm = use_lstm[policy_id]
if p_use_lstm:
a_action, p_state, _ = agent.compute_action(
a_obs,
state=agent_states[agent_id],
prev_action=prev_actions[agent_id],
prev_reward=prev_rewards[agent_id],
policy_id=policy_id)
agent_states[agent_id] = p_state
else:
a_action = agent.compute_action(
a_obs,
prev_action=prev_actions[agent_id],
prev_reward=prev_rewards[agent_id],
policy_id=policy_id)
a_action = _flatten_action(a_action) # tuple actions
action_dict[agent_id] = a_action
prev_actions[agent_id] = a_action
action = action_dict
action = action if multiagent else action[_DUMMY_AGENT_ID]
next_obs, reward, done, info = env.step(action)
if multiagent:
for agent_id, r in reward.items():
prev_rewards[agent_id] = r
else:
prev_rewards[_DUMMY_AGENT_ID] = reward
if multiagent:
done = done["__all__"]
reward_total += sum(reward.values())
else:
reward_total += reward
if not no_render:
env.render()
saver.append_step(obs, action, next_obs, reward, done, info)
steps += 1
obs = next_obs
saver.end_rollout()
print("Episode #{}: reward: {}".format(episodes, reward_total))
if done:
episodes += 1
if __name__ == "__main__":
parser = create_parser()
args = parser.parse_args()
run(args, parser)
|
py | 7dff992c9d5da1eb720f9135095813481cd1eeb4 | # Copyright 2021 QuantumBlack Visual Analytics Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND
# NONINFRINGEMENT. IN NO EVENT WILL THE LICENSOR OR OTHER CONTRIBUTORS
# BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF, OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# The QuantumBlack Visual Analytics Limited ("QuantumBlack") name and logo
# (either separately or in combination, "QuantumBlack Trademarks") are
# trademarks of QuantumBlack. The License does not grant you any right or
# license to the QuantumBlack Trademarks. You may not use the QuantumBlack
# Trademarks or any confusingly similar mark as a trademark for your product,
# or use the QuantumBlack Trademarks in any other manner that might cause
# confusion in the marketplace, including but not limited to in advertising,
# on websites, or on software.
#
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This file contains the fixtures that are reusable by any tests within
this directory. You don't need to import the fixtures as pytest will
discover them automatically. More info here:
https://docs.pytest.org/en/latest/fixture.html
"""
import sys
import tempfile
from importlib import import_module
from os import makedirs
from pathlib import Path
import yaml
from click.testing import CliRunner
from pytest import fixture
from kedro import __version__ as kedro_version
from kedro.framework.cli.cli import cli
from kedro.framework.project import configure_project
from kedro.framework.startup import ProjectMetadata
MOCKED_HOME = "user/path/"
REPO_NAME = "dummy_project"
PACKAGE_NAME = "dummy_package"
@fixture(name="cli_runner")
def cli_runner_fixture():
runner = CliRunner()
with runner.isolated_filesystem():
makedirs(MOCKED_HOME)
yield runner
@fixture
def entry_points(mocker):
return mocker.patch("pkg_resources.iter_entry_points")
@fixture
def entry_point(mocker, entry_points):
ep = mocker.MagicMock()
entry_points.return_value = [ep]
return ep
@fixture(scope="module")
def fake_root_dir():
# using tempfile as tmp_path fixture doesn't support module scope
with tempfile.TemporaryDirectory() as tmp_root:
yield Path(tmp_root).resolve()
@fixture(scope="module")
def fake_package_path(fake_root_dir):
return fake_root_dir.resolve() / REPO_NAME / "src" / PACKAGE_NAME
@fixture(scope="module")
def fake_repo_path(fake_root_dir):
return fake_root_dir.resolve() / REPO_NAME
@fixture(scope="module")
def dummy_config(fake_root_dir, fake_metadata):
config = {
"project_name": fake_metadata.project_name,
"repo_name": REPO_NAME,
"python_package": fake_metadata.package_name,
"output_dir": str(fake_root_dir),
}
config_path = fake_root_dir / "dummy_config.yml"
with config_path.open("w") as f:
yaml.dump(config, f)
return config_path
@fixture(scope="module")
def fake_metadata(fake_root_dir):
metadata = ProjectMetadata(
fake_root_dir / REPO_NAME / "pyproject.toml",
PACKAGE_NAME,
"CLI Testing Project",
fake_root_dir / REPO_NAME,
kedro_version,
fake_root_dir / REPO_NAME / "src",
)
return metadata
@fixture(scope="module")
def fake_project_cli(fake_repo_path: Path, dummy_config: Path):
starter_path = Path(__file__).parents[3].resolve()
starter_path = starter_path / "features" / "steps" / "test_starter"
CliRunner().invoke(
cli, ["new", "-c", str(dummy_config), "--starter", str(starter_path)],
)
# NOTE: Here we load a couple of modules, as they would be imported in
# the code and tests.
# It's safe to remove the new entries from path due to the python
# module caching mechanism. Any `reload` on it will not work though.
old_path = sys.path.copy()
sys.path = [str(fake_repo_path / "src")] + sys.path
import_module(PACKAGE_NAME)
configure_project(PACKAGE_NAME)
yield import_module(f"{PACKAGE_NAME}.cli")
sys.path = old_path
del sys.modules[PACKAGE_NAME]
@fixture
def chdir_to_dummy_project(fake_repo_path, monkeypatch):
monkeypatch.chdir(str(fake_repo_path))
@fixture
def patch_log(mocker):
mocker.patch("logging.config.dictConfig")
|
py | 7dff9b1df3d2dbc07a80235cac47161f3b9ccb16 | from math import pi
def qft_rotations(circuit, n_qubits):
"""Performs qft on the first n qubits in circuit (without swaps)"""
if n_qubits == 0:
return circuit
n = n_qubits-1
circuit.h(n)
for qubit in range(n):
circuit.cp(pi/2**(n-qubit), qubit, n)
# At the end of our function, we call the same function again on
# the next qubits (we reduced n by one earlier in the function)
qft_rotations(circuit, n)
return circuit
def swap_registers(circuit, n_qubits):
for qubit_idx in range(n_qubits//2):
circuit.swap(qubit_idx, n_qubits-qubit_idx-1)
return circuit
def qft(circuit, n_qubits):
"""QFT on the first n qubits in circuit"""
qft_rotations(circuit, n_qubits)
swap_registers(circuit, n_qubits)
return circuit
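# Usage sketch (assumes Qiskit's QuantumCircuit; illustrative only):
#
#   from qiskit import QuantumCircuit
#   qc = QuantumCircuit(3)
#   qc.x(0)          # prepare an arbitrary basis state
#   qft(qc, 3)       # appends the rotations and the final swaps in place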
def grover_diffuser(circuit, n_qubits):
"""Applies the grover diffuser on the first n qubits in circuit"""
qubit_indices = [i for i in range(n_qubits)]
# -----------Diffuser Protocol
# Apply transformation |s> -> |00..0> (H-gates)
circuit.h(qubit_indices)
# Apply transformation |00..0> -> |11..1> (X-gates)
circuit.x(qubit_indices)
# Do multi-controlled-Z gate
circuit.h([-1])
circuit.mct(qubit_indices[:-1], [-1]) # multi-controlled-toffoli
circuit.h([-1])
# Apply transformation |11..1> -> |00..0>
circuit.x(qubit_indices)
# Apply transformation |00..0> -> |s>
circuit.h(qubit_indices)
return circuit |
py | 7dff9b8d7cd6bb268e60389f34278979ca4496da | """
transitions.extensions
----------------------
Additional functionality such as hierarchical (nested) machine support, Graphviz-based diagram creation
and threadsafe execution of machine methods. Additionally, combinations of all those features are possible
and made easier to access with a convenience factory.
"""
from .diagrams import GraphMachine
from .nesting import HierarchicalMachine
from .locking import LockedMachine
from .factory import MachineFactory, HierarchicalGraphMachine, LockedHierarchicalGraphMachine
from .factory import LockedHierarchicalMachine, LockedGraphMachine
from .factory import AsyncMachine, AsyncGraphMachine, HierarchicalAsyncMachine, HierarchicalAsyncGraphMachine
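# Convenience sketch (illustrative): MachineFactory returns a machine class with the
# requested feature mix, e.g.
#
#   cls = MachineFactory.get_predefined(graph=True, nested=True, locked=True)
#   machine = cls(states=["A", "B"], initial="A")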
|
py | 7dff9bbad45bd53cb944c4c08420895f6cc69044 | #!/usr/bin/env python3.6
__author__ = "X Zhang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020 Westwood Robotics Corp."
__date__ = "Feb 14, 2020"
__version__ = "0.1.0"
__status__ = "Beta"
'''
This module is used to communicate with the Dynamixel X series actuator on the DAnTE platform
'''
from dynamixel_sdk import * # Uses Dynamixel SDK library
from Settings.DXL_CONTROL_TABLE import *
from Settings.Robot import *
import math
import pdb
class DynamixelController(object):
def __init__(self, DXL_ID, port, BAUDRATE=2000000):
# Protocol version
self.PROTOCOL_VERSION = 2.0 # See which protocol version is used in the Dynamixel
# Default setting
self.DXL_ID = DXL_ID # Dynamixel ID : 1
self.BAUDRATE = BAUDRATE # Dynamixel default baudrate : 57600
self.port = port
self.port_handler = None
self.packet_handler = None
self.open_port()
def open_port(self):
# Initialize PortHandler instance
# Set the port path
# Get methods and members of PortHandlerLinux or PortHandlerWindows
self.port_handler = PortHandler(self.port)
# Initialize PacketHandler instance
# Set the protocol version
# Get methods and members of Protocol1PacketHandler or Protocol2PacketHandler
self.packet_handler = PacketHandler(self.PROTOCOL_VERSION)
# Open port
if self.port_handler.openPort():
# Set port baudrate
if self.port_handler.setBaudRate(self.BAUDRATE):
return True
else:
                print("Failed to set the Dynamixel baudrate")
                return False
else:
print("Failed to open the Dyanmixel port")
return False
def close_port(self):
# Close port
self.port_handler.closePort()
def ping(self):
# Ping Dynamixel
# Get Dynamixel model number
model_number, comm_result, error = self.packet_handler.ping(self.port_handler, self.DXL_ID)
if comm_result != COMM_SUCCESS:
print("%s" % self.packet_handler.getTxRxResult(comm_result))
return False
elif error != 0:
print("%s" % self.packet_handler.getRxPacketError(error))
return False
else:
return model_number
def set_mode(self, mode):
# Set the operation mode
if mode == 'position':
m = 3
elif mode == 'velocity':
m = 1
elif mode == 'extended position':
m = 4
elif mode == 'PWM':
m = 16
else:
print('Invalid operation mode.')
return False
comm_result, error = self.packet_handler.write1ByteTxRx(self.port_handler, self.DXL_ID, ADDR_X_MODE, m)
if comm_result != COMM_SUCCESS:
print("%s" % self.packet_handler.getTxRxResult(comm_result))
return False
elif error != 0:
print("%s" % self.packet_handler.getRxPacketError(error))
return False
else:
return True
def set_baudrate(self, val):
# Set the communication baudrate
if val == 0:
print("Baudrate set to 9600.")
elif val == 1:
print("Baudrate set to 57600.") # Dynamixel default
elif val == 2:
print("Baudrate set to 115200.")
elif val == 3:
print("Baudrate set to 1000000.")
elif val == 4:
print("Baudrate set to 2000000.")
else:
print('Invalid baudrate selection.')
return False
comm_result, error = self.packet_handler.write1ByteTxRx(self.port_handler, self.DXL_ID, ADDR_X_BAUDRATE, val)
if comm_result != COMM_SUCCESS:
print("%s" % self.packet_handler.getTxRxResult(comm_result))
return False
elif error != 0:
print("%s" % self.packet_handler.getRxPacketError(error))
return False
else:
return True
def set_drive_mode(self):
# Set to velocity based profile, normal mode
mode = 0
comm_result, error = self.packet_handler.write1ByteTxRx(self.port_handler, self.DXL_ID, ADDR_X_DRIVE_MODE, mode)
if comm_result != COMM_SUCCESS:
print("%s" % self.packet_handler.getTxRxResult(comm_result))
return False
elif error != 0:
print("%s" % self.packet_handler.getRxPacketError(error))
return False
else:
return True
def set_homing_offset(self, homing_offset):
# Write homing offset, value in radian, -pi/2 ~ pi/2
        # Only works in joint mode/basic position mode
if abs(homing_offset) > math.pi / 2:
print("Input out of range.")
return False
homing_offset = int(homing_offset / POSITION_UNIT)
comm_result, error = self.packet_handler.write4ByteTxRx(self.port_handler, self.DXL_ID, ADDR_X_HOMING_OFFSET,
homing_offset)
if comm_result != COMM_SUCCESS:
print("%s" % self.packet_handler.getTxRxResult(comm_result))
return False
elif error != 0:
print("%s" % self.packet_handler.getRxPacketError(error))
return False
else:
return True
def get_homing_offset(self):
        # Read homing offset, value in radian
homing_offset, comm_result, error = self.packet_handler.read4ByteTxRx(self.port_handler, self.DXL_ID,
ADDR_X_HOMING_OFFSET)
if comm_result != COMM_SUCCESS:
print("%s" % self.packet_handler.getTxRxResult(comm_result))
return False
elif error != 0:
print("%s" % self.packet_handler.getRxPacketError(error))
return False
else:
if homing_offset > 1024:
# Negative homing offset
homing_offset = homing_offset - 4294967296
homing_offset = homing_offset * POSITION_UNIT
return homing_offset
def torque_enable(self, val):
        # Enable/disable Dynamixel torque
comm_result, error = self.packet_handler.write1ByteTxRx(self.port_handler, self.DXL_ID, ADDR_X_TORQUE_ENABLE,
val)
if comm_result != COMM_SUCCESS:
print("%s" % self.packet_handler.getTxRxResult(comm_result))
return False
elif error != 0:
print("%s" % self.packet_handler.getRxPacketError(error))
return False
else:
return True
def get_enable(self):
# Get torque enable status of Dynamixel
val, comm_result, error = self.packet_handler.read1ByteTxRx(self.port_handler, self.DXL_ID,
ADDR_X_TORQUE_ENABLE)
if comm_result != COMM_SUCCESS:
print("%s" % self.packet_handler.getTxRxResult(comm_result))
return False
elif error != 0:
print("%s" % self.packet_handler.getRxPacketError(error))
return False
else:
return bool(val)
def set_goal_position(self, goal_position):
# Write goal position, value in radian
goal_position = int(goal_position / POSITION_UNIT)
comm_result, error = self.packet_handler.write4ByteTxRx(self.port_handler, self.DXL_ID, ADDR_X_GOAL_POSITION,
goal_position)
if comm_result != COMM_SUCCESS:
print("%s" % self.packet_handler.getTxRxResult(comm_result))
return False
elif error != 0:
print("%s" % self.packet_handler.getRxPacketError(error))
return False
else:
return True
def get_goal_position(self):
        # Read goal position, result in radian
goal_position, comm_result, error = self.packet_handler.read4ByteTxRx(self.port_handler, self.DXL_ID,
ADDR_X_GOAL_POSITION)
if comm_result != COMM_SUCCESS:
print("%s" % self.packet_handler.getTxRxResult(comm_result))
return False
elif error != 0:
print("%s" % self.packet_handler.getRxPacketError(error))
return False
else:
goal_position = goal_position * POSITION_UNIT
return goal_position
def get_present_position(self):
# Read present position, result in radian
present_position, comm_result, error = self.packet_handler.read4ByteTxRx(self.port_handler, self.DXL_ID,
ADDR_X_PRESENT_POSITION)
if comm_result != COMM_SUCCESS:
print("%s" % self.packet_handler.getTxRxResult(comm_result))
return False
elif error != 0:
print("%s" % self.packet_handler.getRxPacketError(error))
return False
else:
present_position = present_position * POSITION_UNIT
return present_position
def set_goal_velocity(self, goal_velocity):
goal_velocity = int(goal_velocity / VELOCITY_UNIT)
comm_result, error = self.packet_handler.write4ByteTxRx(self.port_handler, self.DXL_ID, ADDR_X_GOAL_VELOCITY,
goal_velocity)
if comm_result != COMM_SUCCESS:
print("%s" % self.packet_handler.getTxRxResult(comm_result))
return False
elif error != 0:
print("%s" % self.packet_handler.getRxPacketError(error))
return False
else:
return True
def get_goal_velocity(self):
goal_velocity, comm_result, error = self.packet_handler.read4ByteTxRx(self.port_handler, self.DXL_ID,
ADDR_X_GOAL_VELOCITY)
if comm_result != COMM_SUCCESS:
print("%s" % self.packet_handler.getTxRxResult(comm_result))
return False
elif error != 0:
print("%s" % self.packet_handler.getRxPacketError(error))
return False
else:
goal_velocity = goal_velocity * VELOCITY_UNIT
return goal_velocity
def get_present_velocity(self):
present_velocity, comm_result, error = self.packet_handler.read4ByteTxRx(self.port_handler, self.DXL_ID,
ADDR_X_PRESENT_VELOCITY)
if comm_result != COMM_SUCCESS:
print("%s" % self.packet_handler.getTxRxResult(comm_result))
return False
elif error != 0:
print("%s" % self.packet_handler.getRxPacketError(error))
return False
else:
present_velocity = present_velocity * VELOCITY_UNIT
return present_velocity
def set_profile_acceleration(self, val):
"""
Sets acceleration of the Profile
Unit: 214.577[rev / min2]
Range: 0 ~ 32767 ‘0’ stands for an infinite acceleration
:return:
"""
comm_result, error = self.packet_handler.write4ByteTxRx(self.port_handler, self.DXL_ID,
ADDR_X_PROFILE_ACCELERATION,
val)
if comm_result != COMM_SUCCESS:
print("%s" % self.packet_handler.getTxRxResult(comm_result))
return False
elif error != 0:
print("%s" % self.packet_handler.getRxPacketError(error))
return False
else:
return True
def set_profile_velocity(self, val):
"""
Sets maximum velocity of the Profile
Unit: 0.229 [rev/min]
Range: 0 ~ 32767 ‘0’ stands for an infinite velocity
:return:
"""
comm_result, error = self.packet_handler.write4ByteTxRx(self.port_handler, self.DXL_ID,
ADDR_X_PROFILE_VELOCITY,
val)
if comm_result != COMM_SUCCESS:
print("%s" % self.packet_handler.getTxRxResult(comm_result))
return False
elif error != 0:
print("%s" % self.packet_handler.getRxPacketError(error))
return False
else:
return True
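    # Example: set_profile_velocity(100) caps the profile at roughly
    # 100 * 0.229 = 22.9 rev/min, while set_profile_velocity(0) removes the
    # cap entirely ('0' = infinite velocity), per the register description above.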
def set_p_gain_velocity(self, val):
# Set P gain for velocity loop
if val > 16383 or val < 0:
print("Input out of range.")
return False
comm_result, error = self.packet_handler.write2ByteTxRx(self.port_handler, self.DXL_ID, ADDR_X_P_GAIN_VELOCITY,
val)
if comm_result != COMM_SUCCESS:
print("%s" % self.packet_handler.getTxRxResult(comm_result))
return False
elif error != 0:
print("%s" % self.packet_handler.getRxPacketError(error))
return False
else:
return True
def get_p_gain_velocity(self):
# Read P gain for velocity loop
val, comm_result, error = self.packet_handler.read2ByteTxRx(self.port_handler, self.DXL_ID,
ADDR_X_P_GAIN_VELOCITY)
if comm_result != COMM_SUCCESS:
print("%s" % self.packet_handler.getTxRxResult(comm_result))
return False
elif error != 0:
print("%s" % self.packet_handler.getRxPacketError(error))
return False
else:
return val
def set_i_gain_velocity(self, val):
# Set I gain for velocity loop
if val > 16383 or val < 0:
print("Input out of range.")
return False
comm_result, error = self.packet_handler.write2ByteTxRx(self.port_handler, self.DXL_ID, ADDR_X_I_GAIN_VELOCITY,
val)
if comm_result != COMM_SUCCESS:
print("%s" % self.packet_handler.getTxRxResult(comm_result))
return False
elif error != 0:
print("%s" % self.packet_handler.getRxPacketError(error))
return False
else:
return True
def get_i_gain_velocity(self):
        # Read I gain for velocity loop
val, comm_result, error = self.packet_handler.read2ByteTxRx(self.port_handler, self.DXL_ID,
ADDR_X_I_GAIN_VELOCITY)
if comm_result != COMM_SUCCESS:
print("%s" % self.packet_handler.getTxRxResult(comm_result))
return False
elif error != 0:
print("%s" % self.packet_handler.getRxPacketError(error))
return False
else:
return val
def set_p_gain_position(self, val):
        # Set P gain for position loop
if val > 16383 or val < 0:
print("Input out of range.")
return False
comm_result, error = self.packet_handler.write2ByteTxRx(self.port_handler, self.DXL_ID, ADDR_X_P_GAIN_POSITION,
val)
if comm_result != COMM_SUCCESS:
print("%s" % self.packet_handler.getTxRxResult(comm_result))
return False
elif error != 0:
print("%s" % self.packet_handler.getRxPacketError(error))
return False
else:
return True
def get_p_gain_position(self):
        # Read P gain for position loop
val, comm_result, error = self.packet_handler.read2ByteTxRx(self.port_handler, self.DXL_ID,
ADDR_X_P_GAIN_POSITION)
if comm_result != COMM_SUCCESS:
print("%s" % self.packet_handler.getTxRxResult(comm_result))
return False
elif error != 0:
print("%s" % self.packet_handler.getRxPacketError(error))
return False
else:
return val
def set_i_gain_position(self, val):
        # Set I gain for position loop
if val > 16383 or val < 0:
print("Input out of range.")
return False
comm_result, error = self.packet_handler.write2ByteTxRx(self.port_handler, self.DXL_ID, ADDR_X_I_GAIN_POSITION,
val)
if comm_result != COMM_SUCCESS:
print("%s" % self.packet_handler.getTxRxResult(comm_result))
return False
elif error != 0:
print("%s" % self.packet_handler.getRxPacketError(error))
return False
else:
return True
def get_i_gain_position(self):
        # Read I gain for position loop
val, comm_result, error = self.packet_handler.read2ByteTxRx(self.port_handler, self.DXL_ID,
ADDR_X_I_GAIN_POSITION)
if comm_result != COMM_SUCCESS:
print("%s" % self.packet_handler.getTxRxResult(comm_result))
return False
elif error != 0:
print("%s" % self.packet_handler.getRxPacketError(error))
return False
else:
return val
def set_d_gain_position(self, val):
        # Set D gain for position loop
if val > 16383 or val < 0:
print("Input out of range.")
return False
comm_result, error = self.packet_handler.write2ByteTxRx(self.port_handler, self.DXL_ID, ADDR_X_D_GAIN_POSITION,
val)
if comm_result != COMM_SUCCESS:
print("%s" % self.packet_handler.getTxRxResult(comm_result))
return False
elif error != 0:
print("%s" % self.packet_handler.getRxPacketError(error))
return False
else:
return True
def get_d_gain_position(self):
        # Read D gain for position loop
val, comm_result, error = self.packet_handler.read2ByteTxRx(self.port_handler, self.DXL_ID,
ADDR_X_D_GAIN_POSITION)
if comm_result != COMM_SUCCESS:
print("%s" % self.packet_handler.getTxRxResult(comm_result))
return False
elif error != 0:
print("%s" % self.packet_handler.getRxPacketError(error))
return False
else:
return val
def set_velocity_limit(self, velocity_limit):
velocity_limit = int(velocity_limit / VELOCITY_UNIT)
comm_result, error = self.packet_handler.write4ByteTxRx(self.port_handler, self.DXL_ID, ADDR_X_VELOCITY_LIMIT,
velocity_limit)
if comm_result != COMM_SUCCESS:
print("%s" % self.packet_handler.getTxRxResult(comm_result))
return False
elif error != 0:
print("%s" % self.packet_handler.getRxPacketError(error))
return False
else:
return True
def get_velocity_limit(self):
velocity_limit, comm_result, error = self.packet_handler.read4ByteTxRx(self.port_handler, self.DXL_ID,
ADDR_X_VELOCITY_LIMIT)
if comm_result != COMM_SUCCESS:
print("%s" % self.packet_handler.getTxRxResult(comm_result))
return False
elif error != 0:
print("%s" % self.packet_handler.getRxPacketError(error))
return False
else:
velocity_limit = velocity_limit * VELOCITY_UNIT
return velocity_limit
def set_position_limit_max(self, position_limit_max):
position_limit_max = int(position_limit_max / POSITION_UNIT)
comm_result, error = self.packet_handler.write4ByteTxRx(self.port_handler, self.DXL_ID,
ADDR_X_POSITION_LIMIT_MAX,
position_limit_max)
if comm_result != COMM_SUCCESS:
print("%s" % self.packet_handler.getTxRxResult(comm_result))
return False
elif error != 0:
print("%s" % self.packet_handler.getRxPacketError(error))
return False
else:
return True
def get_position_limit_max(self):
position_limit_max, comm_result, error = self.packet_handler.read4ByteTxRx(self.port_handler, self.DXL_ID,
ADDR_X_POSITION_LIMIT_MAX)
if comm_result != COMM_SUCCESS:
print("%s" % self.packet_handler.getTxRxResult(comm_result))
return False
elif error != 0:
print("%s" % self.packet_handler.getRxPacketError(error))
return False
else:
position_limit_max = position_limit_max * POSITION_UNIT
return position_limit_max
def set_position_limit_min(self, position_limit_min):
position_limit_min = int(position_limit_min / POSITION_UNIT)
comm_result, error = self.packet_handler.write4ByteTxRx(self.port_handler, self.DXL_ID,
ADDR_X_POSITION_LIMIT_MIN,
position_limit_min)
if comm_result != COMM_SUCCESS:
print("%s" % self.packet_handler.getTxRxResult(comm_result))
return False
elif error != 0:
print("%s" % self.packet_handler.getRxPacketError(error))
return False
else:
return True
def get_position_limit_min(self):
position_limit_min, comm_result, error = self.packet_handler.read4ByteTxRx(self.port_handler, self.DXL_ID,
ADDR_X_POSITION_LIMIT_MIN)
if comm_result != COMM_SUCCESS:
print("%s" % self.packet_handler.getTxRxResult(comm_result))
return False
elif error != 0:
print("%s" % self.packet_handler.getRxPacketError(error))
return False
else:
position_limit_min = position_limit_min * POSITION_UNIT
return position_limit_min
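if __name__ == '__main__':
    # Minimal usage sketch (not part of the original driver). The port name,
    # servo ID and target angle are placeholders for an actual hardware setup.
    dxl = DynamixelController(DXL_ID=1, port='/dev/ttyUSB0', BAUDRATE=2000000)
    if dxl.ping():
        dxl.set_mode('position')
        dxl.torque_enable(1)
        dxl.set_goal_position(math.pi / 4)  # radians
        print(dxl.get_present_position())
        dxl.torque_enable(0)
    dxl.close_port()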
|
py | 7dff9bbf3e53173a8e49303c117f44427962548b | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class RegionOperations:
"""RegionOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.apimanagement.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list_by_service(
self,
resource_group_name: str,
service_name: str,
**kwargs
) -> AsyncIterable["_models.RegionListResult"]:
"""Lists all azure regions in which the service exists.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either RegionListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.apimanagement.models.RegionListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RegionListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_service.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str', max_length=50, min_length=1, pattern=r'^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('RegionListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize(_models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_service.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/regions'} # type: ignore
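# Usage sketch (not part of the generated code; the client and credential names
# are assumptions based on the azure-mgmt-apimanagement package layout):
#
#     from azure.identity.aio import DefaultAzureCredential
#     from azure.mgmt.apimanagement.aio import ApiManagementClient
#
#     async with ApiManagementClient(DefaultAzureCredential(), "<subscription-id>") as client:
#         async for region in client.region.list_by_service("<resource-group>", "<service-name>"):
#             print(region.name)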
|
py | 7dff9bd901077d41f608fbca3a31a9c8503fc407 | """Testing for transformers."""
# Author: Johann Faouzi <[email protected]>
# License: BSD-3-Clause
import numpy as np
from scipy.stats import boxcox, norm, yeojohnson
from pyts.preprocessing import StandardScaler
from pyts.preprocessing import PowerTransformer, QuantileTransformer
X = np.arange(1, 34).reshape(3, 11)
def test_actual_results_power_transformer_box_cox():
"""Test that the actual results are the expected ones."""
for standardize in [True, False]:
pt = PowerTransformer(method='box-cox', standardize=standardize)
arr_actual = pt.fit_transform(X)
arr_desired = [boxcox(X[i])[0] for i in range(3)]
if standardize:
arr_desired = StandardScaler().transform(arr_desired)
np.testing.assert_allclose(arr_actual, arr_desired, atol=1e-5, rtol=0.)
def test_actual_results_power_transformer_yeo_johnson():
"""Test that the actual results are the expected ones."""
for standardize in [True, False]:
pt = PowerTransformer(method='yeo-johnson', standardize=standardize)
arr_actual = pt.fit_transform(X)
arr_desired = [yeojohnson(X[i].astype('float64'))[0] for i in range(3)]
if standardize:
arr_desired = StandardScaler().transform(arr_desired)
np.testing.assert_allclose(arr_actual, arr_desired, atol=1e-5, rtol=0.)
def test_actual_results_quantile_transformer_uniform():
"""Test that the actual results are the expected ones."""
transformer = QuantileTransformer(n_quantiles=11)
arr_actual = transformer.fit_transform(X)
arr_desired = [np.linspace(0, 1, 11) for _ in range(3)]
np.testing.assert_allclose(arr_actual, arr_desired, atol=1e-5, rtol=0.)
def test_actual_results_quantile_transformer_normal():
"""Test that the actual results are the expected ones."""
X_ppf = norm.ppf(np.linspace(0, 1, 1000)[1:-1])
weights = np.round(norm.pdf(X_ppf) * 1000).astype('int64')
X = []
for value, weight in zip(X_ppf, weights):
X += [value] * weight
X = np.asarray(X).reshape(1, -1)
transformer = QuantileTransformer(n_quantiles=11,
output_distribution='normal')
arr_actual = transformer.fit_transform(X)
arr_desired = X
atol = 0.01 * X.shape[1]
np.testing.assert_allclose(arr_actual, arr_desired, atol=atol, rtol=0.)
|
py | 7dff9c128a57b1b24b150fcf487309a8425710c4 | from dataclasses import dataclass
from server.service.helper.dict_helper import normalize
from server.service.strategy.base import BaseStrategy
def reset_function(n):
return 1 / (2 ** n)
@dataclass
class SmoothStrategy(BaseStrategy):
def update(self, *, indices_selected: list[int], **kwargs) -> list[float]:
reset_value = reset_function(len(self.weight_list))
for index_selected in indices_selected:
self.weight_list[index_selected] = reset_value
self.weight_list = normalize(self.weight_list)
return self.weight_list
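# Usage sketch (constructor details are an assumption -- BaseStrategy is defined
# elsewhere in this package and appears to hold the weight_list state):
#
#     strategy = SmoothStrategy(weight_list=[0.25, 0.25, 0.25, 0.25])
#     strategy.update(indices_selected=[1])
#     # Index 1 is reset to 1 / 2**len(weight_list) and the list is re-normalized,
#     # so a recently selected item becomes less likely to be picked again.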
|
py | 7dff9ced92e9ec418e471f1c0a529df9e02ec7ea |
from ..utils import bfs, fzset
from ..common import GrammarError, is_terminal
class Rule(object):
"""
origin : a symbol
expansion : a list of symbols
"""
def __init__(self, origin, expansion, alias=None, options=None):
self.origin = origin
self.expansion = expansion
self.alias = alias
self.options = options
def __repr__(self):
return '<%s : %s>' % (self.origin, ' '.join(map(str,self.expansion)))
class RulePtr(object):
def __init__(self, rule, index):
assert isinstance(rule, Rule)
assert index <= len(rule.expansion)
self.rule = rule
self.index = index
def __repr__(self):
before = self.rule.expansion[:self.index]
after = self.rule.expansion[self.index:]
return '<%s : %s * %s>' % (self.rule.origin, ' '.join(before), ' '.join(after))
@property
def next(self):
return self.rule.expansion[self.index]
def advance(self, sym):
assert self.next == sym
return RulePtr(self.rule, self.index+1)
@property
def is_satisfied(self):
return self.index == len(self.rule.expansion)
def __eq__(self, other):
return self.rule == other.rule and self.index == other.index
def __hash__(self):
return hash((self.rule, self.index))
def pairs(lst):
return zip(lst[:-1], lst[1:])
def update_set(set1, set2):
copy = set(set1)
set1 |= set2
return set1 != copy
def calculate_sets(rules):
"""Calculate FOLLOW sets.
Adapted from: http://lara.epfl.ch/w/cc09:algorithm_for_first_and_follow_sets"""
symbols = {sym for rule in rules for sym in rule.expansion} | {rule.origin for rule in rules}
symbols.add('$root') # what about other unused rules?
# foreach grammar rule X ::= Y(1) ... Y(k)
# if k=0 or {Y(1),...,Y(k)} subset of NULLABLE then
# NULLABLE = NULLABLE union {X}
# for i = 1 to k
# if i=1 or {Y(1),...,Y(i-1)} subset of NULLABLE then
# FIRST(X) = FIRST(X) union FIRST(Y(i))
# for j = i+1 to k
# if i=k or {Y(i+1),...Y(k)} subset of NULLABLE then
# FOLLOW(Y(i)) = FOLLOW(Y(i)) union FOLLOW(X)
# if i+1=j or {Y(i+1),...,Y(j-1)} subset of NULLABLE then
# FOLLOW(Y(i)) = FOLLOW(Y(i)) union FIRST(Y(j))
# until none of NULLABLE,FIRST,FOLLOW changed in last iteration
NULLABLE = set()
FIRST = {}
FOLLOW = {}
for sym in symbols:
FIRST[sym]={sym} if is_terminal(sym) else set()
FOLLOW[sym]=set()
changed = True
while changed:
changed = False
for rule in rules:
if set(rule.expansion) <= NULLABLE:
if update_set(NULLABLE, {rule.origin}):
changed = True
for i, sym in enumerate(rule.expansion):
if set(rule.expansion[:i]) <= NULLABLE:
if update_set(FIRST[rule.origin], FIRST[sym]):
changed = True
                if i==len(rule.expansion)-1 or set(rule.expansion[i+1:]) <= NULLABLE:
if update_set(FOLLOW[sym], FOLLOW[rule.origin]):
changed = True
for j in range(i+1, len(rule.expansion)):
if set(rule.expansion[i+1:j]) <= NULLABLE:
if update_set(FOLLOW[sym], FIRST[rule.expansion[j]]):
changed = True
return FIRST, FOLLOW, NULLABLE
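# Worked example (sketch): for the toy grammar
#     s -> A b      b -> C      b -> <empty>
# written with terminals in caps (the convention `is_terminal` relies on),
#     rules = [Rule('s', ['A', 'b']), Rule('b', ['C']), Rule('b', [])]
#     FIRST, FOLLOW, NULLABLE = calculate_sets(rules)
# yields NULLABLE == {'b'}, FIRST['s'] == {'A'}, FIRST['b'] == {'C'}, and
# FOLLOW['b'] absorbs FOLLOW['s'] because b is the last symbol of s's expansion.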
class GrammarAnalyzer(object):
def __init__(self, rule_tuples, start_symbol, debug=False):
self.start_symbol = start_symbol
self.debug = debug
rule_tuples = list(rule_tuples)
rule_tuples.append(('$root', [start_symbol, '$end']))
rule_tuples = [(t[0], t[1], None, None) if len(t)==2 else t for t in rule_tuples]
self.rules = set()
self.rules_by_origin = {o: [] for o, _x, _a, _opt in rule_tuples}
for origin, exp, alias, options in rule_tuples:
r = Rule( origin, exp, alias, options )
self.rules.add(r)
self.rules_by_origin[origin].append(r)
for r in self.rules:
for sym in r.expansion:
if not (is_terminal(sym) or sym in self.rules_by_origin):
raise GrammarError("Using an undefined rule: %s" % sym)
self.init_state = self.expand_rule(start_symbol)
self.FIRST, self.FOLLOW, self.NULLABLE = calculate_sets(self.rules)
def expand_rule(self, rule):
"Returns all init_ptrs accessible by rule (recursive)"
init_ptrs = set()
def _expand_rule(rule):
assert not is_terminal(rule), rule
for r in self.rules_by_origin[rule]:
init_ptr = RulePtr(r, 0)
init_ptrs.add(init_ptr)
if r.expansion: # if not empty rule
new_r = init_ptr.next
if not is_terminal(new_r):
yield new_r
_ = list(bfs([rule], _expand_rule))
return fzset(init_ptrs)
def _first(self, r):
if is_terminal(r):
return {r}
else:
return {rp.next for rp in self.expand_rule(r) if is_terminal(rp.next)}
|
py | 7dff9de5521b475d86b1a935f789b290987040fa | import warnings
# Copyright (c) 2012-2013, Mark Peek <[email protected]>
# All rights reserved.
#
# See LICENSE file for full license.
from aws import Action as BaseAction
from aws import BaseARN
service_name = 'Amazon S3'
prefix = 's3'
class Action(BaseAction):
def __init__(self, action=None):
sup = super(Action, self)
sup.__init__(prefix, action)
class ARN(BaseARN):
def __init__(self, resource='', region='', account=''):
sup = super(ARN, self)
sup.__init__(service=prefix, resource=resource, region=region,
account=account)
class S3_ARN(ARN):
def __init__(self, *args, **kwargs):
super(S3_ARN, self).__init__(*args, **kwargs)
warnings.warn("This class is going away. Use s3.ARN instead.",
FutureWarning)
AbortMultipartUpload = Action('AbortMultipartUpload')
CreateBucket = Action('CreateBucket')
DeleteBucket = Action('DeleteBucket')
DeleteBucketPolicy = Action('DeleteBucketPolicy')
DeleteBucketWebsite = Action('DeleteBucketWebsite')
DeleteObject = Action('DeleteObject')
DeleteObjectVersion = Action('DeleteObjectVersion')
GetAccelerateConfiguration = Action('GetAccelerateConfiguration')
GetBucketAcl = Action('GetBucketAcl')
GetBucketCORS = Action('GetBucketCORS')
GetBucketLocation = Action('GetBucketLocation')
GetBucketLogging = Action('GetBucketLogging')
GetBucketNotification = Action('GetBucketNotification')
GetBucketPolicy = Action('GetBucketPolicy')
GetBucketRequestPayment = Action('GetBucketRequestPayment')
GetBucketTagging = Action('GetBucketTagging')
GetBucketVersioning = Action('GetBucketVersioning')
GetBucketWebsite = Action('GetBucketWebsite')
GetLifecycleConfiguration = Action('GetLifecycleConfiguration')
GetObject = Action('GetObject')
GetObjectAcl = Action('GetObjectAcl')
GetObjectTagging = Action('GetObjectTagging')
GetObjectTorrent = Action('GetObjectTorrent')
GetObjectVersion = Action('GetObjectVersion')
GetObjectVersionAcl = Action('GetObjectVersionAcl')
GetObjectVersionTagging = Action('GetObjectVersionTagging')
GetObjectVersionTorrent = Action('GetObjectVersionTorrent')
GetReplicationConfiguration = Action('GetReplicationConfiguration')
ListAllMyBuckets = Action('ListAllMyBuckets')
ListBucket = Action('ListBucket')
ListBucketMultipartUploads = Action('ListBucketMultipartUploads')
ListBucketVersions = Action('ListBucketVersions')
ListMultipartUploadParts = Action('ListMultipartUploadParts')
ObjectOwnerOverrideToBucketOwner = \
Action('ObjectOwnerOverrideToBucketOwner')
PutAccelerateConfiguration = Action('PutAccelerateConfiguration')
PutBucketAcl = Action('PutBucketAcl')
PutBucketCORS = Action('PutBucketCORS')
PutBucketLogging = Action('PutBucketLogging')
PutBucketNotification = Action('PutBucketNotification')
PutBucketPolicy = Action('PutBucketPolicy')
PutBucketRequestPayment = Action('PutBucketRequestPayment')
PutBucketTagging = Action('PutBucketTagging')
PutBucketVersioning = Action('PutBucketVersioning')
PutBucketWebsite = Action('PutBucketWebsite')
PutLifecycleConfiguration = Action('PutLifecycleConfiguration')
PutObject = Action('PutObject')
PutObjectAcl = Action('PutObjectAcl')
PutObjectTagging = Action('PutObjectTagging')
PutObjectVersionAcl = Action('PutObjectVersionAcl')
PutObjectVersionTagging = Action('PutObjectVersionTagging')
PutReplicationConfiguration = Action('PutReplicationConfiguration')
ReplicateDelete = Action('ReplicateDelete')
ReplicateObject = Action('ReplicateObject')
ReplicateTags = Action('ReplicateTags')
RestoreObject = Action('RestoreObject')
|
py | 7dff9e696f6fe91d148267437c2b6fa9ebebebc8 | # -*- coding: utf-8 -*-
# This file sets up a shortcut for importing so that you can do...
# from simmate.workflows.relaxation.all import mit_relaxation, mp_relaxation, ...
# instead of what's written below. You should only use this shortcut if you are
# using ALL of the classes below or if you are running some quick interactive test.
from simmate.calculators.vasp.workflows.energy.all import energy_mit, energy_quality04
|
py | 7dff9e7a7abbcca598d9db39e7d2eb59e59efff6 | from os import path
print('_________________________________')
check1 = 'na na boo boo'
file1 = input('Enter file name: ')
file1 = file1.lower()
if path.exists(file1):
    with open(file1) as file2:
        line_count = len(file2.readlines())
    print('There are', line_count, 'lines in the file')
elif file1 == check1:
print('NA NA BOO BOO - YOU HAVE BEEN PRANKED')
else:
print("File doesn't exist") |
py | 7dff9ee7dbf9d563e905f1ea680818307bb25ef5 | # exported from PySB model 'model'
from pysb import Model, Monomer, Parameter, Expression, Compartment, Rule, Observable, Initial, MatchOnce, Annotation, ANY, WILD
Model()
Monomer('C6A', ['C8pro'])
Monomer('BaxA', ['BaxM', 'BaxA_1', 'BaxA_2', 'SmacM'])
Monomer('Ligand', ['Receptor'])
Monomer('C6pro', ['C3A'])
Monomer('ParpU', ['C3A'])
Monomer('BidU', ['C8A'])
Monomer('BidT')
Monomer('C3A', ['Xiap', 'ParpU', 'C6pro'])
Monomer('BidM', ['BaxM'])
Monomer('BaxM', ['BidM', 'BaxA'])
Monomer('C8A', ['BidU', 'C3pro'])
Monomer('Xiap', ['SmacC', 'C3A'])
Monomer('Receptor', ['Ligand', 'Fadd'])
Monomer('C3ub')
Monomer('Fadd', ['Receptor', 'C8pro'])
Monomer('C3pro', ['C8A'])
Monomer('SmacM', ['BaxA'])
Monomer('SmacC', ['Xiap'])
Monomer('C8pro', ['Fadd', 'C6A'])
Monomer('ParpC')
Parameter('bind_0_Ligand_binder_Receptor_binder_target_2kf', 1.0)
Parameter('bind_0_Ligand_binder_Receptor_binder_target_1kr', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_2kf', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_1kr', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr', 1.0)
Parameter('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr', 1.0)
Parameter('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kf', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kr', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr', 1.0)
Parameter('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr', 1.0)
Parameter('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc', 1.0)
Parameter('pore_formation_0_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_0_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_1_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_1_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_2_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_2_BaxA_pore_1kr', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr', 1.0)
Parameter('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('C6A_0', 0.0)
Parameter('BaxA_0', 0.0)
Parameter('Ligand_0', 1000.0)
Parameter('C6pro_0', 100.0)
Parameter('ParpU_0', 1000000.0)
Parameter('BidU_0', 171000.0)
Parameter('BidT_0', 0.0)
Parameter('C3A_0', 0.0)
Parameter('BidM_0', 0.0)
Parameter('BaxM_0', 40000.0)
Parameter('C8A_0', 0.0)
Parameter('Xiap_0', 180750.0)
Parameter('Receptor_0', 100.0)
Parameter('C3ub_0', 0.0)
Parameter('Fadd_0', 130000.0)
Parameter('C3pro_0', 21000.0)
Parameter('SmacM_0', 100000.0)
Parameter('SmacC_0', 0.0)
Parameter('C8pro_0', 130000.0)
Parameter('ParpC_0', 0.0)
Observable('C6A_obs', C6A())
Observable('BaxA_obs', BaxA())
Observable('Ligand_obs', Ligand())
Observable('C6pro_obs', C6pro())
Observable('ParpU_obs', ParpU())
Observable('BidU_obs', BidU())
Observable('BidT_obs', BidT())
Observable('C3A_obs', C3A())
Observable('BidM_obs', BidM())
Observable('BaxM_obs', BaxM())
Observable('C8A_obs', C8A())
Observable('Xiap_obs', Xiap())
Observable('Receptor_obs', Receptor())
Observable('C3ub_obs', C3ub())
Observable('Fadd_obs', Fadd())
Observable('C3pro_obs', C3pro())
Observable('SmacM_obs', SmacM())
Observable('SmacC_obs', SmacC())
Observable('C8pro_obs', C8pro())
Observable('ParpC_obs', ParpC())
Rule('bind_0_Ligand_binder_Receptor_binder_target', Ligand(Receptor=None) + Receptor(Ligand=None, Fadd=None) | Ligand(Receptor=1) % Receptor(Ligand=1, Fadd=None), bind_0_Ligand_binder_Receptor_binder_target_2kf, bind_0_Ligand_binder_Receptor_binder_target_1kr)
Rule('bind_0_Receptor_binder_Fadd_binder_target', Receptor(Ligand=ANY, Fadd=None) + Fadd(Receptor=None, C8pro=None) | Receptor(Ligand=ANY, Fadd=1) % Fadd(Receptor=1, C8pro=None), bind_0_Receptor_binder_Fadd_binder_target_2kf, bind_0_Receptor_binder_Fadd_binder_target_1kr)
Rule('substrate_binding_0_Fadd_catalyzer_C8pro_substrate', Fadd(Receptor=ANY, C8pro=None) + C8pro(Fadd=None, C6A=None) | Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None), substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf, substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr)
Rule('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product', Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None) >> Fadd(Receptor=ANY, C8pro=None) + C8A(BidU=None, C3pro=None), catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc)
Rule('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=None, C3pro=None) + BidU(C8A=None) | C8A(BidU=1, C3pro=None) % BidU(C8A=1), catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf, catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr)
Rule('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=1, C3pro=None) % BidU(C8A=1) >> C8A(BidU=None, C3pro=None) + BidT(), catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc)
Rule('inhibition_0_SmacC_inhibitor_Xiap_inh_target', SmacC(Xiap=None) + Xiap(SmacC=None, C3A=None) | SmacC(Xiap=1) % Xiap(SmacC=1, C3A=None), inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf, inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr)
Rule('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, C3A=None) + C3A(Xiap=None, ParpU=None, C6pro=None) | Xiap(SmacC=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None), catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf, catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr)
Rule('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None) >> Xiap(SmacC=None, C3A=None) + C3ub(), catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc)
Rule('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=None, C6pro=None) + ParpU(C3A=None) | C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1), catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf, catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr)
Rule('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + ParpC(), catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc)
Rule('equilibration_0_BidT_equil_a_BidM_equil_b', BidT() | BidM(BaxM=None), equilibration_0_BidT_equil_a_BidM_equil_b_1kf, equilibration_0_BidT_equil_a_BidM_equil_b_1kr)
Rule('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=None) + BaxM(BidM=None, BaxA=None) | BidM(BaxM=1) % BaxM(BidM=1, BaxA=None), catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf, catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr)
Rule('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=1) % BaxM(BidM=1, BaxA=None) >> BidM(BaxM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None), catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc)
Rule('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None) + BaxM(BidM=None, BaxA=None) | BaxA(BaxM=1, BaxA_1=None, BaxA_2=None, SmacM=None) % BaxM(BidM=None, BaxA=1), self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf, self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr)
Rule('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=1, BaxA_1=None, BaxA_2=None, SmacM=None) % BaxM(BidM=None, BaxA=1) >> BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None), self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc)
Rule('pore_formation_0_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None) | BaxA(BaxM=None, BaxA_1=None, BaxA_2=1, SmacM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=None, SmacM=None), pore_formation_0_BaxA_pore_2kf, pore_formation_0_BaxA_pore_1kr)
Rule('pore_formation_1_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=1, SmacM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=None, SmacM=None) | BaxA(BaxM=None, BaxA_1=3, BaxA_2=1, SmacM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None), pore_formation_1_BaxA_pore_2kf, pore_formation_1_BaxA_pore_1kr)
Rule('pore_formation_2_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None) + BaxA(BaxM=None, BaxA_1=3, BaxA_2=1, SmacM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None) | BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None), pore_formation_2_BaxA_pore_2kf, pore_formation_2_BaxA_pore_1kr)
Rule('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None) + SmacM(BaxA=None) | BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=5) % SmacM(BaxA=5), transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf, transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=5) % SmacM(BaxA=5) >> BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None) + SmacC(Xiap=None), transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc)
Rule('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=None) + C3pro(C8A=None) | C8A(BidU=None, C3pro=1) % C3pro(C8A=1), catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=1) % C3pro(C8A=1) >> C8A(BidU=None, C3pro=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=None) + C6pro(C3A=None) | C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1), catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf, catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr)
Rule('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + C6A(C8pro=None), catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc)
Rule('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=None) + C8pro(Fadd=None, C6A=None) | C6A(C8pro=1) % C8pro(Fadd=None, C6A=1), catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf, catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr)
Rule('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=1) % C8pro(Fadd=None, C6A=1) >> C6A(C8pro=None) + C8A(BidU=None, C3pro=None), catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc)
Initial(C6A(C8pro=None), C6A_0)
Initial(BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None), BaxA_0)
Initial(Ligand(Receptor=None), Ligand_0)
Initial(C6pro(C3A=None), C6pro_0)
Initial(ParpU(C3A=None), ParpU_0)
Initial(BidU(C8A=None), BidU_0)
Initial(BidT(), BidT_0)
Initial(C3A(Xiap=None, ParpU=None, C6pro=None), C3A_0)
Initial(BidM(BaxM=None), BidM_0)
Initial(BaxM(BidM=None, BaxA=None), BaxM_0)
Initial(C8A(BidU=None, C3pro=None), C8A_0)
Initial(Xiap(SmacC=None, C3A=None), Xiap_0)
Initial(Receptor(Ligand=None, Fadd=None), Receptor_0)
Initial(C3ub(), C3ub_0)
Initial(Fadd(Receptor=None, C8pro=None), Fadd_0)
Initial(C3pro(C8A=None), C3pro_0)
Initial(SmacM(BaxA=None), SmacM_0)
Initial(SmacC(Xiap=None), SmacC_0)
Initial(C8pro(Fadd=None, C6A=None), C8pro_0)
Initial(ParpC(), ParpC_0)
|
py | 7dff9f6bc6ccd2bd7ee3ef67b83adfbe7355139c | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = u'AFL'
copyright = u'2019, Google'
author = u'Michal Zalewski and Contributors'
# The short X.Y version
version = u'2.53'
# The full version, including alpha/beta/rc tags
release = u'2.53b'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = [u'_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'AFLdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'AFL.tex', u'AFL Documentation',
u'Michal Zalewski', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'afl', u'AFL Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'AFL', u'AFL Documentation',
author, 'AFL', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
html_theme_options = {
'logo': 'logo.gif',
'logo_name': 'true',
'description': 'american fuzzy lop',
'github_user': 'google',
'github_repo': 'AFL',
'github_button': 'true',
'travis_button': 'true',
'sidebar_collapse': False
}
default_role = 'command'
|
py | 7dffa0076580e7f06111b0869f30fc7366444cdf | """Instantiate the Report Dash application.
"""
import dash
from dash import dcc
from dash import html
from dash import dash_table
from .data import read_stats
from .layout import html_layout
def init_report(server):
"""Create a Plotly Dash dashboard.
:param server: Flask server.
:type server: Flask
:returns: Dash app server.
:rtype: Dash
"""
dash_app = dash.Dash(
server=server,
routes_pathname_prefix=u"/report/",
external_stylesheets=[
u"/static/dist/css/styles.css",
u"https://fonts.googleapis.com/css?family=Lato",
],
)
# Custom HTML layout
dash_app.index_string = html_layout
# Create Layout
dash_app.layout = html.Div(
children=[
html.Div(
children=create_data_table(
read_stats().dropna(),
u"database-table-stats"
)
)
],
id=u"dash-container",
)
return dash_app.server
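# Typical wiring (sketch): init_report is intended to be called from the Flask
# app factory, so the Dash app is mounted on the same server under /report/.
#
#     from flask import Flask
#     server = Flask(__name__)
#     server = init_report(server)  # returns the Flask server with Dash attached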
def create_data_table(df, id):
"""Create Dash datatable from Pandas DataFrame.
DEMO
"""
table = dash_table.DataTable(
id=id,
columns=[{u"name": i, u"id": i} for i in df.columns],
data=df.to_dict(u"records"),
fixed_rows={'headers': True},
sort_action=u"native",
sort_mode=u"native",
page_size=5,
style_header={
'overflow': 'hidden',
'textOverflow': 'ellipsis',
'minWidth': 95, 'maxWidth': 95, 'width': 95,
}
)
return table
|
py | 7dffa2bab66c19e0232846afbbe04f15a8e381e1 | #!/usr/bin/python
import sqlite3
import os
import shutil
import sys
import lxml.etree as lxml
import argparse as ap
from xtp__cluster__ import *
parser=ap.ArgumentParser(description="Starts multiple zmultipole jobs for one sqlfile")
parser.add_argument("-o","--options",type=str,required=True,help="Optionfile")
parser.add_argument("-f","--system",type=str,required=True,help="*.sql file")
parser.add_argument("--foldername",type=str,default="ZMULTIPOLE",help="folder for work")
parser.add_argument("--noofjobs",type=int,default=6,help="Number of jobs to submit to the cluster")
parser.add_argument("--startseg",type=int,default=1,help="Segment to start at")
parser.add_argument("--endseg",type=int,default=-1,help="Segment to end at")
parser.add_argument("--sub",action='store_const', const=1, default=0,help="Submit jobs to cluster")
args=parser.parse_args()
foldername=args.foldername
calculator="zmultipole"
def getnumberofsegments(sysfile):
sqlstatement = "SELECT seq FROM sqlite_sequence WHERE name='segments'"
con = sqlite3.connect(sysfile)
with con:
cur = con.cursor()
cur.execute(sqlstatement)
rows = cur.fetchall()
noofseg=int(rows[0][0])
return noofseg
def readoptionsfile(optionfile):
parser=lxml.XMLParser(remove_comments=True)
tree = lxml.parse(optionfile,parser)
root = tree.getroot()
for i in root: #deletes all elements which do not belong to calculator
if i.tag!=calculator:
root.remove(i)
mappath=(root.find("{}/multipoles".format(calculator))).text #find path of map.xml
mappath=os.path.realpath(mappath)
return root,mappath
def writeoptionsfile(optionssketch,firstseg,lastseg,datfile,xmlfile,mappath):
print "Writing optionsfile {}".format(xmlfile)
(optionssketch.find("{}/multipoles".format(calculator))).text=mappath
(optionssketch.find("{}/control/first".format(calculator))).text=str(firstseg)
(optionssketch.find("{}/control/last".format(calculator))).text=str(lastseg)
(optionssketch.find("{}/control/output".format(calculator))).text=datfile
with open(xmlfile, 'w') as f:
f.write(lxml.tostring(optionssketch, pretty_print=True))
def setupjobs(optionfile,startseg,endseg,noofjobs,sysfile):
segtocalculate=(endseg-startseg+1)
segmentsperjob=segtocalculate/noofjobs
while segmentsperjob*noofjobs<segtocalculate:
segmentsperjob+=1
if segmentsperjob<17:
noofjobs=(segtocalculate)/16
print "The number of jobs is too high. Each processor has less than two jobs to do. The number of jobs is set to {}.".format(noofjobs)
segmentsperjob=16
#if segmentsperjob>32:
# noofjobs=(segtocalculate)/32
# print "The number of of segments per node is too high. The job will probably hit the walltime. The number of jobs is set to {}.".format(noofjobs)
print "Each job calculates {} segments.".format(segmentsperjob)
optionsketch,mappath=readoptionsfile(optionfile)
    foldername = createfolder(foldername)
sqlfile=os.path.realpath(sysfile)
os.chdir(foldername)
os.system("ln -s ../MP_FILES")
for jobid in range(1,noofjobs+1):
firstseg=(jobid-1)*segmentsperjob+1
if jobid<noofjobs:
lastseg=firstseg+segmentsperjob-1
elif jobid==noofjobs:
lastseg=endseg
else:
"Something is smelly here. Exiting"
filekey="_job{:02d}_seg{:03d}-{:03d}".format(jobid,firstseg,lastseg)
#print firstseg,lastseg
xmlfile="option{}.xml".format(filekey)
logfile="log{}.txt".format(filekey)
subfile="sub{}.sh".format(filekey)
datfile="e_sites{}.dat".format(filekey)
writeoptionsfile(optionsketch,firstseg,lastseg,datfile,xmlfile,mappath)
runcommand="(ctp_run -e {} -o {} -f {} -t 8 -s \"0\" >{} )&& echo \"Done with all jobs.\"".format(calculator,xmlfile,sqlfile,logfile)
submitscript(subfile,jobid,runcommand)
os.system("chmod 755 \"{}\"".format(subfile))
if args.sub:
os.system("qsub {}".format(subfile))
os.chdir("..")
def createfolder(foldername):
i=1
temp=foldername
while os.path.isdir(foldername):
        foldername=temp+str(i)
i+=1
os.mkdir(foldername)
print "creating folder {}".format(foldername)
def submitscript(filename,jobid,runcommand):
write_cluster_batch(runcommand,"zmultipole_{}".format(jobid),outfile=filename,module=["gaussian/g03","votca/icc_cluster"],source=False,rsync="*'job{:02d}'*".format(jobid))
if args.endseg<0:
args.endseg=getnumberofsegments(args.system)
setupjobs(args.options,args.startseg,args.endseg,args.noofjobs,args.system)
|
py | 7dffa481a035c08c80251f866316d1cdf4855493 | # Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import codecs
import multiprocessing
from os import path
import re
import shutil
try:
import simplejson as json
except ImportError:
import json
special_case_namespaces_path = path.join(path.dirname(path.abspath(__file__)), 'special_case_namespaces.json')
class DependencyPreprocessor(object):
def __init__(self, descriptors, temp_frontend_path, devtools_frontend_path):
self.descriptors = descriptors
self.temp_frontend_path = temp_frontend_path
self.module_descriptors = descriptors.modules
self.modules = set(self.descriptors.sorted_modules())
shutil.copytree(devtools_frontend_path, self.temp_frontend_path)
with open(special_case_namespaces_path) as json_file:
self._special_case_namespaces = json.load(json_file)
def enforce_dependencies(self):
arg_list = []
for module in self.modules:
dependencies = set(self.descriptors.sorted_dependencies_closure(module))
excluded_modules = self.modules - {module} - dependencies
excluded_namespaces = [self._map_module_to_namespace(m) for m in excluded_modules]
file_paths = [path.join(self.temp_frontend_path, module, file_name)
for file_name in self.descriptors.module_compiled_files(module)]
arg = {
'excluded_namespaces': excluded_namespaces,
'file_paths': file_paths,
}
arg_list.append(arg)
parallelize(poison_module, arg_list)
def _map_module_to_namespace(self, module):
return self._special_case_namespaces.get(module, self._to_camel_case(module))
def _to_camel_case(self, snake_string):
components = snake_string.split('_')
return ''.join(x.title() for x in components)
def poison_module(target):
excluded_namespaces = target['excluded_namespaces']
file_paths = target['file_paths']
for file_path in file_paths:
with codecs.open(file_path, 'r', 'utf-8') as file:
file_contents = file.read()
file_contents = poison_contents_for_namespaces(file_contents, excluded_namespaces)
with codecs.open(file_path, 'w', 'utf-8') as file:
file.write(file_contents)
def poison_contents_for_namespaces(file_contents, namespaces):
# Technically, should be [^.]\s*\b + NAMESPACES + \b\s*[^:]
# but we rely on clang-format to format like this:
# SomeModule
# .Console
regex = r'([^.]\b)(' + '|'.join(namespaces) + r')(\b[^:])'
replace = r'\1$$UndeclaredDependency_\2$$\3'
return re.sub(regex, replace, file_contents)
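# Illustrative example (added for clarity; the namespace and source line are made
# up): poisoning rewrites references to excluded namespaces into invalid
# identifiers so the compiler reports any use of an undeclared module dependency.
#
#   poison_contents_for_namespaces(
#       'var t = SDK.targetManager.mainTarget();', ['SDK'])
#   -> 'var t = $$UndeclaredDependency_SDK$$.targetManager.mainTarget();'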
def parallelize(fn, arg_list):
number_of_processes = min(multiprocessing.cpu_count(), 8)
pool = multiprocessing.Pool(number_of_processes)
pool.map(fn, arg_list)
pool.close()
pool.join()
|
py | 7dffa75ad0fd2f16dfdca9b5a02024322865667a | """
A non-python example, with tests for the xeus-cling C++ kernel (github.com/jupyter-xeus/xeus-cling).
(Beware of python quoting/string escaping rules being different to the
language being tested)
"""
import unittest
import shutil
from jupyter_client.kernelspec import NoSuchKernel
import jupyter_kernel_test as jkt
class XeusClingKernelTests(jkt.KernelTests):
kernel_name = "xcpp17"
@classmethod
def setUpClass(cls):
try:
cls.km, cls.kc = jkt.start_new_kernel(kernel_name=cls.kernel_name)
except NoSuchKernel:
raise unittest.SkipTest('Xeus-Cling Kernel not installed')
language_name = "c++"
file_extension = ".cpp"
code_hello_world = '#include <iostream>\nstd::cout << "hello, world!" << std::endl;'
code_stderr = '#include <iostream>\nstd::cerr << "some error" << std::endl;'
complete_code_samples = ['1', "int j=5"]
incomplete_code_samples = ["double sqr(double a"]
code_generate_error = 'throw std::runtime_error("Unknown exception");'
code_execute_result = [
{'code': 'int j = 5;j', 'result': "5"},
]
if __name__ == '__main__':
unittest.main()
|
py | 7dffa7a7825319c9eebf0c0944d5be1536f27905 | import sys
sys.path.append('Project')
from input_library import *
path_external = 'Dataset/'
global date_check
date_check = ['2020-12-10', '2020-12-11', '2020-12-12', '2020-12-13', '2020-12-14', '2020-12-15', '2020-12-16',
'2020-12-17', '2020-12-18']
f_save = open('Dataset/MSA_demographics.pkl', 'rb')
MSA_statistics = pickle.load(f_save)
f_save.close()
MSA_all = pandas.read_csv(path_external + 'covid19-intervention-data/MSA_summary_indicators.csv')
MSA_all = MSA_all[MSA_all['date'].isin(date_check)]
def func_beta(x, a, b, c):
return (a * x[0] + 1) * (b * x[1] + c * x[2] + 1) - 1
def func_delta(x, a, b, c):
return (a * x[0] + 1) * (b * x[1] + c * x[2] + 1) - 1
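# Note added for clarity: both functions model a multiplicative intervention
# effect of the form (a*x[0] + 1) * (b*x[1] + c*x[2] + 1) - 1, so zero
# intervention levels give zero change. The coefficient names used elsewhere in
# this script suggest x = (excessive time at home, face-mask use, testing).
# Worked example: a = b = c = -0.2 and x = (0.5, 0.5, 0.5) gives
# (0.9) * (0.8) - 1 = -0.28, i.e. a 28% reduction in the fitted rate.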
def vaccination_sigmoid(n, cap, n_max):
if cap==0.9:
sigma_dict = {120: 6, 180: 5, 270: 3.5, 360: 2.8}
if cap==0.8:
sigma_dict = {120: 5.8, 180: 4.6, 270: 3.3, 360: 2.7}
if cap==0.7:
sigma_dict = {120: 5.4, 180: 4.3, 270: 3.1, 360: 2.6}
if cap==0.6:
sigma_dict = {120: 5.2, 180: 4.0, 270: 2.9, 360: 2.4}
if cap==0.5:
sigma_dict = {120: 5.1, 180: 3.9, 270: 2.8, 360: 2.2}
if cap==0.4:
sigma_dict = {120: 5.0, 180: 3.5, 270: 2.7, 360: 1.8}
if cap==0.3:
sigma_dict = {120: 4.5, 180: 3.0, 270: 2.0, 360: 1.0}
if cap==0.2:
sigma_dict = {120: 3.0, 180: 1.5, 270: 0.8, 360: 0.4}
if cap==0.1:
sigma_dict = {120: 0.5, 180: 0.1, 270: 0.1, 360: 0.1}
    # NOTE: this line overrides the cap-specific sigma values selected above, so
    # the same settings are currently applied for every cap.
    sigma_dict = {120: 6, 180: 5, 270: 3.5, 360: 2.8}
result = trunced_normal_distribution(0, n, sigma_dict[n])
y = [result[0]] + [result[i + 1] - result[i] for i in range(n - 1)]
y = [i * cap for i in y] + [0 for i in range(n_max - n)]
return y
def vaccination_linear(n, cap, n_max):
each = cap / n
y = [each for i in range(n)] + [0 for i in range(n_max - n)]
return y
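# Illustrative example (added for clarity): both helpers return a per-day list of
# newly vaccinated population fractions, zero-padded out to n_max days, whose
# entries sum (approximately) to cap, e.g.
#   vaccination_linear(4, 0.2, 6) -> [0.05, 0.05, 0.05, 0.05, 0, 0]
# vaccination_sigmoid produces an S-shaped uptake curve with the same total.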
def SIRD(t, y):
S = y[0]
I = y[1]
R = y[2]
D = y[3]
# print(y[0],y[1],y[2])
u = y[4]
beta = y[5]
gamma = y[6]
delta = y[7]
return ([-beta * S * I + u * (S + I + R) - u * S, beta * S * I - gamma * I - delta * I - u * I, gamma * I - u * R,
delta * I, 0, 0, 0, 0])
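# Note added for clarity: solve_ivp is called below with the packed state vector
# y = [S, I, R, D, u, beta, gamma, delta]; the last four entries are model
# parameters whose derivatives are returned as 0 so they are carried along
# unchanged during integration.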
def zero_infection_days(list_temp, pop_sum, run_days):
newlist = [i if i > 0 else 0 for i in list_temp]
for i in range(0, run_days - 90):
sum_infection = np.sum(newlist[i:i + 90])
if sum_infection <= 1:
return "true", i
return "false", run_days - 1
def scenario_test(period, run_days, case, vaccination_type, path_files):
MSA_beta = pd.read_csv('results_interventions/reported/MSA_impact_coeffecient_beta(t).csv')
MSA_delta = pd.read_csv('results_interventions/reported/MSA_impact_coeffecient_delta(t).csv')
df_result = pd.DataFrame(
columns=['vaccination', 'MSA_code', 'zero_new_infection', 'days', 'accumulated I', 'accumulated R',
'accumulated D', 'sum_pop'])
count_x = 0
for vaccination in [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]: #
print(vaccination)
if vaccination_type == 'linear':
vaccine_list = vaccination_linear(period, vaccination, run_days)
if vaccination_type == 'sigmoid':
vaccine_list = vaccination_sigmoid(period, vaccination, run_days)
for msa in MSA_beta['MSA_code']:
# if msa in [35620,31080, 16980, 19100, 26420, 47900, 33100, 37980, 12060, 38060, 14460, 41860, 40140,19820]:#
MSA_all_temp = MSA_all[MSA_all['MSA_code'] == int(msa)]
Sd = MSA_all_temp['S(t)'].values[-1]
Id = MSA_all_temp['I(t)'].values[-1]
Rd = MSA_all_temp['R(t)'].values[-1]
Dd = MSA_all_temp['D(t)'].values[-1]
sum_pop = (Sd + Id + Rd + Dd)
u = MSA_statistics['birth_death_rate'][msa]
beta_0 = MSA_all_temp['beta_0'].values[0]
delta_0 = MSA_all_temp['delta_0'].values[0]
gamma = np.mean(MSA_all_temp['gamma(t)'].values)
beta = np.mean(MSA_all_temp['beta(t)'].values)
delta = np.mean(MSA_all_temp['delta(t)'].values)
a = MSA_beta[MSA_beta['MSA_code'] == msa]['ratio of excessive time at home'].values
b = MSA_beta[MSA_beta['MSA_code'] == msa]['ratio of people wearing face masks'].values
c = MSA_beta[MSA_beta['MSA_code'] == msa]['ratio of people taking testing'].values
d = MSA_delta[MSA_delta['MSA_code'] == msa]['ratio of excessive time at home'].values
e = MSA_delta[MSA_delta['MSA_code'] == msa]['ratio of people wearing face masks'].values
f = MSA_delta[MSA_delta['MSA_code'] == msa]['ratio of people taking testing'].values
stay_at_home = np.mean(MSA_all_temp['ratio of excessive time at home'].values)
facemask = np.mean(MSA_all_temp['ratio of people wearing face masks'].values)
testing = np.mean(MSA_all_temp['ratio of people taking testing'].values)
if beta < 0:
beta = 0
if delta < 0:
delta = 0
print(msa, Sd, Id, Rd, Dd, stay_at_home, facemask, testing, beta, gamma, delta)
[S_current, I_current, R_current, D_current] = [Sd / sum_pop, Id / sum_pop, Rd / sum_pop, Dd / sum_pop]
df_infection = pd.DataFrame(
columns=['date', 'S', 'I', 'R', 'D', 'newI', 'beta', 'gamma', 'delta', 'vaccination'])
count = 0
for vaccinex in vaccine_list:
# print('vaccinex',vaccinex)
[S_current_old, I_current_old, R_current_old, D_current_old] = [S_current, I_current, R_current,
D_current]
I_old_sum = I_current_old + R_current_old + D_current_old
if (S_current - vaccinex) > 0:
S_current = S_current - vaccinex * 0.9
else:
S_current = 0
sol = solve_ivp(SIRD, [0, 1], [S_current, I_current, R_current, D_current] + [u, beta, gamma, delta],
t_eval=np.arange(0, 1 + 0.2, 0.2))
[S_current, I_current, R_current, D_current] = [sol.y[0][-1], sol.y[1][-1], sol.y[2][-1], sol.y[3][-1]]
I_sum = I_current + R_current + D_current
df_infection.loc[count] = [count] + [S_current * sum_pop, I_current * sum_pop, R_current * sum_pop,
D_current * sum_pop, (I_sum - I_old_sum) * sum_pop, beta, gamma,
delta, vaccinex]
count += 1
df_infection.to_csv(path_external + 'temp/' + str(msa) + case + str(vaccination) + ".csv")
list_tempx = [x for x in (df_infection['newI'])]
judge, day = zero_infection_days(list_tempx, sum_pop, run_days)
if judge == 'true':
df_result.loc[count_x] = [vaccination, msa, judge, day, df_infection['I'].values[day],
df_infection['R'].values[day], df_infection['D'].values[day], sum_pop]
print('true',
[vaccination, msa, judge, day, df_infection['I'].values[day], df_infection['R'].values[day],
df_infection['D'].values[day], sum_pop])
else:
print('false',
[vaccination, msa, judge, day, df_infection['I'].values[day], df_infection['R'].values[day],
df_infection['D'].values[day], sum_pop])
df_result.loc[count_x] = [vaccination, msa, judge, day, df_infection['I'].values[day],
df_infection['R'].values[day], df_infection['D'].values[day], sum_pop]
count_x += 1
df_result.to_csv(path_files + case + '-vaccination-' + str(period) + '.csv')
def plot_senario(df_result, case, path_files, period_day):
# df_result['accumulated I']=[i+j+k for i,j,k in zip(df_result['accumulated I'],df_result['accumualted R'],df_result['accumualted D'])]
# df_result['accumulated D'] = [i * df_result['sum_pop'].values[0] for i in df_result['accumulated D']]
dict_temp = {}
for msa in pd.unique(MSA_all['MSA_code']):
Sd = MSA_all[MSA_all['MSA_code'] == int(msa)]['S(t)'].values[-1]
Id = MSA_all[MSA_all['MSA_code'] == int(msa)]['I(t)'].values[-1]
Rd = MSA_all[MSA_all['MSA_code'] == int(msa)]['R(t)'].values[-1]
Dd = MSA_all[MSA_all['MSA_code'] == int(msa)]['D(t)'].values[-1]
sum_pop = MSA_all[MSA_all['MSA_code'] == int(msa)]['S(t)'].values[0]
dict_temp[msa] = [Sd, Id, Rd, Dd]
for index, row in df_result.iterrows():
msa_temp = row['MSA_code']
print(msa_temp)
        df_result['accumulated I'][index] = (df_result['accumulated I'][index] + df_result['accumulated R'][index] +
                                             df_result['accumulated D'][index]) \
                                            - row['vaccination'] * sum_pop - (
                                                    dict_temp[msa_temp][1] + dict_temp[msa_temp][2] +
                                                    dict_temp[msa_temp][3])
        df_result['accumulated R'][index] = df_result['accumulated R'][index] - dict_temp[msa_temp][2]
        df_result['accumulated D'][index] = df_result['accumulated D'][index] - dict_temp[msa_temp][3]
fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(12, 4))
sns.stripplot(x='vaccination', y='days', jitter=True, split=True, linewidth=0.1, alpha=0.1, data=df_result, size=3,
palette="Blues", ax=ax1)
sns.boxplot(x='vaccination', y='days', data=df_result, showfliers=False, showmeans=False, palette="Blues", ax=ax1)
ax1.set_ylabel("days needed for zero infection")
sns.stripplot(x='vaccination', y='accumulated I', jitter=True, split=True, linewidth=0.1, alpha=0.1, data=df_result,
size=3,
palette="Blues", ax=ax2)
sns.boxplot(x='vaccination', y='accumulated I', data=df_result, showfliers=False, showmeans=False, palette="Blues",
ax=ax2)
ax2.set_ylabel("additional infected cases")
    sns.stripplot(x='vaccination', y='accumulated D', jitter=True, split=True, linewidth=0.1, alpha=0.1, data=df_result,
size=3,
palette="Blues", ax=ax3)
    sns.boxplot(x='vaccination', y='accumulated D', data=df_result, showfliers=False, showmeans=False, palette="Blues",
ax=ax3)
ax3.set_ylabel("additional dead cases")
plt.tight_layout()
    fig.savefig(path_files + case + str(period_day) + ".png", dpi=600)
plot_senario_each_msa(df_result, case)
def plot_senario_each_msa(df_result, case):
fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(12, 4))
for msa in df_result['MSA_code']:
df_temp = df_result[df_result['MSA_code'] == msa]
df_temp = df_temp.sort_values(by=['vaccination'])
ax1.plot(df_temp['vaccination'], df_temp['days'], color='#9ecae1', alpha=0.2, linewidth=1)
ax2.plot(df_temp['vaccination'], df_temp['accumulated I'], color='#9ecae1', alpha=0.2, linewidth=1)
        ax3.plot(df_temp['vaccination'], df_temp['accumulated D'], color='#9ecae1', alpha=0.2, linewidth=1)
ax1.set_ylabel("days needed for zero infection")
ax2.set_ylabel("accumulated infected cases")
ax3.set_ylabel("accumulated dead cases")
plt.tight_layout()
    fig.savefig('analysis-results/scenarios/' + case + "(each_MSA).png", dpi=600)
def adoption_figure_linear(n, path_file):
###take 0.7 as an example
list_temp = vaccination_linear(n, 0.7, 270)
list_temp = [np.sum(list_temp[0:i]) for i in range(len(list_temp))]
fig = plt.figure(figsize=(4, 3))
ax = fig.add_subplot(111)
# ax.plot([i for i in range(n)],list_k,color='blue')
ax.axvline(180, color='black', linewidth=1)
ax.plot([i for i in range(270)], list_temp, color='#a50f15', linestyle='-')
ax.set_xticks([0, 90, 180, 270])
ax.set_xticklabels([' ', '', 'x months', ''])
ax.set_yticks([0, 0.7])
ax.set_yticklabels([' ', 'maximum ratio of' + '\n' + ' people full vaccinated'], rotation=90, fontsize=8)
plt.tight_layout()
fig.savefig(path_file + "adotpionfigure_linear.png", dpi=600)
def trunced_normal_distribution(lower_bar, upper_bar, sigma_range):
mu = int((upper_bar - lower_bar) / 2)
sigma = (upper_bar - lower_bar) / sigma_range
s = [1 / (sigma * np.sqrt(2 * np.pi)) * np.exp(- (bins - mu) ** 2 / (2 * sigma ** 2)) for bins in
range(lower_bar, upper_bar)]
s1 = np.cumsum([1 / (sigma * np.sqrt(2 * np.pi)) * np.exp(- (bins - mu) ** 2 / (2 * sigma ** 2)) for bins in
range(lower_bar, upper_bar)])
result = np.cumsum([i for i in s])
result = [i / np.max(result) for i in result]
return result
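# Note added for clarity: the helper returns a cumulative Gaussian curve over the
# integer days in [lower_bar, upper_bar), normalised so its final value is 1.0
# (mean (upper_bar - lower_bar) / 2, standard deviation
# (upper_bar - lower_bar) / sigma_range). vaccination_sigmoid() differences this
# curve to obtain the daily S-shaped uptake schedule.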
def adoption_real_data():
from sklearn.metrics import mean_squared_error
as_of_days = 36
vaccine_df = pd.read_csv(path_external + "covid19-intervention-data/vaccination/20200221US_vaccination_SUM.csv")
list_df = [i / 328200000 for i in vaccine_df['people_fully_vaccinated'].values]
fig = plt.figure(figsize=(4, 3))
ax = fig.add_subplot(111)
cap=0.9
color_dict = {120: "#c6dbef", 180: '#4292c6', 270: '#08519c', 360: '#08306b'}
if cap==0.9:
sigma_dict = {120: 6, 180: 5, 270: 3.5, 360: 2.8}
if cap==0.8:
sigma_dict = {120: 5.8, 180: 4.6, 270: 3.3, 360: 2.7}
if cap==0.7:
sigma_dict = {120: 5.4, 180: 4.3, 270: 3.1, 360: 2.6}
if cap==0.6:
sigma_dict = {120: 5.2, 180: 4.0, 270: 2.9, 360: 2.4}
if cap==0.5:
sigma_dict = {120: 5.1, 180: 3.9, 270: 2.8, 360: 2.2}
if cap==0.4:
sigma_dict = {120: 5.0, 180: 3.5, 270: 2.7, 360: 1.8}
if cap==0.3:
sigma_dict = {120: 4.5, 180: 3.0, 270: 2.0, 360: 1.0}
if cap==0.2:
sigma_dict = {120: 3.0, 180: 1.5, 270: 0.8, 360: 0.4}
if cap==0.1:
sigma_dict = {120: 0.5, 180: 0.1, 270: 0.1, 360: 0.1}
for day in [120, 180, 270, 360]:
results = trunced_normal_distribution(0, day, sigma_dict[day])
print(results)
results = [i * cap for i in results]
error = mean_squared_error(results[0:as_of_days], list_df, squared=False)
print(day, error)
results = list(results) + [results[-1] for i in range(480 - day)]
#ax.plot([i for i in range(0, len(results))], results, color=color_dict[day], linewidth=1,label="Fitted full vaccinated people in " + str(day) + " days")
plt.plot([i for i in range(0, as_of_days)], results[0:as_of_days], color='red')
plt.plot([i for i in range(0, as_of_days)], list_df, color='blue')
plt.show()
'''
ax.plot([i for i in range(0, as_of_days)], list_df, color='#ff7f00', linewidth=1,
label="Reported full vaccinated people till Feb-20-2021")
# plt.show()
ax.legend(loc=0, fontsize=7)
ax.set_xticks([0, 90, 180, 270, 360, 450])
ax.set_xticklabels([0, 90, 180, 270, 360, 450])
ax.set_yticks([0, 0.1, 0.9])
ax.set_yticklabels([' 0', '0.1', 'max'], fontsize=8)
plt.tight_layout()
fig.savefig(path_file + "adotpionfigure_sigmoid_all.png", dpi=600)
'''
if __name__ == '__main__':
f_save = open('Dataset/MSA_demographics.pkl', 'rb')
MSA_statistics = pickle.load(f_save)
f_save.close()
path_file = '/Users/luzhong/Documents/LuZHONGResearch/20200720COVID-Controllers/results_scenarios/'
###we have several intervention, stay at home, testing, facemask, vaccination
    case='best_case_linear' ### immediately
#case = 'best_case_sigmoid'
#case='adoption_figure'
run_days = 1000
if case == 'best_case_linear':
for finish_period in [120, 180, 270, 360]:
vaccination_type = 'linear'
scenario_test(finish_period, run_days, case, vaccination_type,
path_file) ###output: time needed for 1,0.1,0.01, 0,
df_result = pd.read_csv(path_file + case + '-vaccination-' + str(finish_period) + '.csv')
if case == 'best_case_sigmoid':
for finish_period in [120, 180, 270, 360]:
vaccination_type = 'sigmoid'
scenario_test(finish_period, run_days, case, vaccination_type,
path_file) ###output: time needed for 1,0.1,0.01, 0,
df_result = pd.read_csv(path_file + case + '-vaccination-' + str(finish_period) + '.csv')
if case == 'adoption_figure':
adoption_real_data()
# adoption_figure_linear(finish_period,path_file)
|
py | 7dffa7b0d6ca626e2865248289055f69363f5e6b | from rest_framework.decorators import action
from rest_framework.response import Response
from rest_framework import viewsets, mixins, status
from rest_framework.authentication import TokenAuthentication
from rest_framework.permissions import IsAuthenticated
from core.models import Recipe, Tag, Ingredient
from recipe import serializers
class BaseRecipeAttributesViewSet(
viewsets.GenericViewSet,
mixins.ListModelMixin,
mixins.CreateModelMixin
):
"""Manage recipe attr in the database"""
authentication_classes = (TokenAuthentication, )
permission_classes = (IsAuthenticated, )
def get_queryset(self):
"""Return object for the current auth user only"""
assigned_only = bool(
int(self.request.query_params.get('assigned_only', 0))
)
queryset = self.queryset
if assigned_only:
queryset = queryset.filter(recipe__isnull=False)
return queryset.filter(
user=self.request.user
).order_by('-name').distinct()
def perform_create(self, serializer):
"""Create a new object"""
serializer.save(user=self.request.user)
class TagViewSet(BaseRecipeAttributesViewSet):
"""Manage tags in the database"""
queryset = Tag.objects.all()
serializer_class = serializers.TagSerializer
class IngredientViewSet(BaseRecipeAttributesViewSet):
"""Manage the ingredients in database"""
queryset = Ingredient.objects.all()
serializer_class = serializers.IngredientSerializer
class RecipeViewSet(viewsets.ModelViewSet):
"""Manage recipes in database"""
serializer_class = serializers.RecipeSerializer
queryset = Recipe.objects.all()
authentication_classes = (TokenAuthentication, )
permission_classes = (IsAuthenticated, )
def _params_to_ints(self, qs):
"""Convert a list of srting IDs to a list of integers"""
return [int(str_id) for str_id in qs.split(',')]
def get_queryset(self):
"""Retrieve the recipes for the authenticated user"""
tags = self.request.query_params.get('tags')
ingredients = self.request.query_params.get('ingredients')
queryset = self.queryset
if tags:
tag_ids = self._params_to_ints(tags)
queryset = queryset.filter(tags__id__in=tag_ids)
if ingredients:
ingredient_ids = self._params_to_ints(ingredients)
queryset = queryset.filter(ingredients__id__in=ingredient_ids)
return queryset.filter(user=self.request.user)
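    # Illustrative example (not part of the original code): with the filters above,
    # a request such as
    #   GET /api/recipe/recipes/?tags=1,3&ingredients=2
    # returns only the authenticated user's recipes that have tag id 1 or 3 and
    # contain ingredient id 2 (the exact URL prefix depends on the project's router).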
def get_serializer_class(self):
"""Return appropriate serializer class"""
if self.action == 'retrieve':
return serializers.RecipeDetailSerializer
elif self.action == 'upload_image':
return serializers.RecipeImageSerizlizer
return self.serializer_class
def perform_create(self, serializer):
"""Create a new recipe"""
serializer.save(user=self.request.user)
@action(methods=['POST'], detail=True, url_path='upload_image')
def upload_image(self, request, pk=None):
"""Upload an image to a recipe"""
recipe = self.get_object()
serializer = self.get_serializer(
recipe,
data=request.data
)
if serializer.is_valid():
serializer.save()
return Response(
serializer.data,
status=status.HTTP_200_OK
)
return Response(
serializer.errors,
status=status.HTTP_400_BAD_REQUEST
)
|
py | 7dffa7f44a5ffab5cd5767922aa150a53bc5fa5a | # -*- coding: utf-8 -*-
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from builtins import *
from future import standard_library
standard_library.install_aliases()
import re
import os
from datetime import datetime
from collections import OrderedDict, Counter
import codecs
from urllib.parse import urljoin
import json
import tempfile
import filecmp
from bs4 import BeautifulSoup
from lxml import etree
import lxml.html
import requests
from layeredconfig import LayeredConfig
from cached_property import cached_property
from rdflib import Literal, URIRef
from rdflib.namespace import DCTERMS
from ferenda import util, decorators
from ferenda.elements import Preformatted, Body
from ferenda import CompositeRepository, CompositeStore
from ferenda import TextReader, PDFAnalyzer
from ferenda import DocumentEntry, Facet, PDFDocumentRepository
from ferenda.pdfreader import StreamingPDFReader, Textbox
from . import (Trips, NoMoreLinks, Regeringen, Riksdagen,
SwedishLegalSource, SwedishLegalStore, RPUBL, Offtryck)
from .fixedlayoutsource import FixedLayoutStore, FixedLayoutSource
from .swedishlegalsource import lazyread, SwedishLegalStore
from .elements import Sidbrytning
def prop_sanitize_identifier(identifier):
if not identifier:
        return identifier # allow infer_identifier to do its magic later
if identifier.startswith("prop"):
identifier = util.ucfirst(identifier)
if identifier.startswith("PROP"):
identifier = identifier.replace("PROP", "Prop")
if identifier.startswith("Prop "):
identifier = identifier.replace("Prop ", "Prop. ")
if re.match("Prop\.\d{4}", identifier): # missing space
identifier = identifier.replace("Prop.", "Prop. ")
if "\xa0" in identifier: # Non-breakable space
identifier = identifier.replace("\xa0", " ")
if not identifier.startswith("Prop. "):
identifier = "Prop. " + identifier
# identify and correct the not-uncommon "2009/2010:87" pattern (should be 2009/10:87)
m = re.search(r"(\d{4})/(\d{4}):(\d+)$", identifier)
if m and m.group(2) != "2000" and int(m.group(1)) == int(m.group(2)) - 1:
identifier = identifier.replace(m.group(2), m.group(2)[-2:])
if not re.match(r"^Prop\. (19|20)\d{2}(|/\d{2}|/2000):(|B ?|U ?)[1-9]\d{0,2}$", identifier):
raise ValueError("Irregular identifier %s" % identifier)
return Literal(identifier)
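# Illustrative examples of the normalisation (added for clarity, not exhaustive):
#   prop_sanitize_identifier("prop. 1997/98:44")    -> Literal("Prop. 1997/98:44")
#   prop_sanitize_identifier("Prop.2005/06:173")    -> Literal("Prop. 2005/06:173")
#   prop_sanitize_identifier("Prop. 2009/2010:87")  -> Literal("Prop. 2009/10:87")
# Identifiers that cannot be normalised to the expected pattern raise ValueError.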
class PropAnalyzer(PDFAnalyzer):
# NOTE: The cutoff used to be 0.5% but it turns out that in
# particular h2's can be quite rare, occuring maybe two times
# in an entire document.
style_significance_threshold = 0.001
@cached_property
def documents(self):
def boxmatch(page, textpattern, bb=None):
if bb is None:
bb = page.boundingbox(bottom=page.height / 5)
for box in bb:
m = re.match(textpattern, str(box))
if m:
return m.group(1)
return None
documents = []
mainstyles = Counter()
pagedims = {'pagewidth': util.TopCounter(),
'pageheight': util.TopCounter()}
currentappendix = None
for pageidx, page in enumerate(self.pdf):
styles = self.count_styles(pageidx, 1)
# find the most dominant style on the page. If it uses the
# EU font, it's a separate section.
if styles and styles.most_common(1)[0][0][0].startswith("EUAlbertina"):
currentdoc = 'eudok'
currentappendix = boxmatch(page, "Bilaga (\d)\s*$")
else:
# if there is a text box matching "Bilaga \d" in top
# margin and the bilagenummer is new and dominant
# style (family) is different from any of the
# top 3 currrent dominant styles:
#
# NOTE that normally we want to treat appendicies as
# part of the regular text (so that
# offtryck_parser.is_appendix et al can do their
# thing. This heuristic should only catch appendicies
# that are v. different.
appendix = boxmatch(page, "Bilaga (\d)\s*$")
if (appendix and
appendix != currentappendix and
styles.most_common(1) and
styles.most_common(1)[0][0][0] not in [x[0][0] for x in mainstyles.most_common(3)]):
currentdoc = 'appendix'
elif ".hocr." in self.pdf.filename:
# scanned sources have fluctuating page sizes,
# plus it's not possible to identify appendicies
# by differing page dimensions
currentdoc = "main"
elif pageidx == 0 and boxmatch(page, "(REGERINGENS PROPOSITION)", page.boundingbox(top=page.height * 0.8)):
currentdoc = "frontmatter"
else:
if (pagedims['pageheight'] and
(abs(pagedims['pageheight'].top() - page.height) > 1 or
abs(pagedims['pagewidth'].top() - page.width) > 1)):
# if the page dimensions suddenly change,
# that's a dead giveaway that some external
# appendix has been lifted right into the PDF
#
# But in some cases dimension change does NOT
# mean external appendix. In Prop 2015/16:195,
# which is split in 4 pdfs (2 logical volumes)
# it's just an artifact due to the 2nd pdf
# being properly cropped while the 1st
# isn't. In prop 2008/09:140, which
# uncharacteristically includes frontmatter, a
# dimension change signals the change from
# frontmatter to main
if currentdoc == "frontmatter":
currentdoc = "main"
else:
currentdoc = 'appendix'
else:
currentdoc = 'main'
currentappendix = appendix
if currentdoc == "main":
mainstyles += styles
pagedims['pagewidth'][page.width] += 1
pagedims['pageheight'][page.height] += 1
# update the current document segment tuple or start a new one
if documents and documents[-1][2] == currentdoc:
documents[-1][1] += 1
else:
documents.append([pageidx, 1, currentdoc])
return documents
def guess_pagenumber_select(self, candidates, probable_pagenumber):
if self.scanned_source:
# try to avoid assuming that smudges and crap equals
# lower-case L and other things that might be interpreted
# as roman numeral
if util.is_roman(candidates[0]) and str(probable_pagenumber) == "1":
return 1 # Do not interpret a single 'l' as roman 50
                          # -- it's probably a badly OCR:ed '1'
else:
# be a little more conservative with what a good guess
# is compared to PDFAnalyzer.guess_pagenumber_select:
# only accept the smallest candidate larger-or-equal
# to the probable_pagenumber -- but not if it's a
# too-large gap. Also, assume no roman numerals
try:
return next(c for c in sorted(candidates) if c >= probable_pagenumber and c <= probable_pagenumber * 2)
except StopIteration: # no suitable candidate
return None
# otherwise fall back to superclass implementation
return super(PropAnalyzer, self).guess_pagenumber_select(candidates, probable_pagenumber)
def guess_pagenumber_boxes(self, page):
"""Return a suitable number of textboxes to scan for a possible page number. """
if self.scanned_source:
# For scanned source, the default strategy works so-so
# (many OCR errors may result in misinterpreting things as
# pagenumbers) so we also take into account the text box
# property. Only select thin boxes (less than 1/50th of
# the page width) -- page numbers should stand by
# themselves and naturally be pretty thin
return [b for b in list(reversed(page))[:5] + list(page)[:5] if b.width < page.width/50]
else:
return super(PropAnalyzer, self).guess_pagenumber_boxes(page)
def metrics(self, metricspath=None, plotpath=None, startpage=0,
pagecount=None, force=False):
docsegments = self.documents
if len(docsegments) == 1:
return super(PropAnalyzer, self).metrics(metricspath,
plotpath,
startpage,
pagecount, force)
else:
r = []
exclude = []
mainidx = None
for idx, (startpage, pagecount, tag) in enumerate(docsegments):
r.append(super(PropAnalyzer,
self).metrics(startpage=startpage,
pagecount=pagecount))
if tag != 'main':
exclude.extend(list(range(startpage, startpage+pagecount)))
elif mainidx is None:
mainidx = idx
r[mainidx]['excludedpages'] = exclude
# since we don't pass metricspath to super().metrics, that
# func does not create a metrics.json cache file. So we
# generate that now (using the same data as we return)
util.ensure_dir(metricspath)
with open(metricspath, "w") as fp:
s = json.dumps(r[mainidx], indent=4, separators=(', ', ': '), sort_keys=True)
fp.write(s)
return r[mainidx]
def count_styles(self, startpage, pagecount):
# we should avoid counting the styles on the front page, as
# that page uses a title font, not used anywhere else in the
# document, which is then mistaken for the h1 font.
if not startpage:
startpage = 1
return super(PropAnalyzer, self).count_styles(startpage, pagecount)
class PropRegeringen(Regeringen):
alias = "propregeringen"
re_basefile_strict = re.compile(r'Prop. (\d{4}/\d{2,4}:\d+)')
re_basefile_lax = re.compile(
r'(?:Prop\.?|) ?(\d{4}/\d{2,4}:\d+)', re.IGNORECASE)
re_urlbasefile_strict = re.compile("proposition/\d+/\d+/[a-z]*\.?-?(\d{6})(\d+)-?/$")
re_urlbasefile_lax = re.compile("proposition/\d+/\d+/.*?(\d{4}_?\d{2})[_-]?(\d+)")
rdf_type = RPUBL.Proposition
document_type = Regeringen.PROPOSITION
# sparql_annotations = "sparql/prop-annotations.rq"
def attribs_from_url(self, url):
attribs = super(PropRegeringen, self).attribs_from_url(url)
# correct the not uncommon "2007/20:08123" -> "2007/2008:123" issue
total = attribs["rpubl:arsutgava"] + attribs["rpubl:lopnummer"]
if total.isdigit() and int(total[:4]) - int(total[4:8]) == - 1:
# convert to "2007/2008:123" and let santize_basefile make
# canonical (and warn). This way we don't need to
# specialcase "1999/2000:123"
attribs["rpubl:arsutgava"] = total[:8]
attribs["rpubl:lopnummer"] = total[8:]
y = attribs["rpubl:arsutgava"]
if "/" not in y:
attribs['rpubl:arsutgava'] = "%s/%s" % (y[:4], y[4:])
return attribs
def sanitize_identifier(self, identifier):
return prop_sanitize_identifier(identifier)
class PropTripsStore(FixedLayoutStore):
# 1993/94 and 1994/95 has only plaintext (wrapped in .html)
# 1995/96 to 2006/07 has plaintext + doc
# 2007/08 onwards has plaintext, doc and pdf
doctypes = OrderedDict([
(".pdf", b'%PDF'),
(".doc", b'\xd0\xcf\x11\xe0'),
(".docx", b'PK\x03\x04'),
(".wpd", b'\xffWPC'),
(".html", b'<!DO'),
])
def intermediate_path(self, basefile, version=None, attachment=None, suffix=None):
# we need to select a suitable intermediate suffix based upon
# the downloaded suffix (pdf->xml, html->txt)
if self.downloaded_path(basefile).endswith(".html"):
from ferenda.documentstore import _compressed_suffix
return self.path(basefile, "intermediate", ".txt" + _compressed_suffix(self.compression))
else:
return super(PropTripsStore, self).intermediate_path(basefile, version, attachment, suffix)
# We derive from Trips for downloading, from FixedLayoutSource for
# downloaded_to_intermediate, extract_{head,metadata,body}, and from
# Offtryck for most everything else. FIXME: This is not manageble.
class PropTrips(Trips, Offtryck, FixedLayoutSource):
alias = "proptrips"
ar = ""
start_url = "http://rkrattsbaser.gov.se/prop/adv?dok=P&sort=asc&ar={c.lastyear}"
document_url_template = "http://rkrattsbaser.gov.se/prop?ar=%(year)s&dok=P&dokid=%(ordinal)s"
basefile_regex = "(?P<basefile>\d+/\d+:\d+)$"
downloaded_suffix = ".html"
rdf_type = RPUBL.Proposition
KOMMITTEDIREKTIV = SOU = DS = None
PROPOSITION = "prop"
document_type = PROPOSITION
storage_policy = "dir"
documentstore_class = PropTripsStore
urispace_segment = "prop"
@classmethod
def get_default_options(cls):
opts = super(PropTrips, cls).get_default_options()
opts['lastyear'] = ""
return opts
# don't use @recordlastdownload -- download_get_basefiles_page
# should set self.config.lastyear instead
def download(self, basefile=None):
if self.config.ipbasedurls:
self._make_ipbasedurls()
urlmap_path = self.store.path("urls", "downloaded", ".map",
storage_policy="file")
self.urlmap = {}
if os.path.exists(urlmap_path):
with codecs.open(urlmap_path, encoding="utf-8") as fp:
for line in fp:
if len(line.split("\t")) < 2:
self.log.warning("Malformed url.map line: %r" % line)
continue
url, attachment = line.split("\t")
self.urlmap[url] = attachment.strip()
if basefile:
return super(PropTrips, self).download(basefile)
try:
now = datetime.now()
r = False
if ('lastyear' in self.config and
self.config.lastyear and
not self.config.refresh):
maxyear = "%s/%s" % (now.year, (now.year + 1) % 100)
while self.config.lastyear != maxyear:
r = self.inner_download()
else:
self.config.lastyear = ''
r = self.inner_download()
self.config.lastyear = "%s/%s" % (now.year - 1,
(now.year % 100))
LayeredConfig.write(self.config) # assume we have data to write
return r
finally:
with codecs.open(urlmap_path, "w", encoding="utf-8") as fp:
for url, attachment in self.urlmap.items():
fp.write("%s\t%s\n" % (url, attachment))
def inner_download(self):
refresh = self.config.refresh
updated = False
for basefile, url in self.download_get_basefiles(None):
if url in self.urlmap:
attachment = self.urlmap[url]
else:
attachment = self.sniff_attachment(url)
if attachment:
self.urlmap[url] = attachment
attachment += ".html"
else:
self.urlmap[url] = ''
attachment = None # instead of the empty string
if (refresh or
(not os.path.exists(self.store.downloaded_path(basefile, attachment=attachment)))):
ret = self.download_single(basefile, url)
updated = updated or ret
return updated
def sniff_attachment(self, url):
r = requests.get(url, stream=True)
head = r.raw.read(8000)
soup = BeautifulSoup(head, "lxml")
return self.find_attachment(soup)
def find_attachment(self, soup):
results = soup.find("div", "search-results-content")
dokid = results.find("span", string="Dokument:")
if not dokid or not dokid.next_sibling:
return None
dokid = dokid.next_sibling.strip().split(" ")[-1]
if "/" in dokid:
dokid, attachment = dokid.split("/")
else:
attachment = None
return attachment
def _next_year(self, year):
# "1992/93" -> "1993/94"
# "1998/99" -> "1999/00"
assert len(year) == 7, "invalid year specifier %s" % year
y1, y2 = int(year[:4]) + 1, int(year[-2:]) + 1
return "%04d/%02d" % (int(y1), int(y2) % 100)
def _prev_year(self, year):
# "1993/94" -> "1992/93"
# "1999/00" -> "1998/99"
assert len(year) == 7, "invalid year specifier %s" % year
y1, y2 = int(year[:4]) - 1, int(year[-2:]) - 1
return "%04d/%02d" % (int(y1), int(y2) % 100)
def remote_url(self, basefile):
year, ordinal = basefile.split(":")
return self.document_url_template % locals()
def download_get_basefiles_page(self, soup):
nextpage = None
for hit in soup.findAll("div", "search-hit-info-num"):
basefile = hit.text.split(": ", 1)[1].strip()
m = re.search(self.basefile_regex, basefile)
if m:
basefile = m.group()
else:
self.log.warning("Couldn't find a basefile in this label: %r" % basefile)
continue
docurl = urljoin(self.start_url, hit.parent.a["href"])
yield(self.sanitize_basefile(basefile), docurl)
nextpage = soup.find("div", "search-opt-next")
if nextpage:
nextpage = nextpage.a
else:
raise NoMoreLinks()
if nextpage:
nextpage = urljoin(self.start_url,
nextpage.get("href"))
else:
if self.config.lastyear:
b = self._next_year(self.config.lastyear)
else:
now = datetime.now()
b = "%s/%s" % (now.year - 1, (now.year) % 100)
self.log.debug("Advancing year from %s to %s" % (self.config.lastyear, b))
self.config.lastyear = b
raise NoMoreLinks(nextpage)
def download_single(self, basefile, url=None):
if url is None:
url = self.remote_url(basefile)
if not url: # remote_url failed
return
updated = created = False
checked = True
mainattachment = None
if url in self.urlmap:
attachment = self.urlmap[url]
else:
attachment = self.sniff_attachment(url)
if attachment:
self.urlmap[url] = attachment
attachment += ".html"
else:
self.urlmap[url] = ''
attachment = "index.html"
downloaded_path = self.store.downloaded_path(basefile,
attachment=attachment)
created = not os.path.exists(downloaded_path)
if self.download_if_needed(url, basefile, filename=downloaded_path):
text = util.readfile(downloaded_path)
if "<div>Inga tr\xe4ffar</div>" in text:
self.log.warning("%s: Could not find this prop at %s, might be a bug" % (basefile, url))
util.robust_remove(downloaded_path)
return False
if created:
self.log.info("%s: download OK from %s" % (basefile, url))
else:
self.log.info(
"%s: download OK (new version) from %s" % (basefile, url))
updated = True
else:
self.log.debug("%s: exists and is unchanged" % basefile)
text = util.readfile(downloaded_path)
soup = BeautifulSoup(text, "lxml")
del text
attachment = self.find_attachment(soup)
extraurls = []
results = soup.find("div", "search-results-content")
a = results.find("a", string="Hämta Pdf")
if a:
extraurls.append(a.get("href"))
a = results.find("a", string="Hämta Doc")
if a:
extraurls.append(a.get("href"))
# parse downloaded html/text page and find out extraurls
for url in extraurls:
if url.endswith('get=doc'):
# NOTE: We cannot be sure that this is
# actually a Word (CDF) file. For older files
# it might be a WordPerfect file (.wpd) or a
# RDF file, for newer it might be a .docx. We
# cannot be sure until we've downloaded it.
# So we quickly read the first 4 bytes
r = requests.get(url, stream=True)
sig = r.raw.read(4)
# r.raw.close()
#bodyidx = head.index("\n\n")
#sig = head[bodyidx:bodyidx+4]
if sig == b'\xffWPC':
doctype = ".wpd"
elif sig == b'\xd0\xcf\x11\xe0':
doctype = ".doc"
elif sig == b'PK\x03\x04':
doctype = ".docx"
elif sig == b'{\\rt':
doctype = ".rtf"
else:
self.log.error(
"%s: Attached file has signature %r -- don't know what type this is" % (basefile, sig))
continue
elif url.endswith('get=pdf'):
doctype = ".pdf"
else:
self.log.warning("Unknown doc type %s" %
url.split("get=")[-1])
doctype = None
if doctype:
if attachment:
filename = self.store.downloaded_path(
basefile, attachment=attachment + doctype)
else:
filename = self.store.downloaded_path(
basefile,
attachment="index" +
doctype)
self.log.debug("%s: downloading attachment %s" % (basefile, filename))
self.download_if_needed(url, basefile, filename=filename)
entry = DocumentEntry(self.store.documententry_path(basefile))
now = datetime.now()
entry.orig_url = url
if created:
entry.orig_created = now
if updated:
entry.orig_updated = now
if checked:
entry.orig_checked = now
entry.save()
return updated
# Correct some invalid identifiers spotted in the wild:
# 1999/20 -> 1999/2000
# 2000/2001 -> 2000/01
# 1999/98 -> 1999/2000
def sanitize_basefile(self, basefile):
(y1, y2, idx) = re.split("[:/]", basefile)
assert len(
y1) == 4, "Basefile %s is invalid beyond sanitization" % basefile
if y1 == "1999" and y2 != "2000":
sanitized = "1999/2000:" + idx
self.log.warning("Basefile given as %s, correcting to %s" %
(basefile, sanitized))
elif (y1 != "1999" and
(len(y2) != 2 or # eg "2000/001"
int(y1[2:]) + 1 != int(y2))): # eg "1999/98
sanitized = "%s/%02d:%s" % (y1, int(y1[2:]) + 1, idx)
self.log.warning("Basefile given as %s, correcting to %s" %
(basefile, sanitized))
else:
sanitized = basefile
return sanitized
def sanitize_identifier(self, identifier):
return prop_sanitize_identifier(identifier)
# FixedLayoutSource.downloaded_to_intermediate will always convert
# things to pdf, even html files. But if we only have html
# (eg. plaintext, we should work with that)
def downloaded_to_intermediate(self, basefile, attachment=None):
downloaded_path = self.store.downloaded_path(basefile, attachment=attachment)
if downloaded_path.endswith(".html"):
return self._extract_text(basefile)
else:
return super(PropTrips, self).downloaded_to_intermediate(basefile, attachment)
def extract_head(self, fp, basefile):
# get metadata from plaintext html even if we have doc/pdf,
# since plaintext is easiest to extract basic metadata from
txt = self._extract_text_inner(basefile)[:1000]
return txt.split("-"*64)[0]
def extract_metadata(self, rawheader, basefile):
d = self.metadata_from_basefile(basefile)
lines = [x.strip() for x in rawheader.split("\n\n") if x.strip()]
d["dcterms:identifier"] = "Prop. " + lines[0].split('\xb7')[1].strip()
d["dcterms:title"] = lines[1].strip()
for p in lines[2:]:
if p.startswith("Ansvarig myndighet: "):
d["rpubl:departement"] = p.split(": ", 1)[1]
elif p.startswith("Dokument: "):
pass
else:
self.log.warning("%s: Unknown header %s" % p)
return d
def sanitize_metadata(self, attribs, basefile):
attribs = super(PropTrips, self).sanitize_metadata(attribs, basefile)
if ('dcterms:title' in attribs and
'dcterms:identifier' in attribs and
attribs['dcterms:title'].endswith(attribs['dcterms:identifier'])):
x = attribs['dcterms:title'][:-len(attribs['dcterms:identifier'])]
attribs['dcterms:title'] = util.normalize_space(x)
return attribs
def extract_body(self, fp, basefile):
if util.name_from_fp(fp).endswith((".txt", ".txt.bz2")):
bodystring = fp.read()
if isinstance(bodystring, bytes):
# fp is opened in bytestream mode
bodystring = bodystring.decode("utf-8")
return TextReader(string=bodystring)
else:
reader = super(PropTrips, self).extract_body(fp, basefile)
pdffile = self.store.downloaded_path(basefile, attachment="index.pdf")
for page in reader:
page.src = pdffile
return reader
def sanitize_body(self, rawbody):
if isinstance(rawbody, TextReader):
return rawbody
else:
return super(PropTrips, self).sanitize_body(rawbody)
def get_parser(self, basefile, sanitized, initialstate=None, startpage=None, pagecount=None, parseconfig="default"):
if isinstance(sanitized, TextReader):
return self.textparser
else:
return super(PropTrips, self).get_parser(basefile, sanitized, initialstate, startpage, pagecount, parseconfig=parseconfig)
def tokenize(self, reader):
if isinstance(reader, TextReader):
return reader.getiterator(reader.readparagraph)
else:
return super(PropTrips, self).tokenize(reader)
class PropRiksdagen(Riksdagen):
alias = "propriksdagen"
rdf_type = RPUBL.Proposition
document_type = Riksdagen.PROPOSITION
def sanitize_identifier(self, identifier):
return prop_sanitize_identifier(identifier)
class PropKBStore(SwedishLegalStore):
downloaded_suffixes = [".pdf", ".xml"]
class PropKB(Offtryck, PDFDocumentRepository):
alias = "propkb"
storage_policy = "dir"
start_url = "https://riksdagstryck.kb.se/tvakammarriksdagen.html"
rdf_type = RPUBL.Proposition
basefile_regex = "prop_(?P<year>\d{4})(?P<type>_urtima|_höst|_a|_b|)__+(?P<no>\d+)(?:_(?P<part>\d+)|)"
document_type = PROPOSITION = True
SOU = DS = KOMMITTEDIREKTIV = False
documentstore_class = PropKBStore
@classmethod
def get_default_options(cls):
opts = super(PropKB, cls).get_default_options()
opts['ocr'] = False
return opts
def download_get_first_page(self):
# if we have already successfully downloaded everything, there
# is no need to even make a single network request (and we'd
# have to do at least 100 otherwise) since no new docs will
# ever be published (normally -- and if they are, just set
# config.refresh)
if (not self.config.refresh and
'lastdownload' in self.config and
self.config.lastdownload):
class DummyResp(object):
def raise_for_status(self):
pass
text = "<h1>no data</h1>"
return DummyResp()
else:
return super(PropKB, self).download_get_first_page()
proptype = {"": "",
"_a": "", # 1914, 1958
"_höst": "",
"_b": "b", # also 1914, 1958
"_urtima": "u"}
@decorators.downloadmax
def download_get_basefiles(self, source):
yielded = set()
if self.download_reverseorder:
source = reversed(list(source))
for (element, attribute, link, pos) in source:
if not element.text_content():
continue
if "proposition" in element.text_content():
resp = self.session.get(link)
resp.raise_for_status()
tree = lxml.html.document_fromstring(resp.text)
tree.make_links_absolute(link, resolve_base_href=True)
for (subelement, subattribute, sublink, subpos) in tree.iterlinks():
if not subelement.text:
continue
m = re.match(self.basefile_regex, subelement.text)
if m:
basefile = "%s:%s%s" % (m.group("year"), self.proptype[m.group("type")], m.group("no"))
exists = os.path.exists(self.store.downloaded_path(basefile))
if exists and not self.config.refresh:
continue
part = m.group("part")
if (basefile,part) in yielded:
continue
if self.get_parse_options(basefile) == "skip":
continue
if part and int(part) > 1 and self.get_parse_options(basefile) != "metadataonly":
# Download attachments ourselves -- not
# really what download_get_basefile should
# do, but hey....
filename = self.store.downloaded_path(basefile, attachment=part+".pdf")
self.download_if_needed(sublink, basefile, archive=self.download_archive, filename=filename)
else:
yield basefile, sublink
yielded.add((basefile,part))
def metadata_from_basefile(self, basefile):
attrib = super(PropKB, self).metadata_from_basefile(basefile)
year, ordinal = basefile.split(":")
attrib["rpubl:arsutgava"] = year
attrib["rpubl:lopnummer"] = ordinal
return attrib
def download_single(self, basefile, url=None):
if not url:
entry = DocumentEntry(self.store.documententry_path(basefile))
url = entry.orig_url
xml_downloaded_path = self.store.downloaded_path(basefile).replace(".pdf", ".xml")
if self.get_parse_options(basefile) == "metadataonly":
# in these cases, to save space, get
# the smaller XML OCR data, not the
# actual scanned images-in-PDF
url = url.replace(".pdf", ".xml").replace("pdf/web", "xml")
# make store.downloaded_path return .xml suffixes (and set
# the timestamp to the beginning of epoch so that the
# resulting if-modified-since header doesn't contain the
# current date/time
if not os.path.exists(xml_downloaded_path):
util.writefile(xml_downloaded_path, "")
os.utime(xml_downloaded_path, (0,0))
else:
# if parse options have changed from metadataonly to
# default, there will be a xml file lying about which will
# make downloaded_path return its name. Remove it so that
# we don't end up with pdf files that have a .xml
# extension.
if os.path.exists(xml_downloaded_path):
os.unlink(xml_downloaded_path)
return super(PropKB, self).download_single(basefile, url)
def download_is_different(self, existing, new):
return not filecmp.cmp(new, existing, shallow=False)
# @lazyread
def downloaded_to_intermediate(self, basefile, attachment=None):
downloaded_path = self.store.downloaded_path(basefile, attachment=attachment)
if downloaded_path.endswith(".xml"):
return open(downloaded_path)
else:
intermediate_path = self.store.intermediate_path(basefile)
return self.convert_pdf(downloaded_path, intermediate_path)
def convert_pdf(self, downloaded_path, intermediate_path):
intermediate_dir = os.path.dirname(intermediate_path)
keep_xml = "bz2" if self.config.compress == "bz2" else True
reader = StreamingPDFReader()
kwargs = {'filename': downloaded_path,
'workdir': intermediate_dir,
'images': self.config.pdfimages,
'keep_xml': keep_xml}
if self.config.ocr:
kwargs['ocr_lang'] = 'swe'
return reader.convert(**kwargs)
def extract_head(self, fp, basefile):
if self.get_parse_options(basefile) == "metadataonly":
tree = etree.parse(fp)
firstpage = tree.find("//{http://www.abbyy.com/FineReader_xml/FineReader10-schema-v1.xml}page")
return firstpage
else:
return None # "rawhead" is never used
def extract_metadata(self, rawhead, basefile):
res = self.metadata_from_basefile(basefile)
# extracting title and other metadata (dep, publication date
# etc) requires parsing of the body (and subsequent processing
# in postprocess_doc). For documents marked as metadataonly in
# options.py, the body is never parsed. Therefore, we do a
# very limited parsing of the first page here.
if self.get_parse_options(basefile) == "metadataonly":
text = util.normalize_space(etree.tostring(rawhead, method="text", encoding="utf-8").decode("utf-8"))
res.update(self.find_firstpage_metadata(text, basefile))
return res
def find_firstpage_metadata(self, firstpage, basefile):
res = {}
m = re.search("proposition till riksdagen *,? *(.*?); gif?ven",
util.normalize_space(firstpage), flags=re.I)
if not m:
self.log.warning("%s: Couldn't find title in first %s characters (first page)" %
(basefile, len(firstpage)))
else:
res["dcterms:title"] = m.groups(1)
m = re.search("gif?ven stockholms slott den (\d+ \w+ \d{4})", util.normalize_space(firstpage), flags=re.I)
if not m:
self.log.warning("%s: Couldn't find date in first %s characters (first page)" %
(basefile, len(firstpage)))
else:
try:
res["dcterms:issued"] = self.parse_swedish_date(m.group(1).lower())
except ValueError as e:
self.log.warning("%s: Couldn't parse date %s" % (basefile, m.group(1)))
return res
def extract_body(self, fp, basefile):
reader = StreamingPDFReader()
parser = "ocr" if self.config.ocr else "xml"
intermediate_suffix = ".hocr" if self.config.ocr else ".xml"
if self.config.compress:
intermediate_suffix += "." + self.config.compress
reader.read(fp, parser=parser)
for attachment in [x for x in sorted(self.store.list_attachments(basefile, "downloaded")) if x.endswith(".pdf")]:
downloaded_path = self.store.downloaded_path(basefile, attachment=attachment)
iattachment = attachment.replace(".pdf", intermediate_suffix)
intermediate_path = self.store.intermediate_path(basefile, attachment=iattachment)
if not os.path.exists(intermediate_path):
fp = self.convert_pdf(downloaded_path, intermediate_path)
else:
fp = self.store.open_intermediate(basefile, attachment=iattachment)
reader += StreamingPDFReader().read(fp)
for page in reader:
page.src = "index.pdf" # FIXME: don't hardcode the filename
return reader
def postprocess_doc(self, doc):
if self.get_parse_options(doc.basefile) == "metadataonly":
return
# the first thing will be a Sidbrytning; continue scanning text until next sidbrytning
firstpage = ""
for thing in doc.body[1:]:
if isinstance(thing, Sidbrytning):
break
elif isinstance(thing, Textbox):
firstpage += util.normalize_space(str(thing)) + "\n\n"
metadata = self.find_firstpage_metadata(firstpage, doc.basefile)
if "dcterms:title" in metadata:
doc.meta.add((URIRef(doc.uri), DCTERMS.title, Literal(metadata["dcterms:title"], lang=self.lang)))
if "dcterms:issued" in metadata:
doc.meta.add((URIRef(doc.uri), DCTERMS.issued, Literal(metadata["dcterms:issued"])))
# inherit list_basefiles_for from CompositeStore, basefile_to_pathfrag
# from SwedishLegalStore)
class PropositionerStore(CompositeStore, SwedishLegalStore):
pass
class Propositioner(CompositeRepository, FixedLayoutSource):
subrepos = PropRegeringen, PropTrips, PropRiksdagen, PropKB
alias = "prop"
xslt_template = "xsl/forarbete.xsl"
storage_policy = "dir"
rdf_type = RPUBL.Proposition
documentstore_class = PropositionerStore
sparql_annotations = "sparql/describe-with-subdocs.rq"
sparql_expect_results = False
# NB: The same logic as in
# ferenda.sources.legal.se.{Regeringen,Riksdagen}.metadata_from_basefile
def metadata_from_basefile(self, basefile):
a = super(Propositioner, self).metadata_from_basefile(basefile)
a["rpubl:arsutgava"], a["rpubl:lopnummer"] = basefile.split(":", 1)
return a
def facets(self):
return super(Propositioner, self).facets() + [Facet(DCTERMS.title,
toplevel_only=False)]
def tabs(self):
if self.config.tabs:
return [('Propositioner', self.dataset_uri())]
else:
return []
# For a certain repo, download_path might return *.wpd (good) or
# *.html (bad, because unformatted plaintext). If it returns bad,
# we should continue with other repos that might have
# *.pdf. HOWEVER, if no other repo has it in any format, we'll
# have to accept the repo that has it as *.html.
#
# NOTE: This implementation does not make use of the
# self.store.basefiles[c] cache, since that only keeps track of
# which repos has which basefiles, not the format/quality of the
# source.
def get_preferred_instances(self, basefile):
backups = []
for c in self.subrepos:
inst = self.get_instance(c)
source_candidate = inst.store.downloaded_path(basefile)
if os.path.exists(source_candidate):
if c.alias != "propregeringen" and source_candidate.endswith(".html"):
backups.append(inst)
else:
yield(inst)
for inst in backups:
yield(inst)
|
py | 7dffa8db37632490593362efbb3c640c4e492ca4 | from GeneratorBase import *
class HigherNibbleConst(GeneratorBase):
def __init__(self, values):
super(HigherNibbleConst, self).__init__(values, "higher nibble constant")
def can_generate(self):
n = len(self.values)
if n > 16:
return False
return len(set(self.higher_nibbles)) == 1
def do_generate(self, builder):
lookup = [self.values[0]] * 16
for i, x in enumerate(self.values):
            lo = x & 0x0f  # index the 16-entry table by the lower nibble (the higher nibble is constant)
            lookup[lo] = x
lookup = builder.add_lookup(lookup)
shuffled = builder.add_shuffle(lookup, builder.get_parameter("lower_nibbles"))
tmp = builder.add_compare_eq(builder.get_parameter("input"), shuffled)
builder.update_result(tmp)
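# Illustrative example (added for clarity): for the ASCII digits 0x30..0x39 the
# higher nibble is constantly 0x3, so the generator builds a 16-entry table
# indexed by the lower nibble (lookup[i] holds the value whose low nibble is i,
# unused slots keep values[0]), shuffles it with the input's lower nibbles
# (typically a PSHUFB-style shuffle) and byte-compares the result against the
# input; only bytes that actually belong to the value set compare equal.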
|
py | 7dffa90c3bc1a3878c816bd639bf64b13ba755f3 | import boto3
import json
MAX_IMAGES = 100
flatten = lambda l: [item for sublist in l for item in sublist]
def get_image_details(dynamodb_obj, image_id_list):
"""Get the image details of all the images provided by the image ID list"""
try:
image_id_list = list(set(image_id_list))
image_id_query = [{ 'id': item } for item in image_id_list]
response = dynamodb_obj.batch_get_item(
RequestItems={
'image_details': {
'Keys': image_id_query,
'ConsistentRead': True
}
},
ReturnConsumedCapacity='TOTAL'
)
except Exception as e:
print(str(e))
else:
items = response['Responses']
if items["image_details"]:
return items["image_details"]
def get_images_uploaded_by_users(dynamodb_obj, user_id_list):
"""Get the images uploaded by the users provided the user ID list"""
try:
user_id_list = list(set(user_id_list))
user_id_query = [{ 'id': item } for item in user_id_list]
response = dynamodb_obj.batch_get_item(
RequestItems={
'users': {
'Keys': user_id_query,
'ConsistentRead': True
}
},
ReturnConsumedCapacity='TOTAL'
)
except Exception as e:
print(str(e))
else:
items = response['Responses']
# print(items["users"])
# print(json.dumps(items))
# print(len(items))
if items["users"]:
uploaded_image_ids = [ item["uploaded_images"] for item in items["users"] if item.get("uploaded_images") ]
return flatten(uploaded_image_ids)
def generate_user_feed_handler(event, context):
"""
Generate the user feed based on the user id
"""
print("Event received: " + str(event))
# Get path parameters
path_parameters = event["pathParameters"]
print("userid = " + path_parameters['id'])
user_id = path_parameters['id']
if user_id:
dynamodb = boto3.resource("dynamodb", region_name='us-east-1')
# Get the images uploaded by the requesting user
users_table = dynamodb.Table("users")
user_details = users_table.get_item(Key = {'id' : user_id})
if user_details and user_details["Item"]:
user_details_item = user_details["Item"]
candidate_images = user_details_item.get("uploaded_images", [])
current_user_following = user_details_item.get("following", None)
# Get details of the images uploaded by each user
other_user_images = []
if current_user_following is not None:
# Get the consolidated list of image ids uploaded by users current user is following
other_user_image_ids = get_images_uploaded_by_users(dynamodb, current_user_following)
print(other_user_image_ids)
if other_user_image_ids is not None:
candidate_images.extend(other_user_image_ids)
image_details = []
if len(candidate_images) > 0:
image_details = get_image_details(dynamodb, candidate_images)
print("Number of images retrieved: " + str(len(image_details)))
required_number_images = min(MAX_IMAGES, len(image_details))
# Get the most recent images
image_details = sorted(image_details, key = lambda x: x["time"], reverse=True)[: required_number_images]
#for image_detail in image_details:
# print(image_detail["time"])
response = {}
response["statusCode"] = 200
response["headers"] = { "Access-Control-Allow-Origin": "*" }
response["body"] = json.dumps(image_details)
response["isBase64Encoded"] = False
return response
if __name__ == "__main__":
event = { "pathParameters" : { "id": "4" } }
response = generate_user_feed_handler(event, None)
print("Response")
print(response) |
py | 7dffa9162e82c05f6f83814e3667b9994bc3c66e | import xml.etree.ElementTree as ET
from queue import Queue, Empty
from threading import Thread
import logging
from typing import List
from influxdb import InfluxDBClient
from .transfer import message_to_point
from . import config
class FIXMMessageHandler:
"""Saves received FIXM data to InfluxDB."""
_logger = logging.getLogger("sos_journaler.transfer")
def __init__(self, db: InfluxDBClient):
self._db = db
self._messages = Queue()
self._running = True
# Spin up all messaging handling threads
self._message_processors: List[Thread] = []
for i in range(config.message_handler_threads):
thread = Thread(
name=f"Message Processor {i}",
daemon=True,
target=self._process_messages,
)
thread.start()
self._message_processors.append(thread)
def on_message(self, _channel, _method, _properties, body) -> None:
"""Handle a received message"""
message_collection = ET.fromstring(body)
for message in message_collection:
self._messages.put(message)
def close(self) -> None:
"""Clean up all threads"""
self._logger.info("Stopping message processing threads")
        self._running = False
for thread in self._message_processors:
thread.join()
def _process_messages(self) -> None:
"""Once 50 points have accumulated, batch write them to DB"""
points = []
while self._running:
try:
message = self._messages.get(timeout=0.01)
except Empty:
continue
point = message_to_point(message)
points.append(point)
if len(points) >= 50:
self._db.write_points(points)
points.clear()
# Logging to catch issues with queue build-up
queue_size = self._messages.qsize()
if queue_size > 100:
message = f"The point saving threads are running behind " \
f"by {queue_size} points"
self._logger.warning(message)
|
py | 7dffa9804f74b8248ed8525007be2cc197f2fa4a | # -*- coding: utf-8 -*-
"""OptimalTriangulation.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1DFbZX5RVOrMQ5HplLPHW0bKu4ML4tQVR
"""
from google.colab import drive
drive.mount('/content/drive')
class vertex:
    """Simple mutable 2D point used for the polygon vertices."""
    def __init__(self, x=0.0, y=0.0):
        self.x = x
        self.y = y
import math
import matplotlib.pyplot as plt
def Between(a, b, c):
if not Collinear(a, b, c):
return False
if a.x != b.x:
return (a.x <= c.x and c.x <= b.x) or (a.x >= c.x and c.x >= b.x)
else:
return (a.y <= c.y and c.y <= b.y) or (a.y >= c.y and c.y >= b.y)
def dist(v1, v2):
return math.sqrt((v1.x - v2.x) * (v1.x - v2.x) +
(v1.y - v2.y) * (v1.y - v2.y))
def IntersectProp(a, b, c, d):
if (Collinear(a, b, c) or Collinear(a, b, d) or Collinear(c, d, a) or Collinear(c, d, b)):
return False
return Xor(left(a, b, c), left(a, b, d)) and Xor(left(c, d, a), left(c, d, b))
def Intersect(a, b, c, d):
if (IntersectProp(a, b, c, d)):
return True
elif Between(a, b, c) or Between(a, b, d) or Between(c, d, a) or Between(c, d, b):
return True
else:
return False
def Collinear(a, b, c):
return ((b.x - a.x) * (c.y - a.y) - (c.x - a.x) * (b.y - a.y)) == 0
def left(a, b, c):
return ((b.x - a.x) * (c.y - a.y) - (c.x - a.x) * (b.y - a.y)) > 0
def leftOn(a, b, c):
return ((b.x - a.x) * (c.y - a.y) - (c.x - a.x) * (b.y - a.y)) >= 0
def Xor(x, y):
return (not x) ^ (not y)
def Diagonalie(i, j, n, vert):
for k in range(n):
k1 = (k + 1) % n
if not ((k == i) or (k1 == i) or (k == j) or (k1 == j)):
if (Intersect(vert[i], vert[j], vert[k], vert[k1])):
return False
return True
def InCone(i, j, n, vert):
i_minus = (i - 1 + n) % n
i_plus = (i + 1) % n
if (leftOn(vert[i_minus], vert[i], vert[i_plus])):
return left(vert[i], vert[j], vert[i_minus]) and left(vert[j], vert[i], vert[i_plus])
else:
return not (leftOn(vert[i], vert[j], vert[i_plus]) and leftOn(vert[j], vert[i], vert[i_minus]))
def Diagonal(i, j, n, vert):
# print(InCone (i,j,n, vert), Diagonalie(i,j,n,vert))
if (i == (j + 1) % n or i == (j + n - 1) % n):
return True
return (InCone(i, j, n, vert) and Diagonalie(i, j, n, vert))
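# Hedged sanity check (not in the original notebook): for a unit square listed in
# counter-clockwise order, the chord (0,0)-(1,1) should be accepted as a diagonal.
#   sq = []
#   for px, py in [(0, 0), (1, 0), (1, 1), (0, 1)]:
#       v = vertex(); v.x, v.y = px, py; sq.append(v)
#   Diagonal(0, 2, 4, sq)   # expected: True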
def plot_tri(P, v, i, j, n, ax1, c):
if i != j:
k = P[i][j]
if k == -1:
return
if i != (j + 1) % n and i != (j + n - 1) % n:
x_val = [v[i].x, v[j].x]
y_val = [v[i].y, v[j].y]
ax1.plot(x_val, y_val, color=c)
if i != (k + 1) % n and i != (k + n - 1) % n:
x_val = [v[i].x, v[k].x]
y_val = [v[i].y, v[k].y]
ax1.plot(x_val, y_val, color=c)
if k != (j + 1) % n and k != (j + n - 1) % n:
x_val = [v[k].x, v[j].x]
y_val = [v[k].y, v[j].y]
ax1.plot(x_val, y_val, color=c)
plot_tri(P, v, i, k, n, ax1, c)
plot_tri(P, v, k, j, n, ax1, c)
return
def buildVisibility(vert, n):
visibility = [[0 for i in range(n)] for j in range(n)]
for i in range(n):
for j in range(n):
if Diagonal(i, j, n, vert):
visibility[i][j] = 1
return visibility
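# Hedged expectation for the same unit square: every pair of distinct vertices of a convex
# polygon passes the Diagonal test (adjacent pairs are accepted outright), so
#   buildVisibility(sq, 4)   # expected: [[0,1,1,1],[1,0,1,1],[1,1,0,1],[1,1,1,0]]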
def PointAssign(a, b):
a.x = b.x
a.y = b.y
def ClipEar(i, n, vert):
for k in range(i, n - 1):
PointAssign(vert[k], vert[k + 1])
length = 0
def EarClipTriangulate(n, vert, ax2):
global length
if n > 3:
for i in range(n):
i1 = (i + 1) % n
i2 = (i + 2) % n
if Diagonal(i, i2, n, vert):
# print('Edge', vert[i].x, vert[i].y, 'to', vert[i2].x, vert[i2].y)
x_val = [vert[i].x, vert[i2].x]
y_val = [vert[i].y, vert[i2].y]
ax2.plot(x_val, y_val, color='g')
length += dist(vert[i], vert[i2])
ClipEar(i1, n, vert)
EarClipTriangulate(n - 1, vert, ax2)
break
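# Note for comparison with the dynamic-programming variants below: ear clipping accepts the
# first valid ear it finds, so the total diagonal length accumulated in `length` is a feasible
# triangulation cost but not necessarily the minimum; the script prints both totals at the end.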
def angle_cost(a,b,c):
#We define the angle cost as the difference of longest side and the smallest side in a triangle.
d1 = dist(a,b)
d2 = dist(b,c)
d3 = dist(c,a)
return max(d1, d2, d3) - min(d1, d2, d3)
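# Worked example (illustrative, not from the notebook): for the right triangle with vertices
# (0,0), (3,0), (0,4) the side lengths are 3, 4 and 5, so angle_cost returns 5 - 3 = 2.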
def area(a, b, c):
return abs((b.x - a.x) * (c.y - a.y) -(c.x - a.x) * (b.y - a.y))/2
def perimeter(a, b, c):
return dist(a,b)+dist(b,c)+dist(c,a)
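# For the same 3-4-5 right triangle, area(...) = |3*4 - 0*0| / 2 = 6 and perimeter(...) = 12
# (illustrative values only).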
#minimizing the sum of diagonals of the triangulation
def optimalTri(vert, n, visibility, P_opt):
if (n < 3):
return 0
table = [[float('inf') for x in range(n)] for i in range(n)]
i = 2
while (i < n):
j = (i - 2)
while (j >= 0):
if visibility[i][j] == 1:
if j == (i + n - 2) % n:
table[i][j] = 0
P_opt[i][j] = (i + n - 1) % n
else:
a1 = a2 = mini = float('inf')
k_hat = -1
for k in range(j + 1, i):
if k == i - 1 and visibility[k][j] == 1:
a1 = dist(vert[k], vert[j]) + table[k][j]
elif k == j + 1 and visibility[i][k] == 1:
a2 = dist(vert[k], vert[i]) + table[i][k]
else:
if table[i][k] + table[k][j] + dist(vert[k], vert[j]) + dist(vert[i], vert[k]) < mini:
k_hat = k
mini = min(mini,
table[i][k] + table[k][j] + dist(vert[k], vert[j]) + dist(vert[i], vert[k]))
if mini <= a1 and mini <= a2:
P_opt[i][j] = k_hat
elif a1 <= mini and a1 <= a2:
P_opt[i][j] = i - 1
else:
P_opt[i][j] = j + 1
table[i][j] = min(a1, a2, mini)
j -= 1
i += 1
return table
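# Reading aid for the recurrence implemented above (restated from the code, not from an
# external reference): table[i][j] is the minimum total diagonal length needed to triangulate
# the sub-polygon j, j+1, ..., i; for each split vertex k strictly between j and i the cost is
# table[i][k] + table[k][j] plus the lengths of the chords (j, k) and (k, i) that are not
# polygon edges, and P_opt[i][j] stores the chosen k so plot_tri can redraw the triangulation.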
#Minimizing the difference between longest and shortest side of triangles in the triangulation
def optimalAngleTri(vert, n, visibility, P_opt):
if n<3:
return 0
table = [[float('inf') for x in range(n)] for i in range(n)]
i=2
while i<n:
j=i-2
while j>=0:
if visibility[i][j]==1:
if j == (i + n - 2) % n:
table[i][j] = angle_cost(vert[i], vert[j], vert[j+1]) #Base case
P_opt[i][j] = (i + n - 1) % n
else:
a1 = a2 = mini = float('inf')
k_hat = -1
for k in range(j + 1, i):
if k == i - 1 and visibility[k][j] == 1:
a1 = angle_cost(vert[i], vert[k], vert[j]) + table[k][j]
elif k == j + 1 and visibility[i][k] == 1:
a2 = angle_cost(vert[j], vert[k], vert[i]) + table[i][k]
else:
if (table[i][k] + table[k][j] + angle_cost(vert[j], vert[i], vert[k])) < mini:
k_hat = k
mini = min(mini, table[i][k] + table[k][j] + angle_cost(vert[i], vert[j], vert[k]))
if mini <= a1 and mini <= a2:
P_opt[i][j] = k_hat
elif a1 <= mini and a1 <= a2:
P_opt[i][j] = i - 1
else:
P_opt[i][j] = j + 1
table[i][j] = min(a1, a2, mini)
j -= 1
i += 1
return table
# Minimizing the area of the largest triangle in the triangulation
def optimalAreaTri(vert, n, visibility, P_opt):
if (n < 3):
return 0
table = [[float('inf') for x in range(n)] for i in range(n)]
i = 2
while (i < n):
j = (i - 2)
while (j >= 0):
if visibility[i][j] == 1:
if j == (i + n - 2) % n:
table[i][j] = area(vert[i], vert[j], vert[j+1])
P_opt[i][j] = (i + n - 1) % n
else:
a1 = a2 = mini = float('inf')
k_hat = -1
for k in range(j + 1, i):
if k == i - 1 and visibility[k][j] == 1:
a1 = max(area(vert[i], vert[j], vert[k]), table[k][j])
elif k == j + 1 and visibility[i][k] == 1:
a2 = max(area(vert[i], vert[j], vert[k]), table[i][k])
else:
if max(table[i][k] , table[k][j] , area(vert[i], vert[j], vert[k])) < mini:
k_hat = k
mini = min(mini,
max(table[i][k] , table[k][j] , area(vert[i], vert[j], vert[k])))
if mini <= a1 and mini <= a2:
P_opt[i][j] = k_hat
elif a1 <= mini and a1 <= a2:
P_opt[i][j] = i - 1
else:
P_opt[i][j] = j + 1
table[i][j] = min(a1, a2, mini)
j -= 1
i += 1
return table
#Minimizing the maximum perimeter triangle
def optimalPerimeterTri(vert, n, visibility, P_opt):
if (n < 3):
return 0
table = [[float('inf') for x in range(n)] for i in range(n)]
i = 2
while (i < n):
j = (i - 2)
while (j >= 0):
if visibility[i][j] == 1:
if j == (i + n - 2) % n:
table[i][j] = perimeter(vert[i], vert[j], vert[j+1])
P_opt[i][j] = (i + n - 1) % n
else:
a1 = a2 = mini = float('inf')
k_hat = -1
for k in range(j + 1, i):
if k == i - 1 and visibility[k][j] == 1:
a1 = max(perimeter(vert[i], vert[j], vert[k]), table[k][j])
elif k == j + 1 and visibility[i][k] == 1:
a2 = max(perimeter(vert[i], vert[j], vert[k]), table[i][k])
else:
if max(table[i][k] , table[k][j] , perimeter(vert[i], vert[j], vert[k])) < mini:
k_hat = k
mini = min(mini,
max(table[i][k] , table[k][j] , perimeter(vert[i], vert[j], vert[k])))
if mini <= a1 and mini <= a2:
P_opt[i][j] = k_hat
elif a1 <= mini and a1 <= a2:
P_opt[i][j] = i - 1
else:
P_opt[i][j] = j + 1
table[i][j] = min(a1, a2, mini)
j -= 1
i += 1
return table
filepath = input("Enter the file path to the input file : ")
file1 = open(filepath, 'r')
lines = file1.readlines()
points=[]
for l in lines:
x,y = l.split(' ')
x = float(x)
y = float(y)
d=[]
d.append(x)
d.append(y)
points.append(d)
file1.close()
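# Expected input format (inferred from the parser above): one vertex per line as "x y",
# listed in boundary order; the geometric predicates earlier in the file assume a
# counter-clockwise ordering. Example file contents:
#   0 -4
#   0.5 -2
#   2 -3.75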
#Uncomment the below line to test for the given sample points
#points = [[0, -4], [0.5, -2], [2, -3.75], [4, -4.25], [4.5, -0.5], [3, -2.5], [0.25, 2], [-3, -2.5]]
vert = [None for _ in range(len(points))]  # filled with vertex objects in the plotting loop below
print(points)
n = len(points)
fig = plt.figure(figsize=( 8, 20))
ax1 = fig.add_subplot(511)
ax1.set_title('Min sum of diagonals')
ax2 = fig.add_subplot(512)
ax2.set_title('Min difference between longest and shortest side of triangles')
ax3 = fig.add_subplot(513)
ax3.set_title('Minimizing the max area triangle')
ax4 = fig.add_subplot(514)
ax4.set_title('Minimizing the max perimeter triangle')
ax5 = fig.add_subplot(515)
ax5.set_title('Ear-clipping triangulation')
for i, x in enumerate(points):
vert[i] = vertex()
vert[i].x = points[i][0]
vert[i].y = points[i][1]
x_values = [points[i][0], points[(i + 1) % n][0]]
y_values = [points[i][1], points[(i + 1) % n][1]]
ax1.plot(x_values, y_values, color='b')
ax2.plot(x_values, y_values, color='b')
ax3.plot(x_values, y_values, color='b')
ax4.plot(x_values, y_values, color='b')
ax5.plot(x_values, y_values, color='b')
vert1 = vert  # alias, not a copy: EarClipTriangulate mutates the vertex list, so it is called only after the DP plots
P_opt_diag = [[-1 for i in range(n)] for j in range(n)]
P_opt_sides = [[-1 for i in range(n)] for j in range(n)]
P_opt_area = [[-1 for i in range(n)] for j in range(n)]
P_opt_peri = [[-1 for i in range(n)] for j in range(n)]
#Preprocessing and building the visibility graph
vis = buildVisibility(vert, n)
optlen1 = optimalTri(vert, n, vis, P_opt_diag)[n - 1][0]
optlen2 = optimalAngleTri(vert,n, vis, P_opt_sides)[n-1][0]
optlen3 = optimalAreaTri(vert, n, vis, P_opt_area)[n-1][0]
optlen4 = optimalPerimeterTri(vert, n, vis, P_opt_peri)[n-1][0]
print('Sum of diagonals of the triangulation with Dynamic programming - ', optlen1)
plot_tri(P_opt_diag, vert, n - 1, 0, n, ax1, 'r')
plot_tri(P_opt_sides, vert, n - 1, 0, n, ax2, 'g')
plot_tri(P_opt_area, vert, n - 1, 0, n, ax3, 'c')
plot_tri(P_opt_peri, vert, n - 1, 0, n, ax4, 'm')
EarClipTriangulate(n, vert1, ax5)
print('Sum of diagonals of the triangulation with Ear-clipping - ', length)
plt.show() |