ext | sha | content
---|---|---
py | 7dfbc7bcb0b74c6466335dee4719624c940e1fce | import time
import os
import unittest
import canopen
import can
EDS_PATH = os.path.join(os.path.dirname(__file__), 'sample.eds')
class TestNetwork(unittest.TestCase):
def setUp(self):
network = canopen.Network()
network.add_node(2, EDS_PATH)
network.add_node(3, network[2].object_dictionary)
self.network = network
def test_add_node(self):
node = self.network[2]
self.assertIsInstance(node, canopen.Node)
self.assertEqual(node.id, 2)
self.assertEqual(self.network[2], node)
self.assertEqual(len(self.network), 2)
def test_notify(self):
node = self.network[2]
self.network.notify(0x82, b'\x01\x20\x02\x00\x01\x02\x03\x04', 1473418396.0)
self.assertEqual(len(node.emcy.active), 1)
self.network.notify(0x702, b'\x05', 1473418396.0)
self.assertEqual(node.nmt.state, 'OPERATIONAL')
self.assertListEqual(self.network.scanner.nodes, [2])
def test_send_periodic(self):
bus = can.interface.Bus(bustype="virtual", channel=1)
self.network.connect(bustype="virtual", channel=1)
task = self.network.send_periodic(0x123, [1, 2, 3], 0.01)
time.sleep(0.1)
self.assertTrue(9 <= bus.queue.qsize() <= 11)
msg = bus.recv(0)
self.assertIsNotNone(msg)
self.assertSequenceEqual(msg.data, [1, 2, 3])
# Update data
task.update([4, 5, 6])
time.sleep(0.02)
while msg is not None and msg.data == b'\x01\x02\x03':
msg = bus.recv(0)
self.assertIsNotNone(msg)
self.assertSequenceEqual(msg.data, [4, 5, 6])
task.stop()
class TestScanner(unittest.TestCase):
def test_passive_scanning(self):
scanner = canopen.network.NodeScanner()
scanner.on_message_received(0x586)
scanner.on_message_received(0x587)
scanner.on_message_received(0x586)
self.assertListEqual(scanner.nodes, [6, 7])
if __name__ == "__main__":
unittest.main()
|
py | 7dfbc84bbde9ad56d2614a9c5ae31c4172df262f | # Author: Mathieu Blondel
# Derek Lim
# License: BSD 3 clause
import os
from matplotlib import image
import numpy as np
from sklearn.externals import joblib
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.cluster import KMeans
root_dir = os.path.dirname(os.path.abspath(__file__))
DATAPATH = os.path.join(root_dir, '..', '..', 'data')
def color_quantize(img, n_colors, name, method):
""" cluster all colors of image """
shape = img.shape
img = img.reshape(-1, 3)
if method == "kmeans":
km = KMeans(n_clusters=n_colors, n_init=1, max_iter=300).fit(img)
centers = km.cluster_centers_
labels = km.labels_
elif method == "rand":
rng = np.random.RandomState(0)
ind = rng.permutation(img.shape[0])
centers = img[ind[:n_colors]]
D = euclidean_distances(centers, img, squared=True)
labels = D.argmin(axis=0)
else:
raise ValueError("Invalid quantization method")
out = "%s/res/%s_%s_%d_colors.pkl" % (root_dir, name, method, n_colors)
joblib.dump((shape, centers, labels), out)
print('Saving color quantization:', out)
def load_color_transfer(img1="comunion", img2="autumn", n_colors=256,
method="kmeans", transpose=False):
if transpose:
img1, img2 = img2, img1
# Load quantized images.
try: # load if already saved
shape1, centers1, labels1 = \
joblib.load("%s/res/%s_%s_%d_colors.pkl" % (root_dir, img1, method, n_colors))
except:
img = image.imread('%s/%s.jpg' % (DATAPATH, img1)).astype(np.float64) / 256
color_quantize(img, n_colors, img1, method=method)
shape1, centers1, labels1 = \
joblib.load("%s/res/%s_%s_%d_colors.pkl" % (root_dir, img1, method, n_colors))
try: # load if already saved
shape2, centers2, labels2 = \
joblib.load("%s/res/%s_%s_%d_colors.pkl" % (root_dir, img2, method, n_colors))
except:
img = image.imread('%s/%s.jpg' % (DATAPATH, img2)).astype(np.float64) / 256
color_quantize(img, n_colors, img2, method=method)
shape2, centers2, labels2 = \
joblib.load("%s/res/%s_%s_%d_colors.pkl" % (root_dir, img2, method, n_colors))
m = centers1.shape[0]
n = centers2.shape[0]
# Prepare histograms and cost matrix.
hist1 = np.bincount(labels1, minlength=m).astype(np.float64)
hist1 /= np.sum(hist1)
hist2 = np.bincount(labels2, minlength=n).astype(np.float64)
hist2 /= np.sum(hist2)
# Remove elements with probability 0.
hist1 += 1e-9
hist1 /= np.sum(hist1)
hist2 += 1e-9
hist2 /= np.sum(hist2)
# Sort centers and histograms.
ind1 = np.argsort(hist1)[::-1]
hist1 = hist1[ind1]
centers1 = centers1[ind1]
inv_map1 = dict((ind1[i], i) for i in range(len(ind1)))
labels1 = np.array([inv_map1[l] for l in labels1])
ind2 = np.argsort(hist2)[::-1]
inv_ind2 = np.arange(len(hist2))[ind2]
hist2 = hist2[ind2]
centers2 = centers2[ind2]
inv_map2 = dict((ind2[i], i) for i in range(len(ind2)))
labels2 = np.array([inv_map2[l] for l in labels2])
# Prepare cost matrix.
C = euclidean_distances(centers1, centers2, squared=True)
return hist1, hist2, C, centers1, centers2, labels1, labels2, shape1, shape2
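# Example usage (hypothetical sketch): the sorted histograms and squared-distance
# cost matrix returned above are the typical inputs to an optimal-transport style
# color-transfer solver, e.g.:
#
#   hist1, hist2, C, centers1, centers2, labels1, labels2, shape1, shape2 = \
#       load_color_transfer("comunion", "autumn", n_colors=256, method="kmeans")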
|
py | 7dfbc896820269b8188a7f8eda7b9f7c224bf37b | # -*- coding: utf-8 -*-
"""
Integration tests for the docker_container states
"""
from __future__ import absolute_import, print_function, unicode_literals
import random
import string
import sys
import salt.utils.path
from salt.ext.six.moves import range # pylint: disable=import-error,redefined-builtin
from tests.support.case import ModuleCase
from tests.support.helpers import destructiveTest, slowTest
from tests.support.mixins import SaltReturnAssertsMixin
# Import Salt Testing Libs
from tests.support.unit import skipIf
def _random_name(prefix=""):
ret = prefix
for _ in range(8):
ret += random.choice(string.ascii_lowercase)
return ret
@destructiveTest
@skipIf(not salt.utils.path.which("dockerd"), "Docker not installed")
class DockerCallTestCase(ModuleCase, SaltReturnAssertsMixin):
"""
Test docker_container states
"""
def setUp(self):
"""
setup docker.call tests
"""
# Create temp dir
self.random_name = _random_name(prefix="salt_test_")
self.image_tag = sys.version_info[0]
self.run_state("docker_image.present", tag=self.image_tag, name="python")
self.run_state(
"docker_container.running",
name=self.random_name,
image="python:{0}".format(self.image_tag),
entrypoint="tail -f /dev/null",
)
def tearDown(self):
"""
teardown docker.call tests
"""
self.run_state("docker_container.absent", name=self.random_name, force=True)
self.run_state(
"docker_image.absent",
images=["python:{0}".format(self.image_tag)],
force=True,
)
delattr(self, "random_name")
delattr(self, "image_tag")
@slowTest
def test_docker_call(self):
"""
check that docker.call works, and works with a container not running as root
"""
ret = self.run_function("docker.call", [self.random_name, "test.ping"])
assert ret is True
@slowTest
def test_docker_sls(self):
"""
check that docker.sls works, and works with a container not running as root
"""
ret = self.run_function("docker.apply", [self.random_name, "core"])
self.assertSaltTrueReturn(ret)
@slowTest
def test_docker_highstate(self):
"""
check that docker.highstate works, and works with a container not running as root
"""
ret = self.run_function("docker.apply", [self.random_name])
self.assertSaltTrueReturn(ret)
|
py | 7dfbc8b3115a679864971daf38b6b3f14e8ff99c | '''
Created on Mar 27, 2019
@author: NOOK
'''
import os
import unittest
import coverage
from time import perf_counter
from unittest import TestCase
class TestCaseBase(TestCase):
def _steps(self):
for name in dir(self): # dir() result is implicitly sorted
if name.startswith("step"):
yield name, getattr(self, name)
def test_steps(self):
if (type(self).__name__ != 'TestCaseBase') :
print(type(self).__name__)
for name, step in self._steps():
try:
with self.subTest(name):
print(' %-50s : ' % name, end='')
start = perf_counter()
step()
print(' OK %10.6f s' % (perf_counter() - start))
except Exception as e:
print(' FAIL %10.6f s' % (perf_counter() - start))
self.fail("{} failed ({}: {})".format(step, type(e), e))
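# Example (hypothetical) subclass: any method whose name starts with "step" is
# discovered by _steps() and run in alphabetical order inside a subTest, so a
# concrete test case could look like:
#
#   class MyFilter_test(TestCaseBase):
#       def step10_setup(self):
#           self.values = [1, 2, 3]
#       def step20_check(self):
#           self.assertEqual(sum(self.values), 6)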
def slow() -> bool:
return True;
def testDataPath( filename : str) -> str:
path = os.getcwd();
path = path.replace("\\", "/")
i = path.find("Python/test")
path = path[0:i] + "testdata/"
return path + filename;
# .. call your code ..
def runAll():
testmodules = [
'components.AbstractRecursiveFilter_test',
'components.EMP_test',
'components.FixedMemoryFilter_test',
'components.FMP_test',
'filters.ManagedFilterBase_test',
'filters.controls.ObservationErrorModel_test',
]
suite = unittest.TestSuite()
path = os.getcwd();
print(path)
for t in testmodules:
try:
# If the module defines a suite() function, call it to get the suite.
mod = __import__(t, globals(), locals(), ['suite'])
suitefn = getattr(mod, 'suite')
suite.addTest(suitefn())
except (ImportError, AttributeError):
# else, just load all the test cases from the module.
suite.addTest(unittest.defaultTestLoader.loadTestsFromName(t))
unittest.TextTestRunner().run(suite)
if __name__ == '__main__':
# path = os.getcwd();
# path = path.replace("\\", "/")
# i = path.find("Python/test")
# path = path[0:i] + "/testdata/"
# print(path)
# cov = coverage.coverage(omit='/usr/lib/python2.6/site-packages/*')
cov = coverage.Coverage()
cov.start()
runAll()
cov.stop()
cov.save()
cov.html_report()
|
py | 7dfbc8ffa3008cf648a38cde9fd9b0857222b274 | """Tests for pyhap.accessory_driver."""
import tempfile
from unittest.mock import MagicMock, patch
from uuid import uuid1
import pytest
from pyhap.accessory import STANDALONE_AID, Accessory, Bridge
from pyhap.accessory_driver import AccessoryDriver
from pyhap.characteristic import (HAP_FORMAT_INT, HAP_PERMISSION_READ,
PROP_FORMAT, PROP_PERMISSIONS,
Characteristic)
from pyhap.const import HAP_REPR_IID, HAP_REPR_CHARS, HAP_REPR_AID, HAP_REPR_VALUE
from pyhap.service import Service
CHAR_PROPS = {
PROP_FORMAT: HAP_FORMAT_INT,
PROP_PERMISSIONS: HAP_PERMISSION_READ,
}
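# The driver fixture below patches out the HAP server, Zeroconf advertising and
# state persistence so an AccessoryDriver can be constructed in tests without
# network or disk side effects.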
@pytest.fixture
def driver():
with patch('pyhap.accessory_driver.HAPServer'), \
patch('pyhap.accessory_driver.Zeroconf'), \
patch('pyhap.accessory_driver.AccessoryDriver.persist'):
yield AccessoryDriver()
def test_auto_add_aid_mac(driver):
acc = Accessory(driver, 'Test Accessory')
driver.add_accessory(acc)
assert acc.aid == STANDALONE_AID
assert driver.state.mac is not None
def test_not_standalone_aid(driver):
acc = Accessory(driver, 'Test Accessory', aid=STANDALONE_AID + 1)
with pytest.raises(ValueError):
driver.add_accessory(acc)
def test_persist_load():
with tempfile.NamedTemporaryFile(mode='r+') as file:
with patch('pyhap.accessory_driver.HAPServer'), \
patch('pyhap.accessory_driver.Zeroconf'):
driver = AccessoryDriver(port=51234, persist_file=file.name)
driver.persist()
pk = driver.state.public_key
# Re-start driver with a "new" accessory. State gets loaded into
# the new accessory.
driver = AccessoryDriver(port=51234, persist_file=file.name)
driver.load()
assert driver.state.public_key == pk
def test_service_callbacks(driver):
bridge = Bridge(driver,"mybridge")
acc = Accessory(driver, 'TestAcc', aid=2)
acc2 = Accessory(driver, 'TestAcc2', aid=3)
service = Service(uuid1(), 'Lightbulb')
char_on = Characteristic('On', uuid1(), CHAR_PROPS)
char_brightness = Characteristic('Brightness', uuid1(), CHAR_PROPS)
service.add_characteristic(char_on)
service.add_characteristic(char_brightness)
mock_callback = MagicMock()
service.setter_callback = mock_callback
acc.add_service(service)
bridge.add_accessory(acc)
service2 = Service(uuid1(), 'Lightbulb')
char_on2 = Characteristic('On', uuid1(), CHAR_PROPS)
char_brightness2 = Characteristic('Brightness', uuid1(), CHAR_PROPS)
service2.add_characteristic(char_on2)
service2.add_characteristic(char_brightness2)
mock_callback2 = MagicMock()
service2.setter_callback = mock_callback2
acc2.add_service(service2)
bridge.add_accessory(acc2)
char_on_iid = char_on.to_HAP()[HAP_REPR_IID]
char_brightness_iid = char_brightness.to_HAP()[HAP_REPR_IID]
char_on2_iid = char_on2.to_HAP()[HAP_REPR_IID]
char_brightness2_iid = char_brightness2.to_HAP()[HAP_REPR_IID]
driver.add_accessory(bridge)
driver.set_characteristics({
HAP_REPR_CHARS: [{
HAP_REPR_AID: acc.aid,
HAP_REPR_IID: char_on_iid,
HAP_REPR_VALUE: True
}, {
HAP_REPR_AID: acc.aid,
HAP_REPR_IID: char_brightness_iid,
HAP_REPR_VALUE: 88
}, {
HAP_REPR_AID: acc2.aid,
HAP_REPR_IID: char_on2_iid,
HAP_REPR_VALUE: True
}, {
HAP_REPR_AID: acc2.aid,
HAP_REPR_IID: char_brightness2_iid,
HAP_REPR_VALUE: 12
}]
}, "mock_addr")
mock_callback2.assert_called_with({'On': True, 'Brightness': 12})
mock_callback.assert_called_with({'On': True, 'Brightness': 88})
def test_start_stop_sync_acc(driver):
class Acc(Accessory):
running = True
@Accessory.run_at_interval(0)
def run(self):
self.running = False
driver.stop()
def setup_message(self):
pass
acc = Acc(driver, 'TestAcc')
driver.add_accessory(acc)
driver.start()
assert not acc.running
def test_start_stop_async_acc(driver):
class Acc(Accessory):
@Accessory.run_at_interval(0)
async def run(self):
driver.stop()
def setup_message(self):
pass
acc = Acc(driver, 'TestAcc')
driver.add_accessory(acc)
driver.start()
assert driver.loop.is_closed()
def test_send_events(driver):
class LoopMock():
runcount = 0
def is_closed(self):
self.runcount += 1
if self.runcount > 1:
return True
return False
class HapServerMock():
pushed_events = []
def push_event(self, bytedata, client_addr):
self.pushed_events.extend([[bytedata, client_addr]])
return 1
def get_pushed_events(self):
return self.pushed_events
driver.http_server = HapServerMock()
driver.loop = LoopMock()
driver.topics = {"mocktopic": ["client1", "client2", "client3"]}
driver.event_queue.put(("mocktopic", "bytedata", "client1"))
driver.send_events()
# Only client2 and client3 get the event when client1 sent it
assert (driver.http_server.get_pushed_events() ==
[["bytedata", "client2"], ["bytedata", "client3"]])
|
py | 7dfbc97eeb00510afad1890c08a830bdd9403c11 | """Auto-generated file, do not edit by hand. MP metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_MP = PhoneMetadata(id='MP', country_code=1, international_prefix='011',
general_desc=PhoneNumberDesc(national_number_pattern='[58]\\d{9}|(?:67|90)0\\d{7}', possible_length=(10,), possible_length_local_only=(7,)),
fixed_line=PhoneNumberDesc(national_number_pattern='670(?:2(?:3[3-7]|56|8[5-8])|32[1-38]|4(?:33|8[348])|5(?:32|55|88)|6(?:64|70|82)|78[3589]|8[3-9]8|989)\\d{4}', example_number='6702345678', possible_length=(10,), possible_length_local_only=(7,)),
mobile=PhoneNumberDesc(national_number_pattern='670(?:2(?:3[3-7]|56|8[5-8])|32[1-38]|4(?:33|8[348])|5(?:32|55|88)|6(?:64|70|82)|78[3589]|8[3-9]8|989)\\d{4}', example_number='6702345678', possible_length=(10,), possible_length_local_only=(7,)),
toll_free=PhoneNumberDesc(national_number_pattern='8(?:00|33|44|55|66|77|88)[2-9]\\d{6}', example_number='8002123456', possible_length=(10,)),
premium_rate=PhoneNumberDesc(national_number_pattern='900[2-9]\\d{6}', example_number='9002123456', possible_length=(10,)),
personal_number=PhoneNumberDesc(national_number_pattern='5(?:00|2[12]|33|44|66|77|88)[2-9]\\d{6}', example_number='5002345678', possible_length=(10,)),
national_prefix='1',
national_prefix_for_parsing='1|([2-9]\\d{6})$',
national_prefix_transform_rule='670\\1',
leading_digits='670')
|
py | 7dfbc9ad128e7b9f5d90d60aebba5b26ac98bbff | '''
Created on Jun 24, 2013
@author: Jonas Zaddach <[email protected]>
'''
from collections import defaultdict
class EmulatorTargetCallProxy():
MONITOR_EVENTS = ["emulator_pre_read_request",
"emulator_post_read_request",
"emulator_pre_write_request",
"emulator_post_write_request"]
def __init__(self):
self._target = None
self._monitor_hooks = defaultdict(list)
def set_target(self, target):
self._target = target
def add_monitor(self, monitor):
for monitor_event in self.MONITOR_EVENTS:
if hasattr(monitor, monitor_event):
self._monitor_hooks[monitor_event].append(monitor)
def remove_monitor(self, monitor):
for (_, monitor_hooks) in self._monitor_hooks.items():
try:
monitor_hooks.remove(monitor)
except ValueError:
pass
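# Example (hypothetical) monitor: any object that implements one or more of the
# MONITOR_EVENTS hook names can be registered and is called around each forwarded
# memory access, e.g.:
#
#   class TracingMonitor(object):
#       def emulator_pre_read_request(self, params):
#           print("read 0x%x, size %d" % (params["address"], params["size"]))
#       def emulator_post_write_request(self, params):
#           print("wrote 0x%x = 0x%x" % (params["address"], params["value"]))
#
#   proxy = EmulatorTargetCallProxy()
#   proxy.add_monitor(TracingMonitor())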
def handle_emulator_read_request(self, params):
assert(self._target)
for monitor in self._monitor_hooks["emulator_pre_read_request"]:
monitor.emulator_pre_read_request(params)
params["value"] = self._target.read_typed_memory(params["address"], params["size"])
for monitor in self._monitor_hooks["emulator_post_read_request"]:
monitor.emulator_post_read_request(params)
return params["value"]
def handle_emulator_write_request(self, params):
assert(self._target)
for monitor in self._monitor_hooks["emulator_pre_write_request"]:
monitor.emulator_pre_write_request(params)
self._target.write_typed_memory(params["address"], params["size"], params["value"])
for monitor in self._monitor_hooks["emulator_post_write_request"]:
monitor.emulator_post_write_request(params)
def handle_emulator_set_cpu_state_request(self, params):
# this function sets the CPU state on the target device
assert(self._target)
# TODO: fire events?
for reg in params["cpu_state"]:
if reg == "cpsr":
# skip cpsr register
continue
value = int(params["cpu_state"][reg], 16)
self._target.set_register(reg, value)
def handle_emulator_get_cpu_state_request(self, params):
# this function gets the CPU state on the target device
assert(self._target)
# TODO: fire events?
ret = {}
for r in range(13):
val = self._target.get_register("r"+str(r))
ret["cpu_state_"+"r"+str(r)] = hex(val)
val = self._target.get_register("sp")
ret["cpu_state_r13"] = hex(val)
val = self._target.get_register("lr")
ret["cpu_state_r14"] = hex(val)
val = self._target.get_register("pc")
ret["cpu_state_pc"] = hex(val)
return ret
def handle_emulator_continue_request(self, params):
assert(self._target)
self._target.cont()
def handle_emulator_get_checksum_request(self, params):
assert(self._target)
cmd = "-gdb-show remote checksum %s %s" % \
(hex(params['address'])[2:], params['size'][2:])
return self._target.execute_gdb_command(cmd)
#return self._target.get_checksum(\
# params['address'], params['size'])
|
py | 7dfbca2306f0de5e73a0c0c376a8dd3981506aa7 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Validate environment states.
Example usage:
.. code-block::
$ cat << EOF |
benchmark,reward,walltime,commandline
cBench-v0/crc32,0,1.2,opt input.bc -o output.bc
EOF
python -m compiler_gym.bin.validate --env=llvm-v0 --reward=IrInstructionCount
Use this script to validate environment states. Environment states are read from
stdin as a comma-separated list of benchmark names, walltimes, episode rewards,
and commandlines. Each state is validated by replaying the commandline and
validating that the reward matches the expected value. Further, some benchmarks
allow for validation of program semantics. When available, those additional
checks will be automatically run.
Input Format
------------
The correct format for generating input states can be generated using
:func:`env.state.to_csv() <compiler_gym.envs.CompilerEnvState.to_csv>`. The
input CSV must start with a header row. A valid header row can be generated
using
:func:`env.state.csv_header() <compiler_gym.envs.CompilerEnvState.csv_header>`.
Full example:
>>> env = gym.make("llvm-v0")
>>> env.reset()
>>> env.step(0)
>>> print(env.state.csv_header())
benchmark,reward,walltime,commandline
>>> print(env.state.to_csv())
benchmark://cBench-v0/rijndael,,20.53565216064453,opt -add-discriminators input.bc -o output.bc
%
Output Format
-------------
This script prints one line per input state. The order of input states is not
preserved. A successfully validated state has the format:
.. code-block::
✅ <benchmark_name> <reproduced_reward>
Else if validation fails, the output is:
.. code-block::
❌ <benchmark_name> <error_details>
"""
import csv
import sys
from absl import app, flags
import compiler_gym.util.flags.dataset # Flag definition.
import compiler_gym.util.flags.nproc # Flag definition.
from compiler_gym.envs.compiler_env import CompilerEnvState
from compiler_gym.util.flags.env_from_flags import env_from_flags
from compiler_gym.validate import validate_states
FLAGS = flags.FLAGS
def main(argv):
"""Main entry point."""
assert len(argv) == 1, f"Unrecognized flags: {argv[1:]}"
data = sys.stdin.readlines()
states = []
for line in csv.DictReader(data):
try:
line["reward"] = float(line["reward"])
states.append(CompilerEnvState(**line))
except (TypeError, KeyError) as e:
print(f"Failed to parse input: `{e}`", file=sys.stderr)
sys.exit(1)
error_count = 0
for result in validate_states(
env_from_flags, states, datasets=FLAGS.dataset, nproc=FLAGS.nproc
):
print(result)
if result.failed:
error_count += 1
if error_count:
sys.exit(1)
if __name__ == "__main__":
app.run(main)
|
py | 7dfbca96a8f8b35bd3d7947b783ddb3dff13be24 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from bs4 import BeautifulSoup
import re
import scrape_common as sc
# get pdf and xlsx URL from covid19 page of TI
main_url = 'https://www4.ti.ch/dss/dsp/covid19/home/'
d = sc.download(main_url, silent=True)
soup = BeautifulSoup(d, 'html.parser')
pdf_url = soup.find('a', string=re.compile(r'Dati stato.*')).get('href')
pdf_url = f'https://www4.ti.ch/{pdf_url}'
pdf_content = sc.pdfdownload(pdf_url, silent=True, raw=True)
dd = sc.DayData(canton='TI', url=pdf_url)
dd.datetime = sc.find(r'(?:Stato )?(\d+\.\d+\.20\d{2})', pdf_content)
dd.isolated = sc.find(r'(\d+)\sPersone\sin\sisolamento', pdf_content)
dd.quarantined = sc.find(r'(\d+)\sPersone\sin\squarantena', pdf_content)
is_first = True
if dd:
print(dd)
is_first = False
xls_url = soup.find(href=re.compile(r"\.xlsx$")).get('href')
assert xls_url, "URL is empty"
if not xls_url.startswith('http'):
xls_url = f'https://www4.ti.ch/{xls_url}'
xls = sc.xlsdownload(xls_url, silent=True)
rows = sc.parse_xls(xls, header_row=0)
for row in rows:
if not is_first:
print('-' * 10)
is_first = False
dd = sc.DayData(canton='TI', url=xls_url)
dd.datetime = f"{row['Data'].date().isoformat()}"
if row['Ora']:
dd.datetime += f"T{row['Ora'].time().isoformat()}"
dd.cases = row['Totale casi confermati']
dd.hospitalized = row['Pazienti ricoverati attualmente']
dd.icu = row['Pazienti in cure intense']
dd.vent = row['Pazienti ventilati']
dd.recovered = row['Totale pazienti dimessi da ospedali']
dd.deaths = row['Totale decessi']
print(dd)
|
py | 7dfbcacae2138ffdbf116e2370cff42e7ca663b0 | from PyML.utils.table import Table
|
py | 7dfbcaf3d19036860d22703b853d13b400304339 | from electrum_safecoin.i18n import _
fullname = _('Cosigner Pool')
description = ' '.join([
_("This plugin facilitates the use of multi-signatures wallets."),
_("It sends and receives partially signed transactions from/to your cosigner wallet."),
_("Transactions are encrypted and stored on a remote server.")
])
#requires_wallet_type = ['2of2', '2of3']
available_for = ['qt']
|
py | 7dfbcbc4dce53e467d491ae8f736c348f1015ef8 | import sys
import os
from string import ascii_uppercase
inputfile=os.path.join(sys.path[0],sys.argv[1])
with open(inputfile) as f:
lines = f.readlines()
nbr_of_workers = int(sys.argv[2])
time_base = int(sys.argv[3])
instructions = []
nodes = {}
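# Each step A-Z takes (time_base + alphabetical position) seconds to complete
# (A -> time_base+1, ..., Z -> time_base+26); all workers start out idle.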
remaining_work = {node : duration for duration, node in enumerate(ascii_uppercase, start=time_base+1)}
workers = {worker : None for worker in range(1, nbr_of_workers+1)}
rounds = 0
for line in lines:
before = line[5]
after = line[36]
instructions.append((before,after))
nodes[before] = []
nodes[after] = []
for instruction in instructions:
before, after = instruction
nodes[after].append(before)
def no_parents_not_started():
keys = []
for node, parent_list in nodes.items():
if not parent_list and not node in workers.values():
keys.append(node)
return keys
def remove_node(node):
del nodes[node]
for parent_list in nodes.values():
if node in parent_list:
parent_list.remove(node)
def work():
for node in workers.values():
if node:
remaining_work[node] -=1
global rounds
rounds +=1
def remove_finished_nodes():
for worker, node in workers.items():
if node and remaining_work[node] == 0:
remove_node(node)
workers[worker] = None
def get_worker():
for worker, node in workers.items():
if not node:
return worker
def assign_work(assignable_nodes_list):
assignable_nodes_list.sort(reverse=True)
while(assignable_nodes_list):
worker = get_worker()
if not worker:
return
node = assignable_nodes_list.pop()
workers[worker] = node
def busy_workers():
for node in workers.values():
if node:
return True
return False
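# Main simulation loop: every tick, assign ready steps (no unfinished parents) to
# idle workers, perform one second of work, then retire finished steps; stop once
# nothing is assignable and no worker is busy.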
while(True):
assignable_nodes_list = no_parents_not_started()
if not assignable_nodes_list and not busy_workers():
break
assign_work(assignable_nodes_list)
work()
remove_finished_nodes()
print(rounds)
|
py | 7dfbcbc9d9685d4c5507fd0ecc0dc4fce481157b | #!/usr/bin/env python3
import os
from setuptools import setup, find_packages
NAME = 'cellannotation'
VERSION = "0.1.0"
AUTHOR = 'Bioinformatics Laboratory, FRI UL'
AUTHOR_EMAIL = '[email protected]'
URL = 'http://biolab.si/'
DESCRIPTION = 'Package for annotating the data (e.g. cell data).'
with open('README.md') as f:
README = f.read()
KEYWORDS = [
'cells',
'annotation'
]
PACKAGES = find_packages()
INSTALL_REQUIRES = sorted(set(
line.partition('#')[0].strip()
for line in open(os.path.join(os.path.dirname(__file__), 'requirements.txt'))
) - {''})
if __name__ == '__main__':
setup(
name=NAME,
version=VERSION,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
url=URL,
description=DESCRIPTION,
long_description=README,
packages=PACKAGES,
keywords=KEYWORDS,
install_requires=INSTALL_REQUIRES,
test_suite='cellannotation.tests.suite',
classifiers=[
"Programming Language :: Python",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Development Status :: 4 - Beta"
]
)
|
py | 7dfbcc8c3c4c9b00148c71fd2e6c34853ddeab84 | # a place to hold event type constants used among many data models, rules, or policies
ADMIN_ROLE_ASSIGNED = 'admin_role_assigned'
FAILED_LOGIN = 'failed_login'
SUCCESSFUL_LOGIN = 'successful_login'
|
py | 7dfbce16d05249d3948eb1548a9aa3352d0a59c3 | from .builder import MGDistiller, SMDistiller
from .sampler import ExtraDistributedSampler
|
py | 7dfbcecba4aa2c03439738254272c5be05050913 | from .document_translator import KafkaTranslate
|
py | 7dfbd0941c0497d2738fe09daa08292313ebf709 | import os
import cv2
import torch
import argparse
import numpy as np
from tqdm import tqdm
from torch.nn import functional as F
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
if torch.cuda.is_available():
torch.set_grad_enabled(False)
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = True
parser = argparse.ArgumentParser(description='Interpolation for a pair of images')
parser.add_argument('--video', dest='video', required=True)
parser.add_argument('--montage', dest='montage', action='store_true', help='montage origin video')
parser.add_argument('--skip', dest='skip', action='store_true', help='whether to remove static frames before processing')
parser.add_argument('--fps', dest='fps', type=int, default=60)
parser.add_argument('--model', dest='model', type=str, default='RIFE')
args = parser.parse_args()
if args.model == '2F':
from model.RIFE2F import Model
else:
from model.RIFE import Model
model = Model()
model.load_model('./train_log')
model.eval()
model.device()
videoCapture = cv2.VideoCapture(args.video)
fps = np.round(videoCapture.get(cv2.CAP_PROP_FPS))
success, frame = videoCapture.read()
h, w, _ = frame.shape
fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')
output = cv2.VideoWriter('{}_4x.mp4'.format(args.video[:-4]), fourcc, args.fps, (w, h))
if args.montage:
left = w // 4
w = w // 2
ph = ((h - 1) // 32 + 1) * 32
pw = ((w - 1) // 32 + 1) * 32
padding = (0, pw - w, 0, ph - h)
tot_frame = videoCapture.get(cv2.CAP_PROP_FRAME_COUNT)
print('{}.mp4, {} frames in total, {}FPS to {}FPS'.format(args.video[:-4], tot_frame, fps, args.fps))
pbar = tqdm(total=tot_frame)
cnt = 0
if args.montage:
frame = frame[:, left: left + w]
while success:
lastframe = frame
success, frame = videoCapture.read()
if success:
if args.montage:
frame = frame[:, left: left + w]
I0 = torch.from_numpy(np.transpose(lastframe, (2,0,1)).astype("float32") / 255.).to(device).unsqueeze(0)
I1 = torch.from_numpy(np.transpose(frame, (2,0,1)).astype("float32") / 255.).to(device).unsqueeze(0)
I0 = F.pad(I0, padding)
I1 = F.pad(I1, padding)
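# p is the mean absolute difference between 16x16 downsampled frames: a very
# small p indicates a (near-)static frame pair, while a large p suggests a scene
# cut, in which case interpolated frames are replaced by copies of the originals.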
p = (F.interpolate(I0, (16, 16), mode='bilinear', align_corners=False)
- F.interpolate(I1, (16, 16), mode='bilinear', align_corners=False)).abs().mean()
if p < 1e-3 and args.skip:
if cnt % 10 == 0:
print("Warning: Your video has {} static frames, skipping them may change the duration of the generated video.".format(cnt))
cnt += 1
pbar.update(1)
continue
if p > 0.2:
mid0 = lastframe
mid1 = lastframe
mid2 = frame
else:
mid1 = model.inference(I0, I1)
mid = model.inference(torch.cat((I0, mid1), 0), torch.cat((mid1, I1), 0))
mid0 = (((mid[0] * 255.).cpu().detach().numpy().transpose(1, 2, 0))).astype('uint8')
mid1 = (((mid1[0] * 255.).cpu().detach().numpy().transpose(1, 2, 0))).astype('uint8')
mid2 = (((mid[1]* 255.).cpu().detach().numpy().transpose(1, 2, 0))).astype('uint8')
if args.montage:
output.write(np.concatenate((lastframe, lastframe), 1))
output.write(np.concatenate((lastframe, mid0[:h, :w]), 1))
output.write(np.concatenate((lastframe, mid1[:h, :w]), 1))
output.write(np.concatenate((lastframe, mid2[:h, :w]), 1))
else:
output.write(lastframe)
output.write(mid0[:h, :w])
output.write(mid1[:h, :w])
output.write(mid2[:h, :w])
pbar.update(1)
if args.montage:
output.write(np.concatenate((lastframe, lastframe), 1))
else:
output.write(lastframe)
pbar.close()
output.release()
|
py | 7dfbd1f013857b887d99f5c6d036c97f074e1765 | from zope.schema.vocabulary import SimpleTerm
from zope.schema.vocabulary import SimpleVocabulary
def card_availability(context):
return SimpleVocabulary([
SimpleTerm(0, 'A', 'No Card'),
SimpleTerm(1, 'B', '1 Card'),
SimpleTerm(2, 'C', '2 Cards'),
SimpleTerm(3, 'D', '3 Cards'),
SimpleTerm(4, 'E', '4 Cards'),
SimpleTerm(5, 'F', '5 Cards')
])
|
py | 7dfbd41e5813bb89e96691931bf8eceeffb5f451 | """
NETCONF connections
File: connections.py
Author: Radek Krejci <[email protected]>
"""
import json
import os
import logging
from liberouterapi import socketio, auth
from flask import request
from eventlet.timeout import Timeout
import yang
import netconf2 as nc
from .inventory import INVENTORY
from .socketio import sio_emit, sio_wait, sio_clean
from .devices import devices_get, devices_replace
from .error import NetopeerException
from .schemas import getschema, schemas_update
from .data import *
log = logging.getLogger(__name__)
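# Active NETCONF sessions per user; each session is stored under a
# "hostname:port:session-id" key built in connect() below.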
sessions = {}
def hostkey_check(hostname, state, keytype, hexa, priv):
if 'fingerprint' in priv['device']:
# check according to the stored fingerprint from previous connection
if hexa == priv['device']['fingerprint']:
return True
elif state != 2:
log.error("Incorrect host key state")
state = 2
# ask frontend/user for hostkey check
params = {'id': priv['session']['session_id'], 'hostname' : hostname, 'state' : state, 'keytype' : keytype, 'hexa' : hexa}
sio_emit('hostcheck', params)
result = False
timeout = Timeout(30)
try:
# wait for response from the frontend
data = sio_wait(priv['session']['session_id'])
result = data['result']
except Timeout:
# no response received within the timeout
log.info("socketio: hostcheck timeout.")
except KeyError:
# invalid response
log.error("socketio: invalid hostcheck_result received.")
finally:
# we have the response
sio_clean(priv['session']['session_id'])
timeout.cancel()
if result:
# store confirmed fingerprint for future connections
priv['device']['fingerprint'] = hexa;
devices_replace(priv['device']['id'], priv['session']['user'].username, priv['device'])
return result
def auth_common(session_id):
result = None
timeout = Timeout(60)
try:
# wait for response from the frontend
data = sio_wait(session_id)
result = data['password']
except Timeout:
# no response received within the timeout
log.info("socketio: auth request timeout.")
except KeyError:
# no password
log.info("socketio: invalid credential data received.")
finally:
# we have the response
sio_clean(session_id)
timeout.cancel()
return result
def auth_password(username, hostname, priv):
sio_emit('device_auth', {'id': priv, 'type': 'Password Authentication', 'msg': username + '@' + hostname})
return auth_common(priv)
def auth_interactive(name, instruction, prompt, priv):
sio_emit('device_auth', {'id': priv, 'type': name, 'msg': instruction, 'prompt': prompt})
return auth_common(priv)
@auth.required()
def connect():
session = auth.lookup(request.headers.get('lgui-Authorization', None))
user = session['user']
path = os.path.join(INVENTORY, user.username)
data = request.get_json()
if 'id' in data:
# stored device
device = devices_get(data['id'], user.username)
elif 'device' in data:
# one-time connect, the device is specified in request
device = data['device']
else:
raise NetopeerException('Invalid connection request.')
if not device:
raise NetopeerException('Unknown device to connect to request.')
nc.setSearchpath(path)
nc.setSchemaCallback(getschema, session)
if 'password' in device:
ssh = nc.SSH(device['username'], password = device['password'])
else:
ssh = nc.SSH(device['username'])
ssh.setAuthPasswordClb(auth_password, session['session_id'])
ssh.setAuthInteractiveClb(auth_interactive, session['session_id'])
ssh.setAuthHostkeyCheckClb(hostkey_check, {'session': session, 'device' : device})
try:
ncs = nc.Session(device['hostname'], device['port'], ssh)
except Exception as e:
nc.setSchemaCallback(None)
return(json.dumps({'success': False, 'error-msg': str(e)}))
nc.setSchemaCallback(None)
if not user.username in sessions:
sessions[user.username] = {}
# use key (as hostname:port:session-id) to store the created NETCONF session
key = ncs.host + ":" + str(ncs.port) + ":" + ncs.id
sessions[user.username][key] = {}
sessions[user.username][key]['session'] = ncs
# update inventory's list of schemas
schemas_update(session)
return(json.dumps({'success': True, 'session-key': key}))
@auth.required()
def session_get_capabilities():
session = auth.lookup(request.headers.get('lgui-Authorization', None))
user = session['user']
req = request.args.to_dict()
if not 'key' in req:
return(json.dumps({'success': False, 'error-msg': 'Missing session key.'}))
if not user.username in sessions:
sessions[user.username] = {}
key = req['key']
if not key in sessions[user.username]:
return(json.dumps({'success': False, 'error-msg': 'Invalid session key.'}))
cpblts = []
for c in sessions[user.username][key]['session'].capabilities:
cpblts.append(c)
return(json.dumps({'success': True, 'capabilities': cpblts}))
@auth.required()
def session_get():
session = auth.lookup(request.headers.get('lgui-Authorization', None))
user = session['user']
req = request.args.to_dict()
if not 'key' in req:
return(json.dumps({'success': False, 'error-msg': 'Missing session key.'}))
if not 'recursive' in req:
return(json.dumps({'success': False, 'error-msg': 'Missing recursive flag.'}))
if not user.username in sessions:
sessions[user.username] = {}
key = req['key']
if not key in sessions[user.username]:
return(json.dumps({'success': False, 'error-msg': 'Invalid session key.'}))
try:
sessions[user.username][key]['data'] = sessions[user.username][key]['session'].rpcGet()
except ConnectionError as e:
reply = {'success': False, 'error': [{'msg': str(e)}]}
del sessions[user.username][key]
return(json.dumps(reply))
except nc.ReplyError as e:
reply = {'success': False, 'error': []}
for err in e.args[0]:
reply['error'].append(json.loads(str(err)))
return(json.dumps(reply))
if not 'path' in req:
return(dataInfoRoots(sessions[user.username][key]['data'], True if req['recursive'] == 'true' else False))
else:
return(dataInfoSubtree(sessions[user.username][key]['data'], req['path'], True if req['recursive'] == 'true' else False))
def _checkvalue(session, req, schema):
user = session['user'];
if not 'key' in req:
return(json.dumps({'success': False, 'error-msg': 'Missing session key.'}))
if not 'path' in req:
return(json.dumps({'success': False, 'error-msg': 'Missing path to validate value.'}))
if not 'value' in req:
return(json.dumps({'success': False, 'error-msg': 'Missing value to validate.'}))
key = req['key']
if not key in sessions[user.username]:
return(json.dumps({'success': False, 'error-msg': 'Invalid session key.'}))
ctx = sessions[user.username][key]['session'].context;
if schema:
search = ctx.find_path(req['path'])
else:
search = sessions[user.username][key]['data'].find_path(req['path'])
if search.number() != 1:
return(json.dumps({'success': False, 'error-msg': 'Invalid data path.'}))
if schema:
node = search.schema()[0]
else:
node = search.data()[0]
if node.validate_value(req['value']):
errors = yang.get_ly_errors(ctx)
if errors.size():
return(json.dumps({'success': False, 'error-msg': errors[errors.size() - 1].errmsg()}))
else:
return(json.dumps({'success': False, 'error-msg': 'unknown error'}))
return(json.dumps({'success': True}))
@auth.required()
def data_checkvalue():
session = auth.lookup(request.headers.get('lgui-Authorization', None))
req = request.args.to_dict()
return _checkvalue(session, req, False)
@auth.required()
def schema_checkvalue():
session = auth.lookup(request.headers.get('lgui-Authorization', None))
req = request.args.to_dict()
return _checkvalue(session, req, True)
@auth.required()
def schema_values():
session = auth.lookup(request.headers.get('lgui-Authorization', None))
user = session['user']
req = request.args.to_dict()
if not 'key' in req:
return(json.dumps({'success': False, 'error-msg': 'Missing session key.'}))
if not 'path' in req:
return(json.dumps({'success': False, 'error-msg': 'Missing path to validate value.'}))
key = req['key']
if not key in sessions[user.username]:
return(json.dumps({'success': False, 'error-msg': 'Invalid session key.'}))
search = sessions[user.username][key]['session'].context.find_path(req['path'])
if search.number() != 1:
return(json.dumps({'success': False, 'error-msg': 'Invalid data path.'}))
schema = search.schema()[0]
if schema.nodetype() != yang.LYS_LEAF and schema.nodetype() != yang.LYS_LEAFLIST:
result = None
else:
result = typeValues(schema.subtype().type(), [])
return(json.dumps({'success': True, 'data': result}))
@auth.required()
def schema_info():
session = auth.lookup(request.headers.get('lgui-Authorization', None))
user = session['user']
req = request.args.to_dict()
if not 'key' in req:
return(json.dumps({'success': False, 'error-msg': 'Missing session key.'}))
if not 'path' in req:
return(json.dumps({'success': False, 'error-msg': 'Missing path to validate value.'}))
key = req['key']
if not key in sessions[user.username]:
return(json.dumps({'success': False, 'error-msg': 'Invalid session key.'}))
if req['path'] == '/':
node = None
else:
search = sessions[user.username][key]['session'].context.find_path(req['path'])
if search.number() != 1:
return(json.dumps({'success': False, 'error-msg': 'Invalid data path.'}))
node = search.schema()[0]
result = [];
if 'relative' in req:
if req['relative'] == 'children':
if node:
instantiables = node.child_instantiables(0)
else:
# top level
instantiables = sessions[user.username][key]['session'].context.data_instantiables(0)
elif req['relative'] == 'siblings':
if node.parent():
instantiables = node.parent().child_instantiables(0)
else:
# top level
instantiables = sessions[user.username][key]['session'].context.data_instantiables(0)
else:
return(json.dumps({'success': False, 'error-msg': 'Invalid relative parameter.'}))
for child in instantiables:
if child.flags() & yang.LYS_CONFIG_R:
# ignore status nodes
continue
if child.nodetype() & (yang.LYS_RPC | yang.LYS_NOTIF | yang.LYS_ACTION):
# ignore RPCs, Notifications and Actions
continue
result.append(schemaInfoNode(child))
else:
result.append(schemaInfoNode(node))
return(json.dumps({'success': True, 'data': result}))
def _create_child(ctx, parent, child_def):
at = child_def['info']['module'].find('@')
if at == -1:
module = ctx.get_module(child_def['info']['module'])
else:
module = ctx.get_module(child_def['info']['module'][:at], child_def['info']['module'][at + 1:])
# print('child: ' + json.dumps(child_def))
# print('parent: ' + parent.schema().name())
# print('module: ' + module.name())
# print('name: ' + child_def['info']['name'])
if child_def['info']['type'] == 4 :
# print('value: ' + str(child_def['value']))
yang.Data_Node(parent, module, child_def['info']['name'], child_def['value'])
elif child_def['info']['type'] == 8:
# print('value: ' + child_def['value'][0])
yang.Data_Node(parent, module, child_def['info']['name'], child_def['value'][0])
else:
child = yang.Data_Node(parent, module, child_def['info']['name'])
if 'children' in child_def:
for grandchild in child_def['children']:
_create_child(ctx, child, grandchild)
@auth.required()
def session_commit():
session = auth.lookup(request.headers.get('lgui-Authorization', None))
user = session['user']
req = request.get_json(keep_order = True)
if not 'key' in req:
return(json.dumps({'success': False, 'error-msg': 'Missing session key.'}))
if not 'modifications' in req:
return(json.dumps({'success': False, 'error-msg': 'Missing modifications.'}))
mods = req['modifications']
ctx = sessions[user.username][req['key']]['session'].context
root = None
reorders = []
for key in mods:
recursion = False
# get correct path and value if needed
path = key
value = None
if mods[key]['type'] == 'change':
value = mods[key]['value']
elif mods[key]['type'] == 'create' or mods[key]['type'] == 'replace':
if mods[key]['data']['info']['type'] == 1:
# creating/replacing container
recursion = True
elif mods[key]['data']['info']['type'] == 4:
# creating/replacing leaf
value = mods[key]['data']['value']
elif mods[key]['data']['info']['type'] == 8:
# creating/replacing leaf-list
value = mods[key]['data']['value'][0]
path = mods[key]['data']['path']
elif mods[key]['data']['info']['type'] == 16:
recursion = True
path = mods[key]['data']['path']
elif mods[key]['type'] == 'reorder':
# postpone reorders
reorders.extend(mods[key]['transactions'])
continue
# create node
# print("creating " + path)
# print("value " + str(value))
if root:
root.new_path(ctx, path, value, 0, 0)
else:
root = yang.Data_Node(ctx, path, value, 0, 0)
node = root.find_path(path).data()[0];
# set operation attribute and add additional data if any
if mods[key]['type'] == 'change':
node.insert_attr(None, 'ietf-netconf:operation', 'merge')
elif mods[key]['type'] == 'delete':
node.insert_attr(None, 'ietf-netconf:operation', 'delete')
elif mods[key]['type'] == 'create':
node.insert_attr(None, 'ietf-netconf:operation', 'create')
elif mods[key]['type'] == 'replace':
node.insert_attr(None, 'ietf-netconf:operation', 'replace')
else:
return(json.dumps({'success': False, 'error-msg': 'Invalid modification ' + key}))
if recursion and 'children' in mods[key]['data']:
for child in mods[key]['data']['children']:
if 'key' in child['info'] and child['info']['key']:
continue
_create_child(ctx, node, child)
# finally process reorders which must be last since they may refer newly created nodes
# and they do not reflect removed nodes
for move in reorders:
try:
node = root.find_path(move['node']).data()[0];
parent = node.parent()
node.unlink()
if parent:
parent.insert(node)
else:
root.insert_sibling(node)
except:
if root:
root.new_path(ctx, move['node'], None, 0, 0)
else:
root = yang.Data_Node(ctx, move['node'], None, 0, 0)
node = root.find_path(move['node']).data()[0];
node.insert_attr(None, 'yang:insert', move['insert'])
if move['insert'] == 'after' or move['insert'] == 'before':
if 'key' in move:
node.insert_attr(None, 'yang:key', move['key'])
elif 'value' in move:
node.insert_attr(None, 'yang:value', move['value'])
# print(root.print_mem(yang.LYD_XML, yang.LYP_FORMAT))
try:
sessions[user.username][req['key']]['session'].rpcEditConfig(nc.DATASTORE_RUNNING, root)
except nc.ReplyError as e:
reply = {'success': False, 'error': []}
for err in e.args[0]:
reply['error'].append(json.loads(str(err)))
return(json.dumps(reply))
return(json.dumps({'success': True}))
@auth.required()
def session_close():
session = auth.lookup(request.headers.get('lgui-Authorization', None))
user = session['user']
req = request.args.to_dict()
if not 'key' in req:
return(json.dumps({'success': False, 'error-msg': 'Missing session key.'}))
if not user.username in sessions:
sessions[user.username] = {}
key = req['key']
if not key in sessions[user.username]:
return(json.dumps({'success': False, 'error-msg': 'Invalid session key.'}))
del sessions[user.username][key]
return(json.dumps({'success': True}))
@auth.required()
def session_alive():
session = auth.lookup(request.headers.get('lgui-Authorization', None))
user = session['user']
req = request.args.to_dict()
if not 'key' in req:
return(json.dumps({'success': False, 'error-msg': 'Missing session key.'}))
if not user.username in sessions:
sessions[user.username] = {}
key = req['key']
if not key in sessions[user.username]:
return(json.dumps({'success': False, 'error-msg': 'Invalid session key.'}))
return(json.dumps({'success': True}))
|
py | 7dfbd4928d6fe241f515ade5ea84b79e81073868 | # Copyright 2011 Ken Pepple
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import __builtin__
import StringIO
from oslo_config import cfg
from nova import test
from nova import version
class VersionTestCase(test.NoDBTestCase):
"""Test cases for Versions code."""
def test_version_string_with_package_is_good(self):
"""Ensure uninstalled code get version string."""
self.stubs.Set(version.version_info, 'version', '5.5.5.5')
self.stubs.Set(version, 'NOVA_PACKAGE', 'g9ec3421')
self.assertEqual("5.5.5.5-g9ec3421",
version.version_string_with_package())
def test_release_file(self):
version.loaded = False
real_open = __builtin__.open
real_find_file = cfg.CONF.find_file
def fake_find_file(self, name):
if name == "release":
return "/etc/nova/release"
return real_find_file(self, name)
def fake_open(path, *args, **kwargs):
if path == "/etc/nova/release":
data = """[Nova]
vendor = ACME Corporation
product = ACME Nova
package = 1337"""
return StringIO.StringIO(data)
return real_open(path, *args, **kwargs)
self.stubs.Set(__builtin__, 'open', fake_open)
self.stubs.Set(cfg.ConfigOpts, 'find_file', fake_find_file)
self.assertEqual(version.vendor_string(), "ACME Corporation")
self.assertEqual(version.product_string(), "ACME Nova")
self.assertEqual(version.package_string(), "1337")
|
py | 7dfbd5b58d249e9bd1ac6c385bb08fd4bf2764ad | # -*- coding: utf-8 -*-
import numpy as np
def add_cell(self, pt_indice, cell_type, group_name=None):
"""Add a new cell defined by a point indices
Parameters
----------
self : MeshMat
an Mesh object
pt_indice : ndarray
a ndarray of points indices
group_name : str
name of the group
Returns
-------
new_ind : int
Tag of the created element. None if the element already exist
"""
# Create the new element
new_ind = 0
for key in self.cell: # There should only one solution
if self.cell[key].indice is not None and self.cell[key].indice.size > 0:
tmp_ind = max(self.cell[key].indice)
new_ind = max(new_ind, tmp_ind)
new_ind += 1
test_exist = self.cell[cell_type].add_cell(pt_indice, new_ind)
if test_exist:
# if group_name in self.group:
# self.group[group_name] = np.append(self.group[group_name], new_ind)
# else:
# self.group[group_name] = np.array([new_ind])
return new_ind
else:
return None
|
py | 7dfbd77bc66113c04215dca25b48c11cc7077070 | # https://www.simononsoftware.com/how-to-make-django-base-model/#the-huge-mess
from datetime import datetime
from django.db import models
from django.db.models import Q
class ChannelManager(models.Manager):
def channel_for_indexing(self):
return self.filter(
Q(last_index__isnull=True)
| Q(reindex__exact=True)).first()
class PlaylistManager(models.Manager):
def playlist_for_indexing(self):
return self.filter(
Q(last_index__isnull=True)
| Q(reindex__exact=True)).first()
class VideoManager(models.Manager):
def video_for_indexing(self):
return self.filter(
Q(last_index__isnull=True)
| Q(reindex__exact=True)).first()
class BaseModel(models.Model):
etag = models.CharField(max_length=300, blank=True, null=True)
reindex = models.BooleanField(default=False)
is_deleted = models.BooleanField(default=False)
last_error = models.CharField(max_length=300,
blank=True, null=True)
last_indexing = models.DateTimeField(blank=True, null=True)
last_error_indexing = models.DateTimeField(blank=True, null=True)
last_successful_indexing = models.DateTimeField(blank=True, null=True)
youtube_url = models.CharField(max_length=1234,
blank=True, null=True)
def indexing_error(self, e):
self.last_error_indexing = datetime.now()
self.last_indexing = datetime.now()
self.last_error = e
def indexing_ok(self):
self.last_indexing = datetime.now()
self.last_successful_indexing = datetime.now()
self.last_error = None
class Meta:
abstract = True
class Channel(BaseModel):
channel_id = models.CharField(max_length=200, unique=True)
title = models.CharField(max_length=200, blank=True, null=True)
description = models.TextField(blank=True, null=True)
objects = ChannelManager()
def __str__(self):
return self.title if self.title else self.channel_id
class Playlist(BaseModel):
channel = models.ForeignKey(Channel, blank=True, null=True)
playlist_id = models.CharField(max_length=200, unique=True)
title = models.CharField(max_length=300, blank=True, null=True)
description = models.TextField(blank=True, null=True)
objects = PlaylistManager()
def __str__(self):
return self.title if self.title else self.playlist_id
class Video(BaseModel):
playlist = models.ForeignKey(Playlist, null=True, blank=True,
related_name='videos')
channel = models.ForeignKey(Channel, null=True, blank=True,
related_name='videos')
video_id = models.CharField(max_length=200, unique=False)
title = models.CharField(max_length=200, blank=True, null=True)
description = models.TextField(blank=True, null=True)
length = models.TimeField(blank=True, null=True)
objects = VideoManager()
def __str__(self):
return self.title if self.title else self.video_id
|
py | 7dfbd7cf6ef6a424cf281cd5d07caacd0b72d56e | from ._off import read, write
__all__ = ["read", "write"]
|
py | 7dfbd8e647e561d580d2b27cbfad57fe0e190b62 | import os
import sys
import json
from datetime import datetime
class FlagHolder(object):
def __init__(self):
self._dict = None
def __getattr__(self, key):
return self._dict[key]
def initialize(self, **kwargs):
self._dict = {}
self._dict['time'] = datetime.now().strftime('%Y/%m/%d %H:%M:%S')
for k, v in kwargs.items():
self._dict[k] = v
def summary(self):
print('===== Flag summary =====')
for k, v in self._dict.items():
print('{k}: {v}'.format(k=k, v=v))
print('=== End flag summary ===')
def dump(self, path):
"""
dump all flag to json file.
"""
# check extension
base_path, ext = os.path.splitext(path)
if ext == '.json':
pass
else:
path = base_path + '.json'
with open(path, mode='w') as f:
json.dump(self._dict, f, indent=2)
if __name__ == '__main__':
flag = {
'model': 'resnet18',
'batch_size': 128,
'train': True
}
FLAGS = FlagHolder()
FLAGS.initialize(**flag)
FLAGS.summary()
FLAGS.dump('../logs/flags.json')
|
py | 7dfbd913345adbc8c3fe8599efa29ffc22c73e4e | """app.py: Create a web app that will run locally on port 5000 by default.
Will provide info and monitoring functionality on the web pages."""
# Import os, subprocess and sys libraries to call other scripts.
# Import threading to do multi-threading.
# Import Flask to use the micro framework for the app.
import os
import subprocess
import sys
import threading
from flask import Flask
from flask import render_template, abort
# Initiliaze Flask app and use debugging.
app = Flask(__name__)
app.debug = True
# Function to call netcap.py script for traffic capturing.
def run_script():
theproc = subprocess.Popen(['python', 'netcap.py'])
theproc.communicate()
# Display index.html template under the root level.
@app.route('/')
def index():
return render_template('index.html')
# Display basics.html under basics.
@app.route('/basics')
def basics():
return render_template('basics.html')
# Display about.html under about.
@app.route('/about')
def about():
return render_template('about.html')
# Display processing.html under generate.
@app.route('/generate')
def generate():
threading.Thread(target=lambda: run_script()).start()
return render_template('processing.html')
# Display itworked.html under is_done.
# Check to see if path of file is valid.
@app.route('/is_done')
def is_done():
hfile = "templates\itworked.html"
if os.path.isfile(hfile):
return render_template('itworked.html')
else:
abort(404)
# Start the web app locally on the default port (5000).
if __name__ == "__main__":
app.run()
|
py | 7dfbd9bc7e035831ea7782d69a668e603ac18624 | from pymodbus.payload import BinaryPayloadBuilder
from pymodbus.constants import Endian
import AZDKDParameter
import AZDKDDirectParameter
method = AZDKDParameter.ControlMethod_Default # Method (base address +0, 1): default: 2
position = AZDKDParameter.Position_Default # Position (base address +2, 3): default: 0
speed = AZDKDParameter.Speed_Default # Speed (base address +4, 5): default: 1000
changeSpeed = AZDKDParameter.ChangeSpeed_Default # Start/speed-change rate (base address +6, 7): default: 1000000
stop = AZDKDParameter.Stop_Default # Stop rate (base address +8, 9): default: 1000000
motionSupply = AZDKDParameter.MotionSupply_Default # Operating current (base address +10, 11): default: 1000
motionFinishDelay = AZDKDParameter.MotionFinishDelay_Default # Operation end delay (base address +12, 13): default: 0
merge = AZDKDParameter.Merge_Default # Link (base address +14, 15): default: 0
mergeTo = AZDKDParameter.MergeTo_Default # Link destination (base address +16, 17): default: -1
offsetArea = AZDKDParameter.OffsetArea_Default # Offset (area) (base address +18, 19): default: 0
widthArea = AZDKDParameter.WidthArea_Default # Width (area) (base address +20, 21): default: -1
countLoop = AZDKDParameter.CountLoop_Default # Count (loop) (base address +22, 23): default: 0
positionOffset = AZDKDParameter.PositionOffset_Default # Position offset (base address +24, 25): default: 0
finishLoop = AZDKDParameter.FinishLoop_Default # End (loop) (base address +26, 27): default: 0
weakEvent = AZDKDParameter.WeakEvent_Default # Weak event (base address +28, 29): default: -1
strongEvent = AZDKDParameter.StrongEvent_Default # Strong event (base address +30, 31): default: -1
def makeMotionParameter():
builder = BinaryPayloadBuilder(byteorder=Endian.Big)
builder.add_32bit_int(method) # Method (base address +0, 1)
builder.add_32bit_int(position) # Position (base address +2, 3)
builder.add_32bit_int(speed) # Speed (base address +4, 5)
builder.add_32bit_int(changeSpeed) # Start/speed-change rate (base address +6, 7)
builder.add_32bit_int(stop) # Stop rate (base address +8, 9)
builder.add_32bit_int(motionSupply) # Operating current (base address +10, 11)
builder.add_32bit_int(motionFinishDelay) # Operation end delay (base address +12, 13)
builder.add_32bit_int(merge) # Link (base address +14, 15)
builder.add_32bit_int(mergeTo) # Link destination (base address +16, 17)
builder.add_32bit_int(offsetArea) # Offset (area) (base address +18, 19)
builder.add_32bit_int(widthArea) # Width (area) (base address +20, 21)
builder.add_32bit_int(countLoop) # Count (loop) (base address +22, 23)
builder.add_32bit_int(positionOffset) # Position offset (base address +24, 25)
builder.add_32bit_int(finishLoop) # End (loop) (base address +26, 27)
builder.add_32bit_int(weakEvent) # Weak event (base address +28, 29)
builder.add_32bit_int(strongEvent) # Strong event (base address +30, 31)
return builder.build()
def getAddress(motionNumber):
return motionNumber * AZDKDParameter.MotionNumAdress_Pitch + AZDKDParameter.MotionNumAdress_Min
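# Example usage (hypothetical sketch, assuming a pymodbus 2.x synchronous RTU
# client on /dev/ttyUSB0 and slave id 1; adjust port, serial settings and unit
# to the real drive configuration):
#
#   from pymodbus.client.sync import ModbusSerialClient
#   client = ModbusSerialClient(method='rtu', port='/dev/ttyUSB0', baudrate=115200,
#                               parity='E', stopbits=1, timeout=1)
#   client.connect()
#   payload = makeMotionParameter()  # packed 32-bit big-endian values
#   client.write_registers(getAddress(0), payload, skip_encode=True, unit=1)
#   client.close()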
|
py | 7dfbda4674da6f10b5243a8331a4a3975bc43758 | import sys
import os
import pwd
import grp
import subprocess
import json
import multiprocessing
import installation
def config(key):
return "installation.config." + key
def system(key):
return "installation.system." + key
def admin(key):
return "installation.admin." + key
def database(key):
return "installation.database." + key
def prereqs(key):
return "installation.prereqs." + key
def paths(key):
return "installation.paths." + key
def smtp(key):
return "installation.smtp." + key
def extensions(key):
return "installation.extensions." + key
def username():
return pwd.getpwuid(os.getuid()).pw_name
def groupname():
return grp.getgrgid(pwd.getpwuid(os.getuid()).pw_gid).gr_name
def which(name):
return subprocess.check_output("which " + name, shell=True).strip()
def generate(arguments, database_path):
data = { config("password_hash_schemes"): [installation.config.default_password_hash_scheme],
config("default_password_hash_scheme"): installation.config.default_password_hash_scheme,
config("minimum_password_hash_time"): installation.config.minimum_password_hash_time,
config("minimum_rounds"): { installation.config.default_password_hash_scheme: 100 },
config("auth_database"): "internal",
system("username"): username(),
system("email"): username() + "@localhost",
system("groupname"): groupname(),
admin("username"): username(),
admin("email"): username() + "@localhost",
admin("fullname"): username(),
system("hostname"): "localhost",
system("recipients"): arguments.system_recipient or [username() + "@localhost"],
config("auth_mode"): "critic",
config("session_type"): "cookie",
config("allow_anonymous_user"): True,
config("allow_user_registration"): True,
config("verify_email_addresses"): arguments.testing,
config("access_scheme"): "http",
config("enable_access_tokens"): True,
config("repository_url_types"): ["http"],
config("default_encodings"): ["utf-8", "latin-1"],
database("driver"): "sqlite",
database("parameters"): { "database": database_path },
config("is_development"): True,
config("coverage_dir"): None,
prereqs("python"): sys.executable,
prereqs("git"): which("git"),
prereqs("tar"): which("tar"),
paths("etc_dir"): installation.paths.etc_dir,
paths("install_dir"): installation.paths.install_dir,
paths("data_dir"): installation.paths.data_dir,
paths("cache_dir"): installation.paths.cache_dir,
paths("log_dir"): installation.paths.log_dir,
paths("run_dir"): installation.paths.run_dir,
paths("git_dir"): installation.paths.git_dir,
smtp("host"): arguments.smtp_host,
smtp("port"): arguments.smtp_port,
smtp("username"): json.dumps(arguments.smtp_username),
smtp("password"): json.dumps(arguments.smtp_password),
smtp("use_ssl"): False,
smtp("use_starttls"): False,
config("is_quickstart"): True,
config("is_testing"): arguments.testing,
config("ldap_url"): "",
config("ldap_search_base"): "",
config("ldap_create_user"): False,
config("ldap_username_attribute"): "",
config("ldap_fullname_attribute"): "",
config("ldap_email_attribute"): "",
config("ldap_cache_max_age"): 600,
extensions("enabled"): False,
extensions("critic_v8_jsshell"): "NOT_INSTALLED",
extensions("default_flavor"): "js/v8",
config("highlight.max_workers"): multiprocessing.cpu_count(),
# Setting changeset.max_workers to 1 is a workaround for some race
# conditions causing duplicate rows in (at least) the files table.
config("changeset.max_workers"): 1,
config("archive_review_branches"): True,
config("web_server_integration"): "none" }
def provider(name):
prefix = "provider_%s." % name
return { config(prefix + "enabled"): False,
config(prefix + "allow_user_registration"): False,
config(prefix + "verify_email_addresses"): False,
config(prefix + "client_id"): None,
config(prefix + "client_secret"): None,
config(prefix + "bypass_createuser"): False,
config(prefix + "redirect_uri"): None }
data.update(provider("github"))
data.update(provider("google"))
return data
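# NOTE: small illustration added for clarity; not part of the original script.
# The helpers above only build fully qualified key names, so the provider()
# entries merged into the data dictionary inside generate() end up under keys
# such as "installation.config.provider_github.enabled".
def _provider_key_example():
    prefix = "provider_%s." % "github"
    assert config(prefix + "enabled") == "installation.config.provider_github.enabled"
    return config(prefix + "enabled")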
|
py | 7dfbdb2b676b9ba5cf025ee57a4c0b2bf0ede25a | # Copyright (c) 2003-2015 CORE Security Technologies
#
# This software is provided under a slightly modified version
# of the Apache Software License. See the accompanying LICENSE file
# for more information.
#
# Author: Alberto Solino (@agsolino)
#
# Description:
# [MS-DRSR] Directory Replication Service (DRS) DRSUAPI Interface implementation
#
# Best way to learn how to use these calls is to grab the protocol standard
# so you understand what the call does, and then read the test case located
# at https://github.com/CoreSecurity/impacket/tree/master/impacket/testcases/SMB_RPC
#
# Some calls have helper functions, which makes it even easier to use.
# They are located at the end of this file.
# Helper functions start with "h"<name of the call>.
# There are test cases for them too.
#
import hashlib
from struct import pack
from impacket.dcerpc.v5.ndr import NDRCALL, NDRSTRUCT, NDRPOINTER, NDRUniConformantArray, NDRUNION, NDR, NDRENUM
from impacket.dcerpc.v5.dtypes import PUUID, DWORD, NULL, GUID, LPWSTR, BOOL, ULONG, UUID, LONGLONG, ULARGE_INTEGER, LARGE_INTEGER
from impacket import LOG, hresult_errors, system_errors
from impacket.structure import Structure
from impacket.uuid import uuidtup_to_bin, string_to_bin
from impacket.dcerpc.v5.enum import Enum
from impacket.dcerpc.v5.rpcrt import DCERPCException
from impacket.krb5 import crypto
from pyasn1.type import univ
from pyasn1.codec.ber import decoder
try:
from Crypto.Cipher import ARC4, DES
except Exception:
LOG.critical("Warning: You don't have any crypto installed. You need PyCrypto")
LOG.critical("See http://www.pycrypto.org/")
MSRPC_UUID_DRSUAPI = uuidtup_to_bin(('E3514235-4B06-11D1-AB04-00C04FC2DCD2','4.0'))
class DCERPCSessionError(DCERPCException):
def __init__(self, error_string=None, error_code=None, packet=None):
DCERPCException.__init__(self, error_string, error_code, packet)
def __str__( self ):
key = self.error_code
if hresult_errors.ERROR_MESSAGES.has_key(key):
error_msg_short = hresult_errors.ERROR_MESSAGES[key][0]
error_msg_verbose = hresult_errors.ERROR_MESSAGES[key][1]
return 'DRSR SessionError: code: 0x%x - %s - %s' % (self.error_code, error_msg_short, error_msg_verbose)
elif system_errors.ERROR_MESSAGES.has_key(key & 0xffff):
error_msg_short = system_errors.ERROR_MESSAGES[key & 0xffff][0]
error_msg_verbose = system_errors.ERROR_MESSAGES[key & 0xffff][1]
return 'DRSR SessionError: code: 0x%x - %s - %s' % (self.error_code, error_msg_short, error_msg_verbose)
else:
return 'DRSR SessionError: unknown error code: 0x%x' % self.error_code
################################################################################
# CONSTANTS
################################################################################
# 4.1.10.2.17 EXOP_ERR Codes
class EXOP_ERR(NDRENUM):
align = 4
align64 = 4
structure = (
('Data', '<L'),
)
class enumItems(Enum):
EXOP_ERR_SUCCESS = 0x00000001
EXOP_ERR_UNKNOWN_OP = 0x00000002
EXOP_ERR_FSMO_NOT_OWNER = 0x00000003
EXOP_ERR_UPDATE_ERR = 0x00000004
EXOP_ERR_EXCEPTION = 0x00000005
EXOP_ERR_UNKNOWN_CALLER = 0x00000006
EXOP_ERR_RID_ALLOC = 0x00000007
EXOP_ERR_FSMO_OWNER_DELETED = 0x00000008
EXOP_ERR_FSMO_PENDING_OP = 0x00000009
EXOP_ERR_MISMATCH = 0x0000000A
EXOP_ERR_COULDNT_CONTACT = 0x0000000B
EXOP_ERR_FSMO_REFUSING_ROLES = 0x0000000C
EXOP_ERR_DIR_ERROR = 0x0000000D
EXOP_ERR_FSMO_MISSING_SETTINGS = 0x0000000E
EXOP_ERR_ACCESS_DENIED = 0x0000000F
EXOP_ERR_PARAM_ERROR = 0x00000010
def dump(self, msg = None, indent = 0):
if msg is None: msg = self.__class__.__name__
if msg != '':
print msg,
try:
print " %s" % self.enumItems(self.fields['Data']).name,
except ValueError:
print " %d" % self.fields['Data']
# 4.1.10.2.18 EXOP_REQ Codes
EXOP_FSMO_REQ_ROLE = 0x00000001
EXOP_FSMO_REQ_RID_ALLOC = 0x00000002
EXOP_FSMO_RID_REQ_ROLE = 0x00000003
EXOP_FSMO_REQ_PDC = 0x00000004
EXOP_FSMO_ABANDON_ROLE = 0x00000005
EXOP_REPL_OBJ = 0x00000006
EXOP_REPL_SECRETS = 0x00000007
# 5.14 ATTRTYP
ATTRTYP = ULONG
# 5.51 DSTIME
DSTIME = LONGLONG
# 5.39 DRS_EXTENSIONS_INT
DRS_EXT_BASE = 0x00000001
DRS_EXT_ASYNCREPL = 0x00000002
DRS_EXT_REMOVEAPI = 0x00000004
DRS_EXT_MOVEREQ_V2 = 0x00000008
DRS_EXT_GETCHG_DEFLATE = 0x00000010
DRS_EXT_DCINFO_V1 = 0x00000020
DRS_EXT_RESTORE_USN_OPTIMIZATION = 0x00000040
DRS_EXT_ADDENTRY = 0x00000080
DRS_EXT_KCC_EXECUTE = 0x00000100
DRS_EXT_ADDENTRY_V2 = 0x00000200
DRS_EXT_LINKED_VALUE_REPLICATION = 0x00000400
DRS_EXT_DCINFO_V2 = 0x00000800
DRS_EXT_INSTANCE_TYPE_NOT_REQ_ON_MOD = 0x00001000
DRS_EXT_CRYPTO_BIND = 0x00002000
DRS_EXT_GET_REPL_INFO = 0x00004000
DRS_EXT_STRONG_ENCRYPTION = 0x00008000
DRS_EXT_DCINFO_VFFFFFFFF = 0x00010000
DRS_EXT_TRANSITIVE_MEMBERSHIP = 0x00020000
DRS_EXT_ADD_SID_HISTORY = 0x00040000
DRS_EXT_POST_BETA3 = 0x00080000
DRS_EXT_GETCHGREQ_V5 = 0x00100000
DRS_EXT_GETMEMBERSHIPS2 = 0x00200000
DRS_EXT_GETCHGREQ_V6 = 0x00400000
DRS_EXT_NONDOMAIN_NCS = 0x00800000
DRS_EXT_GETCHGREQ_V8 = 0x01000000
DRS_EXT_GETCHGREPLY_V5 = 0x02000000
DRS_EXT_GETCHGREPLY_V6 = 0x04000000
DRS_EXT_WHISTLER_BETA3 = 0x08000000
DRS_EXT_W2K3_DEFLATE = 0x10000000
DRS_EXT_GETCHGREQ_V10 = 0x20000000
DRS_EXT_RESERVED_FOR_WIN2K_OR_DOTNET_PART2 = 0x40000000
DRS_EXT_RESERVED_FOR_WIN2K_OR_DOTNET_PART3 = 0x80000000
# dwFlagsExt
DRS_EXT_ADAM = 0x00000001
DRS_EXT_LH_BETA2 = 0x00000002
DRS_EXT_RECYCLE_BIN = 0x00000004
# 5.41 DRS_OPTIONS
DRS_ASYNC_OP = 0x00000001
DRS_GETCHG_CHECK = 0x00000002
DRS_UPDATE_NOTIFICATION = 0x00000002
DRS_ADD_REF = 0x00000004
DRS_SYNC_ALL = 0x00000008
DRS_DEL_REF = 0x00000008
DRS_WRIT_REP = 0x00000010
DRS_INIT_SYNC = 0x00000020
DRS_PER_SYNC = 0x00000040
DRS_MAIL_REP = 0x00000080
DRS_ASYNC_REP = 0x00000100
DRS_IGNORE_ERROR = 0x00000100
DRS_TWOWAY_SYNC = 0x00000200
DRS_CRITICAL_ONLY = 0x00000400
DRS_GET_ANC = 0x00000800
DRS_GET_NC_SIZE = 0x00001000
DRS_LOCAL_ONLY = 0x00001000
DRS_NONGC_RO_REP = 0x00002000
DRS_SYNC_BYNAME = 0x00004000
DRS_REF_OK = 0x00004000
DRS_FULL_SYNC_NOW = 0x00008000
DRS_NO_SOURCE = 0x00008000
DRS_FULL_SYNC_IN_PROGRESS = 0x00010000
DRS_FULL_SYNC_PACKET = 0x00020000
DRS_SYNC_REQUEUE = 0x00040000
DRS_SYNC_URGENT = 0x00080000
DRS_REF_GCSPN = 0x00100000
DRS_NO_DISCARD = 0x00100000
DRS_NEVER_SYNCED = 0x00200000
DRS_SPECIAL_SECRET_PROCESSING = 0x00400000
DRS_INIT_SYNC_NOW = 0x00800000
DRS_PREEMPTED = 0x01000000
DRS_SYNC_FORCED = 0x02000000
DRS_DISABLE_AUTO_SYNC = 0x04000000
DRS_DISABLE_PERIODIC_SYNC = 0x08000000
DRS_USE_COMPRESSION = 0x10000000
DRS_NEVER_NOTIFY = 0x20000000
DRS_SYNC_PAS = 0x40000000
DRS_GET_ALL_GROUP_MEMBERSHIP = 0x80000000
# 5.113 LDAP_CONN_PROPERTIES
BND = 0x00000001
SSL = 0x00000002
UDP = 0x00000004
GC = 0x00000008
GSS = 0x00000010
NGO = 0x00000020
SPL = 0x00000040
MD5 = 0x00000080
SGN = 0x00000100
SL = 0x00000200
# 5.137 NTSAPI_CLIENT_GUID
NTDSAPI_CLIENT_GUID = string_to_bin('e24d201a-4fd6-11d1-a3da-0000f875ae0d')
# 5.139 NULLGUID
NULLGUID = string_to_bin('00000000-0000-0000-0000-000000000000')
# 5.205 USN
USN = LONGLONG
# 4.1.4.1.2 DRS_MSG_CRACKREQ_V1
DS_NAME_FLAG_GCVERIFY = 0x00000004
DS_NAME_FLAG_TRUST_REFERRAL = 0x00000008
DS_NAME_FLAG_PRIVATE_RESOLVE_FPOS = 0x80000000
DS_LIST_SITES = 0xFFFFFFFF
DS_LIST_SERVERS_IN_SITE = 0xFFFFFFFE
DS_LIST_DOMAINS_IN_SITE = 0xFFFFFFFD
DS_LIST_SERVERS_FOR_DOMAIN_IN_SITE = 0xFFFFFFFC
DS_LIST_INFO_FOR_SERVER = 0xFFFFFFFB
DS_LIST_ROLES = 0xFFFFFFFA
DS_NT4_ACCOUNT_NAME_SANS_DOMAIN = 0xFFFFFFF9
DS_MAP_SCHEMA_GUID = 0xFFFFFFF8
DS_LIST_DOMAINS = 0xFFFFFFF7
DS_LIST_NCS = 0xFFFFFFF6
DS_ALT_SECURITY_IDENTITIES_NAME = 0xFFFFFFF5
DS_STRING_SID_NAME = 0xFFFFFFF4
DS_LIST_SERVERS_WITH_DCS_IN_SITE = 0xFFFFFFF3
DS_LIST_GLOBAL_CATALOG_SERVERS = 0xFFFFFFF1
DS_NT4_ACCOUNT_NAME_SANS_DOMAIN_EX = 0xFFFFFFF0
DS_USER_PRINCIPAL_NAME_AND_ALTSECID = 0xFFFFFFEF
DS_USER_PRINCIPAL_NAME_FOR_LOGON = 0xFFFFFFF2
# 5.53 ENTINF
ENTINF_FROM_MASTER = 0x00000001
ENTINF_DYNAMIC_OBJECT = 0x00000002
ENTINF_REMOTE_MODIFY = 0x00010000
# 4.1.27.1.2 DRS_MSG_VERIFYREQ_V1
DRS_VERIFY_DSNAMES = 0x00000000
DRS_VERIFY_SIDS = 0x00000001
DRS_VERIFY_SAM_ACCOUNT_NAMES = 0x00000002
DRS_VERIFY_FPOS = 0x00000003
# 4.1.11.1.2 DRS_MSG_NT4_CHGLOG_REQ_V1
DRS_NT4_CHGLOG_GET_CHANGE_LOG = 0x00000001
DRS_NT4_CHGLOG_GET_SERIAL_NUMBERS = 0x00000002
################################################################################
# STRUCTURES
################################################################################
# 4.1.10.2.16 ENCRYPTED_PAYLOAD
class ENCRYPTED_PAYLOAD(Structure):
structure = (
('Salt','16s'),
('CheckSum','<L'),
('EncryptedData',':'),
)
# 5.136 NT4SID
class NT4SID(NDRSTRUCT):
structure = (
('Data','28s=""'),
)
def getAlignment(self):
return 4
# 5.40 DRS_HANDLE
class DRS_HANDLE(NDRSTRUCT):
structure = (
('Data','20s=""'),
)
def getAlignment(self):
return 4
class PDRS_HANDLE(NDRPOINTER):
referent = (
('Data',DRS_HANDLE),
)
# 5.38 DRS_EXTENSIONS
class BYTE_ARRAY(NDRUniConformantArray):
item = 'c'
class PBYTE_ARRAY(NDRPOINTER):
referent = (
('Data',BYTE_ARRAY),
)
class DRS_EXTENSIONS(NDRSTRUCT):
structure = (
('cb',DWORD),
('rgb',BYTE_ARRAY),
)
class PDRS_EXTENSIONS(NDRPOINTER):
referent = (
('Data',DRS_EXTENSIONS),
)
# 5.39 DRS_EXTENSIONS_INT
class DRS_EXTENSIONS_INT(Structure):
structure = (
('dwFlags','<L=0'),
('SiteObjGuid','16s=""'),
('Pid','<L=0'),
('dwReplEpoch','<L=0'),
('dwFlagsExt','<L=0'),
('ConfigObjGUID','16s=""'),
('dwExtCaps','<L=0'),
)
# 4.1.5.1.2 DRS_MSG_DCINFOREQ_V1
class DRS_MSG_DCINFOREQ_V1(NDRSTRUCT):
structure = (
('Domain',LPWSTR),
('InfoLevel',DWORD),
)
# 4.1.5.1.1 DRS_MSG_DCINFOREQ
class DRS_MSG_DCINFOREQ(NDRUNION):
commonHdr = (
('tag', DWORD),
)
union = {
1 : ('V1', DRS_MSG_DCINFOREQ_V1),
}
# 4.1.5.1.8 DS_DOMAIN_CONTROLLER_INFO_1W
class DS_DOMAIN_CONTROLLER_INFO_1W(NDRSTRUCT):
structure = (
('NetbiosName',LPWSTR),
('DnsHostName',LPWSTR),
('SiteName',LPWSTR),
('ComputerObjectName',LPWSTR),
('ServerObjectName',LPWSTR),
('fIsPdc',BOOL),
('fDsEnabled',BOOL),
)
class DS_DOMAIN_CONTROLLER_INFO_1W_ARRAY(NDRUniConformantArray):
item = DS_DOMAIN_CONTROLLER_INFO_1W
class PDS_DOMAIN_CONTROLLER_INFO_1W_ARRAY(NDRPOINTER):
referent = (
('Data',DS_DOMAIN_CONTROLLER_INFO_1W_ARRAY),
)
# 4.1.5.1.4 DRS_MSG_DCINFOREPLY_V1
class DRS_MSG_DCINFOREPLY_V1(NDRSTRUCT):
structure = (
('cItems',DWORD),
('rItems',PDS_DOMAIN_CONTROLLER_INFO_1W_ARRAY),
)
# 4.1.5.1.9 DS_DOMAIN_CONTROLLER_INFO_2W
class DS_DOMAIN_CONTROLLER_INFO_2W(NDRSTRUCT):
structure = (
('NetbiosName',LPWSTR),
('DnsHostName',LPWSTR),
('SiteName',LPWSTR),
('SiteObjectName',LPWSTR),
('ComputerObjectName',LPWSTR),
('ServerObjectName',LPWSTR),
('NtdsDsaObjectName',LPWSTR),
('fIsPdc',BOOL),
('fDsEnabled',BOOL),
('fIsGc',BOOL),
('SiteObjectGuid',GUID),
('ComputerObjectGuid',GUID),
('ServerObjectGuid',GUID),
('NtdsDsaObjectGuid',GUID),
)
class DS_DOMAIN_CONTROLLER_INFO_2W_ARRAY(NDRUniConformantArray):
item = DS_DOMAIN_CONTROLLER_INFO_2W
class PDS_DOMAIN_CONTROLLER_INFO_2W_ARRAY(NDRPOINTER):
referent = (
('Data',DS_DOMAIN_CONTROLLER_INFO_2W_ARRAY),
)
# 4.1.5.1.5 DRS_MSG_DCINFOREPLY_V2
class DRS_MSG_DCINFOREPLY_V2(NDRSTRUCT):
structure = (
('cItems',DWORD),
('rItems',PDS_DOMAIN_CONTROLLER_INFO_2W_ARRAY),
)
# 4.1.5.1.10 DS_DOMAIN_CONTROLLER_INFO_3W
class DS_DOMAIN_CONTROLLER_INFO_3W(NDRSTRUCT):
structure = (
('NetbiosName',LPWSTR),
('DnsHostName',LPWSTR),
('SiteName',LPWSTR),
('SiteObjectName',LPWSTR),
('ComputerObjectName',LPWSTR),
('ServerObjectName',LPWSTR),
('NtdsDsaObjectName',LPWSTR),
('fIsPdc',BOOL),
('fDsEnabled',BOOL),
('fIsGc',BOOL),
('fIsRodc',BOOL),
('SiteObjectGuid',GUID),
('ComputerObjectGuid',GUID),
('ServerObjectGuid',GUID),
('NtdsDsaObjectGuid',GUID),
)
class DS_DOMAIN_CONTROLLER_INFO_3W_ARRAY(NDRUniConformantArray):
item = DS_DOMAIN_CONTROLLER_INFO_3W
class PDS_DOMAIN_CONTROLLER_INFO_3W_ARRAY(NDRPOINTER):
referent = (
('Data',DS_DOMAIN_CONTROLLER_INFO_3W_ARRAY),
)
# 4.1.5.1.6 DRS_MSG_DCINFOREPLY_V3
class DRS_MSG_DCINFOREPLY_V3(NDRSTRUCT):
structure = (
('cItems',DWORD),
('rItems',PDS_DOMAIN_CONTROLLER_INFO_3W_ARRAY),
)
# 4.1.5.1.11 DS_DOMAIN_CONTROLLER_INFO_FFFFFFFFW
class DS_DOMAIN_CONTROLLER_INFO_FFFFFFFFW(NDRSTRUCT):
structure = (
('IPAddress',DWORD),
('NotificationCount',DWORD),
('secTimeConnected',DWORD),
('Flags',DWORD),
('TotalRequests',DWORD),
('Reserved1',DWORD),
('UserName',LPWSTR),
)
class DS_DOMAIN_CONTROLLER_INFO_FFFFFFFFW_ARRAY(NDRUniConformantArray):
item = DS_DOMAIN_CONTROLLER_INFO_FFFFFFFFW
class PDS_DOMAIN_CONTROLLER_INFO_FFFFFFFFW_ARRAY(NDRPOINTER):
referent = (
('Data',DS_DOMAIN_CONTROLLER_INFO_FFFFFFFFW_ARRAY),
)
# 4.1.5.1.7 DRS_MSG_DCINFOREPLY_VFFFFFFFF
class DRS_MSG_DCINFOREPLY_VFFFFFFFF(NDRSTRUCT):
structure = (
('cItems',DWORD),
('rItems',PDS_DOMAIN_CONTROLLER_INFO_FFFFFFFFW_ARRAY),
)
# 4.1.5.1.3 DRS_MSG_DCINFOREPLY
class DRS_MSG_DCINFOREPLY(NDRUNION):
commonHdr = (
('tag', DWORD),
)
union = {
1 : ('V1', DRS_MSG_DCINFOREPLY_V1),
2 : ('V2', DRS_MSG_DCINFOREPLY_V2),
3 : ('V3', DRS_MSG_DCINFOREPLY_V3),
0xffffffff : ('V1', DRS_MSG_DCINFOREPLY_VFFFFFFFF),
}
# 4.1.4.1.2 DRS_MSG_CRACKREQ_V1
class LPWSTR_ARRAY(NDRUniConformantArray):
item = LPWSTR
class PLPWSTR_ARRAY(NDRPOINTER):
referent = (
('Data',LPWSTR_ARRAY),
)
class DRS_MSG_CRACKREQ_V1(NDRSTRUCT):
structure = (
('CodePage',ULONG),
('LocaleId',ULONG),
('dwFlags',DWORD),
('formatOffered',DWORD),
('formatDesired',DWORD),
('cNames',DWORD),
('rpNames',PLPWSTR_ARRAY),
)
# 4.1.4.1.1 DRS_MSG_CRACKREQ
class DRS_MSG_CRACKREQ(NDRUNION):
commonHdr = (
('tag', DWORD),
)
union = {
1 : ('V1', DRS_MSG_CRACKREQ_V1),
}
# 4.1.4.1.3 DS_NAME_FORMAT
class DS_NAME_FORMAT(NDRENUM):
class enumItems(Enum):
DS_UNKNOWN_NAME = 0
DS_FQDN_1779_NAME = 1
DS_NT4_ACCOUNT_NAME = 2
DS_DISPLAY_NAME = 3
DS_UNIQUE_ID_NAME = 6
DS_CANONICAL_NAME = 7
DS_USER_PRINCIPAL_NAME = 8
DS_CANONICAL_NAME_EX = 9
DS_SERVICE_PRINCIPAL_NAME = 10
DS_SID_OR_SID_HISTORY_NAME = 11
DS_DNS_DOMAIN_NAME = 12
# 4.1.4.1.4 DS_NAME_RESULT_ITEMW
class DS_NAME_RESULT_ITEMW(NDRSTRUCT):
structure = (
('status',DWORD),
('pDomain',LPWSTR),
('pName',LPWSTR),
)
class DS_NAME_RESULT_ITEMW_ARRAY(NDRUniConformantArray):
item = DS_NAME_RESULT_ITEMW
class PDS_NAME_RESULT_ITEMW_ARRAY(NDRPOINTER):
referent = (
('Data',DS_NAME_RESULT_ITEMW_ARRAY),
)
# 4.1.4.1.5 DS_NAME_RESULTW
class DS_NAME_RESULTW(NDRSTRUCT):
structure = (
('cItems',DWORD),
('rItems',PDS_NAME_RESULT_ITEMW_ARRAY),
)
class PDS_NAME_RESULTW(NDRPOINTER):
referent = (
('Data',DS_NAME_RESULTW),
)
# 4.1.4.1.7 DRS_MSG_CRACKREPLY_V1
class DRS_MSG_CRACKREPLY_V1(NDRSTRUCT):
structure = (
('pResult',PDS_NAME_RESULTW),
)
# 4.1.4.1.6 DRS_MSG_CRACKREPLY
class DRS_MSG_CRACKREPLY(NDRUNION):
commonHdr = (
('tag', DWORD),
)
union = {
1 : ('V1', DRS_MSG_CRACKREPLY_V1),
}
# 5.198 UPTODATE_CURSOR_V1
class UPTODATE_CURSOR_V1(NDRSTRUCT):
structure = (
('uuidDsa',UUID),
('usnHighPropUpdate',USN),
)
class UPTODATE_CURSOR_V1_ARRAY(NDRUniConformantArray):
item = UPTODATE_CURSOR_V1
# 5.200 UPTODATE_VECTOR_V1_EXT
class UPTODATE_VECTOR_V1_EXT(NDRSTRUCT):
structure = (
('dwVersion',DWORD),
('dwReserved1',DWORD),
('cNumCursors',DWORD),
('dwReserved2',DWORD),
('rgCursors',UPTODATE_CURSOR_V1_ARRAY),
)
class PUPTODATE_VECTOR_V1_EXT(NDRPOINTER):
referent = (
('Data',UPTODATE_VECTOR_V1_EXT),
)
# 5.206 USN_VECTOR
class USN_VECTOR(NDRSTRUCT):
structure = (
('usnHighObjUpdate',USN),
('usnReserved',USN),
('usnHighPropUpdate',USN),
)
# 5.50 DSNAME
class WCHAR_ARRAY(NDRUniConformantArray):
item = 'H'
def __setitem__(self, key, value):
self.fields['MaximumCount'] = None
self.data = None # force recompute
return NDRUniConformantArray.__setitem__(self, key, [ord(c) for c in value])
def __getitem__(self, key):
if key == 'Data':
return ''.join([chr(i) for i in self.fields[key]])
else:
return NDR.__getitem__(self,key)
class DSNAME(NDRSTRUCT):
structure = (
('structLen',ULONG),
('SidLen',ULONG),
('Guid',GUID),
('Sid',NT4SID),
('NameLen',ULONG),
('StringName', WCHAR_ARRAY),
)
def getDataLen(self, data):
return self['NameLen']
def getData(self, soFar = 0):
return NDRSTRUCT.getData(self, soFar)
class PDSNAME(NDRPOINTER):
referent = (
('Data',DSNAME),
)
class PDSNAME_ARRAY(NDRUniConformantArray):
item = PDSNAME
class PPDSNAME_ARRAY(NDRPOINTER):
referent = (
('Data',PDSNAME_ARRAY),
)
class ATTRTYP_ARRAY(NDRUniConformantArray):
item = ATTRTYP
# 5.145 PARTIAL_ATTR_VECTOR_V1_EXT
class PARTIAL_ATTR_VECTOR_V1_EXT(NDRSTRUCT):
structure = (
('dwVersion',DWORD),
('dwReserved1',DWORD),
('cAttrs',DWORD),
('rgPartialAttr',ATTRTYP_ARRAY),
)
class PPARTIAL_ATTR_VECTOR_V1_EXT(NDRPOINTER):
referent = (
('Data',PARTIAL_ATTR_VECTOR_V1_EXT),
)
# 5.142 OID_t
class OID_t(NDRSTRUCT):
structure = (
('length',ULONG),
('elements',PBYTE_ARRAY),
)
# 5.153 PrefixTableEntry
class PrefixTableEntry(NDRSTRUCT):
structure = (
('ndx',ULONG),
('prefix',OID_t),
)
class PrefixTableEntry_ARRAY(NDRUniConformantArray):
item = PrefixTableEntry
class PPrefixTableEntry_ARRAY(NDRPOINTER):
referent = (
('Data',PrefixTableEntry_ARRAY),
)
# 5.177 SCHEMA_PREFIX_TABLE
class SCHEMA_PREFIX_TABLE(NDRSTRUCT):
structure = (
('PrefixCount',DWORD),
('pPrefixEntry',PPrefixTableEntry_ARRAY),
)
# 4.1.10.2.2 DRS_MSG_GETCHGREQ_V3
class DRS_MSG_GETCHGREQ_V3(NDRSTRUCT):
structure = (
('uuidDsaObjDest',UUID),
('uuidInvocIdSrc',UUID),
('pNC',PDSNAME),
('usnvecFrom',USN_VECTOR),
('pUpToDateVecDestV1',PUPTODATE_VECTOR_V1_EXT),
('pPartialAttrVecDestV1',PPARTIAL_ATTR_VECTOR_V1_EXT),
('PrefixTableDest',SCHEMA_PREFIX_TABLE),
('ulFlags',ULONG),
('cMaxObjects',ULONG),
('cMaxBytes',ULONG),
('ulExtendedOp',ULONG),
)
# 5.131 MTX_ADDR
class MTX_ADDR(NDRSTRUCT):
structure = (
('mtx_namelen',ULONG),
('mtx_name',PBYTE_ARRAY),
)
class PMTX_ADDR(NDRPOINTER):
referent = (
('Data',MTX_ADDR),
)
# 4.1.10.2.3 DRS_MSG_GETCHGREQ_V4
class DRS_MSG_GETCHGREQ_V4(NDRSTRUCT):
structure = (
('uuidTransportObj',UUID),
('pmtxReturnAddress',PMTX_ADDR),
('V3',DRS_MSG_GETCHGREQ_V3),
)
# 4.1.10.2.4 DRS_MSG_GETCHGREQ_V5
class DRS_MSG_GETCHGREQ_V5(NDRSTRUCT):
structure = (
('uuidDsaObjDest',UUID),
('uuidInvocIdSrc',UUID),
('pNC',PDSNAME),
('usnvecFrom',USN_VECTOR),
('pUpToDateVecDestV1',PUPTODATE_VECTOR_V1_EXT),
('ulFlags',ULONG),
('cMaxObjects',ULONG),
('cMaxBytes',ULONG),
('ulExtendedOp',ULONG),
('liFsmoInfo',ULARGE_INTEGER),
)
# 4.1.10.2.5 DRS_MSG_GETCHGREQ_V7
class DRS_MSG_GETCHGREQ_V7(NDRSTRUCT):
structure = (
('uuidTransportObj',UUID),
('pmtxReturnAddress',PMTX_ADDR),
('V3',DRS_MSG_GETCHGREQ_V3),
('pPartialAttrSet',PPARTIAL_ATTR_VECTOR_V1_EXT),
('pPartialAttrSetEx1',PPARTIAL_ATTR_VECTOR_V1_EXT),
('PrefixTableDest',SCHEMA_PREFIX_TABLE),
)
# 4.1.10.2.6 DRS_MSG_GETCHGREQ_V8
class DRS_MSG_GETCHGREQ_V8(NDRSTRUCT):
structure = (
('uuidDsaObjDest',UUID),
('uuidInvocIdSrc',UUID),
('pNC',PDSNAME),
('usnvecFrom',USN_VECTOR),
('pUpToDateVecDest',PUPTODATE_VECTOR_V1_EXT),
('ulFlags',ULONG),
('cMaxObjects',ULONG),
('cMaxBytes',ULONG),
('ulExtendedOp',ULONG),
('liFsmoInfo',ULARGE_INTEGER),
('pPartialAttrSet',PPARTIAL_ATTR_VECTOR_V1_EXT),
('pPartialAttrSetEx1',PPARTIAL_ATTR_VECTOR_V1_EXT),
('PrefixTableDest',SCHEMA_PREFIX_TABLE),
)
# 4.1.10.2.7 DRS_MSG_GETCHGREQ_V10
class DRS_MSG_GETCHGREQ_V10(NDRSTRUCT):
structure = (
('uuidDsaObjDest',UUID),
('uuidInvocIdSrc',UUID),
('pNC',PDSNAME),
('usnvecFrom',USN_VECTOR),
('pUpToDateVecDest',PUPTODATE_VECTOR_V1_EXT),
('ulFlags',ULONG),
('cMaxObjects',ULONG),
('cMaxBytes',ULONG),
('ulExtendedOp',ULONG),
('liFsmoInfo',ULARGE_INTEGER),
('pPartialAttrSet',PPARTIAL_ATTR_VECTOR_V1_EXT),
('pPartialAttrSetEx1',PPARTIAL_ATTR_VECTOR_V1_EXT),
('PrefixTableDest',SCHEMA_PREFIX_TABLE),
('ulMoreFlags',ULONG),
)
# 4.1.10.2.1 DRS_MSG_GETCHGREQ
class DRS_MSG_GETCHGREQ(NDRUNION):
commonHdr = (
('tag', DWORD),
)
union = {
4 : ('V4', DRS_MSG_GETCHGREQ_V4),
5 : ('V5', DRS_MSG_GETCHGREQ_V5),
7 : ('V7', DRS_MSG_GETCHGREQ_V7),
8 : ('V8', DRS_MSG_GETCHGREQ_V8),
10 : ('V10', DRS_MSG_GETCHGREQ_V10),
}
# 5.16 ATTRVAL
class ATTRVAL(NDRSTRUCT):
structure = (
('valLen',ULONG),
('pVal',PBYTE_ARRAY),
)
class ATTRVAL_ARRAY(NDRUniConformantArray):
item = ATTRVAL
class PATTRVAL_ARRAY(NDRPOINTER):
referent = (
('Data',ATTRVAL_ARRAY),
)
# 5.17 ATTRVALBLOCK
class ATTRVALBLOCK(NDRSTRUCT):
structure = (
('valCount',ULONG),
('pAVal',PATTRVAL_ARRAY),
)
# 5.9 ATTR
class ATTR(NDRSTRUCT):
structure = (
('attrTyp',ATTRTYP),
('AttrVal',ATTRVALBLOCK),
)
class ATTR_ARRAY(NDRUniConformantArray):
item = ATTR
class PATTR_ARRAY(NDRPOINTER):
referent = (
('Data',ATTR_ARRAY),
)
# 5.10 ATTRBLOCK
class ATTRBLOCK(NDRSTRUCT):
structure = (
('attrCount',ULONG),
('pAttr',PATTR_ARRAY),
)
# 5.53 ENTINF
class ENTINF(NDRSTRUCT):
structure = (
('pName',PDSNAME),
('ulFlags',ULONG),
('AttrBlock',ATTRBLOCK),
)
class ENTINF_ARRAY(NDRUniConformantArray):
item = ENTINF
class PENTINF_ARRAY(NDRPOINTER):
referent = (
('Data',ENTINF_ARRAY),
)
# 5.154 PROPERTY_META_DATA_EXT
class PROPERTY_META_DATA_EXT(NDRSTRUCT):
structure = (
('dwVersion',DWORD),
('timeChanged',DSTIME),
('uuidDsaOriginating',UUID),
('usnOriginating',USN),
)
class PROPERTY_META_DATA_EXT_ARRAY(NDRUniConformantArray):
item = PROPERTY_META_DATA_EXT
# 5.155 PROPERTY_META_DATA_EXT_VECTOR
class PROPERTY_META_DATA_EXT_VECTOR(NDRSTRUCT):
structure = (
('cNumProps',DWORD),
('rgMetaData',PROPERTY_META_DATA_EXT_ARRAY),
)
class PPROPERTY_META_DATA_EXT_VECTOR(NDRPOINTER):
referent = (
('Data',PROPERTY_META_DATA_EXT_VECTOR),
)
# 5.161 REPLENTINFLIST
class REPLENTINFLIST(NDRSTRUCT):
structure = (
('pNextEntInf',NDRPOINTER),
('Entinf',ENTINF),
('fIsNCPrefix',BOOL),
('pParentGuidm',PUUID),
('pMetaDataExt',PPROPERTY_META_DATA_EXT_VECTOR),
)
# ToDo: Here we should work with getData and fromString because we're cheating with pNextEntInf
def fromString(self, data, soFar = 0 ):
# Here we're changing the struct so we can represent a linked list with NDR
self.fields['pNextEntInf'] = PREPLENTINFLIST(isNDR64 = self._isNDR64)
retVal = NDRSTRUCT.fromString(self, data, soFar)
return retVal
class PREPLENTINFLIST(NDRPOINTER):
referent = (
('Data',REPLENTINFLIST),
)
# 4.1.10.2.9 DRS_MSG_GETCHGREPLY_V1
class DRS_MSG_GETCHGREPLY_V1(NDRSTRUCT):
structure = (
('uuidDsaObjSrc',UUID),
('uuidInvocIdSrc',UUID),
('pNC',PDSNAME),
('usnvecFrom',USN_VECTOR),
('usnvecTo',USN_VECTOR),
('pUpToDateVecSrcV1',PUPTODATE_VECTOR_V1_EXT),
('PrefixTableSrc',SCHEMA_PREFIX_TABLE),
('ulExtendedRet',EXOP_ERR),
('cNumObjects',ULONG),
('cNumBytes',ULONG),
('pObjects',PREPLENTINFLIST),
('fMoreData',BOOL),
)
# 4.1.10.2.15 DRS_COMPRESSED_BLOB
class DRS_COMPRESSED_BLOB(NDRSTRUCT):
structure = (
('cbUncompressedSize',DWORD),
('cbCompressedSize',DWORD),
('pbCompressedData',BYTE_ARRAY),
)
# 4.1.10.2.10 DRS_MSG_GETCHGREPLY_V2
class DRS_MSG_GETCHGREPLY_V2(NDRSTRUCT):
structure = (
('CompressedV1',DRS_COMPRESSED_BLOB),
)
# 5.199 UPTODATE_CURSOR_V2
class UPTODATE_CURSOR_V2(NDRSTRUCT):
structure = (
('uuidDsa',UUID),
('usnHighPropUpdate',USN),
('timeLastSyncSuccess',DSTIME),
)
class UPTODATE_CURSOR_V2_ARRAY(NDRUniConformantArray):
item = UPTODATE_CURSOR_V2
# 5.201 UPTODATE_VECTOR_V2_EXT
class UPTODATE_VECTOR_V2_EXT(NDRSTRUCT):
structure = (
('dwVersion',DWORD),
('dwReserved1',DWORD),
('cNumCursors',DWORD),
('dwReserved2',DWORD),
('rgCursors',UPTODATE_CURSOR_V2_ARRAY),
)
class PUPTODATE_VECTOR_V2_EXT(NDRPOINTER):
referent = (
('Data',UPTODATE_VECTOR_V2_EXT),
)
# 5.211 VALUE_META_DATA_EXT_V1
class VALUE_META_DATA_EXT_V1(NDRSTRUCT):
structure = (
('timeCreated',DSTIME),
('MetaData',PROPERTY_META_DATA_EXT),
)
# 5.166 REPLVALINF
class REPLVALINF(NDRSTRUCT):
structure = (
('pObject',PDSNAME),
('attrTyp',ATTRTYP),
('Aval',ATTRVAL),
('fIsPresent',BOOL),
('MetaData',VALUE_META_DATA_EXT_V1),
)
def fromString(self, data, soFar = 0):
retVal = NDRSTRUCT.fromString(self, data, soFar)
#self.dumpRaw()
return retVal
class REPLVALINF_ARRAY(NDRUniConformantArray):
item = REPLVALINF
class PREPLVALINF_ARRAY(NDRPOINTER):
referent = (
('Data',REPLVALINF_ARRAY),
)
# 4.1.10.2.11 DRS_MSG_GETCHGREPLY_V6
class DRS_MSG_GETCHGREPLY_V6(NDRSTRUCT):
structure = (
('uuidDsaObjSrc',UUID),
('uuidInvocIdSrc',UUID),
('pNC',PDSNAME),
('usnvecFrom',USN_VECTOR),
('usnvecTo',USN_VECTOR),
('pUpToDateVecSrc',PUPTODATE_VECTOR_V2_EXT),
('PrefixTableSrc',SCHEMA_PREFIX_TABLE),
('ulExtendedRet',EXOP_ERR),
('cNumObjects',ULONG),
('cNumBytes',ULONG),
('pObjects',PREPLENTINFLIST),
('fMoreData',BOOL),
('cNumNcSizeObjectsc',ULONG),
('cNumNcSizeValues',ULONG),
('cNumValues',DWORD),
#('rgValues',PREPLVALINF_ARRAY),
# ToDo: Once we find out what's going on with PREPLVALINF_ARRAY get it back
# Seems there's something in there that is not being parsed correctly
('rgValues',DWORD),
('dwDRSError',DWORD),
)
# 4.1.10.2.14 DRS_COMP_ALG_TYPE
class DRS_COMP_ALG_TYPE(NDRENUM):
class enumItems(Enum):
DRS_COMP_ALG_NONE = 0
DRS_COMP_ALG_UNUSED = 1
DRS_COMP_ALG_MSZIP = 2
DRS_COMP_ALG_WIN2K3 = 3
# 4.1.10.2.12 DRS_MSG_GETCHGREPLY_V7
class DRS_MSG_GETCHGREPLY_V7(NDRSTRUCT):
structure = (
('dwCompressedVersion',DWORD),
('CompressionAlg',DRS_COMP_ALG_TYPE),
('CompressedAny',DRS_COMPRESSED_BLOB),
)
# 4.1.10.2.8 DRS_MSG_GETCHGREPLY
class DRS_MSG_GETCHGREPLY(NDRUNION):
commonHdr = (
('tag', DWORD),
)
union = {
1 : ('V1', DRS_MSG_GETCHGREPLY_V1),
2 : ('V2', DRS_MSG_GETCHGREPLY_V2),
6 : ('V6', DRS_MSG_GETCHGREPLY_V6),
7 : ('V7', DRS_MSG_GETCHGREPLY_V7),
}
# 4.1.27.1.2 DRS_MSG_VERIFYREQ_V1
class DRS_MSG_VERIFYREQ_V1(NDRSTRUCT):
structure = (
('dwFlags',DWORD),
('cNames',DWORD),
('rpNames',PPDSNAME_ARRAY),
('RequiredAttrs',ATTRBLOCK),
('PrefixTable',SCHEMA_PREFIX_TABLE),
)
# 4.1.27.1.1 DRS_MSG_VERIFYREQ
class DRS_MSG_VERIFYREQ(NDRUNION):
commonHdr = (
('tag', DWORD),
)
union = {
1 : ('V1', DRS_MSG_VERIFYREQ_V1),
}
# 4.1.27.1.4 DRS_MSG_VERIFYREPLY_V1
class DRS_MSG_VERIFYREPLY_V1(NDRSTRUCT):
structure = (
('error',DWORD),
('cNames',DWORD),
('rpEntInf',PENTINF_ARRAY),
('PrefixTable',SCHEMA_PREFIX_TABLE),
)
# 4.1.27.1.3 DRS_MSG_VERIFYREPLY
class DRS_MSG_VERIFYREPLY(NDRUNION):
commonHdr = (
('tag', DWORD),
)
union = {
1 : ('V1', DRS_MSG_VERIFYREPLY_V1),
}
# 4.1.11.1.2 DRS_MSG_NT4_CHGLOG_REQ_V1
class DRS_MSG_NT4_CHGLOG_REQ_V1(NDRSTRUCT):
structure = (
('dwFlags',DWORD),
('PreferredMaximumLength',DWORD),
('cbRestart',DWORD),
('pRestart',PBYTE_ARRAY),
)
# 4.1.11.1.1 DRS_MSG_NT4_CHGLOG_REQ
class DRS_MSG_NT4_CHGLOG_REQ(NDRUNION):
commonHdr = (
('tag', DWORD),
)
union = {
1 : ('V1', DRS_MSG_NT4_CHGLOG_REQ_V1),
}
# 4.1.11.1.5 NT4_REPLICATION_STATE
class NT4_REPLICATION_STATE(NDRSTRUCT):
structure = (
('SamSerialNumber',LARGE_INTEGER),
('SamCreationTime',LARGE_INTEGER),
('BuiltinSerialNumber',LARGE_INTEGER),
('BuiltinCreationTime',LARGE_INTEGER),
('LsaSerialNumber',LARGE_INTEGER),
('LsaCreationTime',LARGE_INTEGER),
)
# 4.1.11.1.4 DRS_MSG_NT4_CHGLOG_REPLY_V1
class DRS_MSG_NT4_CHGLOG_REPLY_V1(NDRSTRUCT):
structure = (
('cbRestart',DWORD),
('cbLog',DWORD),
('ReplicationState',NT4_REPLICATION_STATE),
('ActualNtStatus',DWORD),
('pRestart',PBYTE_ARRAY),
('pLog',PBYTE_ARRAY),
)
# 4.1.11.1.3 DRS_MSG_NT4_CHGLOG_REPLY
class DRS_MSG_NT4_CHGLOG_REPLY(NDRUNION):
commonHdr = (
('tag', DWORD),
)
union = {
1 : ('V1', DRS_MSG_NT4_CHGLOG_REPLY_V1),
}
################################################################################
# RPC CALLS
################################################################################
# 4.1.3 IDL_DRSBind (Opnum 0)
class DRSBind(NDRCALL):
opnum = 0
structure = (
('puuidClientDsa', PUUID),
('pextClient', PDRS_EXTENSIONS),
)
class DRSBindResponse(NDRCALL):
structure = (
('ppextServer', PDRS_EXTENSIONS),
('phDrs', DRS_HANDLE),
('ErrorCode',DWORD),
)
# 4.1.10 IDL_DRSGetNCChanges (Opnum 3)
class DRSGetNCChanges(NDRCALL):
opnum = 3
structure = (
('hDrs', DRS_HANDLE),
('dwInVersion', DWORD),
('pmsgIn', DRS_MSG_GETCHGREQ),
)
class DRSGetNCChangesResponse(NDRCALL):
structure = (
('pdwOutVersion', DWORD),
('pmsgOut', DRS_MSG_GETCHGREPLY),
('ErrorCode',DWORD),
)
# 4.1.27 IDL_DRSVerifyNames (Opnum 8)
class DRSVerifyNames(NDRCALL):
opnum = 8
structure = (
('hDrs', DRS_HANDLE),
('dwInVersion', DWORD),
('pmsgIn', DRS_MSG_VERIFYREQ),
)
class DRSVerifyNamesResponse(NDRCALL):
structure = (
('pdwOutVersion', DWORD),
('pmsgOut', DRS_MSG_VERIFYREPLY),
('ErrorCode',DWORD),
)
# 4.1.11 IDL_DRSGetNT4ChangeLog (Opnum 11)
class DRSGetNT4ChangeLog(NDRCALL):
opnum = 11
structure = (
('hDrs', DRS_HANDLE),
('dwInVersion', DWORD),
('pmsgIn', DRS_MSG_NT4_CHGLOG_REQ),
)
class DRSGetNT4ChangeLogResponse(NDRCALL):
structure = (
('pdwOutVersion', DWORD),
('pmsgOut', DRS_MSG_NT4_CHGLOG_REPLY),
('ErrorCode',DWORD),
)
# 4.1.4 IDL_DRSCrackNames (Opnum 12)
class DRSCrackNames(NDRCALL):
opnum = 12
structure = (
('hDrs', DRS_HANDLE),
('dwInVersion', DWORD),
('pmsgIn', DRS_MSG_CRACKREQ),
)
class DRSCrackNamesResponse(NDRCALL):
structure = (
('pdwOutVersion', DWORD),
('pmsgOut', DRS_MSG_CRACKREPLY),
('ErrorCode',DWORD),
)
# 4.1.5 IDL_DRSDomainControllerInfo (Opnum 16)
class DRSDomainControllerInfo(NDRCALL):
opnum = 16
structure = (
('hDrs', DRS_HANDLE),
('dwInVersion', DWORD),
('pmsgIn', DRS_MSG_DCINFOREQ),
)
class DRSDomainControllerInfoResponse(NDRCALL):
structure = (
('pdwOutVersion', DWORD),
('pmsgOut', DRS_MSG_DCINFOREPLY),
('ErrorCode',DWORD),
)
################################################################################
# OPNUMs and their corresponding structures
################################################################################
OPNUMS = {
0 : (DRSBind,DRSBindResponse ),
3 : (DRSGetNCChanges,DRSGetNCChangesResponse ),
12: (DRSCrackNames,DRSCrackNamesResponse ),
16: (DRSDomainControllerInfo,DRSDomainControllerInfoResponse ),
}
################################################################################
# HELPER FUNCTIONS
################################################################################
def checkNullString(string):
if string == NULL:
return string
if string[-1:] != '\x00':
return string + '\x00'
else:
return string
def hDRSDomainControllerInfo(dce, hDrs, domain, infoLevel):
request = DRSDomainControllerInfo()
request['hDrs'] = hDrs
request['dwInVersion'] = 1
request['pmsgIn']['tag'] = 1
request['pmsgIn']['V1']['Domain'] = checkNullString(domain)
request['pmsgIn']['V1']['InfoLevel'] = infoLevel
return dce.request(request)
def hDRSCrackNames(dce, hDrs, flags, formatOffered, formatDesired, rpNames = ()):
request = DRSCrackNames()
request['hDrs'] = hDrs
request['dwInVersion'] = 1
request['pmsgIn']['tag'] = 1
request['pmsgIn']['V1']['CodePage'] = 0
request['pmsgIn']['V1']['LocaleId'] = 0
request['pmsgIn']['V1']['dwFlags'] = flags
request['pmsgIn']['V1']['formatOffered'] = formatOffered
request['pmsgIn']['V1']['formatDesired'] = formatDesired
request['pmsgIn']['V1']['cNames'] = len(rpNames)
for name in rpNames:
record = LPWSTR()
record['Data'] = checkNullString(name)
request['pmsgIn']['V1']['rpNames'].append(record)
return dce.request(request)
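# NOTE: the following usage sketch was added for illustration and is not part of
# the original interface definition. It assumes 'dce' is an already connected and
# bound DCERPC instance for this interface and 'hDrs' is a DRS_HANDLE obtained
# from a previous DRSBind call; the sample account name is made up.
def exampleCrackNames(dce, hDrs, name='CONTOSO\\Administrator'):
    resp = hDRSCrackNames(dce, hDrs, 0x0,
                          DS_NAME_FORMAT.enumItems.DS_NT4_ACCOUNT_NAME.value,
                          DS_NAME_FORMAT.enumItems.DS_FQDN_1779_NAME.value,
                          (name,))
    # The translated names live under resp['pmsgOut']['V1']['pResult'] (see
    # DS_NAME_RESULTW above); the raw response is returned to keep the sketch simple.
    return resp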
def transformKey(InputKey):
# Section 2.2.11.1.2 Encrypting a 64-Bit Block with a 7-Byte Key
OutputKey = []
OutputKey.append( chr(ord(InputKey[0]) >> 0x01) )
OutputKey.append( chr(((ord(InputKey[0])&0x01)<<6) | (ord(InputKey[1])>>2)) )
OutputKey.append( chr(((ord(InputKey[1])&0x03)<<5) | (ord(InputKey[2])>>3)) )
OutputKey.append( chr(((ord(InputKey[2])&0x07)<<4) | (ord(InputKey[3])>>4)) )
OutputKey.append( chr(((ord(InputKey[3])&0x0F)<<3) | (ord(InputKey[4])>>5)) )
OutputKey.append( chr(((ord(InputKey[4])&0x1F)<<2) | (ord(InputKey[5])>>6)) )
OutputKey.append( chr(((ord(InputKey[5])&0x3F)<<1) | (ord(InputKey[6])>>7)) )
OutputKey.append( chr(ord(InputKey[6]) & 0x7F) )
for i in range(8):
OutputKey[i] = chr((ord(OutputKey[i]) << 1) & 0xfe)
return "".join(OutputKey)
def deriveKey(baseKey):
# 2.2.11.1.3 Deriving Key1 and Key2 from a Little-Endian, Unsigned Integer Key
# Let I be the little-endian, unsigned integer.
# Let I[X] be the Xth byte of I, where I is interpreted as a zero-base-index array of bytes.
# Note that because I is in little-endian byte order, I[0] is the least significant byte.
# Key1 is a concatenation of the following values: I[0], I[1], I[2], I[3], I[0], I[1], I[2].
# Key2 is a concatenation of the following values: I[3], I[0], I[1], I[2], I[3], I[0], I[1]
key = pack('<L',baseKey)
key1 = key[0] + key[1] + key[2] + key[3] + key[0] + key[1] + key[2]
key2 = key[3] + key[0] + key[1] + key[2] + key[3] + key[0] + key[1]
return transformKey(key1),transformKey(key2)
def removeDESLayer(cryptedHash, rid):
Key1,Key2 = deriveKey(rid)
Crypt1 = DES.new(Key1, DES.MODE_ECB)
Crypt2 = DES.new(Key2, DES.MODE_ECB)
decryptedHash = Crypt1.decrypt(cryptedHash[:8]) + Crypt2.decrypt(cryptedHash[8:])
return decryptedHash
def DecryptAttributeValue(dce, attribute):
sessionKey = dce.get_session_key()
# Is it a Kerberos Session Key?
if isinstance(sessionKey, crypto.Key):
# Extract its contents and move on
sessionKey = sessionKey.contents
encryptedPayload = ENCRYPTED_PAYLOAD(attribute)
md5 = hashlib.new('md5')
md5.update(sessionKey)
md5.update(encryptedPayload['Salt'])
finalMD5 = md5.digest()
cipher = ARC4.new(finalMD5)
plainText = cipher.decrypt(attribute[16:])
#chkSum = (binascii.crc32(plainText[4:])) & 0xffffffff
#if unpack('<L',plainText[:4])[0] != chkSum:
# print "RECEIVED 0x%x" % unpack('<L',plainText[:4])[0]
# print "CALCULATED 0x%x" % chkSum
return plainText[4:]
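# NOTE: the following sketch was added for illustration and is not part of the
# original module. Replicated secret attributes typically arrive wrapped in two
# layers: an RC4 layer keyed with MD5(session key + salt), removed by
# DecryptAttributeValue() above, and an inner DES layer keyed from the account
# RID, removed by removeDESLayer(). The 'dce', 'encryptedAttribute' and 'rid'
# arguments are assumptions for the example only.
def exampleDecryptReplicatedSecret(dce, encryptedAttribute, rid):
    from binascii import hexlify
    tmp = DecryptAttributeValue(dce, encryptedAttribute)
    # for a 16-byte hash attribute (e.g. unicodePwd) the result is the raw hash
    decryptedHash = removeDESLayer(tmp, rid)
    return hexlify(decryptedHash)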
# 5.16.4 ATTRTYP-to-OID Conversion
def MakeAttid(prefixTable, oid):
# get the last value in the original OID: the value after the last '.'
lastValue = int(oid.split('.')[-1])
# convert the dotted form of the OID into a BER-encoded binary format.
# The BER encoding of an OID is described in section 8.19 of [ITUX690]
from pyasn1.type import univ
from pyasn1.codec.ber import encoder
binaryOID = encoder.encode(univ.ObjectIdentifier(oid))[2:]
# get the prefix of the OID
if lastValue < 128:
oidPrefix = list(binaryOID[:-1])
else:
oidPrefix = list(binaryOID[:-2])
# search the prefix in the prefix table, if none found, add
# one entry for the new prefix.
fToAdd = True
pos = len(prefixTable)
for j, item in enumerate(prefixTable):
if item['prefix']['elements'] == oidPrefix:
fToAdd = False
pos = j
break
if fToAdd is True:
entry = PrefixTableEntry()
entry['ndx'] = pos
entry['prefix']['length'] = len(oidPrefix)
entry['prefix']['elements'] = oidPrefix
prefixTable.append(entry)
# compose the attid
lowerWord = lastValue % 16384
if lastValue >= 16384:
# mark it so that it is known to not be the whole lastValue
lowerWord += 32768
upperWord = pos
attrTyp = ATTRTYP()
attrTyp['Data'] = (upperWord << 16) + lowerWord
return attrTyp
def OidFromAttid(prefixTable, attr):
# separate the ATTRTYP into two parts
upperWord = attr / 65536
lowerWord = attr % 65536
# search in the prefix table to find the upperWord, if found,
# construct the binary OID by appending lowerWord to the end of
# found prefix.
binaryOID = None
for j, item in enumerate(prefixTable):
if item['ndx'] == upperWord:
binaryOID = item['prefix']['elements'][:item['prefix']['length']]
if lowerWord < 128:
binaryOID.append(chr(lowerWord))
else:
if lowerWord >= 32768:
lowerWord -= 32768
binaryOID.append(chr(((lowerWord/128) % 128)+128))
binaryOID.append(chr(lowerWord%128))
break
if binaryOID is None:
return None
return str(decoder.decode('\x06' + chr(len(binaryOID)) + ''.join(binaryOID), asn1Spec = univ.ObjectIdentifier())[0])
if __name__ == '__main__':
prefixTable = []
oid0 = '1.2.840.113556.1.4.94'
oid1 = '2.5.6.2'
oid2 = '1.2.840.113556.1.2.1'
oid3 = '1.2.840.113556.1.3.223'
oid4 = '1.2.840.113556.1.5.7000.53'
o0 = MakeAttid(prefixTable, oid0)
print hex(o0)
o1 = MakeAttid(prefixTable, oid1)
print hex(o1)
o2 = MakeAttid(prefixTable, oid2)
print hex(o2)
o3 = MakeAttid(prefixTable, oid3)
print hex(o3)
o4 = MakeAttid(prefixTable, oid4)
print hex(o4)
jj = OidFromAttid(prefixTable, o0)
print jj
jj = OidFromAttid(prefixTable, o1)
print jj
jj = OidFromAttid(prefixTable, o2)
print jj
jj = OidFromAttid(prefixTable, o3)
print jj
jj = OidFromAttid(prefixTable, o4)
print jj
|
py | 7dfbdb4d0e1b39ca2182f8720416426e877ab839 | """
Modified on Sun Jul 28 2020 by Yunzhi Shi, DS @ AWS MLSL
Cleaned up for the tutorial.
Original author: avanetten
"""
import os, time, random
import argparse
import math
import copy
from p_tqdm import p_umap
import numpy as np
import pandas as pd
import scipy.spatial
import scipy.stats
import networkx as nx
import shapely.wkt
from shapely.geometry import Point, LineString
import utm
import apls_utils
import osmnx_funcs
import graphTools
################################################################################
def add_travel_time(G_, speed_key='inferred_speed_mps', length_key='length',
travel_time_key='travel_time_s', verbose=False):
"""
Compute and add travel time estimates to each graph edge.
Arguments
---------
G_ : networkx graph
Input networkx graph, with edges assumed to have a dictionary of
properties that includes speed.
speed_key : str
Key in the edge properties dictionary to use for the edge speed.
Defaults to ``'inferred_speed_mps'``.
length_key : str
Key in the edge properties dictionary to use for the edge length.
Defaults to ``'length'`` (assumed to be in meters).
travel_time_key : str
Name to assign travel time in the edge properties dictionary.
Defaults to ``'travel_time_s'``.
verbose : boolean
Switch to print relevant values to screen. Defaults to ``False``.
Returns
-------
G_ : networkx graph
Updated graph with travel time attached to each edge.
"""
for i, (u, v, data) in enumerate(G_.edges(data=True)):
if speed_key in data:
speed = data[speed_key]
if type(speed) == list:
speed = np.mean(speed)
else:
print("speed_key not found:", speed_key)
return G_
if verbose:
print("data[length_key]:", data[length_key])
print("speed:", speed)
travel_time_seconds = data[length_key] / speed
data[travel_time_key] = travel_time_seconds
return G_
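################################################################################
# NOTE: usage sketch added for illustration; not part of the original module.
# add_travel_time() divides each edge length by its speed, so a 100 m edge
# traversed at 10 m/s gets a 10 s travel time. The attribute names below match
# the function defaults.
def _example_add_travel_time():
    G = nx.Graph()
    G.add_edge(0, 1, length=100.0, inferred_speed_mps=10.0)
    G = add_travel_time(G)
    assert abs(G[0][1]['travel_time_s'] - 10.0) < 1e-9
    return G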
################################################################################
def create_edge_linestrings(G_, remove_redundant=True, verbose=False):
"""
Ensure all edges have the 'geometry' tag, use shapely linestrings.
Notes
-----
If identical edges exist, remove extras.
Arguments
---------
G_ : networkx graph
Input networkx graph, with edges assumed to have a dictionary of
properties that may or may not include 'geometry'.
remove_redundant : boolean
Switch to remove identical edges, if they exist.
verbose : boolean
Switch to print relevant values to screen. Defaults to ``False``.
Returns
-------
G_ : networkx graph
Updated graph with every edge containing the 'geometry' tag.
"""
# clean out redundant edges with identical geometry
edge_seen_set = set([])
geom_seen = []
bad_edges = []
for i, (u, v, data) in enumerate(G_.edges(data=True)):
# create linestring if no geometry reported
if 'geometry' not in data:
sourcex, sourcey = G_.nodes[u]['x'], G_.nodes[u]['y']
targetx, targety = G_.nodes[v]['x'], G_.nodes[v]['y']
line_geom = LineString([Point(sourcex, sourcey),
Point(targetx, targety)])
data['geometry'] = line_geom
# get reversed line
coords = list(data['geometry'].coords)[::-1]
line_geom_rev = LineString(coords)
else:
# check which direction linestring is travelling (it may be going
# from v -> u, which means we need to reverse the linestring)
# otherwise new edge is tangled
line_geom = data['geometry']
u_loc = [G_.nodes[u]['x'], G_.nodes[u]['y']]
v_loc = [G_.nodes[v]['x'], G_.nodes[v]['y']]
geom_p0 = list(line_geom.coords)[0]
dist_to_u = scipy.spatial.distance.euclidean(u_loc, geom_p0)
dist_to_v = scipy.spatial.distance.euclidean(v_loc, geom_p0)
coords = list(data['geometry'].coords)[::-1]
line_geom_rev = LineString(coords)
if dist_to_u > dist_to_v:
data['geometry'] = line_geom_rev
# flag redundant edges
if remove_redundant:
if i == 0:
edge_seen_set = set([(u, v)])
edge_seen_set.add((v, u))
geom_seen.append(line_geom)
else:
if ((u, v) in edge_seen_set) or ((v, u) in edge_seen_set):
# test if geoms have already been seen
for geom_seen_tmp in geom_seen:
if (line_geom == geom_seen_tmp) \
or (line_geom_rev == geom_seen_tmp):
bad_edges.append((u, v))
if verbose:
print("\nRedundant edge:", u, v)
else:
edge_seen_set.add((u, v))
geom_seen.append(line_geom)
geom_seen.append(line_geom_rev)
if remove_redundant:
if verbose:
print("\nedge_seen_set:", edge_seen_set)
print("redundant edges:", bad_edges)
for (u, v) in bad_edges:
if G_.has_edge(u, v):
G_.remove_edge(u, v) # , key)
return G_
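################################################################################
# NOTE: usage sketch added for illustration; not part of the original module.
# For an edge missing the 'geometry' key, create_edge_linestrings() synthesizes
# a straight LineString between the endpoint coordinates stored on the nodes.
def _example_create_edge_linestrings():
    G = nx.Graph()
    G.add_node(0, x=0.0, y=0.0)
    G.add_node(1, x=3.0, y=4.0)
    G.add_edge(0, 1)
    G = create_edge_linestrings(G)
    assert abs(G[0][1]['geometry'].length - 5.0) < 1e-9
    return G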
################################################################################
def cut_linestring(line, distance, verbose=False):
"""
Cuts a shapely linestring at a specified distance from its starting point.
Notes
----
Return original linestring if distance <= 0 or greater than the length of
the line.
Reference:
http://toblerity.org/shapely/manual.html#linear-referencing-methods
Arguments
---------
line : shapely linestring
Input shapely linestring to cut.
distance : float
Distance from start of line to cut it in two.
verbose : boolean
Switch to print relevant values to screen. Defaults to ``False``.
Returns
-------
[line1, line2] : list
Cut linestrings. If distance <= 0 or greater than the length of
the line, return input line.
"""
if verbose:
print("Cutting linestring at distance", distance, "...")
if distance <= 0.0 or distance >= line.length:
return [LineString(line)]
# iterate through coords and check if the interpolated point has been passed
# already or not
coords = list(line.coords)
for i, p in enumerate(coords):
pdl = line.project(Point(p))
if verbose:
print(i, p, "line.project point:", pdl)
if pdl == distance:
return [
LineString(coords[:i+1]),
LineString(coords[i:])]
if pdl > distance:
cp = line.interpolate(distance)
return [
LineString(coords[:i] + [(cp.x, cp.y)]),
LineString([(cp.x, cp.y)] + coords[i:])]
# if we've reached here then that means we've encountered a self-loop and
# the interpolated point is between the final midpoint and the original
# node
i = len(coords) - 1
cp = line.interpolate(distance)
return [
LineString(coords[:i] + [(cp.x, cp.y)]),
LineString([(cp.x, cp.y)] + coords[i:])]
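################################################################################
# NOTE: worked example added for illustration; not part of the original module.
# Cutting a straight 10-unit line 4 units from its start yields pieces of
# length 4 and 6.
def _example_cut_linestring():
    line = LineString([(0, 0), (10, 0)])
    seg1, seg2 = cut_linestring(line, 4.0)
    assert abs(seg1.length - 4.0) < 1e-9
    assert abs(seg2.length - 6.0) < 1e-9
    return seg1, seg2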
################################################################################
def get_closest_edge_from_G(G_, point, nearby_nodes_set=set([]),
verbose=False):
"""
Return closest edge to point, and distance to said edge.
Notes
-----
Just discovered a similar function:
https://github.com/gboeing/osmnx/blob/master/osmnx/utils.py#L501
Arguments
---------
G_ : networkx graph
Input networkx graph, with edges assumed to have a dictionary of
properties that includes the 'geometry' key.
point : shapely Point
Shapely point containing (x, y) coordinates.
nearby_nodes_set : set
Set of possible edge endpoints to search. If nearby_nodes_set is not
empty, only edges with a node in this set will be checked (this can
greatly speed computation on large graphs). If nearby_nodes_set is
empty, check all possible edges in the graph.
Defaults to ``set([])``.
verbose : boolean
Switch to print relevant values to screen. Defaults to ``False``.
Returns
-------
best_edge, min_dist, best_geom : tuple
best_edge is the closest edge to the point
min_dist is the distance to that edge
best_geom is the geometry of the edge
"""
# get distances from point to lines
dist_list = []
edge_list = []
geom_list = []
p = point # Point(point_coords)
for i, (u, v, key, data) in enumerate(G_.edges(keys=True, data=True)):
# skip if u,v not in nearby nodes
if len(nearby_nodes_set) > 0:
if (u not in nearby_nodes_set) and (v not in nearby_nodes_set):
continue
if verbose:
print(("u,v,key,data:", u, v, key, data))
print((" type data['geometry']:", type(data['geometry'])))
try:
line = data['geometry']
except KeyError:
line = data['attr_dict']['geometry']
geom_list.append(line)
dist_list.append(p.distance(line))
edge_list.append([u, v, key])
# get closest edge
min_idx = np.argmin(dist_list)
min_dist = dist_list[min_idx]
best_edge = edge_list[min_idx]
best_geom = geom_list[min_idx]
return best_edge, min_dist, best_geom
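################################################################################
# NOTE: usage sketch added for illustration; not part of the original module.
# get_closest_edge_from_G() iterates edges with keys, so it expects a
# multigraph whose edges carry a 'geometry' LineString.
def _example_get_closest_edge():
    G = nx.MultiGraph()
    G.add_node(0, x=0.0, y=0.0)
    G.add_node(1, x=10.0, y=0.0)
    G.add_node(2, x=0.0, y=10.0)
    G.add_edge(0, 1, geometry=LineString([(0, 0), (10, 0)]))
    G.add_edge(0, 2, geometry=LineString([(0, 0), (0, 10)]))
    best_edge, min_dist, _ = get_closest_edge_from_G(G, Point(5, 1))
    # Point (5, 1) lies 1 unit above the horizontal edge (0, 1), so that edge wins
    assert best_edge[:2] == [0, 1] and abs(min_dist - 1.0) < 1e-9
    return best_edge, min_dist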
################################################################################
def insert_point_into_G(G_, point, node_id=100000, max_distance_meters=5,
nearby_nodes_set=set([]), allow_renaming=True,
verbose=False, super_verbose=False):
"""
Insert a new node in the graph closest to the given point.
Notes
-----
If the point is too far from the graph, don't insert a node.
Assume all edges have a linestring geometry
http://toblerity.org/shapely/manual.html#object.simplify
Sometimes the point to insert will have the same coordinates as an
existing point. If allow_renaming == True, relabel the existing node.
convert linestring to multipoint?
https://github.com/Toblerity/Shapely/issues/190
TODO : Implement a version without renaming that tracks which node is
closest to the desired point.
Arguments
---------
G_ : networkx graph
Input networkx graph, with edges assumed to have a dictionary of
properties that includes the 'geometry' key.
point : shapely Point
Shapely point containing (x, y) coordinates
node_id : int
Unique identifier of node to insert. Defaults to ``100000``.
max_distance_meters : float
Maximum distance in meters between point and graph. Defaults to ``5``.
nearby_nodes_set : set
Set of possible edge endpoints to search. If nearby_nodes_set is not
empty, only edges with a node in this set will be checked (this can
greatly speed computation on large graphs). If nearby_nodes_set is
empty, check all possible edges in the graph.
Defaults to ``set([])``.
allow_renaming : boolean
Switch to allow renaming of an existing node with node_id if the
existing node is closest to the point. Defaults to ``True``.
verbose : boolean
Switch to print relevant values to screen. Defaults to ``False``.
super_verbose : boolean
Switch to print mucho values to screen. Defaults to ``False``.
Returns
-------
G_, node_props, min_dist : tuple
G_ is the updated graph
node_props gives the properties of the inserted node
min_dist is the distance from the point to the graph
"""
best_edge, min_dist, best_geom = get_closest_edge_from_G(
G_, point, nearby_nodes_set=nearby_nodes_set,
verbose=super_verbose)
[u, v, key] = best_edge
G_node_set = set(G_.nodes())
if verbose:
print("Inserting point:", node_id)
print("best edge:", best_edge)
print(" best edge dist:", min_dist)
u_loc = [G_.nodes[u]['x'], G_.nodes[u]['y']]
v_loc = [G_.nodes[v]['x'], G_.nodes[v]['y']]
print("ploc:", (point.x, point.y))
print("uloc:", u_loc)
print("vloc:", v_loc)
if min_dist > max_distance_meters:
if verbose:
print("min_dist > max_distance_meters, skipping...")
return G_, {}, -1, -1
else:
# updated graph
# skip if node exists already
if node_id in G_node_set:
if verbose:
print("Node ID:", node_id, "already exists, skipping...")
return G_, {}, -1, -1
line_geom = best_geom
# Length along line that is closest to the point
line_proj = line_geom.project(point)
# Now combine with interpolated point on line
new_point = line_geom.interpolate(line_geom.project(point))
x, y = new_point.x, new_point.y
#################
# create new node
try:
# first get zone, then convert to latlon
_, _, zone_num, zone_letter = utm.from_latlon(G_.nodes[u]['lat'],
G_.nodes[u]['lon'])
# convert utm to latlon
lat, lon = utm.to_latlon(x, y, zone_num, zone_letter)
except:
lat, lon = y, x
# set properties
node_props = {'highway': 'insertQ',
'lat': lat,
'lon': lon,
'osmid': node_id,
'x': x,
'y': y}
# add node
G_.add_node(node_id, **node_props)
# assign, then update edge props for new edge
_, _, edge_props_new = copy.deepcopy(
list(G_.edges([u, v], data=True))[0])
# cut line
split_line = cut_linestring(line_geom, line_proj)
if split_line is None:
print("Failure in cut_linestring()...")
print("type(split_line):", type(split_line))
print("split_line:", split_line)
print("line_geom:", line_geom)
print("line_geom.length:", line_geom.length)
print("line_proj:", line_proj)
print("min_dist:", min_dist)
return G_, {}, 0, 0
if verbose:
print("split_line:", split_line)
if len(split_line) == 1:
if verbose:
print("split line empty, min_dist:", min_dist)
# get coincident node
outnode = ''
outnode_x, outnode_y = -1, -1
x_p, y_p = new_point.x, new_point.y
x_u, y_u = G_.nodes[u]['x'], G_.nodes[u]['y']
x_v, y_v = G_.nodes[v]['x'], G_.nodes[v]['y']
# sometimes it seems that the nodes aren't perfectly coincident,
# so see if it's within a buffer
buff = 0.05 # meters
if (abs(x_p - x_u) <= buff) and (abs(y_p - y_u) <= buff):
outnode = u
outnode_x, outnode_y = x_u, y_u
elif (abs(x_p - x_v) <= buff) and (abs(y_p - y_v) <= buff):
outnode = v
outnode_x, outnode_y = x_v, y_v
else:
print("Error in determining node coincident with node: "
+ str(node_id) + " along edge: " + str(best_edge))
print("x_p, y_p:", x_p, y_p)
print("x_u, y_u:", x_u, y_u)
print("x_v, y_v:", x_v, y_v)
# return
return G_, {}, 0, 0
# if the line cannot be split, that means that the new node
# is coincident with an existing node. Relabel, if desired
if allow_renaming:
node_props = G_.nodes[outnode]
# A dictionary with the old labels as keys and new labels
# as values. A partial mapping is allowed.
mapping = {outnode: node_id}
Gout = nx.relabel_nodes(G_, mapping)
if verbose:
print("Swapping out node ids:", mapping)
return Gout, node_props, x_p, y_p
else:
# new node is already added, presumably at the exact location
# of an existing node. So just remove the best edge and make
# an edge from new node to existing node, length should be 0.0
line1 = LineString([new_point, Point(outnode_x, outnode_y)])
edge_props_line1 = edge_props_new.copy()
edge_props_line1['length'] = line1.length
edge_props_line1['geometry'] = line1
# make sure length is zero
if line1.length > buff:
print("Nodes should be coincident and length 0!")
print(" line1.length:", line1.length)
print(" x_u, y_u :", x_u, y_u)
print(" x_v, y_v :", x_v, y_v)
print(" x_p, y_p :", x_p, y_p)
print(" new_point:", new_point)
print(" Point(outnode_x, outnode_y):",
Point(outnode_x, outnode_y))
return
# add edge of length 0 from the new node to the nearest existing node
G_.add_edge(node_id, outnode, **edge_props_line1)
return G_, node_props, x, y
else:
# else, create new edges
line1, line2 = split_line
# get distances
u_loc = [G_.nodes[u]['x'], G_.nodes[u]['y']]
v_loc = [G_.nodes[v]['x'], G_.nodes[v]['y']]
# compare to first point in linestring
geom_p0 = list(line_geom.coords)[0]
dist_to_u = scipy.spatial.distance.euclidean(u_loc, geom_p0)
dist_to_v = scipy.spatial.distance.euclidean(v_loc, geom_p0)
# reverse edge order if v closer than u
if dist_to_v < dist_to_u:
line2, line1 = split_line
if verbose:
print("Creating two edges from split...")
print(" original_length:", line_geom.length)
print(" line1_length:", line1.length)
print(" line2_length:", line2.length)
print(" u, dist_u_to_point:", u, dist_to_u)
print(" v, dist_v_to_point:", v, dist_to_v)
print(" min_dist:", min_dist)
# add new edges
edge_props_line1 = edge_props_new.copy()
edge_props_line1['length'] = line1.length
edge_props_line1['geometry'] = line1
# line2
edge_props_line2 = edge_props_new.copy()
edge_props_line2['length'] = line2.length
edge_props_line2['geometry'] = line2
# check which direction linestring is travelling (it may be going
# from v -> u, which means we need to reverse the linestring)
# otherwise new edge is tangled
geom_p0 = list(line_geom.coords)[0]
dist_to_u = scipy.spatial.distance.euclidean(u_loc, geom_p0)
dist_to_v = scipy.spatial.distance.euclidean(v_loc, geom_p0)
if dist_to_u < dist_to_v:
G_.add_edge(u, node_id, **edge_props_line1)
G_.add_edge(node_id, v, **edge_props_line2)
else:
G_.add_edge(node_id, u, **edge_props_line1)
G_.add_edge(v, node_id, **edge_props_line2)
if verbose:
print("insert edges:", u, '-', node_id, 'and', node_id, '-', v)
# remove initial edge
G_.remove_edge(u, v, key)
return G_, node_props, x, y
################################################################################
def insert_control_points(G_, control_points, max_distance_meters=10,
allow_renaming=True,
n_nodes_for_kd=1000, n_neighbors=20,
x_coord='x', y_coord='y',
verbose=True):
"""
Wrapper around insert_point_into_G() for all control_points.
Notes
-----
control_points are assumed to be of the format:
[[node_id, x, y], ... ]
TODO : Implement a version without renaming that tracks which node is
closest to the desired point.
Arguments
---------
G_ : networkx graph
Input networkx graph, with edges assumed to have a dictionary of
properties that includes the 'geometry' key.
control_points : array
Points to insert in the graph, assumed to the of the format:
[[node_id, x, y], ... ]
max_distance_meters : float
Maximum distance in meters between point and graph. Defaults to ``10``.
allow_renaming : boolean
Switch to allow renaming of an existing node with node_id if the
existing node is closest to the point. Defaults to ``True``.
n_nodes_for_kd : int
Minimum size of graph at which to build a kdtree to speed node placement.
Defaults to ``1000``.
n_neighbors : int
Number of neighbors to return if building a kdtree. Defaults to ``20``.
x_coord : str
Name of x_coordinate, can be 'x' or 'lon'. Defaults to ``'x'``.
y_coord : str
Name of y_coordinate, can be 'y' or 'lat'. Defaults to ``'y'``.
verbose : boolean
Switch to print relevant values to screen. Defaults to ``False``.
Returns
-------
Gout, new_xs, new_ys : tuple
Gout is the updated graph
new_xs, new_ys are coordinates of the inserted points
"""
t0 = time.time()
# insertion can be super slow so construct kdtree if a large graph
if len(G_.nodes()) > n_nodes_for_kd:
# construct kdtree of ground truth
kd_idx_dic, kdtree, pos_arr = apls_utils.G_to_kdtree(G_)
Gout = G_.copy()
new_xs, new_ys = [], []
if len(G_.nodes()) == 0:
return Gout, new_xs, new_ys
for i, [node_id, x, y] in enumerate(control_points):
if math.isinf(x) or math.isinf(y):
print("Infinity in coords!:", x, y)
return
if verbose:
# if (i % 20) == 0:
print(i, "/", len(control_points),
"Insert control point:", node_id, "x =", x, "y =", y)
point = Point(x, y)
# if large graph, determine nearby nodes
if len(G_.nodes()) > n_nodes_for_kd:
# get closest nodes
node_names, dists_m_refine = apls_utils.nodes_near_point(
x, y, kdtree, kd_idx_dic, x_coord=x_coord, y_coord=y_coord,
n_neighbors=n_neighbors,
verbose=False)
nearby_nodes_set = set(node_names)
else:
nearby_nodes_set = set([])
# insert point
Gout, node_props, xnew, ynew = insert_point_into_G(
Gout, point, node_id=node_id,
max_distance_meters=max_distance_meters,
nearby_nodes_set=nearby_nodes_set,
allow_renaming=allow_renaming,
verbose=verbose)
if (x != 0) and (y != 0):
new_xs.append(xnew)
new_ys.append(ynew)
t1 = time.time()
if verbose:
print("Time to run insert_control_points():", t1-t0, "seconds")
return Gout, new_xs, new_ys
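################################################################################
# NOTE: usage sketch added for illustration; not part of the original module.
# Control points use the [node_id, x, y] format. On a graph this small the
# kdtree path is skipped and each point is simply projected onto the nearest
# edge, which is split so the new node can be inserted there. The node id,
# coordinates and edge attributes below are made up for the example.
def _example_insert_control_points():
    G = nx.MultiGraph()
    G.add_node(0, x=0.0, y=0.0)
    G.add_node(1, x=100.0, y=0.0)
    geom = LineString([(0, 0), (100, 0)])
    G.add_edge(0, 1, geometry=geom, length=geom.length)
    control_points = [[9999, 40.0, 3.0]]  # hypothetical id, point 3 m off the edge
    Gout, new_xs, new_ys = insert_control_points(
        G, control_points, max_distance_meters=10, verbose=False)
    assert 9999 in Gout.nodes()  # new node projected onto the edge at (40, 0)
    return Gout, new_xs, new_ys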
################################################################################
def create_graph_midpoints(G_, linestring_delta=50, is_curved_eps=0.03,
n_id_add_val=1, allow_renaming=True,
verbose=False, super_verbose=False):
"""
Insert midpoint nodes into long edges on the graph.
Arguments
---------
    G_ : networkx graph
        Input networkx graph, with edges assumed to have a dictionary of
        properties that includes the 'geometry' key.
    linestring_delta : float
        Distance in meters between linestring midpoints. Defaults to ``50``.
    is_curved_eps : float
        Minimum curvature for injecting nodes (if curvature is less than this
        value, no midpoints will be injected). If < 0, always inject points
        on line, regardless of curvature. Defaults to ``0.03``.
    n_id_add_val : int
        Sets the minimum midpoint id above existing nodes,
        e.g.: G.nodes() = [1,2,4], if n_id_add_val = 5, midpoints will
        be [9,10,11,...]
    allow_renaming : boolean
        Switch to allow renaming of an existing node with node_id if the
        existing node is closest to the point. Defaults to ``True``.
verbose : boolean
Switch to print relevant values to screen. Defaults to ``False``.
super_verbose : boolean
Switch to print mucho values to screen. Defaults to ``False``.
Returns
-------
Gout, xms, yms : tuple
Gout is the updated graph
xms, yms are coordinates of the inserted points
"""
if len(G_.nodes()) == 0:
return G_, [], []
# midpoints
xms, yms = [], []
Gout = G_.copy()
midpoint_name_val, midpoint_name_inc = np.max(G_.nodes())+n_id_add_val, 1
for u, v, data in G_.edges(data=True):
# curved line
if 'geometry' in data:
# first edge props and get utm zone and letter
edge_props_init = G_.edges([u, v])
linelen = data['length']
line = data['geometry']
#################
# ignore empty line
if linelen == 0:
continue
# check if curved or not
minx, miny, maxx, maxy = line.bounds
# get euclidean distance
dst = scipy.spatial.distance.euclidean([minx, miny], [maxx, maxy])
# ignore if almost straight
if np.abs(dst - linelen) / linelen < is_curved_eps:
continue
#################
#################
# also ignore super short lines
if linelen < 0.75*linestring_delta:
continue
#################
if verbose:
print("create_graph_midpoints()...")
print(" u,v:", u, v)
print(" data:", data)
print(" edge_props_init:", edge_props_init)
# interpolate midpoints
# if edge is short, use midpoint, else get evenly spaced points
if linelen <= linestring_delta:
interp_dists = [0.5 * line.length]
else:
# get evenly spaced points
npoints = len(np.arange(0, linelen, linestring_delta)) + 1
interp_dists = np.linspace(0, linelen, npoints)[1:-1]
if verbose:
print(" interp_dists:", interp_dists)
# create nodes
node_id_new_list = []
xms_tmp, yms_tmp = [], []
for j, d in enumerate(interp_dists):
if verbose:
print(" ", j, "interp_dist:", d)
midPoint = line.interpolate(d)
xm0, ym0 = midPoint.xy
xm = xm0[-1]
ym = ym0[-1]
point = Point(xm, ym)
xms.append(xm)
yms.append(ym)
xms_tmp.append(xm)
yms_tmp.append(ym)
if verbose:
print(" midpoint:", xm, ym)
# add node to graph, with properties of u
node_id = midpoint_name_val
midpoint_name_val += midpoint_name_inc
node_id_new_list.append(node_id)
if verbose:
print(" node_id:", node_id)
# add to graph
Gout, node_props, _, _ = insert_point_into_G(
Gout, point, node_id=node_id,
allow_renaming=allow_renaming,
verbose=super_verbose)
return Gout, xms, yms
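

# Illustrative sketch (not part of the original module): how the midpoint
# spacing above behaves. For an edge of length 120 m and linestring_delta of
# 50 m, np.arange(0, 120, 50) has 3 entries, so npoints = 4 and the interior
# interpolation distances are np.linspace(0, 120, 4)[1:-1] = [40., 80.].
def _example_midpoint_distances(linelen=120.0, linestring_delta=50.0):
    if linelen <= linestring_delta:
        return [0.5 * linelen]
    npoints = len(np.arange(0, linelen, linestring_delta)) + 1
    return list(np.linspace(0, linelen, npoints)[1:-1])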
################################################################################
def _clean_sub_graphs(G_, min_length=80, max_nodes_to_skip=100,
weight='length', verbose=True,
super_verbose=False):
"""
    Remove subgraphs with a max path length less than min_length.
    If a subgraph has more than max_nodes_to_skip nodes, don't check length
    (this step greatly reduces processing time).
"""
if len(G_.nodes()) == 0:
return G_
if verbose:
print("Running clean_sub_graphs...")
try:
sub_graphs = list(nx.connected_component_subgraphs(G_))
except:
sub_graph_nodes = nx.connected_components(G_)
sub_graphs = [G_.subgraph(c).copy() for c in sub_graph_nodes]
bad_nodes = []
if verbose:
print(" len(G_.nodes()):", len(G_.nodes()))
print(" len(G_.edges()):", len(G_.edges()))
if super_verbose:
print("G_.nodes:", G_.nodes())
            edge_tmp = list(G_.edges())[np.random.randint(len(G_.edges()))]
            print(edge_tmp, "G.edge props:",
                  G_.get_edge_data(edge_tmp[0], edge_tmp[1]))
for G_sub in sub_graphs:
# don't check length if too many nodes in subgraph
if len(G_sub.nodes()) > max_nodes_to_skip:
continue
else:
all_lengths = dict(
nx.all_pairs_dijkstra_path_length(G_sub, weight=weight))
if super_verbose:
print(" \nGs.nodes:", G_sub.nodes())
print(" all_lengths:", all_lengths)
            # get all lengths
lens = []
for u in all_lengths.keys():
v = all_lengths[u]
for uprime in v.keys():
vprime = v[uprime]
lens.append(vprime)
if super_verbose:
print(" u, v", u, v)
print(" uprime, vprime:", uprime, vprime)
max_len = np.max(lens)
if super_verbose:
print(" Max length of path:", max_len)
if max_len < min_length:
bad_nodes.extend(G_sub.nodes())
if super_verbose:
print(" appending to bad_nodes:", G_sub.nodes())
# remove bad_nodes
G_.remove_nodes_from(bad_nodes)
if verbose:
print(" num bad_nodes:", len(bad_nodes))
# print ("bad_nodes:", bad_nodes)
print(" len(G'.nodes()):", len(G_.nodes()))
print(" len(G'.edges()):", len(G_.edges()))
if super_verbose:
print(" G_.nodes:", G_.nodes())
return G_
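

# Illustrative sketch (not part of the original module): _clean_sub_graphs()
# drops connected components whose longest shortest-path is under min_length.
# The toy graph below has a 100 m component and a 5 m component; only the
# former survives with min_length=80.
def _example_clean_sub_graphs():
    G = nx.Graph()
    G.add_edge(0, 1, length=100.)   # long component, kept
    G.add_edge(10, 11, length=5.)   # short component, removed
    G_clean = _clean_sub_graphs(G.copy(), min_length=80, verbose=False)
    return sorted(G_clean.nodes())  # -> [0, 1]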
################################################################################
def _create_gt_graph(geoJson, im_test_file, network_type='all_private',
valid_road_types=set([]),
osmidx=0, osmNodeidx=0,
subgraph_filter_weight='length',
min_subgraph_length=5,
travel_time_key='travel_time_s',
speed_key='inferred_speed_mps',
verbose=False,
super_verbose=False):
'''Ingest graph from geojson file and refine'''
t0 = time.time()
if verbose:
print("Executing graphTools.create_graphGeoJson()...")
G0gt_init = graphTools.create_graphGeoJson(
geoJson, name='unnamed', retain_all=True,
network_type=network_type, valid_road_types=valid_road_types,
osmidx=osmidx, osmNodeidx=osmNodeidx, verbose=verbose)
t1 = time.time()
if verbose:
print("Time to run create_graphGeoJson():", t1 - t0, "seconds")
# refine graph
G_gt = _refine_gt_graph(G0gt_init, im_test_file,
subgraph_filter_weight=subgraph_filter_weight,
min_subgraph_length=min_subgraph_length,
travel_time_key=travel_time_key,
speed_key=speed_key,
verbose=verbose,
super_verbose=super_verbose)
return G_gt, G0gt_init
################################################################################
def _refine_gt_graph(G0gt_init, im_test_file,
subgraph_filter_weight='length',
min_subgraph_length=5,
travel_time_key='travel_time_s',
speed_key='inferred_speed_mps',
verbose=False,
super_verbose=False):
"""refine ground truth graph"""
t1 = time.time()
# save latlon geometry (osmnx overwrites the 'geometry' tag)
# also compute pixel geom
for i, (u, v, key, data) in enumerate(G0gt_init.edges(keys=True, data=True)):
if 'geometry' not in data:
sourcex, sourcey = G0gt_init.nodes[u]['x'], G0gt_init.nodes[u]['y']
targetx, targety = G0gt_init.nodes[v]['x'], G0gt_init.nodes[v]['y']
line_geom = LineString([Point(sourcex, sourcey),
Point(targetx, targety)])
else:
line_geom = data['geometry']
data['geometry_latlon'] = line_geom.wkt
if os.path.exists(im_test_file):
# get pixel geom (do this after simplify so that we don't have to
# collapse the lines (see apls_wkt_to_G.wkt_to_G)
geom_pix = apls_utils.geomGeo2geomPixel(line_geom,
input_raster=im_test_file)
data['geometry_pix'] = geom_pix.wkt
data['length_pix'] = geom_pix.length
if len(G0gt_init.nodes()) == 0:
return G0gt_init
G0gt = osmnx_funcs.project_graph(G0gt_init)
if verbose:
print("len G0gt.nodes():", len(G0gt.nodes()))
print("len G0gt.edges:", len(G0gt.edges()))
if verbose:
print("Simplifying graph...")
try:
G2gt_init0 = osmnx_funcs.simplify_graph(G0gt).to_undirected()
except:
print("Cannot simplify graph, using original")
G2gt_init0 = G0gt
# make sure all edges have a geometry assigned to them
G2gt_init1 = create_edge_linestrings(
G2gt_init0.copy(), remove_redundant=True)
t2 = time.time()
if verbose:
print("Time to project, simplify, and create linestrings:",
t2 - t1, "seconds")
# clean up connected components
G2gt_init2 = _clean_sub_graphs(
G2gt_init1.copy(), min_length=min_subgraph_length,
weight=subgraph_filter_weight,
verbose=verbose, super_verbose=super_verbose)
# add pixel coords
try:
if os.path.exists(im_test_file):
G_gt_almost, _, gt_graph_coords = apls_utils._set_pix_coords(
G2gt_init2.copy(), im_test_file)
else:
G_gt_almost = G2gt_init2
    except Exception:
        # fall back to the cleaned graph if pixel coords cannot be set
        G_gt_almost = G2gt_init2
    # ensure nodes have correct x_pix and y_pix, since _set_pix_coords can be
    # unreliable
for j, n in enumerate(G_gt_almost.nodes()):
x, y = G_gt_almost.nodes[n]['x'], G_gt_almost.nodes[n]['y']
geom_pix = apls_utils.geomGeo2geomPixel(Point(x, y),
input_raster=im_test_file)
[(xp, yp)] = list(geom_pix.coords)
G_gt_almost.nodes[n]['x_pix'] = xp
G_gt_almost.nodes[n]['y_pix'] = yp
# update pixel and lat lon geometries that get turned into lists upon
# simplify() that produces a 'geometry' tag in wmp
if verbose:
print("Merge 'geometry' linestrings...")
keys_tmp = ['geometry_pix', 'geometry_latlon']
for i, (u, v, attr_dict) in enumerate(G_gt_almost.edges(data=True)):
for key_tmp in keys_tmp:
if key_tmp not in attr_dict.keys():
continue
if super_verbose:
print("Merge", key_tmp, "...")
geom = attr_dict[key_tmp]
if type(geom) == list:
# check if the list items are wkt strings, if so, create
                # linestrings
# or (type(geom_pix[0]) == unicode):
if (type(geom[0]) == str):
geom = [shapely.wkt.loads(ztmp) for ztmp in geom]
# merge geoms
attr_dict[key_tmp] = shapely.ops.linemerge(geom)
elif type(geom) == str:
attr_dict[key_tmp] = shapely.wkt.loads(geom)
else:
pass
# update wkt_pix?
if 'wkt_pix' in attr_dict.keys():
attr_dict['wkt_pix'] = attr_dict['geometry_pix'].wkt
# update 'length_pix'
if 'length_pix' in attr_dict.keys():
attr_dict['length_pix'] = np.sum([attr_dict['length_pix']])
# check if simplify created various speeds on an edge
speed_keys = [speed_key, 'inferred_speed_mph', 'inferred_speed_mps']
for sk in speed_keys:
if sk not in attr_dict.keys():
continue
if type(attr_dict[sk]) == list:
if verbose:
print(" Taking mean of multiple speeds on edge:", u, v)
attr_dict[sk] = np.mean(attr_dict[sk])
if verbose:
print("u, v, speed_key, attr_dict)[speed_key]:",
u, v, sk, attr_dict[sk])
# add travel time
G_gt = add_travel_time(G_gt_almost.copy(),
speed_key=speed_key,
travel_time_key=travel_time_key)
return G_gt
################################################################################
def make_graphs(G_gt_, G_p_,
weight='length',
speed_key='inferred_speed_mps',
travel_time_key='travel_time_s',
max_nodes_for_midpoints=500,
linestring_delta=50,
is_curved_eps=0.012,
max_snap_dist=4,
allow_renaming=True,
verbose=False,
super_verbose=False):
"""
    Match nodes in ground truth and proposal graphs, and get paths.
Notes
-----
The path length dictionaries returned by this function will be fed into
compute_metric().
Arguments
---------
G_gt_ : networkx graph
Ground truth graph.
    G_p_ : networkx graph
Proposal graph over the same region.
weight : str
Key in the edge properties dictionary to use for the path length
weight. Defaults to ``'length'``.
speed_key : str
Key in the edge properties dictionary to use for the edge speed.
Defaults to ``'inferred_speed_mps'``.
travel_time_key : str
Name to assign travel time in the edge properties dictionary.
Defaults to ``'travel_time_s'``.
    max_nodes_for_midpoints : int
        Maximum number of gt nodes for midpoint injection. If there are more
        gt nodes than this, skip midpoints and use this number of points
        to compute APLS.
    linestring_delta : float
        Distance in meters between linestring midpoints.
        If len gt nodes > max_nodes_for_midpoints this argument is ignored.
        Defaults to ``50``.
    is_curved_eps : float
        Minimum curvature for injecting nodes (if curvature is less than this
        value, no midpoints will be injected). If < 0, always inject points
        on line, regardless of curvature.
        If len gt nodes > max_nodes_for_midpoints this argument is ignored.
        Defaults to ``0.012``.
    max_snap_dist : float
        Maximum distance a node can be snapped onto a graph.
        Defaults to ``4``.
    allow_renaming : boolean
        Switch to allow renaming of an existing node with node_id if the
        existing node is closest to the point. Defaults to ``True``.
verbose : boolean
Switch to print relevant values to screen. Defaults to ``False``.
super_verbose : boolean
Switch to print mucho values to screen. Defaults to ``False``.
Return
------
G_gt_cp, G_p_cp, G_gt_cp_prime, G_p_cp_prime, \
control_points_gt, control_points_prop, \
all_pairs_lengths_gt_native, all_pairs_lengths_prop_native, \
all_pairs_lengths_gt_prime, all_pairs_lengths_prop_prime : tuple
G_gt_cp is ground truth with control points inserted
G_p_cp is proposal with control points inserted
G_gt_cp_prime is ground truth with control points from prop inserted
G_p_cp_prime is proposal with control points from gt inserted
all_pairs_lengths_gt_native is path length dict corresponding to G_gt_cp
all_pairs_lengths_prop_native is path length dict corresponding to G_p_cp
all_pairs_lengths_gt_prime is path length dict corresponding to G_gt_cp_prime
        all_pairs_lengths_prop_prime is path length dict corresponding to G_p_cp_prime
"""
t0 = time.time()
for i, (u, v, data) in enumerate(G_gt_.edges(keys=False, data=True)):
if weight not in data.keys():
print("Error!", weight, "not in G_gt_ edge u, v, data:", u, v, data)
return
for i, (u, v, key, data) in enumerate(G_gt_.edges(keys=True, data=True)):
try:
line = data['geometry']
except KeyError:
line = data[0]['geometry']
if type(line) == str: # or type(line) == unicode:
data['geometry'] = shapely.wkt.loads(line)
# create graph with midpoints
G_gt0 = create_edge_linestrings(G_gt_.to_undirected())
if verbose:
print("len G_gt.nodes():", len(list(G_gt0.nodes())))
print("len G_gt.edges():", len(list(G_gt0.edges())))
if verbose:
print("Creating gt midpoints")
G_gt_cp0, xms, yms = create_graph_midpoints(
G_gt0.copy(),
linestring_delta=linestring_delta,
is_curved_eps=is_curved_eps,
verbose=False)
# add travel time
G_gt_cp = add_travel_time(G_gt_cp0.copy(),
speed_key=speed_key,
travel_time_key=travel_time_key)
# get ground truth control points
control_points_gt = []
for n in G_gt_cp.nodes():
u_x, u_y = G_gt_cp.nodes[n]['x'], G_gt_cp.nodes[n]['y']
control_points_gt.append([n, u_x, u_y])
if verbose:
print("len control_points_gt:", len(control_points_gt))
# get ground truth paths
if verbose:
print("Get ground truth paths...")
all_pairs_lengths_gt_native = dict(
nx.shortest_path_length(G_gt_cp, weight=weight))
###############
# Proposal
for i, (u, v, data) in enumerate(G_p_.edges(keys=False, data=True)):
if weight not in data.keys():
print("Error!", weight, "not in G_p_ edge u, v, data:", u, v, data)
return
# get proposal graph with native midpoints
for i, (u, v, key, data) in enumerate(G_p_.edges(keys=True, data=True)):
try:
line = data['geometry']
except:
line = data[0]['geometry']
if type(line) == str: # or type(line) == unicode:
data['geometry'] = shapely.wkt.loads(line)
G_p0 = create_edge_linestrings(G_p_.to_undirected())
# add travel time
G_p = add_travel_time(G_p0.copy(),
speed_key=speed_key,
travel_time_key=travel_time_key)
if verbose:
print("len G_p.nodes():", len(G_p.nodes()))
print("len G_p.edges():", len(G_p.edges()))
if verbose:
print("Creating proposal midpoints")
G_p_cp0, xms_p, yms_p = create_graph_midpoints(
G_p.copy(),
linestring_delta=linestring_delta,
is_curved_eps=is_curved_eps,
verbose=False)
# add travel time
G_p_cp = add_travel_time(G_p_cp0.copy(),
speed_key=speed_key,
travel_time_key=travel_time_key)
if verbose:
print("len G_p_cp.nodes():", len(G_p_cp.nodes()))
print("len G_p_cp.edges():", len(G_p_cp.edges()))
# set proposal control nodes, originally just all nodes in G_p_cp
# original method sets proposal control points as all nodes in G_p_cp
# get proposal control points
control_points_prop = []
for n in G_p_cp.nodes():
u_x, u_y = G_p_cp.nodes[n]['x'], G_p_cp.nodes[n]['y']
control_points_prop.append([n, u_x, u_y])
# get paths
all_pairs_lengths_prop_native = dict(
nx.shortest_path_length(G_p_cp, weight=weight))
###############
# insert gt control points into proposal
if verbose:
print("Inserting", len(control_points_gt),
"control points into G_p...")
print("G_p.nodes():", G_p.nodes())
G_p_cp_prime0, xn_p, yn_p = insert_control_points(
G_p.copy(), control_points_gt,
max_distance_meters=max_snap_dist,
allow_renaming=allow_renaming,
verbose=super_verbose)
# add travel time
G_p_cp_prime = add_travel_time(G_p_cp_prime0.copy(),
speed_key=speed_key,
travel_time_key=travel_time_key)
###############
# now insert control points into ground truth
if verbose:
print("\nInserting", len(control_points_prop),
"control points into G_gt...")
# permit renaming of inserted nodes if coincident with existing node
G_gt_cp_prime0, xn_gt, yn_gt = insert_control_points(
G_gt_,
control_points_prop,
max_distance_meters=max_snap_dist,
allow_renaming=allow_renaming,
verbose=super_verbose)
# add travel time
G_gt_cp_prime = add_travel_time(G_gt_cp_prime0.copy(),
speed_key=speed_key,
travel_time_key=travel_time_key)
###############
# get paths
all_pairs_lengths_gt_prime = dict(
nx.shortest_path_length(G_gt_cp_prime, weight=weight))
all_pairs_lengths_prop_prime = dict(
nx.shortest_path_length(G_p_cp_prime, weight=weight))
tf = time.time()
if verbose:
print("Time to run make_graphs in apls.py:", tf - t0, "seconds")
return G_gt_cp, G_p_cp, G_gt_cp_prime, G_p_cp_prime, \
control_points_gt, control_points_prop, \
all_pairs_lengths_gt_native, all_pairs_lengths_prop_native, \
all_pairs_lengths_gt_prime, all_pairs_lengths_prop_prime
################################################################################
def make_graphs_yuge(G_gt, G_p,
weight='length',
speed_key='inferred_speed_mps',
travel_time_key='travel_time_s',
max_nodes=500,
max_snap_dist=4,
allow_renaming=True,
verbose=True, super_verbose=False):
"""
    Match nodes in large ground truth and proposal graphs, and get paths.
Notes
-----
Skip midpoint injection and only select a subset of routes to compare.
The path length dictionaries returned by this function will be fed into
compute_metric().
Arguments
---------
G_gt : networkx graph
Ground truth graph.
    G_p : networkx graph
Proposal graph over the same region.
weight : str
Key in the edge properties dictionary to use for the path length
weight. Defaults to ``'length'``.
speed_key : str
Key in the edge properties dictionary to use for the edge speed.
Defaults to ``'inferred_speed_mps'``.
travel_time_key : str
Name to assign travel time in the edge properties dictionary.
Defaults to ``'travel_time_s'``.
    max_nodes : int
        Maximum number of gt nodes to sample as control points. If there are
        more gt nodes than this, use this number of randomly selected points
        to compute APLS.
    max_snap_dist : float
        Maximum distance a node can be snapped onto a graph.
        Defaults to ``4``.
    allow_renaming : boolean
        Switch to allow renaming of an existing node with node_id if the
        existing node is closest to the point. Defaults to ``True``.
    verbose : boolean
        Switch to print relevant values to screen. Defaults to ``True``.
super_verbose : boolean
Switch to print mucho values to screen. Defaults to ``False``.
Return
------
G_gt_cp, G_p_cp, G_gt_cp_prime, G_p_cp_prime, \
control_points_gt, control_points_prop, \
all_pairs_lengths_gt_native, all_pairs_lengths_prop_native, \
all_pairs_lengths_gt_prime, all_pairs_lengths_prop_prime : tuple
G_gt_cp is ground truth with control points inserted
G_p_cp is proposal with control points inserted
G_gt_cp_prime is ground truth with control points from prop inserted
G_p_cp_prime is proposal with control points from gt inserted
all_pairs_lengths_gt_native is path length dict corresponding to G_gt_cp
all_pairs_lengths_prop_native is path length dict corresponding to G_p_cp
all_pairs_lengths_gt_prime is path length dict corresponding to G_gt_cp_prime
        all_pairs_lengths_prop_prime is path length dict corresponding to G_p_cp_prime
"""
t0 = time.time()
for i, (u, v, key, data) in enumerate(G_gt.edges(keys=True, data=True)):
try:
line = data['geometry']
except:
line = data[0]['geometry']
if type(line) == str: # or type(line) == unicode:
data['geometry'] = shapely.wkt.loads(line)
for i, (u, v, key, data) in enumerate(G_p.edges(keys=True, data=True)):
try:
line = data['geometry']
except:
line = data[0]['geometry']
if type(line) == str: # or type(line) == unicode:
data['geometry'] = shapely.wkt.loads(line)
# create graph with linestrings?
G_gt_cp = G_gt.to_undirected()
if verbose:
print("len(G_gt.nodes()):", len(G_gt_cp.nodes()))
print("len(G_gt.edges()):", len(G_gt_cp.edges()))
# gt node and edge props
node = random.choice(list(G_gt.nodes()))
print("node:", node, "G_gt random node props:", G_gt.nodes[node])
edge_tmp = random.choice(list(G_gt.edges()))
print("G_gt edge_tmp:", edge_tmp)
try:
print("edge:", edge_tmp, "G_gt random edge props:",
G_gt.edges[edge_tmp[0]][edge_tmp[1]])
except:
try:
print("edge:", edge_tmp, "G_gt random edge props:",
G_gt.edges[edge_tmp[0], edge_tmp[1], 0])
except:
pass
# prop node and edge props
node = random.choice(list(G_p.nodes()))
print("node:", node, "G_p random node props:", G_p.nodes[node])
edge_tmp = random.choice(list(G_p.edges()))
print("G_p edge_tmp:", edge_tmp)
try:
print("edge:", edge_tmp, "G_p random edge props:",
G_p.edges[edge_tmp[0]][edge_tmp[1]])
except:
try:
print("edge:", edge_tmp, "G_p random edge props:",
G_p.edges[edge_tmp[0], edge_tmp[1], 0])
except:
pass
# get ground truth control points, which will be a subset of nodes
sample_size = min(max_nodes, len(G_gt_cp.nodes()))
    rand_nodes_gt = random.sample(list(G_gt_cp.nodes()), sample_size)
rand_nodes_gt_set = set(rand_nodes_gt)
control_points_gt = []
for itmp,n in enumerate(rand_nodes_gt):
        if verbose and (itmp % 20) == 0:
            print("control_point", itmp, ":", n, ":", G_gt_cp.nodes[n])
u_x, u_y = G_gt_cp.nodes[n]['x'], G_gt_cp.nodes[n]['y']
control_points_gt.append([n, u_x, u_y])
if verbose:
print("len control_points_gt:", len(control_points_gt))
# add travel time
G_gt_cp = add_travel_time(G_gt_cp,
speed_key=speed_key,
travel_time_key=travel_time_key)
# get route lengths between all control points
# gather all paths from nodes of interest, keep only routes to control nodes
tt = time.time()
if verbose:
print("Computing all_pairs_lengths_gt_native...")
all_pairs_lengths_gt_native = {}
for itmp, source in enumerate(rand_nodes_gt):
if verbose and ((itmp % 50) == 0):
print((itmp, "source:", source))
paths_tmp = nx.single_source_dijkstra_path_length(
G_gt_cp, source, weight=weight)
# delete items
for k in list(paths_tmp.keys()):
if k not in rand_nodes_gt_set:
del paths_tmp[k]
all_pairs_lengths_gt_native[source] = paths_tmp
if verbose:
print(("Time to compute all source routes for",
sample_size, "nodes:", time.time() - tt, "seconds"))
###############
# get proposal graph with native midpoints
G_p_cp = G_p.to_undirected()
if verbose:
print("len G_p_cp.nodes():", len(G_p_cp.nodes()))
print("G_p_cp.edges():", len(G_p_cp.edges()))
# get control points, which will be a subset of nodes
# (original method sets proposal control points as all nodes in G_p_cp)
sample_size = min(max_nodes, len(G_p_cp.nodes()))
    rand_nodes_p = random.sample(list(G_p_cp.nodes()), sample_size)
rand_nodes_p_set = set(rand_nodes_p)
control_points_prop = []
for n in rand_nodes_p:
u_x, u_y = G_p_cp.nodes[n]['x'], G_p_cp.nodes[n]['y']
control_points_prop.append([n, u_x, u_y])
# add travel time
G_p_cp = add_travel_time(G_p_cp,
speed_key=speed_key,
travel_time_key=travel_time_key)
# get paths
# gather all paths from nodes of interest, keep only routes to control nodes
tt = time.time()
if verbose:
print("Computing all_pairs_lengths_prop_native...")
all_pairs_lengths_prop_native = {}
for itmp, source in enumerate(rand_nodes_p):
if verbose and ((itmp % 50) == 0):
print((itmp, "source:", source))
paths_tmp = nx.single_source_dijkstra_path_length(
G_p_cp, source, weight=weight)
# delete items
for k in list(paths_tmp.keys()):
if k not in rand_nodes_p_set:
del paths_tmp[k]
all_pairs_lengths_prop_native[source] = paths_tmp
if verbose:
print(("Time to compute all source routes for",
max_nodes, "nodes:", time.time() - tt, "seconds"))
###############
# insert gt control points into proposal
if verbose:
print("Inserting", len(control_points_gt),
"control points into G_p...")
print("len G_p.nodes():", len(G_p.nodes()))
G_p_cp_prime, xn_p, yn_p = insert_control_points(
G_p.copy(), control_points_gt, max_distance_meters=max_snap_dist,
allow_renaming=allow_renaming, verbose=super_verbose)
# add travel time
G_p_cp_prime = add_travel_time(G_p_cp_prime,
speed_key=speed_key,
travel_time_key=travel_time_key)
###############
# now insert control points into ground truth
if verbose:
print("\nInserting", len(control_points_prop),
"control points into G_gt...")
# permit renaming of inserted nodes if coincident with existing node
G_gt_cp_prime, xn_gt, yn_gt = insert_control_points(
G_gt, control_points_prop, max_distance_meters=max_snap_dist,
allow_renaming=allow_renaming, verbose=super_verbose)
G_gt_cp_prime = add_travel_time(G_gt_cp_prime,
speed_key=speed_key,
travel_time_key=travel_time_key)
###############
# get paths for graphs_prime
# gather all paths from nodes of interest, keep only routes to control nodes
# gt_prime
tt = time.time()
all_pairs_lengths_gt_prime = {}
if verbose:
print("Computing all_pairs_lengths_gt_prime...")
G_gt_cp_prime_nodes_set = set(G_gt_cp_prime.nodes())
for itmp, source in enumerate(rand_nodes_p_set):
if verbose and ((itmp % 50) == 0):
print((itmp, "source:", source))
if source in G_gt_cp_prime_nodes_set:
paths_tmp = nx.single_source_dijkstra_path_length(
G_gt_cp_prime, source, weight=weight)
# delete items
for k in list(paths_tmp.keys()):
if k not in rand_nodes_p_set:
del paths_tmp[k]
all_pairs_lengths_gt_prime[source] = paths_tmp
if verbose:
print(("Time to compute all source routes for",
max_nodes, "nodes:", time.time() - tt, "seconds"))
# prop_prime
tt = time.time()
all_pairs_lengths_prop_prime = {}
if verbose:
print("Computing all_pairs_lengths_prop_prime...")
G_p_cp_prime_nodes_set = set(G_p_cp_prime.nodes())
for itmp, source in enumerate(rand_nodes_gt_set):
if verbose and ((itmp % 50) == 0):
print((itmp, "source:", source))
if source in G_p_cp_prime_nodes_set:
paths_tmp = nx.single_source_dijkstra_path_length(
G_p_cp_prime, source, weight=weight)
# delete items
for k in list(paths_tmp.keys()):
if k not in rand_nodes_gt_set:
del paths_tmp[k]
all_pairs_lengths_prop_prime[source] = paths_tmp
if verbose:
print(("Time to compute all source routes for",
max_nodes, "nodes:", time.time() - tt, "seconds"))
###############
tf = time.time()
if verbose:
print("Time to run make_graphs_yuge in apls.py:", tf - t0, "seconds")
return G_gt_cp, G_p_cp, G_gt_cp_prime, G_p_cp_prime, \
control_points_gt, control_points_prop, \
all_pairs_lengths_gt_native, all_pairs_lengths_prop_native, \
all_pairs_lengths_gt_prime, all_pairs_lengths_prop_prime
################################################################################
def single_path_metric(len_gt, len_prop, diff_max=1):
"""
Compute APLS metric for single path.
Notes
-----
Compute normalize path difference metric, if len_prop < 0, return diff_max
Arguments
---------
len_gt : float
Length of ground truth edge.
len_prop : float
Length of proposal edge.
diff_max : float
Maximum value to return. Defaults to ``1``.
Returns
-------
metric : float
Normalized path difference.
"""
if len_gt <= 0:
return 0
elif len_prop < 0 and len_gt > 0:
return diff_max
else:
diff_raw = np.abs(len_gt - len_prop) / len_gt
return np.min([diff_max, diff_raw])
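

# Illustrative sketch (not part of the original module): for a ground-truth
# path of 100 m and a proposal path of 90 m, the normalized difference is
# |100 - 90| / 100 = 0.1; a missing proposal path (len_prop < 0) receives the
# maximum penalty diff_max, and large differences are capped at diff_max.
def _example_single_path_metric():
    assert abs(single_path_metric(100., 90.) - 0.1) < 1e-9
    assert single_path_metric(100., -1) == 1
    assert single_path_metric(100., 350.) == 1  # capped at diff_max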
################################################################################
def path_sim_metric(all_pairs_lengths_gt, all_pairs_lengths_prop,
control_nodes=[], min_path_length=10,
diff_max=1, missing_path_len=-1, normalize=True,
verbose=False):
"""
Compute metric for multiple paths.
Notes
-----
Assume nodes in ground truth and proposed graph have the same names.
Assume graph is undirected so don't evaluate routes in both directions
control_nodes is the list of nodes to actually evaluate; if empty do all
    in all_pairs_lengths_gt
min_path_length is the minimum path length to evaluate
https://networkx.github.io/documentation/networkx-2.2/reference/algorithms/shortest_paths.html
Parameters
----------
all_pairs_lengths_gt : dict
Dictionary of path lengths for ground truth graph.
all_pairs_lengths_prop : dict
Dictionary of path lengths for proposal graph.
control_nodes : list
List of control nodes to evaluate.
min_path_length : float
Minimum path length to evaluate.
diff_max : float
Maximum value to return. Defaults to ``1``.
missing_path_len : float
Value to assign a missing path. Defaults to ``-1``.
normalize : boolean
Switch to normalize outputs. Defaults to ``True``.
verbose : boolean
Switch to print relevant values to screen. Defaults to ``False``.
Returns
-------
C, diffs, routes, diff_dic
C is the APLS score
        diffs is a list of the route differences
routes is a list of routes
diff_dic is a dictionary of path differences
"""
diffs = []
routes = []
diff_dic = {}
gt_start_nodes_set = set(all_pairs_lengths_gt.keys())
prop_start_nodes_set = set(all_pairs_lengths_prop.keys())
t0 = time.time()
if len(gt_start_nodes_set) == 0:
return 0, [], [], {}
# set nodes to inspect
if len(control_nodes) == 0:
good_nodes = list(all_pairs_lengths_gt.keys())
else:
good_nodes = control_nodes
if verbose:
print("\nComputing path_sim_metric()...")
print("good_nodes:", good_nodes)
    # iterate over all start nodes
for start_node in good_nodes:
if verbose:
print("start node:", start_node)
node_dic_tmp = {}
        # if we are not careful with control nodes, it's possible that the
        # start node will not be in all_pairs_lengths_gt; in this case use
        # the max diff for all routes from that node
        if start_node not in gt_start_nodes_set:
            for end_node, len_prop in all_pairs_lengths_prop.get(
                    start_node, {}).items():
                diffs.append(diff_max)
                routes.append([start_node, end_node])
                node_dic_tmp[end_node] = diff_max
            diff_dic[start_node] = node_dic_tmp
            continue
paths = all_pairs_lengths_gt[start_node]
# CASE 1
# if the start node is missing from proposal, use maximum diff for
# all possible routes to the start node
if start_node not in prop_start_nodes_set:
for end_node, len_gt in paths.items():
if (end_node != start_node) and (end_node in good_nodes):
diffs.append(diff_max)
routes.append([start_node, end_node])
node_dic_tmp[end_node] = diff_max
diff_dic[start_node] = node_dic_tmp
continue
# else get proposed paths
else:
paths_prop = all_pairs_lengths_prop[start_node]
# get set of all nodes in paths_prop, and missing_nodes
end_nodes_gt_set = set(paths.keys()).intersection(good_nodes)
end_nodes_prop_set = set(paths_prop.keys())
missing_nodes = end_nodes_gt_set - end_nodes_prop_set
if verbose:
print("missing nodes:", missing_nodes)
# iterate over all paths from node
for end_node in end_nodes_gt_set:
len_gt = paths[end_node]
# skip if too short
if len_gt < min_path_length:
continue
# get proposed path
if end_node in end_nodes_prop_set:
# CASE 2, end_node in both paths and paths_prop, so
# valid path exists
len_prop = paths_prop[end_node]
else:
# CASE 3: end_node in paths but not paths_prop, so assign
# length as diff_max
len_prop = missing_path_len
if verbose:
print("end_node:", end_node)
print(" len_gt:", len_gt)
print(" len_prop:", len_prop)
# compute path difference metric
diff = single_path_metric(len_gt, len_prop, diff_max=diff_max)
diffs.append(diff)
routes.append([start_node, end_node])
node_dic_tmp[end_node] = diff
diff_dic[start_node] = node_dic_tmp
if len(diffs) == 0:
return 0, [], [], {}
# compute Cost
diff_tot = np.sum(diffs)
if normalize:
norm = len(diffs)
diff_norm = diff_tot / norm
C = 1. - diff_norm
else:
C = diff_tot
if verbose:
print("Time to compute metric (score = ", C, ") for ", len(diffs),
"routes:", time.time() - t0, "seconds")
return C, diffs, routes, diff_dic
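

# Illustrative sketch (not part of the original module): path_sim_metric()
# consumes dictionaries of the form {source: {target: length}}, as produced
# by nx.shortest_path_length(). With two symmetric 100 m ground-truth routes
# measured at 90 m in the proposal, the score is 1 - 0.1 = 0.9.
def _example_path_sim_metric():
    lengths_gt = {0: {1: 100.}, 1: {0: 100.}}
    lengths_prop = {0: {1: 90.}, 1: {0: 90.}}
    C, diffs, routes, diff_dic = path_sim_metric(
        lengths_gt, lengths_prop, min_path_length=10)
    return C  # -> 0.9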
################################################################################
def compute_apls_metric(all_pairs_lengths_gt_native,
all_pairs_lengths_prop_native,
all_pairs_lengths_gt_prime,
all_pairs_lengths_prop_prime,
control_points_gt, control_points_prop,
min_path_length=10,
verbose=False, super_verbose=False):
"""
Compute APLS metric
Notes
-----
Computes APLS
Arguments
---------
all_pairs_lengths_gt_native : dict
Dict of paths for gt graph.
all_pairs_lengths_prop_native : dict
Dict of paths for prop graph.
all_pairs_lengths_gt_prime : dict
Dict of paths for gt graph with control points from prop.
all_pairs_lengths_prop_prime : dict
Dict of paths for prop graph with control points from gt.
control_points_gt : list
Array of control points.
control_points_prop : list
Array of control points.
min_path_length : float
Minimum path length to evaluate.
verbose : boolean
Switch to print relevant values to screen. Defaults to ``False``.
super_verbose : boolean
Switch to print mucho values to screen. Defaults to ``False``.
Returns
-------
C_tot, C_gt_onto_prop, C_prop_onto_gt : tuple
C_tot is the total APLS score
C_gt_onto_prop is the score when inserting gt control nodes onto prop
C_prop_onto_gt is the score when inserting prop control nodes onto gt
"""
t0 = time.time()
# return 0 if no paths
if (len(list(all_pairs_lengths_gt_native.keys())) == 0) \
or (len(list(all_pairs_lengths_prop_native.keys())) == 0):
if verbose:
print("len(all_pairs_lengths_gt_native.keys()) == 0)")
return 0, 0, 0
####################
# compute metric (gt to prop)
control_nodes = [z[0] for z in control_points_gt]
if verbose:
print(("control_nodes_gt:", control_nodes))
C_gt_onto_prop, diffs, routes, diff_dic = path_sim_metric(
all_pairs_lengths_gt_native,
all_pairs_lengths_prop_prime,
control_nodes=control_nodes,
min_path_length=min_path_length,
diff_max=1, missing_path_len=-1, normalize=True,
verbose=super_verbose)
dt1 = time.time() - t0
if verbose:
print("len(diffs):", len(diffs))
if len(diffs) > 0:
print(" max(diffs):", np.max(diffs))
print(" min(diffs)", np.min(diffs))
####################
# compute metric (prop to gt)
t1 = time.time()
control_nodes = [z[0] for z in control_points_prop]
if verbose:
print("control_nodes:", control_nodes)
C_prop_onto_gt, diffs, routes, diff_dic = path_sim_metric(
all_pairs_lengths_prop_native,
all_pairs_lengths_gt_prime,
control_nodes=control_nodes,
min_path_length=min_path_length,
diff_max=1, missing_path_len=-1, normalize=True,
verbose=super_verbose)
dt2 = time.time() - t1
if verbose:
print("len(diffs):", len(diffs))
if len(diffs) > 0:
print(" max(diffs):", np.max(diffs))
print(" min(diffs)", np.min(diffs))
####################
# Total
if (C_gt_onto_prop <= 0) or (C_prop_onto_gt <= 0) \
or (np.isnan(C_gt_onto_prop)) or (np.isnan(C_prop_onto_gt)):
C_tot = 0
else:
C_tot = scipy.stats.hmean([C_gt_onto_prop, C_prop_onto_gt])
if np.isnan(C_tot):
C_tot = 0
# print("Total APLS Metric = Mean(", np.round(C_gt_onto_prop, 2), "+",
# np.round(C_prop_onto_gt, 2),
# ") =", np.round(C_tot, 2))
return C_tot, C_gt_onto_prop, C_prop_onto_gt
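

# Illustrative sketch (not part of the original module): the two directional
# scores are combined with a harmonic mean, which penalizes imbalance between
# the directions more strongly than an arithmetic mean would.
def _example_harmonic_combination(c_gt_onto_prop=0.8, c_prop_onto_gt=0.4):
    if (c_gt_onto_prop <= 0) or (c_prop_onto_gt <= 0):
        return 0
    return scipy.stats.hmean([c_gt_onto_prop, c_prop_onto_gt])  # -> ~0.533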
################################################################################
def gather_files(truth_dir, prop_dir,
im_dir='',
max_files=1000,
gt_subgraph_filter_weight='length',
gt_min_subgraph_length=5,
speed_key='inferred_speed_mps',
travel_time_key='travel_time_s',
                 verbose=False,
n_threads=12):
"""
Build lists of ground truth and proposal graphs
Arguments
---------
truth_dir : str
Location of ground truth graphs.
prop_dir : str
Location of proposal graphs.
im_dir : str
Location of image files. Defaults to ``''``.
max_files : int
Maximum number of files to analyze. Defaults to ``1000``.
gt_subgraph_filter_weight : str
Edge key for filtering ground truth edge length.
Defaults to ``'length'``.
gt_min_subgraph_length : float
Minimum length of the edge. Defaults to ``5``.
speed_key : str
Edge key for speed. Defaults to ``'inferred_speed_mps'``.
travel_time_key : str
Edge key for travel time. Defaults to ``'travel_time_s'``.
    verbose : boolean
        Switch to print relevant values to screen. Defaults to ``False``.
    n_threads : int
        Number of worker processes to use when gathering. Defaults to ``12``.
Returns
-------
    gt_list, gp_list, root_list, im_loc_list : tuple
        gt_list is a list of ground truth graphs.
        gp_list is a list of proposal graphs.
        root_list is a list of names.
        im_loc_list is the location of the images corresponding to root_list.
"""
def get_file_by_id(id, dir, ext):
"""Get filename from {dir} by image {id} with certain {ext}ension."""
file_list = [f for f in os.listdir(dir) if f.endswith(id+ext)]
if len(file_list) == 0:
# raise ValueError(f'img id {id} not found in dir {dir}')
return None
elif len(file_list) > 1:
raise ValueError(f'Duplicated img id {id} in dir {dir}',
f'filename list: {file_list}')
return file_list[0]
###################
gt_list, gp_list, root_list, im_loc_list = [], [], [], []
###################
# use ground truth spacenet geojsons, and submission pkl files
valid_road_types = set([]) # assume no road type in geojsons
name_list = [f for f in os.listdir(truth_dir) if f.endswith('.geojson')]
# truncate until max_files
name_list = name_list[:max_files]
i_list = list(range(len(name_list)))
if n_threads is not None:
n_threads = min(n_threads, len(name_list))
print(f"Checking valid scoring pairs from {len(name_list)} ground truths ...")
# for i, f in tqdm(enumerate(name_list), total=len(name_list)):
def get_valid_pairs(i, f):
'''Helper function for parallel multi-processing.
i : int
index of enumerate(name_list)
f : str
filename from truth_dir, element in name_list '''
# skip non-geojson files
if not f.endswith('.geojson'):
return None, None, None, None
# ground-truth file
gt_file = os.path.join(truth_dir, f)
imgid = f.split('.')[0].split('_')[-1] # in 'img???' format
# reference image file
im_file = get_file_by_id(imgid, im_dir, '.tif')
if im_file is None:
return None, None, None, None
outroot = im_file.split('.')[0]
im_file = os.path.join(im_dir, im_file)
# proposal file
prop_file = get_file_by_id(imgid, prop_dir, '.gpickle')
if prop_file is None:
return None, None, None, None
prop_file = os.path.join(prop_dir, prop_file)
#########
# ground truth
osmidx, osmNodeidx = 10000, 10000
G_gt_init, G_gt_raw = \
_create_gt_graph(gt_file, im_file, network_type='all_private',
valid_road_types=valid_road_types,
subgraph_filter_weight=gt_subgraph_filter_weight,
min_subgraph_length=gt_min_subgraph_length,
osmidx=osmidx, osmNodeidx=osmNodeidx,
speed_key=speed_key,
travel_time_key=travel_time_key,
verbose=verbose)
# # skip empty ground truth graphs
# if len(G_gt_init.nodes()) == 0:
# continue
if verbose:
# print a node
node = list(G_gt_init.nodes())[-1]
print(node, "gt random node props:", G_gt_init.nodes[node])
# print an edge
edge_tmp = list(G_gt_init.edges())[-1]
try:
props = G_gt_init.edges[edge_tmp[0], edge_tmp[1], 0]
except:
props = G_gt_init.edges[edge_tmp[0], edge_tmp[1], "0"]
print("gt random edge props for edge:", edge_tmp, " = ",
props)
#########
# proposal
G_p_init = nx.read_gpickle(prop_file)
# print a few values
if verbose:
# print a node
try:
node = list(G_p_init.nodes())[-1]
print(node, "prop random node props:",
G_p_init.nodes[node])
# print an edge
edge_tmp = list(G_p_init.edges())[-1]
print("prop random edge props for edge:", edge_tmp,
" = ", G_p_init.edges[edge_tmp[0], edge_tmp[1], 0])
except:
print("Empty proposal graph")
# return (map to reduce)
return G_gt_init, G_p_init, outroot, im_file
# Multiprocessing to accelerate the gathering process.
if n_threads is None:
print("Running in parallel using all threads ...")
else:
print("Running in parallel using {} threads ...".format(n_threads))
map_reduce_res = p_umap(get_valid_pairs, i_list, name_list,
num_cpus=n_threads)
unzipped = list(zip(*map_reduce_res))
# distribute result lists
def filter_none(l):
return [x for x in l if x is not None]
gt_list = filter_none(unzipped[0])
gp_list = filter_none(unzipped[1])
root_list = filter_none(unzipped[2])
im_loc_list = filter_none(unzipped[3])
return gt_list, gp_list, root_list, im_loc_list
###############################################################################
def execute(output_dir, gt_list, gp_list, root_list,
weight='length',
speed_key='inferred_speed_mps',
travel_time_key='travel_time_s',
max_files=1000,
linestring_delta=50,
is_curved_eps=10**3,
max_snap_dist=4,
max_nodes=500,
min_path_length=10,
allow_renaming=True,
verbose=True,
super_verbose=False,
n_threads=12):
"""
Compute APLS for the input data in gt_list, gp_list
Arguments
---------
    output_dir : str
        Dir path to write output files into.
    gt_list : list
        List of ground truth graphs.
    gp_list : list
        List of proposal graphs.
    root_list : list
        List of names corresponding to each graph pair.
weight : str
Edge key determining path length weights. Defaults to ``'length'``.
speed_key : str
Edge key for speed. Defaults to ``'inferred_speed_mps'``.
travel_time_key : str
Edge key for travel time. Defaults to ``'travel_time_s'``.
max_files : int
Maximum number of files to analyze. Defaults to ``1000``.
linestring_delta : float
Distance in meters between linestring midpoints. Defaults to ``50``.
    is_curved_eps : float
        Minimum curvature for injecting nodes (if curvature is less than this
        value, no midpoints will be injected). If < 0, always inject points
        on line, regardless of curvature. Defaults to ``10**3``.
max_snap_dist : float
Maximum distance a node can be snapped onto a graph.
Defaults to ``4``.
    max_nodes : int
        Maximum number of gt nodes to inject midpoints. If there are more
        gt nodes than this, skip midpoints and use this number of points
        to compute APLS.
    min_path_length : float
        Minimum path length to consider for APLS. Defaults to ``10``.
    allow_renaming : boolean
        Switch to rename nodes when injecting nodes into graphs.
        Defaults to ``True``.
verbose : boolean
Switch to print relevant values to screen. Defaults to ``False``.
super_verbose : boolean
Switch to print mucho values to screen. Defaults to ``False``.
Returns
-------
None
"""
# now compute results
C_arr = [["outroot", "APLS", "APLS_gt_onto_prop", "APLS_prop_onto_gt"]]
# make dirs
os.makedirs(output_dir, exist_ok=True)
##################
t0 = time.time()
# truncate until max_files
root_list = root_list[:max_files]
gt_list = gt_list[:max_files]
gp_list = gp_list[:max_files]
if n_threads is not None:
n_threads = min(n_threads, len(root_list))
print(f'Computing scores for {len(root_list)} pairs in total ...')
# for i, [outroot, G_gt_init, G_p_init] in tqdm(
# enumerate(zip(root_list, gt_list, gp_list)), total=len(root_list)):
def compute_score_arr(outroot, G_gt_init, G_p_init):
# get graphs with midpoints and geometry (if small graph)
if len(G_gt_init.nodes()) < 500: # 2000:
G_gt_cp, G_p_cp, G_gt_cp_prime, G_p_cp_prime, \
control_points_gt, control_points_prop, \
all_pairs_lengths_gt_native, all_pairs_lengths_prop_native, \
all_pairs_lengths_gt_prime, all_pairs_lengths_prop_prime \
= make_graphs(G_gt_init, G_p_init,
weight=weight,
speed_key=speed_key,
travel_time_key=travel_time_key,
linestring_delta=linestring_delta,
is_curved_eps=is_curved_eps,
max_snap_dist=max_snap_dist,
allow_renaming=allow_renaming,
verbose=verbose)
# get large graphs and paths
else:
G_gt_cp, G_p_cp, G_gt_cp_prime, G_p_cp_prime, \
control_points_gt, control_points_prop, \
all_pairs_lengths_gt_native, all_pairs_lengths_prop_native, \
all_pairs_lengths_gt_prime, all_pairs_lengths_prop_prime \
= make_graphs_yuge(G_gt_init, G_p_init,
weight=weight,
speed_key=speed_key,
travel_time_key=travel_time_key,
max_nodes=max_nodes,
max_snap_dist=max_snap_dist,
allow_renaming=allow_renaming,
verbose=verbose,
super_verbose=super_verbose)
if verbose:
print("\nlen control_points_gt:", len(control_points_gt))
if len(G_gt_init.nodes()) < 200:
print("G_gt_init.nodes():", G_gt_init.nodes())
print("len G_gt_init.edges():", len(G_gt_init.edges()))
if len(G_gt_cp.nodes()) < 200:
print("G_gt_cp.nodes():", G_gt_cp.nodes())
print("len G_gt_cp.nodes():", len(G_gt_cp.nodes()))
print("len G_gt_cp.edges():", len(G_gt_cp.edges()))
print("len G_gt_cp_prime.nodes():", len(G_gt_cp_prime.nodes()))
print("len G_gt_cp_prime.edges():", len(G_gt_cp_prime.edges()))
print("\nlen control_points_prop:", len(control_points_prop))
if len(G_p_init.nodes()) < 200:
print("G_p_init.nodes():", G_p_init.nodes())
print("len G_p_init.edges():", len(G_p_init.edges()))
if len(G_p_cp.nodes()) < 200:
print("G_p_cp.nodes():", G_p_cp.nodes())
print("len G_p_cp.nodes():", len(G_p_cp.nodes()))
print("len G_p_cp.edges():", len(G_p_cp.edges()))
print("len G_p_cp_prime.nodes():", len(G_p_cp_prime.nodes()))
if len(G_p_cp_prime.nodes()) < 200:
print("G_p_cp_prime.nodes():", G_p_cp_prime.nodes())
print("len G_p_cp_prime.edges():", len(G_p_cp_prime.edges()))
print("len all_pairs_lengths_gt_native:",
len(dict(all_pairs_lengths_gt_native)))
print("len all_pairs_lengths_gt_prime:",
len(dict(all_pairs_lengths_gt_prime)))
print("len all_pairs_lengths_prop_native",
len(dict(all_pairs_lengths_prop_native)))
print("len all_pairs_lengths_prop_prime",
len(dict(all_pairs_lengths_prop_prime)))
#########################
# Metric
C, C_gt_onto_prop, C_prop_onto_gt = compute_apls_metric(
all_pairs_lengths_gt_native, all_pairs_lengths_prop_native,
all_pairs_lengths_gt_prime, all_pairs_lengths_prop_prime,
control_points_gt, control_points_prop,
min_path_length=min_path_length,
verbose=verbose)
# C_arr.append([outroot, C, C_gt_onto_prop, C_prop_onto_gt])
return [outroot, C, C_gt_onto_prop, C_prop_onto_gt]
# Multiprocessing to accelerate the scoring process.
if n_threads is None:
print("Running in parallel using all threads ...")
else:
print("Running in parallel using {} threads ...".format(n_threads))
map_reduce_res = p_umap(compute_score_arr, root_list, gt_list, gp_list,
num_cpus=n_threads)
C_arr += map_reduce_res # append results below header
# print and save total cost
tf = time.time()
if verbose:
print(("Time to compute metric:", tf - t0, "seconds"))
print(("N input images:", len(root_list)))
# save to csv
path_csv = os.path.join(output_dir, 'scores_weight='+str(weight)+'.csv')
df = pd.DataFrame(C_arr[1:], columns=C_arr[0])
df.to_csv(path_csv)
print("Weight is " + str(weight))
print("Mean APLS = ", np.mean(df['APLS'].values))
################################################################################
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--output_dir', default='./results', type=str,
help='Dir path to write output files into')
parser.add_argument('--truth_dir', default='', type=str,
help='Location of ground truth graphs')
parser.add_argument('--prop_dir', default='', type=str,
help='Location of proposal graphs')
parser.add_argument('--im_dir', default='', type=str,
help='Location of images (optional)')
parser.add_argument('--max_snap_dist', default=4, type=int,
help='Buffer distance (meters) around graph')
parser.add_argument('--linestring_delta', default=50, type=int,
help='Distance between midpoints on edges')
parser.add_argument('--is_curved_eps', default=-1, type=float,
help='Line curvature above which midpoints will be'
' injected, (< 0 to inject midpoints on straight'
' lines). 0.12 is a good value if not all lines are '
' to be used')
parser.add_argument('--min_path_length', default=0.001, type=float,
help='Minimum path length to consider for metric')
parser.add_argument('--max_nodes', default=1000, type=int,
help='Maximum number of nodes to compare for APLS'
' metric')
parser.add_argument('--max_files', default=1000, type=int,
help='Maximum number of graphs to analyze')
parser.add_argument('--weight', default='length', type=str,
                        help='Weight for APLS metric [length, travel_time_s]')
parser.add_argument('--speed_key', default='inferred_speed_mps', type=str,
help='Key in edge properties for speed')
parser.add_argument('--travel_time_key', default='travel_time_s', type=str,
help='Key in edge properties for travel_time')
parser.add_argument('--allow_renaming', default=1, type=int,
help='Switch to rename nodes. Defaults to 1 (True)')
parser.add_argument('--n_threads', default=None, type=int,
help='desired number of threads for multi-proc')
args = parser.parse_args()
    # Filtering parameters (shouldn't need to be changed)
args.gt_subgraph_filter_weight = 'length'
args.gt_min_subgraph_length = 5
args.prop_subgraph_filter_weight = 'length_pix'
args.prop_min_subgraph_length = 10 # GSD = 0.3
# general settings
verbose = False
super_verbose = False
# Gather files
gt_list, gp_list, root_list, _ = gather_files(
args.truth_dir,
args.prop_dir,
im_dir=args.im_dir,
max_files=args.max_files,
gt_subgraph_filter_weight=args.gt_subgraph_filter_weight,
gt_min_subgraph_length=args.gt_min_subgraph_length,
speed_key=args.speed_key,
travel_time_key=args.travel_time_key,
verbose=verbose,
n_threads=args.n_threads)
# Compute
execute(
args.output_dir, gt_list, gp_list, root_list,
weight=args.weight,
speed_key=args.speed_key,
travel_time_key=args.travel_time_key,
max_files=args.max_files,
linestring_delta=args.linestring_delta,
is_curved_eps=args.is_curved_eps,
max_snap_dist=args.max_snap_dist,
max_nodes=args.max_nodes,
min_path_length=args.min_path_length,
allow_renaming=bool(args.allow_renaming),
verbose=verbose,
super_verbose=super_verbose,
n_threads=args.n_threads)
if __name__ == "__main__":
main()
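
# Example invocation (illustrative, hypothetical paths; adjust to your data layout):
#   python apls.py --truth_dir /data/gt_geojsons --prop_dir /data/prop_gpickles \
#       --im_dir /data/images --output_dir ./results --weight length --n_threads 8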
|
py | 7dfbdb5ee6dcd3a6fd3a072ce6f6a09d9e05d4e3 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
cluster_dot_dithering
---------------------
:copyright: 2016-09-09 by hbldh <[email protected]>
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import numpy as np
_CLUSTER_DOT_MATRICES = {
4: np.array([[12, 5, 6, 13], [4, 0, 1, 7], [11, 3, 2, 8], [15, 10, 9, 14]], "float")
/ 16.0,
8: np.array(
[
[24, 10, 12, 26, 35, 47, 49, 37],
[8, 0, 2, 14, 45, 59, 61, 51],
[22, 6, 4, 16, 43, 57, 63, 53],
[30, 20, 18, 28, 33, 41, 55, 39],
[34, 46, 48, 36, 25, 11, 13, 27],
            [44, 58, 60, 50, 9, 1, 3, 15],
[42, 56, 62, 52, 23, 7, 5, 17],
[32, 40, 54, 38, 31, 21, 19, 29],
],
"float",
)
/ 64.0,
(5, 3): np.array([[9, 3, 0, 6, 12], [10, 4, 1, 7, 13], [11, 5, 2, 8, 14]], "float")
/ 15.0,
}
def cluster_dot_dithering(image, palette, thresholds, order=4):
"""Render the image using the ordered Bayer matrix dithering pattern.
Reference: http://caca.zoy.org/study/part2.html
:param :class:`PIL.Image` image: The image to apply the
ordered dithering to.
:param :class:`~hitherdither.colour.Palette` palette: The palette to use.
:param thresholds: Thresholds to apply dithering at.
    :param int order: The size of the cluster dot matrix.
    :return: The cluster-dot dithered PIL image of type "P"
        using the input palette.
"""
cluster_dot_matrix = _CLUSTER_DOT_MATRICES.get(order)
if cluster_dot_matrix is None:
        raise NotImplementedError("Only orders 4, 8 and (5, 3) are implemented as of yet.")
ni = np.array(image, "uint8")
thresholds = np.array(thresholds, "uint8")
xx, yy = np.meshgrid(range(ni.shape[1]), range(ni.shape[0]))
xx %= order
yy %= order
factor_threshold_matrix = (
np.expand_dims(cluster_dot_matrix[yy, xx], axis=2) * thresholds
)
new_image = ni + factor_threshold_matrix
return palette.create_PIL_png_from_rgb_array(new_image)
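

# Illustrative usage sketch (not part of the original module), assuming the
# surrounding hitherdither package; the input file name and threshold values
# are hypothetical. Kept as comments so the module remains import-safe.
#
#   from PIL import Image
#   import hitherdither
#
#   img = Image.open("input.png").convert("RGB")
#   palette = hitherdither.palette.Palette.create_by_median_cut(img)
#   dithered = cluster_dot_dithering(img, palette, thresholds=[64, 64, 64],
#                                    order=8)
#   dithered.save("output.png")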
|
py | 7dfbdc27d0c1ec8b3d43e25b07687e1d244036f5 | def melhores(p,df):
"""Função que retorna os melhores jogadores de determinada posição.
Parameters
----------
p : str
String contendo a posição desejada.
df : pandas.core.frame.dataframe
Dataframe.
Returns
-------
list
Lista com os melhores jogadores da posição desejada.
"""
if p == "Atacante":
a = df[df["Position"]== "Atacante"]["Name"].iloc[0]
b = df[df["Position"]== "Ponta Direita"]["Name"].iloc[0]
c = df[df["Position"]== "Ponta Esquerdo"]["Name"].iloc[0]
return [a, b, c]
elif p == "Meia":
a = df[df["Position"]== "Meia Atacante"]["Name"].iloc[0]
b = df[df["Position"]== "Meia Direita"]["Name"].iloc[0]
c = df[df["Position"]== "Meia Esquerda"]["Name"].iloc[0]
return [a, b, c]
elif p == "Lateral":
a = df[df["Position"]== "Lateral Direito"]["Name"].iloc[0]
b = df[df["Position"]== "Lateral Esquerdo"]["Name"].iloc[0]
return [a, b]
elif p == "Zagueiro":
a = df[df["Position"]== "Zagueiro Direito"]["Name"].iloc[0]
b = df[df["Position"]== "Zagueiro Esquerdo"]["Name"].iloc[0]
return [a, b]
elif p == "Goleiro":
a = df[df["Position"]== "Goleiro"]["Name"].iloc[0]
return a
else:
return "Essa posição não existe!"
def melhor_time_atual():
"""Função que retorna uma lista com o melhor time atual, de acordo com o overall e independente do preço.
Returns
-------
list
Lista com os jogadores do time.
"""
gol = melhores("Goleiro",df_fifa)
zag1, zag2 = melhores("Zagueiro",df_fifa)
lat1, lat2 = melhores("Lateral",df_fifa)
mei1, mei2, mei3 = melhores("Meia",df_fifa)
ata1, ata2, ata3 = melhores("Atacante",df_fifa)
return [gol, zag1, zag2, lat1, lat2, mei1, mei2, mei3, ata1, ata2, ata3]
def print_melhor_time(funcao_atual_ou_futuro):
"""Função que imprirmi o melhor time.
Parameters
----------
funcao_atual_ou_futuro : list
Lista contendo um time de jogadores
Returns
-------
str
Texto organizando o time por posição
"""
gol, zag1, zag2, lat1, lat2, mei1, mei2, mei3, ata1, ata2, ata3 = funcao_atual_ou_futuro
return f"""O melhor time é formado por:
-> Goleiro-O melhor goleiro será o {gol}
-> Zagueiro-A melhor dupla de zagueiro será {zag1} e {zag2}
-> Laterais-A melhor dupla de lateral será {lat1} e {lat2}
-> Meias-O melhor meia será composto por {mei1}, {mei2} e {mei3}
-> Atacantes->O melhor ataque será composto por {ata1}, {ata2} e {ata3}"""
def melhor_time_futuro():
"""Função que retorna uma string com o melhor time atual, de acordo com o potencial.
Returns
-------
str
String com os jogadores do time.
"""
gol = melhores("Goleiro",df_fifa_novo)
zag1, zag2 = melhores("Zagueiro",df_fifa_novo)
lat1, lat2 = melhores("Lateral",df_fifa_novo)
mei1, mei2, mei3 = melhores("Meia",df_fifa_novo)
ata1, ata2, ata3 = melhores("Atacante",df_fifa_novo)
return [gol, zag1, zag2, lat1, lat2, mei1, mei2, mei3, ata1, ata2, ata3]
def valor_total_time(funcao_atual_ou_futuro):
"""Função que retorna o valor total do time.
Parameters
----------
funcao_atual_ou_futuro : function
Função que retorna os jogadores para calcular o valor da soma dos valores individuais de cada um.
Returns
-------
float
Preço total do time.
"""
dataframe = df_fifa[df_fifa["Name"].isin(funcao_atual_ou_futuro)]
sli = 11 - len(dataframe)
dataframe = dataframe[:sli].reset_index()
return dataframe["Value"].sum()
def porcentagem_canhoto(num,df):
"""Esta função calcula a porcentagem de canhotos entre os num jogadores mais bem avaliados.
Parameters
----------
num : int
Quantidade num de jogadores mais bem avaliados.
df : pandas.core.frame.dataframe
Dataframe.
Returns
-------
str
String com a porcentagem de jogadores canhotos entre os num melhores.
"""
try:
if num>len(df):
print("Essa quuantidade de jogadores é maior do que a existente no dataframe!")
a = df["Preferred_Foot"].iloc[0:num+1]
s = 0
for i in a:
if i == "Left":
s += 1
result = (s/num)*100
return f"A porcentagem dos canhotos em relação aos {num} mais bem avaliados é de {round(result,2)}%"
except TypeError:
print("A variável num precisa ser do tipo int!")
return "A variável num precisa ser do tipo int!"
|
py | 7dfbdc920533ce031917b45e95ac138a1ecbf985 | #!/usr/bin/env python
u"""
predict_tide.py (09/2020)
Predict tides at a single time using harmonic constants
CALLING SEQUENCE:
ht = predict_tide(t,hc,con)
INPUTS:
t: days relative to Jan 1, 1992 (48622mjd)
hc: harmonic constant vector (complex)
constituents: tidal constituent IDs
OUTPUT:
ht: tide values reconstructed using the nodal corrections
OPTIONS:
DELTAT: time correction for converting to Ephemeris Time (days)
CORRECTIONS: use nodal corrections from OTIS/ATLAS or GOT models
PYTHON DEPENDENCIES:
numpy: Scientific Computing Tools For Python
https://numpy.org
https://numpy.org/doc/stable/user/numpy-for-matlab-users.html
PROGRAM DEPENDENCIES:
load_constituent.py: loads parameters for a given tidal constituent
load_nodal_corrections.py: loads nodal corrections for tidal constituents
UPDATE HISTORY:
Updated 09/2020: append output mask over each constituent
Updated 08/2020: change time variable names to not overwrite functions
Updated 07/2020: added function docstrings
Updated 11/2019: can output an array of heights with a single time stamp
such as for estimating tide height maps from imagery
Updated 09/2019: added netcdf option to CORRECTIONS option
Updated 08/2018: added correction option ATLAS for localized OTIS solutions
Updated 07/2018: added option to use GSFC GOT nodal corrections
Updated 09/2017: Rewritten in Python
"""
import numpy as np
from pyTMD.load_constituent import load_constituent
from pyTMD.load_nodal_corrections import load_nodal_corrections
def predict_tide(t,hc,constituents,DELTAT=0.0,CORRECTIONS='OTIS'):
"""
Predict tides at a single time using harmonic constants
Arguments
---------
t: days relative to 1992-01-01T00:00:00
hc: harmonic constant vector (complex)
constituents: tidal constituent IDs
Keyword arguments
-----------------
DELTAT: time correction for converting to Ephemeris Time (days)
CORRECTIONS: use nodal corrections from OTIS/ATLAS or GOT models
Returns
-------
ht: tide values reconstructed using the nodal corrections
"""
#-- number of points and number of constituents
npts,nc = np.shape(hc)
#-- load the nodal corrections
#-- convert time to Modified Julian Days (MJD)
pu,pf,G = load_nodal_corrections(t + 48622.0, constituents,
DELTAT=DELTAT, CORRECTIONS=CORRECTIONS)
#-- allocate for output tidal elevation
ht = np.ma.zeros((npts))
    ht.mask = np.zeros((npts), dtype=bool)
#-- for each constituent
for k,c in enumerate(constituents):
if CORRECTIONS in ('OTIS','ATLAS','netcdf'):
#-- load parameters for each constituent
amp,ph,omega,alpha,species = load_constituent(c)
#-- add component for constituent to output tidal elevation
th = omega*t*86400.0 + ph + pu[0,k]
elif CORRECTIONS in ('GOT','FES'):
th = G[0,k]*np.pi/180.0 + pu[0,k]
#-- sum over all tides
ht.data[:] += pf[0,k]*hc.real[:,k]*np.cos(th) - \
pf[0,k]*hc.imag[:,k]*np.sin(th)
ht.mask[:] |= (hc.real.mask[:,k] | hc.imag.mask[:,k])
#-- return the tidal elevation after removing singleton dimensions
return np.squeeze(ht)
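#-- Minimal usage sketch (illustrative only): the amplitudes, phases and time
#-- below are made-up placeholder values, not output from any tide model; they
#-- only demonstrate the expected shapes and call signature.
if __name__ == '__main__':
    #-- one point, two constituents (m2 and s2)
    example_amp = np.array([[1.0, 0.3]])
    example_ph = np.array([[30.0, 60.0]])*np.pi/180.0
    example_hc = np.ma.array(example_amp*np.exp(-1j*example_ph),
        mask=np.zeros((1,2), dtype=bool))
    #-- time in days relative to 1992-01-01T00:00:00
    example_t = np.array([10000.5])
    example_tide = predict_tide(example_t, example_hc, ['m2','s2'],
        CORRECTIONS='OTIS')
    print(example_tide)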
|
py | 7dfbdd02d3545eb31fb86cda9c48bb69294ebc64 | from tesseract.tesseract_common import FilesystemPath, Isometry3d, Translation3d, Quaterniond, \
ManipulatorInfo
from tesseract.tesseract_environment import Environment
from tesseract.tesseract_scene_graph import SimpleResourceLocator, SimpleResourceLocatorFn
from tesseract.tesseract_command_language import CartesianWaypoint, Waypoint, \
PlanInstructionType_FREESPACE, PlanInstructionType_START, PlanInstruction, Instruction, \
CompositeInstruction, flatten
from tesseract.tesseract_process_managers import ProcessPlanningServer, ProcessPlanningRequest, \
FREESPACE_PLANNER_NAME
import os
import re
import traceback
from tesseract_viewer import TesseractViewer
import numpy as np
import time
import sys
TESSERACT_SUPPORT_DIR = os.environ["TESSERACT_SUPPORT_DIR"]
def _locate_resource(url):
try:
url_match = re.match(r"^package:\/\/tesseract_support\/(.*)$",url)
if (url_match is None):
return ""
if not "TESSERACT_SUPPORT_DIR" in os.environ:
return ""
tesseract_support = os.environ["TESSERACT_SUPPORT_DIR"]
return os.path.join(tesseract_support, os.path.normpath(url_match.group(1)))
except:
traceback.print_exc()
abb_irb2400_urdf_fname = FilesystemPath(os.path.join(TESSERACT_SUPPORT_DIR,"urdf","abb_irb2400.urdf"))
abb_irb2400_srdf_fname = FilesystemPath(os.path.join(TESSERACT_SUPPORT_DIR,"urdf","abb_irb2400.srdf"))
t_env = Environment()
# locator_fn must be kept alive by maintaining a reference
locator_fn = SimpleResourceLocatorFn(_locate_resource)
t_env.init(abb_irb2400_urdf_fname, abb_irb2400_srdf_fname, SimpleResourceLocator(locator_fn))
manip_info = ManipulatorInfo()
manip_info.manipulator = "manipulator"
viewer = TesseractViewer()
viewer.update_environment(t_env, [0,0,0])
joint_names = ["joint_%d" % (i+1) for i in range(6)]
viewer.update_joint_positions(joint_names, np.array([1,-.2,.01,.3,-.5,1]))
viewer.start_serve_background()
t_env.setState(joint_names, np.ones(6)*0.1)
wp1 = CartesianWaypoint(Isometry3d.Identity() * Translation3d(.6,-.8,0.6) * Quaterniond(0,0,1.0,0))
wp2 = CartesianWaypoint(Isometry3d.Identity() * Translation3d(.4,.8,1.5) * Quaterniond(0.7071,0,0.7071,0))
start_instruction = PlanInstruction(Waypoint(wp1), PlanInstructionType_START, "DEFAULT")
plan_f1 = PlanInstruction(Waypoint(wp2), PlanInstructionType_FREESPACE, "DEFAULT")
program = CompositeInstruction("DEFAULT")
program.setStartInstruction(Instruction(start_instruction))
program.setManipulatorInfo(manip_info)
program.append(Instruction(plan_f1))
planning_server = ProcessPlanningServer(t_env, 1)
planning_server.loadDefaultProcessPlanners()
request = ProcessPlanningRequest()
request.name = FREESPACE_PLANNER_NAME
request.instructions = Instruction(program)
response = planning_server.run(request)
planning_server.waitForAll()
assert response.interface.isSuccessful()
results = flatten(response.getResults().cast_CompositeInstruction())
viewer.update_trajectory(results)
if sys.version_info[0] < 3:
    raw_input("press enter")
else:
    input("press enter")
|
py | 7dfbdd2af5ee6d428972a702bcac72e724b5be94 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import setuptools
from zana import version
requirements = [
]
test_requirements = [
"pytest",
]
setuptools.setup(
name="zana",
version=version.__version__,
description="",
long_description="",
author="Sam Nicholls",
author_email="[email protected]",
maintainer="Sam Nicholls",
maintainer_email="[email protected]",
packages=setuptools.find_packages(),
install_requires=requirements,
entry_points = {
},
test_suite="tests",
tests_require=test_requirements,
)
|
py | 7dfbde09e42950bbd35d7f575da35cede36fd141 | from .i18n_subsites import *
|
py | 7dfbdea4554cdbe60a83a0811c8f5fee422f4fdc | import uuid
from sqlalchemy.dialects.postgresql import UUID
from sqlalchemy.schema import FetchedValue
from app.api.utils.models_mixins import Base, AuditMixin
from app.extensions import db
from sqlalchemy.ext.associationproxy import association_proxy
class MineReportSubmission(Base, AuditMixin):
__tablename__ = "mine_report_submission"
mine_report_submission_id = db.Column(db.Integer,
primary_key=True,
server_default=FetchedValue())
mine_report_submission_guid = db.Column(UUID(as_uuid=True), server_default=FetchedValue())
mine_report_id = db.Column(db.Integer, db.ForeignKey('mine_report.mine_report_id'))
mine_report_submission_status_code = db.Column(
db.String,
db.ForeignKey('mine_report_submission_status_code.mine_report_submission_status_code'))
submission_date = db.Column(db.DateTime)
documents = db.relationship(
'MineDocument', lazy='selectin', secondary='mine_report_document_xref')
comments = db.relationship(
'MineReportComment',
order_by='MineReportComment.comment_datetime',
primaryjoin="and_(MineReportComment.mine_report_submission_id == MineReportSubmission.mine_report_submission_id, MineReportComment.deleted_ind==False)",
lazy='joined')
report = db.relationship('MineReport', lazy='joined')
mine_report_guid = association_proxy('report', 'mine_report_guid')
def __repr__(self):
return '<MineReportSubmission %r>' % self.mine_report_submission_guid
@classmethod
def find_latest_by_mine_report_guid(cls, _id):
try:
uuid.UUID(_id, version=4)
return cls.query.filter_by(mine_report_guid=_id).order_by(cls.mine_report_submission_id.desc()).first()
except ValueError:
return None
@classmethod
def find_by_mine_report_guid(cls, _id):
try:
uuid.UUID(_id, version=4)
return cls.query.filter_by(mine_report_guid=_id).all()
except ValueError:
return None
@classmethod
def find_by_guid(cls, _id):
try:
uuid.UUID(_id, version=4)
return cls.query.filter_by(mine_report_submission_guid=_id).first()
except ValueError:
return None
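# Illustrative lookups (some_report_guid / some_submission_guid are hypothetical
# placeholder variables holding real GUID strings):
#   MineReportSubmission.find_latest_by_mine_report_guid(str(some_report_guid))
#   MineReportSubmission.find_by_guid(str(some_submission_guid))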
|
py | 7dfbdef63b0acc8d2f187c7bcd21501569a39d9a | # -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
from qiskit_metal import Dict
import math
from scipy.spatial import distance
import os
import gdspy
import geopandas
import shapely
from shapely.geometry import LineString as LineString
from copy import deepcopy
from operator import itemgetter
from typing import TYPE_CHECKING
from typing import Dict as Dict_
from typing import List, Tuple, Union, Any, Iterable
import pandas as pd
from pandas.api.types import is_numeric_dtype
import numpy as np
from qiskit_metal.renderers.renderer_base import QRenderer
from qiskit_metal.toolbox_metal.parsing import is_true
from qiskit_metal import config
if not config.is_building_docs():
from qiskit_metal.toolbox_python.utility_functions import can_write_to_path
from qiskit_metal.toolbox_python.utility_functions import get_range_of_vertex_to_not_fillet
if TYPE_CHECKING:
# For linting typechecking, import modules that can't be loaded here under normal conditions.
# For example, I can't import QDesign, because it requires Qrenderer first. We have the
# chicken and egg issue.
from qiskit_metal.designs import QDesign
class QSkeletonRenderer(QRenderer):
"""Extends QRenderer to create new Skeleton QRenderer. This QRenderer will print to
a file the number_of_bones and the names of QGeometry tables that will be used to export
the QComponents the user highlighted.
"""
    #: Default options, over-written by passing ``options`` dict to render_options.
#: Type: Dict[str, str]
default_options = Dict(
# An option unique to QSkeletonRenderer.
number_of_bones='206',)
"""Default options"""
name = 'skeleton'
"""Name used in Metal code to refer to this QRenderer."""
# When additional columns are added to QGeometry, this is the example to populate it.
# e.g. element_extensions = dict(
# base=dict(color=str, klayer=int),
# path=dict(thickness=float, material=str, perfectE=bool),
# poly=dict(thickness=float, material=str), )
# Add columns to junction table during QGDSRenderer.load()
# element_extensions is now being populated as part of load().
# Determined from element_table_data.
# Dict structure MUST be same as element_extensions!!!!!!
# This dict will be used to update QDesign during init of renderer.
# Keeping this as a cls dict so could be edited before renderer is instantiated.
# To update component.options junction table.
element_table_data = dict(
# Example of adding a column named "skeleton_a_column_name"
# with default values of "a_default_value" to the junction table.
# Note: QSkeletonRenderer.name is prefixed to "a_column_name" when the table is appended by QComponents.
junction=dict(a_column_name='a_default_value'))
"""element extensions dictionary element_extensions = dict() from base class"""
def __init__(self,
design: 'QDesign',
initiate=True,
render_template: Dict = None,
render_options: Dict = None):
"""Create a QRenderer for GDS interface: export and import.
Args:
design (QDesign): Use QGeometry within QDesign to obtain elements.
initiate (bool, optional): True to initiate the renderer. Defaults to True.
render_template (Dict, optional): Typically used by GUI for template options for GDS. Defaults to None.
render_options (Dict, optional): Used to overide all options. Defaults to None.
"""
super().__init__(design=design,
initiate=initiate,
render_template=render_template,
render_options=render_options)
QSkeletonRenderer.load()
# Updated each time write_qgeometry_table_names_to_file() is called.
self.chip_info = dict()
    # For a skeleton_renderer user, this is kept to exemplify self.logger.warning.
def _can_write_to_path(self, file: str) -> int:
"""Check if can write file.
Args:
file (str): Has the path and/or just the file name.
Returns:
int: 1 if access is allowed. Else returns 0, if access not given.
"""
status, directory_name = can_write_to_path(file)
if status:
return 1
        self.logger.warning(f'Not able to write to directory. '
                            f'File:"{file}" not written.'
                            f' Checked directory:"{directory_name}".')
return 0
def check_qcomps(self,
                     highlight_qcomponents: list = []) -> Tuple[list, int]:
        """Confirm the list doesn't have names of components repeated.
        Confirm that the name of each component exists in QDesign.
Args:
highlight_qcomponents (list, optional): List of strings which denote the name of QComponents to render.
Defaults to []. Empty list means to render entire design.
Returns:
Tuple[list, int]:
list: Unique list of QComponents to render.
int: 0 if all ended well. Otherwise, 1 if QComponent name not in design.
"""
# Remove identical QComponent names.
unique_qcomponents = list(set(highlight_qcomponents))
# Confirm all QComponent are in design.
for qcomp in unique_qcomponents:
if qcomp not in self.design.name_to_id:
self.logger.warning(
f'The component={qcomp} in highlight_qcomponents not'
' in QDesign. The GDS data not generated.')
return unique_qcomponents, 1
# For Subtraction bounding box.
# If list passed to export is the whole chip, then want to use the bounding box from design planar.
        # If list is subset of chip, then calculate a custom bounding box and scale it.
if len(unique_qcomponents) == len(self.design._components):
# Since user wants all of the chip to be rendered, use the design.planar bounding box.
unique_qcomponents[:] = []
return unique_qcomponents, 0
def get_qgeometry_tables_for_skeleton(self,
highlight_qcomponents: list = []
) -> Tuple[int, list]:
"""Using self.design, this method does the following:
1. Gather the QGeometries to be used to write to file.
        Duplicate names in highlight_qcomponents will be removed without warning.
Args:
highlight_qcomponents (list): List of strings which denote the name of QComponents to render.
            If empty, render all components in design.
            If QComponent names are duplicated, duplicates will be ignored.
Returns:
Tuple[int, list]:
int: 0 if all ended well. Otherwise, 1 if QComponent name(s) not in design.
                list: The names of QGeometry tables used for highlight_qcomponents.
"""
unique_qcomponents, status = self.check_qcomps(highlight_qcomponents)
table_names_for_highlight = list()
if status == 1:
return 1, table_names_for_highlight
for chip_name in self.chip_info:
for table_name in self.design.qgeometry.get_element_types():
# Get table for chip and table_name, and reduce to keep just the list of unique_qcomponents.
table = self.get_table(table_name, unique_qcomponents,
chip_name)
# A place where a logic can happen, for each table, within a chip.
# Demo for skeleton QRenderer.
if len(table) != 0:
table_names_for_highlight.append(table_name + '\n')
return 0, table_names_for_highlight
def get_table(self, table_name: str, unique_qcomponents: list,
chip_name: str) -> geopandas.GeoDataFrame:
"""If unique_qcomponents list is empty, get table using table_name from QGeometry tables
for all elements with table_name. Otherwise, return a table with fewer elements, for just the
qcomponents within the unique_qcomponents list.
Args:
table_name (str): Can be "path", "poly", etc. from the QGeometry tables.
unique_qcomponents (list): User requested list of component names to export to GDS file.
Returns:
geopandas.GeoDataFrame: Table of elements within the QGeometry.
"""
# self.design.qgeometry.tables is a dict. key=table_name, value=geopandas.GeoDataFrame
if len(unique_qcomponents) == 0:
table = self.design.qgeometry.tables[table_name]
else:
table = self.design.qgeometry.tables[table_name]
# Convert string QComponent.name to QComponent.id
highlight_id = [
self.design.name_to_id[a_qcomponent]
for a_qcomponent in unique_qcomponents
]
# Remove QComponents which are not requested.
table = table[table['component'].isin(highlight_id)]
table = table[table['chip'] == chip_name]
return table
def write_qgeometry_table_names_to_file(self,
file_name: str,
highlight_qcomponents: list = []
) -> int:
"""Obtain the names of the QGeometry Pandas tables and write them to a file.
The names will be for qcomponents that were selected or all of the qcomponents within
the qdesign.
Args:
file_name (str): File name which can also include directory path.
If the file exists, it will be overwritten.
highlight_qcomponents (list): List of strings which denote the name of QComponents to render.
If empty, render all qcomponents in qdesign.
Returns:
int: 0=file_name can not be written, otherwise 1=file_name has been written
"""
if not self._can_write_to_path(file_name):
return 0
self.chip_info.clear()
# Just for demo, a new plug-in may not need this.
self.chip_info.update(self.get_chip_names())
status, table_names_used = self.get_qgeometry_tables_for_skeleton(
highlight_qcomponents)
# The method parse_value, returns a float.
total_bones = str(int(self.parse_value(self.options.number_of_bones)))
total_bones_text = 'Number of bones: ' + total_bones + '\n'
        if status == 0:
            with open(file_name, 'w') as skeleton_out:
                skeleton_out.writelines(total_bones_text)
                skeleton_out.writelines(table_names_used)
            return 1
        else:
            return 0
def get_chip_names(self) -> Dict:
""" Returns a dict of unique chip names for ALL tables within QGeometry.
        In other words, for every "path" table, "poly" table ... etc, this method will search for unique
chip names and return a dict of unique chip names from QGeometry table.
Returns:
Dict: dict with key of chip names and value of empty dict to hold things for renderers.
"""
chip_names = Dict()
for table_name in self.design.qgeometry.get_element_types():
table = self.design.qgeometry.tables[table_name]
names = table['chip'].unique().tolist()
chip_names += names
unique_list = list(set(chip_names))
unique_dict = Dict()
for chip in unique_list:
unique_dict[chip] = Dict()
return unique_dict
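# Hypothetical usage sketch (assumes an existing QDesign instance named `design`
# containing components named 'Q1' and 'Q2'; those names are placeholders):
#
#   skeleton = QSkeletonRenderer(design)
#   skeleton.options.number_of_bones = '206'
#   skeleton.write_qgeometry_table_names_to_file('skeleton.txt',
#                                                highlight_qcomponents=['Q1', 'Q2'])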
|
py | 7dfbdf13da60b4fc03f65edb69dc7cff62e58ef5 | from db import db
class ResultModel(db.Model):
__tablename__ = 'RESULT'
id_result = db.Column('id_result', db.Integer, primary_key=True)
gross_score = db.Column('gross_score', db.Numeric(5), nullable=False)
considerate_score = db.Column('considerate_score', db.Numeric(5), nullable=False)
classification = db.Column('classification', db.String(20), nullable=False)
evaluation_test_result_id_evaluation_test = db.Column('fk_evaluation_test',
db.Integer, db.ForeignKey('EVALUATION_TEST.id_evaluation_test'),
unique=True, nullable=False)
def __init__(self, gross_score, considerate_score, classification, evaluation_test_result_id_evaluation_test):
self.gross_score = gross_score
self.considerate_score = considerate_score
self.classification = classification
self.evaluation_test_result_id_evaluation_test = evaluation_test_result_id_evaluation_test
def json(self):
return {
'id_result': self.id_result,
'gross_score': str(self.gross_score),
'considerate_score': str(self.considerate_score),
'classification': self.classification,
}
@classmethod
def find_by_id(cls, id_result):
return cls.query.filter_by(id_result=id_result).first()
def save_to_db(self):
db.session.add(self)
db.session.commit()
def delete_from_db(self):
db.session.delete(self)
db.session.commit()
|
py | 7dfbdf145c9d1c8ec6693fba6e4de21008b85a3a | import numpy as np
class IdentityActivation:
def __init__(self, predecessor):
# Remember what precedes this layer
self.predecessor = predecessor
# The activation function keeps the dimensions of its predecessor
self.input_size = self.predecessor.output_size
self.output_size = self.input_size
# Create an empty matrix to store this layer's last activation
self.activation = np.zeros(self.output_size)
# Initialize weights, if necessary
self.init_weights()
# Will never require a gradient
self.require_gradient = False
# This activation function has no parameters, so pass
def init_weights(self):
pass
    # It also has no gradients, so pass here too
def zero_grad(self):
pass
# During a forward pass, it just passes its input forward unmodified
def forward(self, x, evaluate=False):
# Save a copy of the activation
self.activation = x
return self.activation
    # During backprop it passes the delta on unmodified
def backprop(self, delta, y):
return delta
# It has no parameters
def report_params(self):
return []
class InputLayer(IdentityActivation):
def __init__(self, input_size):
# The size here is determined by the data that's going to be used
self.input_size = (0, input_size)
self.output_size = self.input_size
# Create an empty matrix to store this layer's last activation
self.activation = np.zeros(self.output_size)
class SigmoidActivation(IdentityActivation):
# During a forward pass, it just applies the sigmoid function
def forward(self, x, evaluate=False):
self.activation = 1.0/(1.0+np.exp(-x))
return self.activation
# During backprop, it passes the delta through its derivative
def backprop(self, delta, y):
return delta * self.activation * (1 - self.activation)
class Variable:
def __init__(self, data):
self.data = data
self.shape = self.data.shape
self.grad = np.zeros(self.shape)
# Cheap way of zeroing its gradient
def zero_grad(self):
self.grad *= 0
class DenseLayer(IdentityActivation):
def __init__(self, predecessor, hidden, use_bias=True, require_gradient=True, positive_params=False):
# Remember what precedes this layer
self.predecessor = predecessor
self.input_size = self.predecessor.output_size
self.hidden = hidden
self.output_size = (0, self.hidden)
# It is possible that we don't want a bias term
self.use_bias = use_bias
# If you need non-negative parameters
self.positive_params = positive_params
# Save its activation
self.activation = np.zeros(self.output_size)
# Initialize the weights and biases
self.init_params()
# Most of the time, this layer will use gradients to train itself
# However, this can be disabled manually
self.require_gradient = require_gradient
def zero_grad(self):
self.weight.zero_grad()
if self.use_bias:
self.bias.zero_grad()
def init_params(self):
size_measure = self.input_size[1]
if self.positive_params:
lower, upper = 0., 0.5
else:
lower, upper = -1., 1.
        # Weights are initialized from a uniform distribution scaled by sqrt(2/fan_in)
self.weight = Variable(
np.sqrt(2/size_measure) * np.random.uniform(lower, upper, size=(self.input_size[1], self.hidden))
)
if self.use_bias:
self.bias = Variable(
np.sqrt(2/size_measure) * np.random.uniform(lower, upper, size=(1, self.hidden))
)
# The forward pass is a matrix multiplication, with optional bias
def forward(self, x, evaluate=False):
x = x @ self.weight.data
if self.use_bias:
x += self.bias.data
self.activation = x
return self.activation
    # The delta just needs to be multiplied by the layer's weight
def backprop(self, delta, y):
# Only calculate gradients if it's required
if self.require_gradient:
# The weight update requires the previous layer's activation
self.weight.grad += self.predecessor.activation.transpose() @ delta
# The bias update requires the delta to be "squished"
# This can be done by multiplying by a vector of 1s
if self.use_bias:
self.bias.grad += np.ones((1, delta.shape[0])) @ delta
return delta @ self.weight.data.transpose()
# This DenseLayer is the first example of a layer with parameters
def report_params(self):
if self.use_bias:
return [self.bias, self.weight]
else:
return [self.weight]
class SigmoidNLL(DenseLayer):
# The forward pass is a matrix multiplication, with optional bias
def forward(self, x, evaluate=False):
# The feed forward starts off normal
x = x @ self.weight.data
if self.use_bias:
x += self.bias.data
# It changes when we apply the sigmoid
self.activation = 1.0/(1.0+np.exp(-x))
return self.activation
# The delta is started here
def backprop(self, delta, y):
# Starting the delta
delta = self.activation - y
# The update is the same as a DenseLayer
self.weight.grad += self.predecessor.activation.transpose() @ delta
if self.use_bias:
self.bias.grad += np.ones((1, delta.shape[0])) @ delta
# The delta is passed backwards like a Denselayer
return delta @ self.weight.data.transpose()
class SoftmaxCrossEntropy(DenseLayer):
# The forward pass is a matrix multiplication, with optional bias
def forward(self, x, evaluate=False):
# The feed forward starts off normal
x = x @ self.weight.data
if self.use_bias:
x += self.bias.data
# It changes when we apply the sigmoid
softmax_sum = np.exp(x) @ np.ones((self.hidden, 1))
self.activation = np.exp(x) / softmax_sum
return self.activation
# The delta is started here
def backprop(self, delta, y):
# Starting the delta
delta = self.activation - y
# The update is the same as a DenseLayer
self.weight.grad += self.predecessor.activation.transpose() @ delta
if self.use_bias:
self.bias.grad += np.ones((1, delta.shape[0])) @ delta
# The delta is passed backwards like a Denselayer
return delta @ self.weight.data.transpose()
# This class stores a list of layers for training
class NeuralNetwork:
def __init__(self):
self.layers = []
def feed_forward(self, x):
for layer in self.layers:
x = layer.forward(x)
return x
# When you want the model to know you're not training
def evaluate(self, x):
for layer in self.layers:
x = layer.forward(x, evaluate=True)
return x
def back_propagation(self, y):
delta = np.zeros((0,0))
for layer in reversed(self.layers):
delta = layer.backprop(delta, y)
    # Note: step() expects each layer to define an update() method; the layers in this
    # module do not, so parameter updates are normally driven by the optimizer classes below.
    def step(self,lr):
        for layer in self.layers:
            layer.update(lr)
def zero_gradients(self):
for layer in self.layers:
layer.zero_grad()
class SGDOptimizer:
def __init__(self, list_of_layers):
self.list_of_layers = list_of_layers
self.list_of_variables = []
for layer in self.list_of_layers:
self.list_of_variables += layer.report_params()
def step(self, lr):
for variable in self.list_of_variables:
variable.data -= lr * variable.grad
class AdaGradOptimizer:
def __init__(self, list_of_layers):
self.list_of_layers = list_of_layers
self.list_of_variables = []
for layer in self.list_of_layers:
self.list_of_variables += layer.report_params()
self.gradient_histories = dict()
for variable in self.list_of_variables:
self.gradient_histories[variable] = np.ones(variable.shape)
def step(self, lr):
for variable in self.list_of_variables:
variable.data -= (lr / np.sqrt(self.gradient_histories[variable])) * variable.grad
self.gradient_histories[variable] += variable.grad**2
class AdamOptimizer:
def __init__(self, list_of_layers, beta1=0.9, beta2=0.999, eps=0.00000001):
self.list_of_layers = list_of_layers
self.beta1 = beta1
self.beta2 = beta2
self.eps = eps
self.list_of_variables = []
for layer in self.list_of_layers:
self.list_of_variables += layer.report_params()
self.adam_mean = dict()
self.adam_var = dict()
for variable in self.list_of_variables:
            self.adam_mean[variable] = np.zeros(variable.shape)
            self.adam_var[variable] = np.zeros(variable.shape)
        # Step counter used by step() for bias correction
        self.t = 0
    def step(self, lr):
        self.t += 1
        for variable in self.list_of_variables:
            self.adam_mean[variable] = self.adam_mean[variable] * self.beta1 + variable.grad * (1 - self.beta1)
            self.adam_var[variable] = self.adam_var[variable] * self.beta2 + (variable.grad**2) * (1 - self.beta2)
            # Bias-corrected moment estimates (divide by 1 - beta**t, not a constant factor)
            mean_hat = self.adam_mean[variable] / (1 - self.beta1**self.t)
            var_hat = self.adam_var[variable] / (1 - self.beta2**self.t)
            variable.data -= (lr * mean_hat)/(np.sqrt(var_hat) + self.eps)
class ReLUActivation(IdentityActivation):
    # During a forward pass, it just applies the ReLU function
def forward(self, x, evaluate=False):
self.activation = x * (x > 0)
return self.activation
# During backprop, it passes the delta through its derivative
def backprop(self, delta, y):
return delta * (self.activation > 0)
class SoftplusActivation(IdentityActivation):
    # During a forward pass, it just applies the softplus function
def forward(self, x, evaluate=False):
self.activation = np.log(1.0 + np.exp(x))
return self.activation
# During backprop, it passes the delta through its derivative
def backprop(self, delta, y):
return delta * 1.0/(1.0 + np.exp(-self.activation))
class DropoutLayer(IdentityActivation):
def __init__(self, predecessor, probability=0.5):
self.predecessor = predecessor
self.input_size = self.predecessor.output_size
self.output_size = self.input_size
self.activation = np.zeros(self.output_size)
self.gradient = np.zeros(self.output_size)
self.init_weights()
self.require_gradient = False
        # Probability that a given unit is dropped during training
self.probability = probability
def forward(self, x, evaluate=False):
if evaluate:
self.activation = x
else:
dropout = np.random.choice([0, 1], size=x.shape, p=[self.probability, 1 - self.probability])
self.activation = (x * dropout)/(1 - self.probability)
return self.activation
class BatchNormLayer(IdentityActivation):
def __init__(self, predecessor, eps=0.01):
self.predecessor = predecessor
self.input_size = self.predecessor.output_size
self.output_size = self.input_size
self.hidden = self.output_size[1]
self.activation = np.zeros(self.output_size)
self.gradient = np.zeros(self.output_size)
self.init_params()
self.zero_grad()
self.require_gradient = True
# Batchnorm requires a constant for "numerical stability"
self.eps = eps
# We need to save mean and variance as constants for backprop
self.mean = np.zeros((1, self.hidden))
self.var = np.zeros((1, self.hidden))
# Also, save the xhat
self.xhat = np.zeros((1, self.hidden))
# We also want to keep running means and variances during training
# These become evaluation statistics
self.eval_mean = np.zeros((1, self.hidden))
self.eval_var = np.zeros((1, self.hidden))
def init_params(self):
# Initialize gamma (mean) and beta (variance)
self.gamma = Variable(np.ones((1, self.hidden)))
self.beta = Variable(np.zeros((1, self.hidden)))
def zero_grad(self):
self.gamma.zero_grad()
self.beta.zero_grad()
def forward(self, x, evaluate=False):
if evaluate:
xhat = (x - self.eval_mean) / np.sqrt(self.eval_var + self.eps)
self.activation = self.gamma.data * xhat + self.beta.data
else:
# Batch mean and variance
self.mean = np.mean(x, axis=0)
self.var = np.var(x, axis=0)
# Evaluation mean and variance
self.eval_mean = 0.9*self.eval_mean + 0.1*self.mean
self.eval_var = 0.9*self.eval_var + 0.1*self.var
# Calculate xhat and the final normalized activation
self.xhat = (x - self.mean) / np.sqrt(self.var + self.eps)
self.activation = self.gamma.data * self.xhat + self.beta.data
return self.activation
def backprop(self, delta, y):
N = delta.shape[0]
self.gamma.grad += np.sum(delta * self.xhat, axis=0)
self.beta.grad += np.sum(delta, axis=0)
x_mean = self.predecessor.activation - self.mean
inv_var_eps = 1 / np.sqrt(self.var + self.eps)
d_xhat = delta * self.gamma.data
d_var = np.sum(d_xhat * x_mean, axis=0) * -0.5 * inv_var_eps**3
d_mean = np.sum(d_xhat * -inv_var_eps, axis=0) + (d_var * np.mean(-2.0 * x_mean))
delta = (d_xhat * inv_var_eps) + (d_var * 2 * x_mean / N) + (d_mean / N)
return delta
def report_params(self):
return [self.beta, self.gamma]
def NLLCost(prediction, truth):
return -np.mean(np.sum(truth*np.log(prediction) + (1.0-truth)*np.log(1.0-prediction), 1))
def CrossEntropy(prediction, truth):
return -np.mean(np.sum(truth*np.log(prediction), 1))
def accuracy(prediction, labels):
tests = prediction.argmax(axis=1) == labels
return(tests.sum() / prediction.shape[0])
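# Minimal end-to-end sketch (not part of the original module): the layer sizes,
# random data, learning rate and epoch count below are illustrative assumptions.
if __name__ == '__main__':
    np.random.seed(0)
    # Fake dataset: 32 examples, 10 features, 3 one-hot encoded classes
    x = np.random.randn(32, 10)
    labels = np.random.randint(0, 3, size=32)
    y = np.eye(3)[labels]
    # Build a small network by chaining layers through their predecessors
    net = NeuralNetwork()
    net.layers.append(InputLayer(10))
    net.layers.append(DenseLayer(net.layers[-1], 16))
    net.layers.append(ReLUActivation(net.layers[-1]))
    net.layers.append(SoftmaxCrossEntropy(net.layers[-1], 3))
    optimizer = SGDOptimizer(net.layers)
    for epoch in range(100):
        net.feed_forward(x)
        net.back_propagation(y)
        optimizer.step(0.01)
        net.zero_gradients()
    print('loss:', CrossEntropy(net.evaluate(x), y))
    print('accuracy:', accuracy(net.evaluate(x), labels))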
|
py | 7dfbdf3410b64d1bc6b4312942a9151fa780de60 | #!/usr/bin/python3
# Python program to resize the Quran images.
# Author : Abdallah Abdelazim
from PIL import Image
from os import makedirs, listdir
from os.path import exists, isfile, join, splitext
# Images source & output directories
SRC_DIR = '/home/abdallah/Desktop/resize_test/'
OUT_DIR = '/home/abdallah/Desktop/resize_test_out/'
# Image new size
SIZE = {
'page_1_2': (1080, 1462),
'page_other': (1080, 1612)
}
if not exists(OUT_DIR):
makedirs(OUT_DIR)
files = [f for f in listdir(SRC_DIR) if isfile(join(SRC_DIR, f))]
for f in files:
fpath = join(SRC_DIR, f)
img = Image.open(fpath)
imageNum = int(splitext(f)[0])
if imageNum == 1 or imageNum == 2:
resizedImg = img.resize(SIZE['page_1_2'])
else:
resizedImg = img.resize(SIZE['page_other'])
outpath = join(OUT_DIR, f)
resizedImg.save(outpath)
print('Saved {}'.format(f))
|
py | 7dfbdf41d80c7f69efb3bfb96e544575562aefc8 | """
WSGI config for mittab project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
import os, sys
sys.path.append('/var/www/tab')
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mittab.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
|
py | 7dfbe12edf51f55e828c53676eda687197b8a680 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2021 by The Linux Foundation
# SPDX-License-Identifier: MIT-0
#
__author__ = 'Konstantin Ryabitsev <[email protected]>'
import sys
import os
import re
import hashlib
import base64
import subprocess
import logging
import tempfile
import time
import datetime
import urllib.parse
import email.utils
import email.header
from pathlib import Path
from typing import Optional, Tuple, Union
from io import BytesIO
logger = logging.getLogger(__name__)
# Overridable via [patatt] parameters
GPGBIN = None
SSHKBIN = None
# Hardcoded defaults
DEVSIG_HDR = b'X-Developer-Signature'
DEVKEY_HDR = b'X-Developer-Key'
# Result and severity levels
RES_VALID = 0
RES_NOSIG = 4
RES_NOKEY = 8
RES_ERROR = 16
RES_BADSIG = 32
REQ_HDRS = [b'from', b'subject']
OPT_HDRS = [b'message-id']
# Quick cache for key info
KEYCACHE = dict()
# My version
__VERSION__ = '0.5-dev'
MAX_SUPPORTED_FORMAT_VERSION = 1
class SigningError(Exception):
def __init__(self, message: str, errors: Optional[list] = None):
super().__init__(message)
self.errors = errors
class ConfigurationError(Exception):
def __init__(self, message: str, errors: Optional[list] = None):
super().__init__(message)
self.errors = errors
class ValidationError(Exception):
def __init__(self, message: str, errors: Optional[list] = None):
super().__init__(message)
self.errors = errors
class NoKeyError(ValidationError):
def __init__(self, message: str, errors: Optional[list] = None):
super().__init__(message)
self.errors = errors
class BodyValidationError(ValidationError):
def __init__(self, message: str, errors: Optional[list] = None):
super().__init__(message, errors)
class DevsigHeader:
def __init__(self, hval: Optional[bytes] = None):
self._headervals = list()
self._body_hash = None
# it doesn't need to be in any particular order,
# but that's just anarchy, anarchy, I say!
self._order = ['v', 'a', 't', 'l', 'i', 's', 'h', 'bh']
self.hval = None
self.hdata = dict()
if hval:
self.from_bytes(hval)
else:
self.hdata['v'] = b'1'
def from_bytes(self, hval: bytes) -> None:
self.hval = DevsigHeader._dkim_canonicalize_header(hval)
hval = re.sub(rb'\s*', b'', self.hval)
for chunk in hval.split(b';'):
parts = chunk.split(b'=', 1)
if len(parts) < 2:
continue
self.set_field(parts[0].decode(), parts[1])
def get_field(self, field: str, decode: bool = False) -> Union[None, str, bytes]:
value = self.hdata.get(field)
if isinstance(value, bytes) and decode:
return value.decode()
return value
def set_field(self, field: str, value: Union[None, str, bytes]) -> None:
if value is None:
del self.hdata[field]
return
if isinstance(value, str):
value = value.encode()
self.hdata[field] = value
# do any git-mailinfo normalization prior to calling this
def set_body(self, body: bytes, maxlen: Optional[int] = None) -> None:
if maxlen:
if maxlen > len(body):
raise ValidationError('maxlen is larger than payload')
if maxlen < len(body):
body = body[:maxlen]
            self.hdata['l'] = str(len(body)).encode()
hashed = hashlib.sha256()
hashed.update(body)
self._body_hash = base64.b64encode(hashed.digest())
# do any git-mailinfo normalization prior to calling this
def set_headers(self, headers: list, mode: str) -> None:
parsed = list()
allhdrs = set()
# DKIM operates on headers in reverse order
for header in reversed(headers):
try:
left, right = header.split(b':', 1)
hname = left.strip().lower()
parsed.append((hname, right))
allhdrs.add(hname)
except ValueError:
continue
reqset = set(REQ_HDRS)
optset = set(OPT_HDRS)
self._headervals = list()
if mode == 'sign':
# Make sure REQ_HDRS is a subset of allhdrs
if not reqset.issubset(allhdrs):
raise SigningError('The following required headers not present: %s'
% (b', '.join(reqset.difference(allhdrs)).decode()))
# Add optional headers that are actually present
optpresent = list(allhdrs.intersection(optset))
signlist = REQ_HDRS + sorted(optpresent)
self.hdata['h'] = b':'.join(signlist)
elif mode == 'validate':
hfield = self.get_field('h')
signlist = [x.strip() for x in hfield.split(b':')]
# Make sure REQ_HEADERS are in this set
if not reqset.issubset(set(signlist)):
raise ValidationError('The following required headers not signed: %s'
% (b', '.join(reqset.difference(set(signlist))).decode()))
else:
raise RuntimeError('Unknown set_header mode: %s' % mode)
for shname in signlist:
if shname not in allhdrs:
# Per RFC:
# Nonexistent header fields do not contribute to the signature computation (that is, they are
# treated as the null input, including the header field name, the separating colon, the header field
# value, and any CRLF terminator).
continue
at = 0
for hname, rawval in list(parsed):
if hname == shname:
self._headervals.append(hname + b':' + DevsigHeader._dkim_canonicalize_header(rawval))
parsed.pop(at)
break
at += 1
def sanity_check(self) -> None:
if 'a' not in self.hdata:
raise RuntimeError('Must set "a" field first')
if not self._body_hash:
raise RuntimeError('Must use set_body first')
if not self._headervals:
raise RuntimeError('Must use set_headers first')
def validate(self, keyinfo: Union[str, bytes, None]) -> Tuple[str, str]:
self.sanity_check()
# Start by validating the body hash. If it fails to match, we can
# bail early, before needing to do any signature validation.
if self.get_field('bh') != self._body_hash:
raise BodyValidationError('Body content validation failed')
# Check that we have a b= field
if not self.get_field('b'):
raise RuntimeError('Missing "b=" value')
pts = self.hval.rsplit(b'b=', 1)
dshdr = pts[0] + b'b='
bdata = re.sub(rb'\s*', b'', pts[1])
# Calculate our own digest
hashed = hashlib.sha256()
        # Add in our _headervals first (they already have CRLF endings)
hashed.update(b''.join(self._headervals))
# and the devsig header now, without the trailing CRLF
hashed.update(DEVSIG_HDR.lower() + b':' + dshdr)
vdigest = hashed.digest()
algo = self.get_field('a', decode=True)
if algo.startswith('ed25519'):
sdigest = DevsigHeader._validate_ed25519(bdata, keyinfo)
signtime = self.get_field('t', decode=True)
signkey = keyinfo
if not signtime:
raise ValidationError('t= field is required for ed25519 sigs')
if sdigest != vdigest:
raise ValidationError('Header validation failed')
elif algo.startswith('openssh'):
DevsigHeader._validate_openssh(bdata, vdigest, keyinfo)
signtime = self.get_field('t', decode=True)
signkey = keyinfo
if not signtime:
raise ValidationError('t= field is required for openssh sigs')
elif algo.startswith('openpgp'):
sdigest, (good, valid, trusted, signkey, signtime) = DevsigHeader._validate_openpgp(bdata, keyinfo)
if sdigest != vdigest:
raise ValidationError('Header validation failed')
else:
raise ValidationError('Unknown algorithm: %s', algo)
return signkey, signtime
def sign(self, keyinfo: Union[str, bytes], split: bool = True) -> Tuple[bytes, bytes]:
self.sanity_check()
self.set_field('bh', self._body_hash)
algo = self.get_field('a', decode=True)
hparts = list()
for fn in self._order:
fv = self.get_field(fn)
if fv is not None:
hparts.append(b'%s=%s' % (fn.encode(), fv))
hparts.append(b'b=')
dshval = b'; '.join(hparts)
hashed = hashlib.sha256()
        # Add in our _headervals first (they already have CRLF endings)
hashed.update(b''.join(self._headervals))
# and ourselves now, without the trailing CRLF
hashed.update(DEVSIG_HDR.lower() + b':' + dshval)
digest = hashed.digest()
if algo.startswith('ed25519'):
bval, pkinfo = DevsigHeader._sign_ed25519(digest, keyinfo)
elif algo.startswith('openpgp'):
bval, pkinfo = DevsigHeader._sign_openpgp(digest, keyinfo)
elif algo.startswith('openssh'):
bval, pkinfo = DevsigHeader._sign_openssh(digest, keyinfo)
else:
raise RuntimeError('Unknown a=%s' % algo)
if split:
return dshval + DevsigHeader.splitter(bval), pkinfo
return dshval + bval, pkinfo
@staticmethod
def _sign_ed25519(payload: bytes, privkey: bytes) -> Tuple[bytes, bytes]:
global KEYCACHE
try:
from nacl.signing import SigningKey
from nacl.encoding import Base64Encoder
except ModuleNotFoundError:
raise RuntimeError('This operation requires PyNaCl libraries')
if privkey not in KEYCACHE:
sk = SigningKey(privkey, encoder=Base64Encoder)
vk = base64.b64encode(sk.verify_key.encode())
KEYCACHE[privkey] = (sk, vk)
else:
sk, vk = KEYCACHE[privkey]
bdata = sk.sign(payload, encoder=Base64Encoder)
return bdata, vk
@staticmethod
def _validate_ed25519(sigdata: bytes, pubkey: bytes) -> bytes:
try:
from nacl.signing import VerifyKey
from nacl.encoding import Base64Encoder
from nacl.exceptions import BadSignatureError
except ModuleNotFoundError:
raise RuntimeError('This operation requires PyNaCl libraries')
vk = VerifyKey(pubkey, encoder=Base64Encoder)
try:
return vk.verify(sigdata, encoder=Base64Encoder)
except BadSignatureError:
raise ValidationError('Failed to validate signature')
@staticmethod
def _sign_openssh(payload: bytes, keyfile: str) -> Tuple[bytes, bytes]:
global KEYCACHE
keypath = os.path.expanduser(os.path.expandvars(keyfile))
if not os.access(keypath, os.R_OK):
raise SigningError('Unable to read openssh public key %s' % keypath)
sshkargs = ['-Y', 'sign', '-n', 'patatt', '-f', keypath]
ecode, out, err = sshk_run_command(sshkargs, payload)
if ecode > 0:
raise SigningError('Running ssh-keygen failed', errors=err.decode().split('\n'))
# Remove the header/footer
sigdata = b''
for bline in out.split(b'\n'):
if bline.startswith(b'----'):
continue
sigdata += bline
if keypath not in KEYCACHE:
# Now get the fingerprint of this keyid
sshkargs = ['-l', '-f', keypath]
ecode, out, err = sshk_run_command(sshkargs, payload)
if ecode > 0:
raise SigningError('Running ssh-keygen failed', errors=err.decode().split('\n'))
chunks = out.split()
keyfp = chunks[1]
KEYCACHE[keypath] = keyfp
else:
keyfp = KEYCACHE[keypath]
return sigdata, keyfp
@staticmethod
def _validate_openssh(sigdata: bytes, payload: bytes, keydata: bytes) -> None:
with tempfile.TemporaryDirectory(suffix='.patch-attest-poc') as td:
# Start by making a signers file
fpath = os.path.join(td, 'signers')
spath = os.path.join(td, 'sigdata')
with open(fpath, 'wb') as fh:
chunks = keydata.split()
bcont = b'patatter@local namespaces="patatt" ' + chunks[0] + b' ' + chunks[1] + b'\n'
logger.debug('allowed-signers: %s', bcont)
fh.write(bcont)
with open(spath, 'wb') as fh:
bcont = b'-----BEGIN SSH SIGNATURE-----\n' + sigdata + b'\n-----END SSH SIGNATURE-----\n'
logger.debug('sigdata: %s', bcont)
fh.write(bcont)
sshkargs = ['-Y', 'verify', '-n', 'patatt', '-I', 'patatter@local', '-f', fpath, '-s', spath]
ecode, out, err = sshk_run_command(sshkargs, payload)
if ecode > 0:
raise ValidationError('Failed to validate openssh signature', errors=err.decode().split('\n'))
@staticmethod
def _sign_openpgp(payload: bytes, keyid: str) -> Tuple[bytes, bytes]:
global KEYCACHE
gpgargs = ['-s', '-u', keyid]
ecode, out, err = gpg_run_command(gpgargs, payload)
if ecode > 0:
raise SigningError('Running gpg failed', errors=err.decode().split('\n'))
bdata = base64.b64encode(out)
# Now get the fingerprint of this keyid
if keyid not in KEYCACHE:
gpgargs = ['--with-colons', '--fingerprint', keyid]
ecode, out, err = gpg_run_command(gpgargs)
if ecode > 0:
raise SigningError('Running gpg failed', errors=err.decode().split('\n'))
pkid = None
keyfp = None
for line in out.split(b'\n'):
if line.startswith(b'pub:'):
fields = line.split(b':')
pkid = fields[4]
elif line.startswith(b'fpr:') and pkid:
fields = line.split(b':')
if fields[9].find(pkid) > 0:
keyfp = fields[9]
break
KEYCACHE[keyid] = keyfp
else:
keyfp = KEYCACHE[keyid]
return bdata, keyfp
@staticmethod
def _validate_openpgp(sigdata: bytes, pubkey: Optional[bytes]) -> Tuple[bytes, tuple]:
global KEYCACHE
bsigdata = base64.b64decode(sigdata)
vrfyargs = ['--verify', '--output', '-', '--status-fd=2']
if pubkey:
with tempfile.TemporaryFile(suffix='.patch-attest-poc') as temp_keyring:
keyringargs = ['--no-default-keyring', f'--keyring={temp_keyring.name}']
if pubkey in KEYCACHE:
logger.debug('Reusing cached keyring')
temp_keyring.write(KEYCACHE[pubkey])
else:
logger.debug('Importing into new keyring')
gpgargs = keyringargs + ['--status-fd=1', '--import']
ecode, out, err = gpg_run_command(gpgargs, stdin=pubkey)
# look for IMPORT_OK
if out.find(b'[GNUPG:] IMPORT_OK') < 0:
raise ValidationError('Could not import GnuPG public key')
KEYCACHE[pubkey] = temp_keyring.read()
gpgargs = keyringargs + vrfyargs
ecode, out, err = gpg_run_command(gpgargs, stdin=bsigdata)
else:
logger.debug('Verifying using default keyring')
ecode, out, err = gpg_run_command(vrfyargs, stdin=bsigdata)
if ecode > 0:
            if err.find(b'[GNUPG:] NO_PUBKEY ') >= 0:
raise NoKeyError('No matching key found')
raise ValidationError('Failed to validate PGP signature')
good, valid, trusted, signkey, signtime = DevsigHeader._check_gpg_status(err)
if good and valid:
return out, (good, valid, trusted, signkey, signtime)
raise ValidationError('Failed to validate PGP signature')
@staticmethod
def _check_gpg_status(status: bytes) -> Tuple[bool, bool, bool, str, str]:
good = False
valid = False
trusted = False
signtime = ''
signkey = ''
logger.debug('GNUPG status:\n\t%s', status.decode().strip().replace('\n', '\n\t'))
gs_matches = re.search(rb'^\[GNUPG:] GOODSIG ([0-9A-F]+)\s+(.*)$', status, flags=re.M)
if gs_matches:
good = True
vs_matches = re.search(rb'^\[GNUPG:] VALIDSIG ([0-9A-F]+) (\d{4}-\d{2}-\d{2}) (\d+)', status, flags=re.M)
if vs_matches:
valid = True
signkey = vs_matches.groups()[0].decode()
signtime = vs_matches.groups()[2].decode()
ts_matches = re.search(rb'^\[GNUPG:] TRUST_(FULLY|ULTIMATE)', status, flags=re.M)
if ts_matches:
trusted = True
return good, valid, trusted, signkey, signtime
@staticmethod
def splitter(longstr: bytes, limit: int = 78) -> bytes:
splitstr = list()
first = True
while len(longstr) > limit:
at = limit
if first:
first = False
at -= 2
splitstr.append(longstr[:at])
longstr = longstr[at:]
splitstr.append(longstr)
return b' '.join(splitstr)
@staticmethod
def _dkim_canonicalize_header(hval: bytes) -> bytes:
# Handle MIME encoded-word syntax or other types of header encoding if
# present. The decode_header() function requires a str argument (not
# bytes) so we must decode our bytes first, this is easy as RFC2822 (sec
# 2.2) says header fields must be composed of US-ASCII characters. The
# resulting string is re-encoded to allow further processing.
if b'?q?' in hval:
hval = hval.decode('ascii', errors='ignore')
hval = str(email.header.make_header(email.header.decode_header(hval)))
hval = hval.encode('utf-8')
# We only do relaxed for headers
# o Unfold all header field continuation lines as described in
# [RFC5322]; in particular, lines with terminators embedded in
# continued header field values (that is, CRLF sequences followed by
# WSP) MUST be interpreted without the CRLF. Implementations MUST
# NOT remove the CRLF at the end of the header field value.
hval = re.sub(rb'[\r\n]', b'', hval)
# o Convert all sequences of one or more WSP characters to a single SP
# character. WSP characters here include those before and after a
# line folding boundary.
hval = re.sub(rb'\s+', b' ', hval)
# o Delete all WSP characters at the end of each unfolded header field
# value.
# o Delete any WSP characters remaining before and after the colon
# separating the header field name from the header field value. The
# colon separator MUST be retained.
hval = hval.strip() + b'\r\n'
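        # For illustration, a header value like b'  Foo  Bar\r\n\t baz  '
        # canonicalizes to b'Foo Bar baz\r\n'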
return hval
class PatattMessage:
def __init__(self, msgdata: bytes):
self.headers = list()
self.body = b''
self.lf = b'\n'
self.signed = False
self.canon_headers = None
self.canon_body = None
self.canon_identity = None
self.sigs = None
self.load_from_bytes(msgdata)
def git_canonicalize(self):
if self.canon_body is not None:
return
# Generate a new payload using m and p and canonicalize with \r\n endings,
# trimming any excess blank lines ("simple" DKIM canonicalization).
m, p, i = PatattMessage._get_git_mailinfo(b''.join(self.headers) + self.lf + self.body)
self.canon_body = b''
for line in re.sub(rb'[\r\n]*$', b'', m + p).split(b'\n'):
self.canon_body += re.sub(rb'[\r\n]*$', b'', line) + b'\r\n'
idata = dict()
for line in re.sub(rb'[\r\n]*$', b'', i).split(b'\n'):
left, right = line.split(b':', 1)
idata[left.lower()] = right.strip()
# Theoretically, we should always see an "Email" line
self.canon_identity = idata.get(b'email', b'').decode()
# Now substituting headers returned by mailinfo
self.canon_headers = list()
for header in self.headers:
try:
left, right = header.split(b':', 1)
lleft = left.lower()
if lleft == b'from':
right = b' ' + idata.get(b'author', b'') + b' <' + idata.get(b'email', b'') + b'>'
elif lleft == b'subject':
right = b' ' + idata.get(b'subject', b'')
self.canon_headers.append(left + b':' + right)
except ValueError:
self.canon_headers.append(header)
def sign(self, algo: str, keyinfo: Union[str, bytes], identity: Optional[str], selector: Optional[str]) -> None:
# Remove any devsig headers
for header in list(self.headers):
if header.startswith(DEVSIG_HDR) or header.startswith(DEVKEY_HDR):
self.headers.remove(header)
self.git_canonicalize()
ds = DevsigHeader()
ds.set_headers(self.canon_headers, mode='sign')
ds.set_body(self.canon_body)
ds.set_field('l', str(len(self.canon_body)))
if not identity:
identity = self.canon_identity
ds.set_field('i', identity)
if selector:
ds.set_field('s', selector)
if algo not in ('ed25519', 'openpgp', 'openssh'):
raise SigningError('Unsupported algorithm: %s' % algo)
ds.set_field('a', '%s-sha256' % algo)
if algo in ('ed25519', 'openssh'):
# Set signing time for non-pgp sigs
ds.set_field('t', str(int(time.time())))
hv, pkinfo = ds.sign(keyinfo)
dshdr = email.header.make_header([(DEVSIG_HDR + b': ' + hv, 'us-ascii')], maxlinelen=78)
self.headers.append(dshdr.encode().encode() + self.lf)
# Make informational header about the key used
idata = [
b'i=%s' % identity.encode(),
b'a=%s' % algo.encode(),
]
if algo == 'openpgp':
idata.append(b'fpr=%s' % pkinfo)
elif algo == 'openssh':
idata.append(b'fpr=%s' % pkinfo)
else:
idata.append(b'pk=%s' % pkinfo)
dkhdr = email.header.make_header([(DEVKEY_HDR + b': ' + b'; '.join(idata), 'us-ascii')], maxlinelen=78)
self.headers.append(dkhdr.encode().encode() + self.lf)
def validate(self, identity: str, pkey: Union[bytes, str, None], trim_body: bool = False) -> str:
vds = None
for ds in self.sigs:
if ds.get_field('i', decode=True) == identity:
vds = ds
break
if vds is None:
raise ValidationError('No signatures matching identity %s' % identity)
self.git_canonicalize()
vds.set_headers(self.canon_headers, mode='validate')
if trim_body:
lfield = vds.get_field('l')
if lfield:
try:
maxlen = int(lfield)
vds.set_body(self.canon_body, maxlen=maxlen)
except ValueError:
vds.set_body(self.canon_body)
else:
vds.set_body(self.canon_body)
return vds.validate(pkey)
def as_bytes(self):
return b''.join(self.headers) + self.lf + self.body
def as_string(self, encoding='utf-8'):
return self.as_bytes().decode(encoding)
def load_from_bytes(self, msgdata: bytes) -> None:
# We use simplest parsing -- using Python's email module would be overkill
ldshn = DEVSIG_HDR.lower()
with BytesIO(msgdata) as fh:
while True:
line = fh.readline()
if not len(line):
break
if not len(line.strip()):
self.lf = line
self.body = fh.read()
break
# is it a wrapped header?
if line[0] in ("\x09", "\x20", 0x09, 0x20):
if not len(self.headers):
raise RuntimeError('Not a valid RFC2822 message')
# attach it to the previous header
self.headers[-1] += line
continue
# Is it a signature header?
if line.lower().startswith(ldshn):
self.signed = True
self.headers.append(line)
if not len(self.headers) or not len(self.body):
raise RuntimeError('Not a valid RFC2822 message')
def get_sigs(self) -> list:
if self.sigs is not None:
return self.sigs
ldshn = DEVSIG_HDR.lower()
self.sigs = list()
from_id = None
for header in self.headers:
try:
left, right = header.split(b':', 1)
hn = left.strip().lower()
hv = right
if hn == ldshn:
self.sigs.append(DevsigHeader(hv))
elif hn == b'from':
parts = email.utils.parseaddr(hv.decode().strip())
from_id = parts[1]
except ValueError:
raise RuntimeError('Error parsing headers')
if from_id:
for ds in self.sigs:
if 'i' not in ds.hdata:
ds.set_field('i', from_id)
return self.sigs
@staticmethod
def _get_git_mailinfo(payload: bytes) -> Tuple[bytes, bytes, bytes]:
with tempfile.TemporaryDirectory(suffix='.git-mailinfo') as td:
mf = os.path.join(td, 'm')
pf = os.path.join(td, 'p')
cmdargs = ['git', 'mailinfo', '--encoding=utf-8', '--no-scissors', mf, pf]
ecode, i, err = _run_command(cmdargs, stdin=payload)
if ecode > 0:
logger.debug('FAILED : Failed running git-mailinfo:')
logger.debug(err.decode())
raise RuntimeError('Failed to run git-mailinfo: %s' % err.decode())
with open(mf, 'rb') as mfh:
m = mfh.read()
with open(pf, 'rb') as pfh:
p = pfh.read()
return m, p, i
def get_data_dir():
if 'XDG_DATA_HOME' in os.environ:
datahome = os.environ['XDG_DATA_HOME']
else:
datahome = os.path.join(str(Path.home()), '.local', 'share')
datadir = os.path.join(datahome, 'patatt')
Path(datadir).mkdir(parents=True, exist_ok=True)
return datadir
def _run_command(cmdargs: list, stdin: bytes = None, env: Optional[dict] = None) -> Tuple[int, bytes, bytes]:
sp = subprocess.Popen(cmdargs, stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE, env=env)
logger.debug('Running %s', ' '.join(cmdargs))
(output, error) = sp.communicate(input=stdin)
return sp.returncode, output, error
def git_run_command(gitdir: Optional[str], args: list, stdin: Optional[bytes] = None,
env: Optional[dict] = None) -> Tuple[int, bytes, bytes]:
if gitdir:
args = ['git', '--git-dir', gitdir, '--no-pager'] + args
else:
args = ['git', '--no-pager'] + args
return _run_command(args, stdin=stdin, env=env)
def get_config_from_git(regexp: str, section: Optional[str] = None, defaults: Optional[dict] = None,
multivals: Optional[list] = None):
if multivals is None:
multivals = list()
args = ['config', '-z', '--get-regexp', regexp]
ecode, out, err = git_run_command(None, args)
if defaults is None:
defaults = dict()
if not len(out):
return defaults
gitconfig = defaults
out = out.decode()
for line in out.split('\x00'):
if not line:
continue
key, value = line.split('\n', 1)
try:
chunks = key.split('.')
# Drop the starting part
chunks.pop(0)
cfgkey = chunks.pop(-1).lower()
if len(chunks):
if not section:
# Ignore it
continue
# We're in a subsection
sname = '.'.join(chunks)
if sname != section:
# Not our section
continue
elif section:
# We want config from a subsection specifically
continue
if cfgkey in multivals:
if cfgkey not in gitconfig:
gitconfig[cfgkey] = list()
gitconfig[cfgkey].append(value)
else:
gitconfig[cfgkey] = value
except ValueError:
logger.debug('Ignoring git config entry %s', line)
return gitconfig
def gpg_run_command(cmdargs: list, stdin: bytes = None) -> Tuple[int, bytes, bytes]:
set_bin_paths(None)
cmdargs = [GPGBIN, '--batch', '--no-auto-key-retrieve', '--no-auto-check-trustdb'] + cmdargs
return _run_command(cmdargs, stdin)
def sshk_run_command(cmdargs: list, stdin: bytes = None) -> Tuple[int, bytes, bytes]:
set_bin_paths(None)
cmdargs = [SSHKBIN] + cmdargs
return _run_command(cmdargs, stdin)
def get_git_toplevel(gitdir: str = None) -> str:
cmdargs = ['git']
if gitdir:
cmdargs += ['--git-dir', gitdir]
cmdargs += ['rev-parse', '--show-toplevel']
ecode, out, err = _run_command(cmdargs)
if ecode == 0:
return out.decode().strip()
return ''
def make_pkey_path(keytype: str, identity: str, selector: str) -> str:
chunks = identity.split('@', 1)
if len(chunks) != 2:
raise ValidationError('identity must include both local and domain parts')
local = chunks[0].lower()
domain = chunks[1].lower()
selector = selector.lower()
# urlencode all potentially untrusted bits to make sure nobody tries path-based badness
keypath = os.path.join(urllib.parse.quote_plus(keytype), urllib.parse.quote_plus(domain),
urllib.parse.quote_plus(local), urllib.parse.quote_plus(selector))
return keypath
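# For illustration (the identity below is a made-up placeholder), make_pkey_path
# maps ('ed25519', 'Dev@Example.ORG', 'default') to 'ed25519/example.org/dev/default'.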
def get_public_key(source: str, keytype: str, identity: str, selector: str) -> Tuple[bytes, str]:
keypath = make_pkey_path(keytype, identity, selector)
logger.debug('Looking for %s in %s', keypath, source)
# ref:refs/heads/someref:in-repo/path
if source.startswith('ref:'):
# split by :
parts = source.split(':', 4)
if len(parts) < 4:
raise ConfigurationError('Invalid ref, must have at least 3 colons: %s' % source)
gitrepo = parts[1]
gitref = parts[2]
gitsub = parts[3]
if not gitrepo:
gitrepo = get_git_toplevel()
if not gitrepo:
raise KeyError('Not in a git tree, so cannot use a ref:: source')
gitrepo = os.path.expanduser(gitrepo)
if gitrepo.find('$') >= 0:
gitrepo = os.path.expandvars(gitrepo)
if os.path.isdir(os.path.join(gitrepo, '.git')):
gittop = os.path.join(gitrepo, '.git')
else:
gittop = gitrepo
# it could omit the refspec, meaning "whatever the current ref"
# grab the key from a fully ref'ed path
subpath = os.path.join(gitsub, keypath)
if not gitref:
# What is our current ref?
cmdargs = ['symbolic-ref', 'HEAD']
ecode, out, err = git_run_command(gittop, cmdargs)
if ecode == 0:
gitref = out.decode().strip()
if not gitref:
raise KeyError('Could not figure out current ref in %s' % gittop)
keysrc = f'{gitref}:{subpath}'
cmdargs = ['show', keysrc]
ecode, out, err = git_run_command(gittop, cmdargs)
if ecode == 0:
# Handle one level of symlinks
if out.find(b'\n') < 0 < out.find(b'/'):
# Check this path as well
linktgt = os.path.normpath(os.path.join(os.path.dirname(subpath), out.decode()))
keysrc = f'{gitref}:{linktgt}'
cmdargs = ['show', keysrc]
ecode, out, err = git_run_command(gittop, cmdargs)
if ecode == 0:
logger.debug('KEYSRC : %s (symlinked)', keysrc)
return out, 'ref:%s:%s' % (gittop, keysrc)
logger.debug('KEYSRC : %s', keysrc)
return out, 'ref:%s:%s' % (gittop, keysrc)
# Does it exist on disk but hasn't been committed yet?
fullpath = os.path.join(gitrepo, subpath)
if os.path.exists(fullpath):
with open(fullpath, 'rb') as fh:
logger.debug('KEYSRC : %s', fullpath)
return fh.read(), fullpath
raise KeyError('Could not find %s in %s:%s' % (subpath, gittop, gitref))
# It's a disk path, then
# Expand ~ and env vars
source = os.path.expanduser(source)
if source.find('$') >= 0:
source = os.path.expandvars(source)
fullpath = os.path.join(source, keypath)
if os.path.exists(fullpath):
with open(fullpath, 'rb') as fh:
logger.debug('Loaded key from %s', fullpath)
return fh.read(), fullpath
raise KeyError('Could not find %s' % fullpath)
def _load_messages(cmdargs) -> dict:
import sys
if len(cmdargs.msgfile):
        # Load all messages from the files passed to make sure they all parse correctly
messages = dict()
for msgfile in cmdargs.msgfile:
with open(msgfile, 'rb') as fh:
messages[msgfile] = fh.read()
elif not sys.stdin.isatty():
messages = {'-': sys.stdin.buffer.read()}
else:
logger.critical('E: Pipe a message to sign or pass filenames with individual messages')
raise RuntimeError('Nothing to do')
return messages
def sign_message(msgdata: bytes, algo: str, keyinfo: Union[str, bytes],
identity: Optional[str], selector: Optional[str]) -> bytes:
pm = PatattMessage(msgdata)
pm.sign(algo, keyinfo, identity=identity, selector=selector)
return pm.as_bytes()
def set_bin_paths(config: Optional[dict]) -> None:
global GPGBIN, SSHKBIN
if GPGBIN is None:
gpgcfg = get_config_from_git(r'gpg\..*')
if config and config.get('gpg-bin'):
GPGBIN = config.get('gpg-bin')
elif gpgcfg.get('program'):
GPGBIN = gpgcfg.get('program')
else:
GPGBIN = 'gpg'
if SSHKBIN is None:
sshcfg = get_config_from_git(r'gpg\..*', section='ssh')
if config and config.get('ssh-keygen-bin'):
SSHKBIN = config.get('ssh-keygen-bin')
elif sshcfg.get('program'):
SSHKBIN = sshcfg.get('program')
else:
SSHKBIN = 'ssh-keygen'
def cmd_sign(cmdargs, config: dict) -> None:
# Do we have the signingkey defined?
usercfg = get_config_from_git(r'user\..*')
if not config.get('identity') and usercfg.get('email'):
# Use user.email
config['identity'] = usercfg.get('email')
if not config.get('signingkey'):
if usercfg.get('signingkey'):
logger.info('N: Using pgp key %s defined by user.signingkey', usercfg.get('signingkey'))
logger.info('N: Override by setting patatt.signingkey')
config['signingkey'] = 'openpgp:%s' % usercfg.get('signingkey')
else:
logger.critical('E: patatt.signingkey is not set')
logger.critical('E: Perhaps you need to run genkey first?')
sys.exit(1)
try:
messages = _load_messages(cmdargs)
except IOError as ex:
logger.critical('E: %s', ex)
sys.exit(1)
sk = config.get('signingkey')
if sk.startswith('ed25519:'):
algo = 'ed25519'
identifier = sk[8:]
keysrc = None
if identifier.startswith('/') and os.path.exists(identifier):
keysrc = identifier
else:
# datadir/private/%s.key
ddir = get_data_dir()
skey = os.path.join(ddir, 'private', '%s.key' % identifier)
if os.path.exists(skey):
keysrc = skey
else:
# finally, try .git/%s.key
gtdir = get_git_toplevel()
if gtdir:
skey = os.path.join(gtdir, '.git', '%s.key' % identifier)
if os.path.exists(skey):
keysrc = skey
if not keysrc:
logger.critical('E: Could not find the key matching %s', identifier)
sys.exit(1)
logger.info('N: Using ed25519 key: %s', keysrc)
with open(keysrc, 'r') as fh:
keydata = fh.read()
elif sk.startswith('openpgp:'):
algo = 'openpgp'
keydata = sk[8:]
elif sk.startswith('openssh:'):
algo = 'openssh'
keydata = sk[8:]
else:
logger.critical('E: Unknown key type: %s', sk)
sys.exit(1)
for fn, msgdata in messages.items():
try:
pm = PatattMessage(msgdata)
pm.sign(algo, keydata, identity=config.get('identity'), selector=config.get('selector'))
logger.debug('--- SIGNED MESSAGE STARTS ---')
logger.debug(pm.as_string())
if fn == '-':
sys.stdout.buffer.write(pm.as_bytes())
else:
with open(fn, 'wb') as fh:
fh.write(pm.as_bytes())
logger.critical('SIGN | %s', os.path.basename(fn))
except SigningError as ex:
logger.critical('E: %s', ex)
sys.exit(1)
except RuntimeError as ex:
logger.critical('E: %s: %s' % (fn, ex))
sys.exit(1)
def validate_message(msgdata: bytes, sources: list, trim_body: bool = False) -> list:
attestations = list()
pm = PatattMessage(msgdata)
if not pm.signed:
logger.debug('message is not signed')
attestations.append((RES_NOSIG, None, None, None, None, ['no signatures found']))
return attestations
# Find all identities for which we have public keys
for ds in pm.get_sigs():
errors = list()
a = ds.get_field('a', decode=True)
i = ds.get_field('i', decode=True)
s = ds.get_field('s', decode=True)
t = ds.get_field('t', decode=True)
if not s:
s = 'default'
if a.startswith('ed25519'):
algo = 'ed25519'
elif a.startswith('openpgp'):
algo = 'openpgp'
elif a.startswith('openssh'):
algo = 'openssh'
else:
            errors.append('%s/%s Unknown algorithm: %s' % (i, s, a))
attestations.append((RES_ERROR, i, t, None, a, errors))
continue
pkey = keysrc = None
for source in sources:
try:
pkey, keysrc = get_public_key(source, algo, i, s)
break
except KeyError:
pass
if not pkey and algo in ('ed25519', 'openssh'):
errors.append('%s/%s no matching %s key found' % (i, s, algo))
attestations.append((RES_NOKEY, i, t, None, algo, errors))
continue
try:
signkey, signtime = pm.validate(i, pkey, trim_body=trim_body)
if keysrc is None:
# Default keyring used
keysrc = '(default keyring)/%s' % signkey
attestations.append((RES_VALID, i, signtime, keysrc, algo, errors))
except NoKeyError:
# Not in default keyring
errors.append('%s/%s no matching openpgp key found' % (i, s))
attestations.append((RES_NOKEY, i, t, None, algo, errors))
except ValidationError:
if keysrc is None:
errors.append('failed to validate using default keyring')
else:
errors.append('failed to validate using %s' % keysrc)
attestations.append((RES_BADSIG, i, t, keysrc, algo, errors))
return attestations
def cmd_validate(cmdargs, config: dict):
import mailbox
if len(cmdargs.msgfile) == 1:
# Try to open as an mbox file
try:
mbox = mailbox.mbox(cmdargs.msgfile[0])
except IOError as ex:
logger.critical('E: %s', ex)
sys.exit(1)
messages = dict()
for msg in mbox:
subject = msg.get('Subject', 'No subject')
messages[subject] = msg.as_bytes()
else:
try:
messages = _load_messages(cmdargs)
except IOError as ex:
logger.critical('E: %s', ex)
sys.exit(1)
ddir = get_data_dir()
pdir = os.path.join(ddir, 'public')
sources = config.get('keyringsrc')
if pdir not in sources:
sources.append(pdir)
if config.get('trimbody', 'no') == 'yes':
trim_body = True
else:
trim_body = False
highest_err = 0
for fn, msgdata in messages.items():
try:
attestations = validate_message(msgdata, sources, trim_body=trim_body)
for result, identity, signtime, keysrc, algo, errors in attestations:
if result > highest_err:
highest_err = result
if result == RES_VALID:
logger.critical(' PASS | %s, %s', identity, fn)
if keysrc:
logger.info(' | key: %s', keysrc)
else:
logger.info(' | key: default GnuPG keyring')
elif result <= RES_NOSIG:
logger.critical(' NOSIG | %s', fn)
for error in errors:
logger.critical(' | %s', error)
elif result <= RES_NOKEY:
logger.critical(' NOKEY | %s, %s', identity, fn)
for error in errors:
logger.critical(' | %s', error)
elif result <= RES_ERROR:
logger.critical(' ERROR | %s, %s', identity, fn)
for error in errors:
logger.critical(' | %s', error)
else:
logger.critical('BADSIG | %s, %s', identity, fn)
for error in errors:
logger.critical(' | %s', error)
except RuntimeError as ex:
highest_err = RES_ERROR
logger.critical(' ERROR | err: %s | %s', ex, fn)
sys.exit(highest_err)
def cmd_genkey(cmdargs, config: dict) -> None:
try:
from nacl.signing import SigningKey
except ModuleNotFoundError:
raise RuntimeError('This operation requires PyNaCl libraries')
# Do we have the signingkey defined?
usercfg = get_config_from_git(r'user\..*')
if not config.get('identity'):
if not usercfg.get('email'):
logger.critical('This operation requires user.email to be set')
sys.exit(1)
# Use user.email
config['identity'] = usercfg.get('email')
identifier = cmdargs.keyname
if not identifier:
identifier = datetime.datetime.today().strftime('%Y%m%d')
ddir = get_data_dir()
sdir = os.path.join(ddir, 'private')
pdir = os.path.join(ddir, 'public')
if not os.path.exists(sdir):
os.mkdir(sdir, mode=0o0700)
if not os.path.exists(pdir):
os.mkdir(pdir, mode=0o0755)
skey = os.path.join(sdir, '%s.key' % identifier)
pkey = os.path.join(pdir, '%s.pub' % identifier)
# Do we have a key with this identifier already present?
if os.path.exists(skey) and not cmdargs.force:
logger.critical('Key already exists: %s', skey)
logger.critical('Use a different -n or pass -f to overwrite it')
raise RuntimeError('Key already exists')
logger.critical('Generating a new ed25519 keypair')
newkey = SigningKey.generate()
# Make sure we write it as 0600
def priv_opener(path, flags):
return os.open(path, flags, 0o0600)
with open(skey, 'wb', opener=priv_opener) as fh:
fh.write(base64.b64encode(bytes(newkey)))
logger.critical('Wrote: %s', skey)
with open(pkey, 'wb') as fh:
fh.write(base64.b64encode(newkey.verify_key.encode()))
logger.critical('Wrote: %s', pkey)
# Also copy it into our local keyring
spkey = os.path.join(pdir, make_pkey_path('ed25519', config.get('identity'), identifier))
Path(os.path.dirname(spkey)).mkdir(parents=True, exist_ok=True)
with open(spkey, 'wb') as fh:
fh.write(base64.b64encode(newkey.verify_key.encode()))
logger.critical('Wrote: %s', spkey)
dpkey = os.path.join(pdir, make_pkey_path('ed25519', config.get('identity'), 'default'))
if not os.path.exists(dpkey):
# symlink our new key to be the default
os.symlink(identifier, dpkey)
logger.critical('Add the following to your .git/config (or global ~/.gitconfig):')
logger.critical('---')
if cmdargs.section:
logger.critical('[patatt "%s"]', cmdargs.section)
else:
logger.critical('[patatt]')
logger.critical(' signingkey = ed25519:%s', identifier)
logger.critical(' selector = %s', identifier)
logger.critical('---')
logger.critical('Next, communicate the contents of the following file to the')
logger.critical('repository keyring maintainers for inclusion into the project:')
logger.critical(pkey)
def cmd_install_hook(cmdargs, config: dict): # noqa
gitrepo = get_git_toplevel()
if not gitrepo:
logger.critical('Not in a git tree, cannot install hook')
sys.exit(1)
hookfile = os.path.join(gitrepo, '.git', 'hooks', 'sendemail-validate')
if os.path.exists(hookfile):
logger.critical('Hook already exists: %s', hookfile)
sys.exit(1)
Path(os.path.join(gitrepo, '.git', 'hooks')).mkdir(parents=True, exist_ok=True)
with open(hookfile, 'w') as fh:
fh.write('#!/bin/sh\n')
fh.write('# installed by patatt install-hook\n')
fh.write('patatt sign --hook "${1}"\n')
os.chmod(hookfile, 0o755)
logger.critical('Hook installed as %s', hookfile)
def command() -> None:
import argparse
# noinspection PyTypeChecker
parser = argparse.ArgumentParser(
prog='patatt',
description='Cryptographically attest patches before sending out',
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('-v', '--verbose', action='store_true', default=False,
help='Be a bit more verbose')
parser.add_argument('-d', '--debug', action='store_true', default=False,
help='Show debugging output')
parser.add_argument('-s', '--section', dest='section', default=None,
help='Use config section [patatt "sectionname"]')
parser.add_argument('--version', action='version', version=__VERSION__)
subparsers = parser.add_subparsers(help='sub-command help', dest='subcmd')
sp_sign = subparsers.add_parser('sign', help='Cryptographically attest an RFC2822 message')
sp_sign.add_argument('--hook', dest='hookmode', action='store_true', default=False,
help='Git hook mode')
sp_sign.add_argument('msgfile', nargs='*', help='RFC2822 message files to sign')
sp_sign.set_defaults(func=cmd_sign)
sp_val = subparsers.add_parser('validate', help='Validate a devsig-signed message')
sp_val.add_argument('msgfile', nargs='*', help='Individual signed message files to validate or an mbox')
sp_val.set_defaults(func=cmd_validate)
sp_gen = subparsers.add_parser('genkey', help='Generate a new ed25519 keypair')
sp_gen.add_argument('-n', '--keyname', default=None,
help='Name to use for the key, e.g. "workstation", or "default"')
sp_gen.add_argument('-f', '--force', action='store_true', default=False,
help='Overwrite any existing keys, if found')
sp_gen.set_defaults(func=cmd_genkey)
    sp_install = subparsers.add_parser('install-hook', help='Install sendemail-validate hook into the current repo')
sp_install.set_defaults(func=cmd_install_hook)
_args = parser.parse_args()
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
formatter = logging.Formatter('%(message)s')
try:
if _args.hookmode:
formatter = logging.Formatter('patatt: %(message)s')
except AttributeError:
pass
ch.setFormatter(formatter)
if _args.verbose:
ch.setLevel(logging.INFO)
elif _args.debug:
ch.setLevel(logging.DEBUG)
else:
ch.setLevel(logging.CRITICAL)
logger.addHandler(ch)
config = get_config_from_git(r'patatt\..*', section=_args.section, multivals=['keyringsrc'])
# Append some extra keyring locations
if 'keyringsrc' not in config:
config['keyringsrc'] = list()
config['keyringsrc'] += ['ref:::.keys', 'ref:::.local-keys', 'ref::refs/meta/keyring:']
set_bin_paths(config)
logger.debug('config: %s', config)
if 'func' not in _args:
parser.print_help()
sys.exit(1)
try:
_args.func(_args, config)
except RuntimeError:
sys.exit(1)
if __name__ == '__main__':
command()
|
py | 7dfbe15d8c7ec068e2a38b7b6c5baaa5d8641d5f | from agent import Agent
from utilities import *
from transition import Board
from tools import initial_state
def setup_game():
print("Welcome to Breakthrough!")
print("This game has 4 different intelligent agents you can play with.")
print("Please choose the opponent agents")
agents = { 1: 'Evasive',
               2: 'Conqueror',
3: 'House Lannister',
4: 'House Stark'
}
for i in range(1,5):
print("{0} : {1}".format(i, agents[i]))
try:
print("Enter two numbers between 1 and 4, separated by a space:")
agent1, agent2 = map(int, input().split())
agent1 = int(agent1)
agent2 = int(agent2)
print("You have chosen {0} and {1} to play".format(agents[agent1],
agents[agent2]))
utility_functions = {1: evasive,
2: conqueror,
3: house_lannister,
4: house_stark
}
        print("\nPlease decide what your board should look like.")
print("Enter # of rows, # of columns and # of rows with players")
print("separated by spaces")
row, col, p_rows = map(int, input().split())
if p_rows > row:
raise ValueError("rows with players must be less than # of rows")
list2d = initial_state(row, col, p_rows)
        print("This is what the board looks like:")
print("\n######################################################\n")
for row in list2d:
for column in row:
print(column, end= " ")
print("\n")
print("######################################################\n")
except (KeyError,ValueError) as e:
        print("KeyError or ValueError occurred: ", e)
print("Please try again.")
return list2d, utility_functions[agent1], utility_functions[agent2]
def run_game(list2d, agent1, agent2):
"""This is where the game runs.
    The agents interact with the Board here.
    The Board makes sure to enforce the game rules."""
Smith = 'X'
John = 'O'
starting_turn = Smith
agent_smith = Agent(list2d, starting_turn, agent1, Smith)
agent_john = Agent(list2d, starting_turn, agent2, John)
# initialize the Board
board = Board(list2d, John)
print("Player {0} has the first turn".format(starting_turn))
moves = 0
while True:
moves = moves + 1
# retrieve the current state
curr = board.get_current_state()
# show the current state
board.display_state()
whose_turn = board.get_turn()
next_move = None
if whose_turn == Smith:
print("{0}'s turn now.".format(Smith))
next_move = agent_smith.next_move(curr, Smith)
elif whose_turn == John:
next_move = agent_john.next_move(curr, John)
print("{0}'s turn now.".format(John))
else:
print("Something wrong with the board")
move_dest, direction = next_move
# Perform the move on the board
# Try again if it's wrong turn
move_success = board.move(move_dest, direction)
if move_success == True:
# Keep playing
board.display_state()
pass
else:
print("Try a valid move.")
# print(board.terminal_state())
if board.terminal_state() != None:
print("This game Ended. To play again, run `game.py`")
break
# show the new state
print("Total number of moves made:", moves)
if __name__ == '__main__':
list2d, player1, player2 = setup_game()
run_game(list2d, player1, player2)
|
py | 7dfbe1aa50a7baba8ba8c816b1652fff560dc6fa | import MySQLdb
import time
con = MySQLdb.connect(  # The table name is 'mensagens_contatos'
host="",
user="",
password="",
port = 0000,
db=""
)
print(con)
# Cursor parameters are set to return a dict
c = con.cursor(MySQLdb.cursors.DictCursor)
# Function that returns selected data from a table
def select(fields, tables, where=None):
global c
    query = "SELECT " + fields + " FROM " + tables
    if (where):
        query += " WHERE " + where
    # Important detail: rows are returned in ascending order; ORDER BY must come after WHERE.
    query += ' ORDER BY id_usuarios_report ASC'
c.execute(query)
return c.fetchall()
# Determine the initial number of reports; it will serve as a baseline to detect a new report
aux = len(select("id_usuarios_report","mensagens_contatos"))
# Listen for a new report entry and, when one is detected, break out of the loop to start the BOT.
print("Listening...")
while True:
cont = 0
db_info=select("id_usuarios_report, email_usuario, problema_reportado, status, mensagem","mensagens_contatos")
time.sleep(1)
for i in db_info:
cont += 1
if cont > aux:
            print("New entry detected... Starting Bot")
db_userinfo = db_info[cont-1]
aux = cont
con.commit()
break
con.commit() |
py | 7dfbe2357517752315f9220303c95b3b9a677904 | """
Copyright (c) 2022 Huawei Technologies Co.,Ltd.
openGauss is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:
http://license.coscl.org.cn/MulanPSL2
THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
"""
"""
Case Type   : Basic functionality
Case Name   : Bidirectional subscription with different content on each side
Description :
    1.Create tables in both clusters
    2.Create the publication and the subscription on each side
    3.Modify table data
    4.Check whether the data is synchronized
    5.Modify data
    6.Modify table data
    7.Modify the publication on cluster A
    8.Modify data
    9.Check whether the data is updated
    10.Modify data
    11.Check whether the data is updated
Expect      :
    1.Success
    2.Success
    3.Success
    4.Cluster B: tb_pubsub_case052_1 is updated with (1, '1', 'tb_pubsub_case052_1', 'equal'),
    the others are not updated
    5.Success
    6.Cluster A: tb_pubsub_case052_2 is updated with (2, '2', 'tb_pubsub_case052_22', 'equal2'),
    the others are not updated
    7.Success
    8.Success
    9.Both are updated
    10.Success
    11.tb_pubsub_case052_2 is updated, the others are not updated
History     :
"""
import unittest
import os
from yat.test import macro
from yat.test import Node
from testcase.utils.Logger import Logger
from testcase.utils.CommonSH import CommonSH
from testcase.utils.Common import Common
from testcase.utils.Constant import Constant
Primary_SH = CommonSH('PrimaryDbUser')
@unittest.skipIf(3 != Primary_SH.get_node_num(), '非1+2环境不执行')
class Pubsubclass(unittest.TestCase):
def setUp(self):
self.log = Logger()
self.log.info("-----------this is setup-----------")
self.log.info("-Opengauss_Function_Pub_Sub_Case0052 start-")
self.pri_userdb_pub = Node(node='PrimaryDbUser')
self.pri_userdb_sub = Node(node='remote1_PrimaryDbUser')
self.constant = Constant()
self.commsh_pub = CommonSH('PrimaryDbUser')
self.commsh_sub = CommonSH('remote1_PrimaryDbUser')
self.com_pub = Common()
self.com_sub = Common('remote1_PrimaryDbUser')
self.tb_name1 = 'tb_pubsub_case052_1'
self.tb_name2 = 'tb_pubsub_case052_2'
self.subname1 = "sub_case052_1"
self.pubname1 = "pub_case052_1"
self.parent_path_pub = os.path.dirname(macro.DB_INSTANCE_PATH)
self.parent_path_sub = os.path.dirname(macro.DB_INSTANCE_PATH_REMOTE1)
self.pub_port = str(int(self.pri_userdb_pub.db_port) + 1)
self.sub_port = str(int(self.pri_userdb_sub.db_port) + 1)
self.wal_level_pub = self.com_pub.show_param("wal_level")
self.wal_level_sub = self.com_sub.show_param("wal_level",
macro.DB_ENV_PATH_REMOTE1)
self.user_param_pub = f'-U {self.pri_userdb_pub.db_user} ' \
f'-W {self.pri_userdb_pub.db_password}'
self.user_param_sub = f'-U {self.pri_userdb_sub.db_user} ' \
f'-W {self.pri_userdb_sub.db_password}'
cmd = f"cp " \
f"{os.path.join(macro.DB_INSTANCE_PATH, 'pg_hba.conf')} " \
f"{os.path.join(self.parent_path_pub, 'pg_hba.conf')};"
self.log.info(cmd)
result = self.pri_userdb_pub.sh(cmd).result()
self.log.info(result)
cmd = f"cp " \
f"{os.path.join(macro.DB_INSTANCE_PATH_REMOTE1, 'pg_hba.conf')}" \
f" {os.path.join(self.parent_path_sub, 'pg_hba.conf')};"
self.log.info(cmd)
result = self.pri_userdb_sub.sh(cmd).result()
self.log.info(result)
def test_pubsub(self):
text = '--step:预置条件,修改pg_hba expect:成功'
self.log.info(text)
guc_res = self.commsh_pub.execute_gsguc(
'reload', self.constant.GSGUC_SUCCESS_MSG, '',
'all', False, False, '',
f'host replication {self.pri_userdb_sub.db_user} '
f'{self.pri_userdb_sub.db_host}/32 sha256')
self.log.info(guc_res)
self.assertTrue(guc_res, '执行失败:' + text)
result = self.commsh_pub.execute_gsguc(
'set', self.constant.GSGUC_SUCCESS_MSG, 'wal_level=logical')
self.assertTrue(result, '执行失败:' + text)
result = self.commsh_pub.restart_db_cluster(True)
flg = self.constant.START_SUCCESS_MSG in result or 'Degrade' in result
self.assertTrue(flg, '执行失败:' + text)
guc_res = self.commsh_sub.execute_gsguc(
'reload', self.constant.GSGUC_SUCCESS_MSG, '',
'all', False, False, macro.DB_INSTANCE_PATH_REMOTE1,
f'host replication {self.pri_userdb_pub.db_user} '
f'{self.pri_userdb_pub.db_host}/32 sha256',
macro.DB_ENV_PATH_REMOTE1)
self.log.info(guc_res)
self.assertTrue(guc_res, '执行失败:' + text)
result = self.commsh_sub.execute_gsguc(
'set', self.constant.GSGUC_SUCCESS_MSG,
'wal_level=logical',
dn_path=macro.DB_INSTANCE_PATH_REMOTE1,
env_path=macro.DB_ENV_PATH_REMOTE1)
self.assertTrue(result, '执行失败:' + text)
result = self.commsh_sub.restart_db_cluster(
True, env_path=macro.DB_ENV_PATH_REMOTE1)
flg = self.constant.START_SUCCESS_MSG in result or 'Degrade' in result
self.assertTrue(flg, '执行失败:' + text)
text = '--step1:两个集群均创建表 expect:成功--'
self.log.info(text)
sql = f"CREATE TABLE {self.tb_name1}(id NUMBER(7) CONSTRAINT " \
f"s_longtext_id_nn NOT NULL, use_filename " \
f"VARCHAR2(20) primary key, filename VARCHAR2(255), " \
f"text VARCHAR2(2000) );" \
f"CREATE TABLE {self.tb_name2}" \
f"(like {self.tb_name1} including all);"
result = self.commsh_pub.execut_db_sql(
sql, sql_type=self.user_param_pub)
self.log.info(result)
self.assertEqual(result.count(self.constant.TABLE_CREATE_SUCCESS),
4, '执行失败:' + text)
result = self.commsh_sub.execut_db_sql(sql, self.user_param_sub, None,
macro.DB_ENV_PATH_REMOTE1)
self.log.info(result)
self.assertEqual(result.count(self.constant.TABLE_CREATE_SUCCESS),
4, '执行失败:' + text)
text = '--step2:创建发布端订阅端 expect:成功--'
self.log.info(text)
sql = f"CREATE PUBLICATION {self.pubname1} for table {self.tb_name1};"
result = self.commsh_pub.execut_db_sql(sql,
sql_type=self.user_param_pub)
self.log.info(result)
self.assertIn(self.constant.create_pub_succ_msg, result,
'执行失败:' + text)
self.assertNotIn(self.constant.SQL_WRONG_MSG[1], result,
'执行失败:' + text)
result = self.commsh_sub.execute_generate(
macro.COMMON_PASSWD, env_path=macro.DB_ENV_PATH_REMOTE1)
self.assertIn('', result, '执行失败:' + text)
sql = f"CREATE PUBLICATION {self.pubname1} " \
f"for table {self.tb_name2};" \
f"CREATE SUBSCRIPTION {self.subname1} CONNECTION " \
f"'host={self.pri_userdb_pub.db_host} " \
f"port={self.pub_port} " \
f"user={self.pri_userdb_pub.db_user} " \
f"dbname={self.pri_userdb_pub.db_name} " \
f"password={self.pri_userdb_pub.ssh_password}' " \
f"PUBLICATION {self.pubname1};"
result = self.commsh_sub.execut_db_sql(sql,
self.user_param_sub, None,
macro.DB_ENV_PATH_REMOTE1)
self.log.info(result)
self.assertIn(self.constant.create_sub_succ_msg,
result, '执行失败:' + text)
self.assertIn(self.constant.create_pub_succ_msg, result,
'执行失败:' + text)
self.assertNotIn(self.constant.SQL_WRONG_MSG[1], result,
'执行失败:' + text)
result = self.commsh_pub.execute_generate(macro.COMMON_PASSWD)
self.assertIn('', result, '执行失败:' + text)
sql = f"CREATE SUBSCRIPTION {self.subname1} CONNECTION " \
f"'host={self.pri_userdb_sub.db_host} " \
f"port={self.sub_port} " \
f"user={self.pri_userdb_sub.db_user} " \
f"dbname={self.pri_userdb_sub.db_name} " \
f"password={self.pri_userdb_sub.ssh_password}' " \
f"PUBLICATION {self.pubname1};"
result = self.commsh_pub.execut_db_sql(sql,
sql_type=self.user_param_pub)
self.log.info(result)
self.assertNotIn(self.constant.SQL_WRONG_MSG[1], result,
'执行失败:' + text)
self.assertIn(self.constant.create_sub_succ_msg,
result, '执行失败:' + text)
text = '--step3:修改表数据 expect:成功--'
self.log.info(text)
sql = f"insert into {self.tb_name1} values(1, " \
f"'1', '{self.tb_name1}', 'equal');" \
f"insert into {self.tb_name2} values(1, " \
f"'1', '{self.tb_name2}', 'equal');"
result = self.commsh_pub.execut_db_sql(sql,
sql_type=self.user_param_pub)
self.log.info(result)
self.assertEqual(result.count(self.constant.INSERT_SUCCESS_MSG),
2, '执行失败' + text)
text = "--step4:查询是否同步 expect:集群B:tb_pubsub_case052_1" \
"更新(1, '1', 'tb_pubsub_case052_1', 'equal'),其余未更新--"
self.log.info(text)
sql_select = f"select * from {self.tb_name1};" \
f"select * from {self.tb_name2};"
result = self.commsh_pub.execut_db_sql(sql_select,
sql_type=self.user_param_pub)
self.log.info("集群A查询结果:" + result)
result = self.commsh_sub.execut_db_sql(sql_select,
self.user_param_sub,
None,
macro.DB_ENV_PATH_REMOTE1)
self.log.info("集群B查询结果:" + result)
self.assertIn('0 rows', result, '执行失败' + text)
self.assertIn('1 row', result, '执行失败' + text)
self.assertIn(f'1 | 1 | {self.tb_name1} | equal',
result, '执行失败' + text)
text = '--step5:修改表数据expect:成功--'
self.log.info(text)
sql = f"insert into {self.tb_name1} values(2, " \
f"'2', '{self.tb_name1}2', 'equal2');" \
f"insert into {self.tb_name2} values(2, " \
f"'2', '{self.tb_name2}2', 'equal2');"
result = self.commsh_sub.execut_db_sql(sql,
self.user_param_sub,
None,
macro.DB_ENV_PATH_REMOTE1)
self.log.info(result)
self.assertEqual(result.count(self.constant.INSERT_SUCCESS_MSG),
2, '执行失败' + text)
text = "--step6:查询是否同步 expect:集群A:tb_pubsub_case052_2更新(2, " \
"'2', 'tb_pubsub_case052_22', 'equal2'),其余未更新--"
self.log.info(text)
result = self.commsh_sub.execut_db_sql(sql_select,
self.user_param_sub,
None,
macro.DB_ENV_PATH_REMOTE1)
self.log.info("集群B查询结果:" + result)
result = self.commsh_pub.execut_db_sql(sql_select,
sql_type=self.user_param_pub)
self.log.info("集群A查询结果:" + result)
self.assertEqual(result.count('1 row'), 1, '执行失败' + text)
self.assertEqual(result.count('2 rows'), 1, '执行失败' + text)
self.assertIn(f'2 | 2 | {self.tb_name2}2 | equal2',
result.splitlines()[-2], '执行失败' + text)
self.assertNotIn(f'2 | 2 | {self.tb_name1}2 | equal2',
result, '执行失败' + text)
text = '--step7:修改集群A发布端 expect:成功--'
self.log.info(text)
sql = f"alter PUBLICATION {self.pubname1} add table {self.tb_name2};"
result = self.commsh_pub.execut_db_sql(sql,
sql_type=self.user_param_pub)
self.log.info(result)
self.assertIn(self.constant.alter_pub_succ_msg,
result, '执行失败' + text)
text = '--step8:修改表数据 expect:成功--'
self.log.info(text)
sql = f"insert into {self.tb_name1} values(3, " \
f"'3', '{self.tb_name1}3', 'equal3');" \
f"insert into {self.tb_name2} values(3, " \
f"'3', '{self.tb_name2}3', 'equal3');"
result = self.commsh_pub.execut_db_sql(sql,
sql_type=self.user_param_pub)
self.log.info(result)
self.assertEqual(result.count(self.constant.INSERT_SUCCESS_MSG),
2, '执行失败' + text)
text = "--step9:查询是否同步 expect:均更新--"
self.log.info(text)
result = self.commsh_pub.execut_db_sql(sql_select,
sql_type=self.user_param_pub)
self.log.info("集群A查询结果:" + result)
result = self.commsh_sub.execut_db_sql(sql_select,
self.user_param_sub,
None,
macro.DB_ENV_PATH_REMOTE1)
self.log.info("集群B查询结果:" + result)
self.assertEqual(result.count('2 rows'), 1, '执行失败' + text)
self.assertEqual(result.count('3 rows'), 1, '执行失败' + text)
self.assertIn(f'3 | 3 | {self.tb_name1}3 | equal3',
result.splitlines()[4], '执行失败' + text)
self.assertIn(f'3 | 3 | {self.tb_name2}3 | equal3',
result.splitlines()[-2], '执行失败' + text)
text = '--step10:修改表数据expect:成功--'
self.log.info(text)
sql = f"insert into {self.tb_name1} values(4, " \
f"'4', '{self.tb_name1}4', 'equal4');" \
f"insert into {self.tb_name2} values(4, " \
f"'4', '{self.tb_name2}4', 'equal4');"
result = self.commsh_sub.execut_db_sql(sql,
self.user_param_sub,
None,
macro.DB_ENV_PATH_REMOTE1)
self.log.info(result)
self.assertEqual(result.count(self.constant.INSERT_SUCCESS_MSG),
2, '执行失败' + text)
text = "--step11:查询是否同步 expect:tb_pubsub_case052_2更新,其余未更新--"
self.log.info(text)
result = self.commsh_sub.execut_db_sql(sql_select,
self.user_param_sub,
None,
macro.DB_ENV_PATH_REMOTE1)
self.log.info("集群B查询结果:" + result)
result = self.commsh_pub.execut_db_sql(sql_select,
sql_type=self.user_param_pub)
self.log.info("集群A查询结果:" + result)
self.assertEqual(result.count('2 rows'), 1, '执行失败' + text)
self.assertEqual(result.count('4 rows'), 1, '执行失败' + text)
self.assertIn(f'4 | 4 | {self.tb_name2}4 | equal4',
result.splitlines()[-2], '执行失败' + text)
def tearDown(self):
self.log.info('------------this is tearDown-------------')
text = '--清理环境--'
self.log.info(text)
sql = f"DROP PUBLICATION if exists {self.pubname1};" \
f"DROP SUBSCRIPTION {self.subname1};"
drop_pub_result = self.commsh_pub.execut_db_sql(
sql, sql_type=self.user_param_pub)
self.log.info(drop_pub_result)
drop_sub_result = self.commsh_sub.execut_db_sql(
sql, self.user_param_sub, None, macro.DB_ENV_PATH_REMOTE1)
self.log.info(drop_sub_result)
sql = f"DROP table if exists {self.tb_name2};" \
f"DROP table if exists {self.tb_name1};"
result = self.commsh_sub.execut_db_sql(sql, self.user_param_sub, None,
macro.DB_ENV_PATH_REMOTE1)
self.log.info(result)
result = self.commsh_pub.execut_db_sql(sql,
sql_type=self.user_param_pub)
self.log.info(result)
cmd = f"mv " \
f"{os.path.join(self.parent_path_pub, 'pg_hba.conf')} "\
f"{os.path.join(macro.DB_INSTANCE_PATH, 'pg_hba.conf')} "
self.log.info(cmd)
result = self.pri_userdb_pub.sh(cmd).result()
self.log.info(result)
cmd = f"mv " \
f"{os.path.join(self.parent_path_sub, 'pg_hba.conf')} "\
f"{os.path.join(macro.DB_INSTANCE_PATH_REMOTE1, 'pg_hba.conf')} "
self.log.info(cmd)
result = self.pri_userdb_sub.sh(cmd).result()
self.log.info(result)
result_guc = self.commsh_pub.execute_gsguc(
'set', self.constant.GSGUC_SUCCESS_MSG,
f'wal_level={self.wal_level_pub}')
result_guc1 = self.commsh_sub.execute_gsguc(
'set', self.constant.GSGUC_SUCCESS_MSG,
f'wal_level={self.wal_level_sub}',
dn_path=macro.DB_INSTANCE_PATH_REMOTE1,
env_path=macro.DB_ENV_PATH_REMOTE1)
self.commsh_pub.restart_db_cluster(True)
self.commsh_sub.restart_db_cluster(True, macro.DB_ENV_PATH_REMOTE1)
self.assertTrue(result_guc, '执行失败:' + text)
self.assertTrue(result_guc1, '执行失败:' + text)
self.assertIn(self.constant.drop_pub_succ_msg, drop_pub_result,
'执行失败' + text)
self.assertIn(self.constant.drop_sub_succ_msg, drop_sub_result,
'执行失败' + text)
self.log.info("-Opengauss_Function_Pub_Sub_Case0052 end-")
|
py | 7dfbe266738edfdaddc15197010131d19500d3e3 | from abc import ABC, abstractmethod, abstractproperty
from cryptography.hazmat.primitives.keywrap import aes_key_wrap, aes_key_unwrap
class WrappingKey(ABC):
@abstractmethod
def wrap_data_key(self, data_key: bytes) -> bytes:
pass
@abstractmethod
def unwrap_data_key(self, wrapped_data_key: bytes) -> bytes:
pass
@abstractproperty
def algorithm_name(self) -> str:
pass
class AesWrappingKey(WrappingKey):
def __init__(
self,
wrapping_key: bytes,
):
self._wrapping_key = wrapping_key
def wrap_data_key(self, data_key: bytes) -> bytes:
return aes_key_wrap(
wrapping_key=self._wrapping_key,
key_to_wrap=data_key,
)
def unwrap_data_key(self, wrapped_data_key: bytes) -> bytes:
return aes_key_unwrap(
wrapping_key=self._wrapping_key,
wrapped_key=wrapped_data_key,
)
@property
def algorithm_name(self) -> str:
return "AESWrap"
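# Illustrative usage sketch (not part of the original module; test keys generated with os.urandom):
#   import os
#   kek = os.urandom(16)                 # 128-bit key-encryption key
#   wk = AesWrappingKey(kek)
#   data_key = os.urandom(32)
#   assert wk.unwrap_data_key(wk.wrap_data_key(data_key)) == data_key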
|
py | 7dfbe2c4f30d18095e7a95c0bf2ce59f7cadff2f | # reddit.py
import re
from bobbit.utils import shorten_url
# Metadata
NAME = 'reddit'
ENABLE = True
PATTERN = r'^!reddit (?P<subreddit>[^\s]*)\s*(?P<query>.*)$'
USAGE = '''Usage: !reddit <subreddit> [<query>]
Given a subreddit, this returns an article from the subreddit that matches the
query.
Example:
> !reddit linuxmasterrace
'''
TITLE_PATTERN = r'.*(?P<url>http[^\s]+reddit.com/[^\s]+).*'
# Constants
REDDIT_TEMPLATE = 'http://reddit.com/r/{subreddit}/.json'
# Command
async def reddit(bot, message, subreddit, query=''):
url = REDDIT_TEMPLATE.format(subreddit=subreddit)
async with bot.http_client.get(url) as response:
query = query.lower()
json_data = await response.json()
response = 'No results'
try:
for child in json_data['data']['children']:
data = child['data']
title = data['title']
url = data['url']
nsfw = ' [NSFW]' if data['over_18'] else ''
if query not in title.lower() and query not in url.lower():
continue
if data['stickied']:
continue
shorturl = await shorten_url(bot.http_client, url)
response = bot.client.format_text(
'{color}{green}r/{}{color}: ' +
'{bold}{}{bold}{color}{red}{}{color} @ ' +
'{color}{blue}{}{color}',
subreddit, title, nsfw, shorturl
)
break
except (IndexError, KeyError, ValueError):
pass
return message.with_body(response)
# Title Command
async def reddit_title(bot, message, url):
async with bot.http_client.get(url) as response:
try:
text = await response.text()
title = re.findall(r'"title":"([^"]+)"}}}', text)[0]
title, subreddit = title.rsplit(' : ', 1)
return message.with_body(bot.client.format_text(
'{color}{green}r/{}{color}: {bold}{}{bold}',
subreddit, title
))
except IndexError:
pass
# Register
def register(bot):
return (
('command', PATTERN , reddit),
('command', TITLE_PATTERN, reddit_title),
)
# vim: set sts=4 sw=4 ts=8 expandtab ft=python:
|
py | 7dfbe33217a3a243cffce785ee76cffcf234805b | #!/usr/bin/env python3
import argparse
import logging
import socket
from enum import Enum
from typing import Dict
logger = logging.getLogger(__name__)
class CAPIReply(Enum):
"""An enum representing the possible CAPI replies."""
RUNNING = "status,RUNNING"
COMPLETE = "status,COMPLETE" # parameters may follow
INVALID = "status,INVALID"
ERROR = "status,ERROR"
class tlv:
"""Representation of an 1905.1 TLV."""
def __init__(self, type_: int, length: int, value: str):
"""A TLV has a type, a length and a value.
Parameters
----------
type_ : int
The TLV type.
length : int
The length of the TLV. This must correspond to the length of `value`.
value : str
The TLV value, as a string. It must be formatted according to the UCC rules, i.e. with
curly braces and hexadecimal numbers.
"""
self.type = type_
self.length = length
self.value = value
def format(self, tlv_num: int = 0) -> str:
"""Format the TLV for the dev_send_1905 CAPI command.
Parameters
----------
tlv_num : int
The TLV counter. In the CAPI command, if there are multiple TLVs, they must be numbered
starting from 1. `tlv_num` is that number. If there is only a single TLV, set it to
zero.
Returns
-------
str
The string representation of the TLV according to CAPI definition.
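            For instance, a hypothetical ``tlv(0x01, 0x0006, '{0x001102334455}').format()``
            returns ``tlv_type,0x01,tlv_length,0x0006,tlv_value,{0x001102334455}``.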
"""
if tlv_num:
tlv_num_str = str(tlv_num)
else:
tlv_num_str = ''
return "tlv_type{tlv_num_str},0x{tlv_type:02x}," \
"tlv_length{tlv_num_str},0x{tlv_length:04x}," \
"tlv_value{tlv_num_str},{tlv_value}".format(tlv_num_str=tlv_num_str,
tlv_type=self.type,
tlv_length=self.length,
tlv_value=self.value)
class UCCSocket:
"""Abstraction of the target listening socket.
It connects to the listener and it sends and receives
CAPI commands from it.
"""
def __init__(self, host: str, port: int, timeout: int = 10):
"""Constructor for UCCSocket
Parameters
----------
host: str
The host to connect to. Can either be an ip or a hostname.
port: str
The port to connect to.
timeout: int
(optional) The timeout for both creating a connection,
and receiving or sending data.
"""
self.host = host
self.port = port
self.timeout = timeout
def __enter__(self):
self.conn = socket.create_connection((self.host, self.port), self.timeout)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.conn.close()
def send_cmd(self, command: str) -> None:
"""Send a new CAPI command to the device.
If previous replies from the server were available, they will be discarded.
Parameters
----------
command : str
The command to send. If it does not end with a newline, a new line will be appended.
"""
if command[-1] != "\n":
command += "\n"
self.conn.send(command.encode("utf-8"))
def get_reply(self, verbose: bool = False) -> Dict[str, str]:
"""Wait until the server replies with a `CAPIReply` message other than `CAPIReply.RUNNING`.
The replies from the server will be printed as they are received.
Note that this method only returns once a `CAPIReply.COMPLETE`, `CAPIReply.INVALID`,
or `CAPIReply.ERROR` message has been received from the server.
Parameters
----------
verbose : bool
If True, print out the valid replies (RUNNING and COMPLETE) as they arrive.
Returns
-------
Dict[str, str]
A mapping of parameter -> value. The CAPI COMPLETE message is followed by
parameter,value pairs. These are converted to a dict and returned. If the COMPLETE
message has no parameters, an empty dict is returned.
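            For instance, a hypothetical reply line ``status,COMPLETE,mid,0x1234`` would be
            returned as ``{'mid': '0x1234'}``.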
"""
data = bytearray()
while True:
# resetting data to the next line:
data = data[data.find(b"\n") + 1:]
while b"\n" not in data:
# reading until there is a newline
data.extend(self.conn.recv(256))
replies = data.decode("utf-8").split("\n")
for r in replies:
if not r:
pass # server replied with an empty line
elif CAPIReply.RUNNING.value in r:
if verbose:
print(r)
elif CAPIReply.COMPLETE.value in r:
if verbose:
print(r)
reply_value_str = r[len(CAPIReply.COMPLETE.value) + 1:].strip()
reply_values = reply_value_str.split(',')
return {k: v for k, v in zip(reply_values[::2], reply_values[1::2])}
elif CAPIReply.INVALID.value in r or CAPIReply.ERROR.value in r:
raise ValueError("Server replied with {}".format(r))
else:
raise ValueError("Received an unknown reply from the server:\n {}".format(r))
def cmd_reply(self, command: str, verbose: bool = False) -> Dict[str, str]:
"""Open the connection, send a command and wait for the reply."""
with self:
self.send_cmd(command)
return self.get_reply(verbose)
def dev_get_parameter(self, parameter: str, **additional_parameters: str) -> str:
"""Call dev_get_parameter and return the parameter, or raise KeyError if it is missing.
Parameters
----------
parameter : str
The parameter to query. It is the "parameter" argument of the dev_get_parameter command.
additional_parameters : str
Additional keyword arguments are passed as additional parameters to the
dev_get_parameter command. This is needed for example to get the "macaddr" parameter,
which needs additional "ssid" and "ruid" parameters in the command.
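            For example, a hypothetical call would be
            ``dev_get_parameter('macaddr', ssid='TestSSID', ruid='00:11:22:33:44:55')``.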
Returns
-------
str
The value of the requested parameter.
"""
command = "dev_get_parameter,program,map,parameter,{}".format(parameter)
if additional_parameters:
command += ',' + ','.join([','.join(param) for param in additional_parameters.items()])
return self.cmd_reply(command)[parameter]
def dev_send_1905(self, dest: str, message_type: int, *tlvs: tlv) -> int:
"""Call dev_send_1905 to `dest` with CMDU type `message_type` and additional `tlvs`.
Parameters
----------
dest : str
The AL-MAC address of the recipient, as a string.
message_type : int
The message type of the 1905.1 message to be sent, as an integer.
tlvs : tlv
Additional arguments are the TLVs in the 1905.1 message, as `tlv` objects.
Returns
-------
The MID of the message, as an integer.
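        For example, a hypothetical call could be
        ``dev_send_1905('02:aa:bb:cc:dd:ee', 0x8000, tlv(0x01, 0x0006, '{0x000000000000}'))``.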
"""
cmd = "DEV_SEND_1905,DestALid,{dest:s},MessageTypeValue,0x{message_type:04x}"\
.format(**locals())
if len(tlvs) > 1:
formatted_tlvs = [tlv.format(tlv_num + 1) for (tlv_num, tlv) in enumerate(tlvs)]
cmd += ',' + ','.join(formatted_tlvs)
elif tlvs:
cmd += ',' + tlvs[0].format()
return int(self.cmd_reply(cmd)["mid"], base=0)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Simulated UCC")
parser.add_argument("host", help="The device hostname or IP.", type=str)
parser.add_argument("port", help="The listening port on the device.", type=int)
parser.add_argument("command", help="The CAPI command to send.")
args = parser.parse_args()
socket.gethostbyname(args.host)
UCCSocket(args.host, args.port).cmd_reply(args.command, True)
|
py | 7dfbe423ccf1efc7664db23d6e5545b09e4c4b17 | from flexi.tree import Tree
from flexi.tree import RootTree
from flexi.tree import create_sub_tree
from flexi.xml.xml import load
from flexi.xml.xml import dump
|
py | 7dfbe5c3292e884d8ba3000e434694eac31da239 | from ctm_python_client.core.base import BaseJob
class SLAManagementJob(BaseJob):
def __init__(
self,
folder,
job_name,
service_name,
service_priority,
created_by,
job_runs_deviations_tolerance,
complete_in,
events_to_wait_for,
events_to_delete,
host=None,
run_as=None,
description=None,
):
BaseJob.__init__(
self, folder, job_name, description=description, host=host, run_as=run_as
)
self.service_name = service_name
self.service_priority = service_priority
self.created_by = created_by
self.job_runs_deviations_tolerance = job_runs_deviations_tolerance
self.complete_in = complete_in
self.events_to_wait_for = events_to_wait_for
self.events_to_delete = events_to_delete
def get_json(self):
job_json = BaseJob.get_json(self)
job_json["Type"] = "Job:SLAManagement"
        if self.service_name is not None:
            job_json["ServiceName"] = self.service_name
        if self.service_priority is not None:
            job_json["ServicePriority"] = self.service_priority
        if self.created_by is not None:
            job_json["CreatedBy"] = self.created_by
        if self.job_runs_deviations_tolerance is not None:
            job_json["JobRunsDeviationsTolerance"] = self.job_runs_deviations_tolerance
        if self.complete_in is not None:
            job_json["CompleteIn"] = self.complete_in
        if self.events_to_wait_for is not None:
            job_json["eventsToWaitFor"] = self.events_to_wait_for
        if self.events_to_delete is not None:
            job_json["eventsToDelete"] = self.events_to_delete
return job_json
|
py | 7dfbe605a6148b5391efdaebecc3cf0e8b5cbcc5 | import re
import os
import sys
from utils.utils import *
from keywords.keywords import KEYWORDS
def uppercase(x): return x.group(1).upper()
re_replace = re.compile(r'\b({})\b'.format('|'.join(KEYWORDS)))
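# e.g. (assuming 'select' and 'from' are present in KEYWORDS)
# re_replace.sub(uppercase, "select id from users") -> "SELECT id FROM users"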
def SQLformat(sqlFile):
"""
    This function uppercases the SQL keywords used in a SQL file.
    param: sqlFile: File path of the SQL file.
"""
try:
with open(sqlFile) as input_sqlFile:
content = input_sqlFile.read()
with open(sqlFile, 'w') as output_sqlFile:
output_sqlFile.write(re_replace.sub(uppercase, content.lower()))
print(
f'{SUCCESS}{BOLD}[INFO]:{END} {FILE}"{sqlFile}"{END} --> {SUCCESS}done.{END}')
except Exception as e:
print(
f'{FAILURE}{BOLD}[Error]:{END} {FILE}{BOLD}"{sqlFile}"{END}\n --> {FAILURE}', e)
if __name__ == '__main__':
print(f'''{HEADING}{BOLD}
____ ____ _ ____ ____ ____ _ _ ____ ___ ___ ____ ____
[__ | | | __ |___ | | |__/ |\/| |__| | | |___ |__/
___] |_\| |___ | |__| | \ | | | | | | |___ | \
~ From SAPHAL
{END}''')
try:
# print(len(sys.argv)-1)
for i in range(len(sys.argv)-1):
sql_file_path = sys.argv[i+1]
# print(sqlFile)
if '.sql' in sql_file_path:
SQLformat(sql_file_path)
elif os.path.isdir(sql_file_path):
print(f'{SUCCESS}{BOLD}[INFO]:{END} {FILE}{BOLD}"{sql_file_path}"{END} :: directory')
files = os.listdir(sql_file_path)
# print(files)
# print(len(files))
for i in range(len(files)):
if '.sql' in files[i]:
SQLformat(sql_file_path+'/'+files[i])
print(f'{SUCCESS} ==> directory processed.{END}')
else:
print(
f'{FAILURE}{BOLD}[Error]:{END} {FILE}{BOLD}"{sql_file_path}"{END}\n --> {FAILURE}Select valid SQL file path.')
print()
except Exception as e:
print(f'{FAILURE}{BOLD}[Error]:{END}{FAILURE}', e, '\n')
exit(0)
|
py | 7dfbe72349ff9b5b590e729270d06347fb6906dd | r"""
PQ-Trees
This module implements PQ-Trees, a data structure use to represent all
permutations of the columns of a matrix which satisfy the *consecutive ones*
*property*:
A binary matrix satisfies the *consecutive ones property* if the 1s are
contiguous in each of its rows (or equivalently, if no row contains the regexp
pattern `10^+1`).
Alternatively, one can say that a sequence of sets `S_1,...,S_n` satisfies the
*consecutive ones property* if for any `x` the indices of the sets containing
`x` is an interval of `[1,n]`.
This module is used for the recognition of Interval Graphs (see
:meth:`~sage.graphs.generic_graph.GenericGraph.is_interval`).
**P-tree and Q-tree**
- A `P`-tree with children `c_1,...,c_k` (which can be `P`-trees, `Q`-trees, or
actual sets of points) indicates that all `k!` permutations of the children
are allowed.
Example: `\{1,2\},\{3,4\},\{5,6\}` (disjoint sets can be permuted in any way)
- A `Q`-tree with children `c_1,...,c_k` (which can be `P`-trees, `Q`-trees, or
actual sets of points) indicates that only two permutations of its children
are allowed: `c_1,...,c_k` or `c_k,...,c_1`.
Example: `\{1,2\},\{2,3\},\{3,4\},\{4,5\},\{5,6\}` (only two permutations of
these sets have the *consecutive ones property*).
**Computation of all possible orderings**
#. In order to compute all permutations of a sequence of sets `S_1,...,S_k`
satisfying the *consecutive ones property*, we initialize `T` as a `P`-tree
whose children are all the `S_1,...,S_k`, thus representing the set of all
`k!` permutations of them.
#. We select some element `x` and update the data structure `T` to restrict the
permutations it describes to those that keep the occurrences of `x` on an
interval of `[1,...,k]`. This will result in a new `P`-tree whose children
are:
* all `\bar c_x` sets `S_i` which do *not* contain `x`.
* a new `P`-tree whose children are the `c_x` sets `S_i` containing `x`.
   This describes the set of all `c_x!\times \bar c_x!` permutations of
`S_1,...,S_k` that keep the sets containing `x` on an interval.
#. We take a second element `x'` and update the data structure `T` to restrict
the permutations it describes to those that keep `x'` on an interval of
`[1,...,k]`. The sets `S_1,...,S_k` belong to 4 categories:
* The family `S_{00}` of sets which do not contain any of
`x,x'`.
* The family `S_{01}` of sets which contain `x'` but do not contain
`x`.
* The family `S_{10}` of sets which contain `x` but do not contain
`x'`.
   * The family `S_{11}` of sets which contain `x` and `x'`.
With these notations, the permutations of `S_1,...,S_k` which keep the
occurrences of `x` and `x'` on an interval are of two forms:
* <some sets `S_{00}`>, <sets from `S_{10}`>, <sets from `S_{11}`>, <sets from `S_{01}`>, <other sets from `S_{00}`>
* <some sets `S_{00}`>, <sets from `S_{01}`>, <sets from `S_{11}`>, <sets from `S_{10}`>, <other sets from `S_{00}`>
These permutations can be modeled with the following `PQ`-tree:
* A `P`-tree whose children are:
* All sets from `S_{00}`
* A `Q`-tree whose children are:
        * A `P`-tree whose children are the sets from `S_{10}`
        * A `P`-tree whose children are the sets from `S_{11}`
        * A `P`-tree whose children are the sets from `S_{01}`
#. One at a time, we update the data structure with each element until they are
all exhausted, or until we reach a proof that no permutation satisfying the
*consecutive ones property* exists.
Using these two types of tree, and exploring the different cases of
intersection, it is possible to represent all the possible permutations of
our sets satisfying our constraints, or to prove that no such ordering
exists. This is the whole purpose of this module, and is explained with more
details in many places, for example in the following document from Hajiaghayi
[Haj2000]_.
Authors:
Nathann Cohen (initial implementation)
Methods and functions
---------------------
"""
################################################################################
# Copyright (C) 2012 Nathann Cohen <[email protected]> #
# #
# Distributed under the terms of the GNU General Public License (GPL) #
# https://www.gnu.org/licenses/ #
################################################################################
# Constants, to make the code more readable
FULL = 2
PARTIAL = 1
EMPTY = 0
ALIGNED = True
UNALIGNED = False
##########################################################################
# Some Lambda Functions #
# #
# As the elements of a PQ-Tree can be either P-Trees, Q-Trees, or the #
# sets themselves (the leaves), the following lambda function are #
# meant to be applied both on PQ-Trees and Sets, and mimic for the #
# latter the behaviour we expect from the corresponding methods #
# defined in class PQ #
##########################################################################
set_contiguous = lambda tree, x : (
tree.set_contiguous(x) if isinstance(tree, PQ) else
((FULL, ALIGNED) if x in tree
else (EMPTY, ALIGNED)))
new_P = lambda liste : P(liste) if len(liste) > 1 else liste[0]
new_Q = lambda liste : Q(liste) if len(liste) > 1 else liste[0]
flatten = lambda x : x.flatten() if isinstance(x, PQ) else x
impossible_msg = "Impossible"
def reorder_sets(sets):
r"""
Reorders a collection of sets such that each element appears on an
interval.
Given a collection of sets `C = S_1,...,S_k` on a ground set `X`,
this function attempts to reorder them in such a way that `\forall
x \in X` and `i<j` with `x\in S_i, S_j`, then `x\in S_l` for every
`i<l<j` if it exists.
INPUT:
- ``sets`` - a list of instances of ``list, Set`` or ``set``
ALGORITHM:
PQ-Trees
EXAMPLES:
There is only one way (up to reversal) to represent contiguously
    the sequence of sets `\{i-1, i, i+1\}`::
sage: from sage.graphs.pq_trees import reorder_sets
sage: seq = [Set([i-1,i,i+1]) for i in range(1,15)]
We apply a random permutation::
sage: p = Permutations(len(seq)).random_element()
sage: seq = [ seq[p(i+1)-1] for i in range(len(seq)) ]
sage: ordered = reorder_sets(seq)
sage: if not 0 in ordered[0]:
....: ordered = ordered.reverse()
sage: print(ordered)
[{0, 1, 2}, {1, 2, 3}, {2, 3, 4}, {3, 4, 5}, {4, 5, 6}, {5, 6, 7}, {8, 6, 7}, {8, 9, 7}, {8, 9, 10}, {9, 10, 11}, {10, 11, 12}, {11, 12, 13}, {12, 13, 14}, {13, 14, 15}]
"""
if len(sets) <= 2:
return sets
s = set().union(*sets) # union of the sets
tree = P(sets)
for i in s:
tree.set_contiguous(i)
tree = flatten(tree)
return tree.ordering()
class PQ:
r"""
PQ-Trees
This class should not be instantiated by itself: it is extended by
:class:`P` and :class:`Q`. See the documentation of
:mod:`sage.graphs.pq_trees` for more information.
AUTHOR : Nathann Cohen
"""
def __init__(self, seq):
r"""
Construction of a PQ-Tree
EXAMPLES::
sage: from sage.graphs.pq_trees import P, Q
sage: p = Q([[1,2], [2,3], P([[2,4], [2,8], [2,9]])])
:trac:`17787`::
sage: Graph('GvGNp?').is_interval()
False
"""
from sage.sets.set import Set
self._children = []
for e in seq:
if isinstance(e, list):
e = Set(e)
if not e in self._children:
self._children.append(e)
def reverse(self):
r"""
Recursively reverses ``self`` and its children
EXAMPLES::
sage: from sage.graphs.pq_trees import P, Q
sage: p = Q([[1,2], [2,3], P([[2,4], [2,8], [2,9]])])
sage: p.ordering()
[{1, 2}, {2, 3}, {2, 4}, {8, 2}, {9, 2}]
sage: p.reverse()
sage: p.ordering()
[{9, 2}, {8, 2}, {2, 4}, {2, 3}, {1, 2}]
"""
for i in self._children:
if isinstance(i, PQ):
i.reverse()
self._children.reverse()
def __contains__(self, v):
r"""
Tests whether there exists an element of ``self`` containing
an element ``v``
INPUT:
- ``v`` -- an element of the ground set
EXAMPLES::
sage: from sage.graphs.pq_trees import P, Q
sage: p = Q([[1,2], [2,3], P([[2,4], [2,8], [2,9]])])
sage: 5 in p
False
sage: 9 in p
True
"""
return any(v in i for i in self)
def __iter__(self):
r"""
Iterates over the children of ``self``.
EXAMPLES::
sage: from sage.graphs.pq_trees import P, Q
sage: p = Q([[1,2], [2,3], P([[2,4], [2,8], [2,9]])])
sage: for i in p:
....: print(i)
{1, 2}
{2, 3}
('P', [{2, 4}, {8, 2}, {9, 2}])
"""
for i in self._children:
yield i
def number_of_children(self):
r"""
Returns the number of children of ``self``
EXAMPLES::
sage: from sage.graphs.pq_trees import P, Q
sage: p = Q([[1,2], [2,3], P([[2,4], [2,8], [2,9]])])
sage: p.number_of_children()
3
"""
return len(self._children)
def ordering(self):
r"""
Returns the current ordering given by listing the leaves from
left to right.
EXAMPLES::
sage: from sage.graphs.pq_trees import P, Q
sage: p = Q([[1,2], [2,3], P([[2,4], [2,8], [2,9]])])
sage: p.ordering()
[{1, 2}, {2, 3}, {2, 4}, {8, 2}, {9, 2}]
"""
value = []
for i in self:
if isinstance(i, PQ):
value.extend(i.ordering())
else:
value.append(i)
return value
def __repr__(self):
r"""
        Succinctly represents ``self``.
EXAMPLES::
sage: from sage.graphs.pq_trees import P, Q
sage: p = Q([[1,2], [2,3], P([[2,4], [2,8], [2,9]])])
sage: print(p)
('Q', [{1, 2}, {2, 3}, ('P', [{2, 4}, {8, 2}, {9, 2}])])
"""
return str((("P" if isinstance(self,P) else "Q"),self._children))
def simplify(self, v, left = False, right = False):
r"""
Returns a simplified copy of self according to the element ``v``
If ``self`` is a partial P-tree for ``v``, we would like to
restrict the permutations of its children to permutations
keeping the children containing ``v`` contiguous. This
function also "locks" all the elements not containing ``v``
        inside a `P`-tree, which is useful when one wants to keep the
elements containing ``v`` on one side (which is the case when
this method is called).
INPUT:
- ``left, right`` (boolean) -- whether ``v`` is aligned to the
right or to the left
- ``v``-- an element of the ground set
OUTPUT:
If ``self`` is a `Q`-Tree, the sequence of its children is
        returned. If ``self`` is a `P`-tree, 2 `P`-trees are returned,
        namely the two `P`-trees defined above, restricting the
        permutations, in the order implied by ``left, right`` (if
        ``right = True``, the second `P`-tree will be the one gathering
the elements containing ``v``, if ``left=True``, the
opposite).
.. NOTE::
            This method assumes that ``self`` is partial for ``v``,
and aligned to the side indicated by ``left, right``.
EXAMPLES:
A `P`-Tree ::
sage: from sage.graphs.pq_trees import P, Q
sage: p = P([[2,4], [1,2], [0,8], [0,5]])
sage: p.simplify(0, right = True)
[('P', [{2, 4}, {1, 2}]), ('P', [{0, 8}, {0, 5}])]
A `Q`-Tree ::
sage: q = Q([[2,4], [1,2], [0,8], [0,5]])
sage: q.simplify(0, right = True)
[{2, 4}, {1, 2}, {0, 8}, {0, 5}]
"""
if sum([left, right]) !=1:
raise ValueError("Exactly one of left or right must be specified")
if isinstance(self,Q):
l = []
for c in self._children:
if (isinstance(c,PQ) and # Is c partial?
v in c and # (does c contain sets with
any(v not in cc for cc in c)): # and without v ?)
l.extend(c.simplify(v,right=right,left=left))
else:
l.append(c)
return l
else:
empty = []
full = []
partial = []
for c in self._children:
if v in c:
if (isinstance(c,PQ) and # Is c partial? (does c contain
any(v not in cc for cc in c)): # sets with and without v ?)
partial = c.simplify(v,right=right,left=left)
else:
full.append(c)
else:
empty.append(c)
if empty:
empty = [new_P(empty)]
if full:
full = [new_P(full)]
if right:
return empty+partial+full
else:
return full+partial+empty
def flatten(self):
r"""
Returns a flattened copy of ``self``
If self has only one child, we may as well consider its
child's children, as ``self`` encodes no information. This
        method recursively "flattens" trees having only one PQ-tree
child, and returns it.
EXAMPLES::
sage: from sage.graphs.pq_trees import P, Q
sage: p = Q([P([[2,4], [2,8], [2,9]])])
sage: p.flatten()
('P', [{2, 4}, {8, 2}, {9, 2}])
"""
if self.number_of_children() == 1:
return flatten(self._children[0])
else:
self._children = [flatten(x) for x in self._children]
return self
class P(PQ):
r"""
A P-Tree is a PQ-Tree whose children can be permuted in any way.
For more information, see the documentation of :mod:`sage.graphs.pq_trees`.
"""
def set_contiguous(self, v):
r"""
Updates ``self`` so that the sets containing ``v`` are
contiguous for any admissible permutation of its subtrees.
INPUT:
- ``v`` -- an element of the ground set
OUTPUT:
According to the cases :
* ``(EMPTY, ALIGNED)`` if no set of the tree contains
an occurrence of ``v``
* ``(FULL, ALIGNED)`` if all the sets of the tree contain
``v``
* ``(PARTIAL, ALIGNED)`` if some (but not all) of the sets
contain ``v``, all of which are aligned
          to the right of the ordering when the function ends
* ``(PARTIAL, UNALIGNED)`` if some (but not all) of the
sets contain ``v``, though it is impossible to align them
all to the right
In any case, the sets containing ``v`` are contiguous when this
function ends. If there is no possibility of doing so, the function
raises a ``ValueError`` exception.
EXAMPLES:
Ensuring the sets containing ``0`` are continuous::
sage: from sage.graphs.pq_trees import P, Q
sage: p = P([[0,3], [1,2], [2,3], [2,4], [4,0],[2,8], [2,9]])
sage: p.set_contiguous(0)
(1, True)
sage: print(p)
('P', [{1, 2}, {2, 3}, {2, 4}, {8, 2}, {9, 2}, ('P', [{0, 3}, {0, 4}])])
Impossible situation::
sage: p = P([[0,1], [1,2], [2,3], [3,0]])
sage: p.set_contiguous(0)
(1, True)
sage: p.set_contiguous(1)
(1, True)
sage: p.set_contiguous(2)
(1, True)
sage: p.set_contiguous(3)
Traceback (most recent call last):
...
ValueError: Impossible
"""
###############################################################
# Defining Variables : #
# #
# Collecting the information of which children are FULL of v, #
# which ones are EMPTY, PARTIAL_ALIGNED and PARTIAL_UNALIGNED #
# #
# Defining variables for their cardinals, just to make the #
# code slightly more readable :-) #
###############################################################
for x in self:
set_contiguous(x, v)
self.flatten()
seq = [set_contiguous(x, v) for x in self]
f_seq = dict(zip(self, seq))
set_FULL = []
set_EMPTY = []
set_PARTIAL_ALIGNED = []
set_PARTIAL_UNALIGNED = []
sorting = {
(FULL, ALIGNED) : set_FULL,
(EMPTY, ALIGNED) : set_EMPTY,
(PARTIAL, ALIGNED) : set_PARTIAL_ALIGNED,
(PARTIAL, UNALIGNED) : set_PARTIAL_UNALIGNED
}
for i in self:
sorting[f_seq[i]].append(i)
n_FULL = len(set_FULL)
n_EMPTY = len(set_EMPTY)
n_PARTIAL_ALIGNED = len(set_PARTIAL_ALIGNED)
n_PARTIAL_UNALIGNED = len(set_PARTIAL_UNALIGNED)
# Excludes the situation where there is no solution.
# read next comment for more explanations
if (n_PARTIAL_ALIGNED > 2 or
(n_PARTIAL_UNALIGNED >= 1 and n_EMPTY != self.number_of_children() -1)):
raise ValueError(impossible_msg)
# From now on, there are at most two pq-trees which are partially filled
# If there is one which is not aligned to the right, all the others are empty
#########################################################
# 1/2 #
# #
# Several easy cases where we can decide without paying #
# attention #
#########################################################
# All the children are FULL
elif n_FULL == self.number_of_children():
return FULL, True
# All the children are empty
elif n_EMPTY == self.number_of_children():
return EMPTY, True
# There is a PARTIAL UNALIGNED element (and all the others are
# empty, as we checked before)
elif n_PARTIAL_UNALIGNED == 1:
return (PARTIAL, UNALIGNED)
# If there is just one partial element and all the others are
# empty, we just reorder the set to put it at the right end
elif (n_PARTIAL_ALIGNED == 1 and
n_EMPTY == self.number_of_children()-1):
self._children = set_EMPTY + set_PARTIAL_ALIGNED
return (PARTIAL, ALIGNED)
################################################################
# 2/2 #
# #
# From now on, there are at most two partial pq-trees and all #
# of them have v aligned to their right #
# #
# We now want to order them in such a way that all the #
# elements containing v are located on the right #
################################################################
else:
self._children = []
# We first move the empty elements to the left, if any
if n_EMPTY > 0:
self._children.extend(set_EMPTY)
# If there is one partial element, we have to add it to
# the sequence, then add all the full elements
# We must also make sure these elements will not be
# reordered in such a way that the elements containing v
# are not contiguous
# ==> We create a Q-tree
if n_PARTIAL_ALIGNED < 2:
new = []
# add the partial element, if any
if n_PARTIAL_ALIGNED == 1:
subtree = set_PARTIAL_ALIGNED[0]
new.extend(subtree.simplify(v, right = ALIGNED))
# Then the full elements, if any, in a P-tree (we can
# permute any two of them while keeping all the
# elements containing v on an interval)
if n_FULL > 0:
new.append(new_P(set_FULL))
# We lock all of them in a Q-tree
self._children.append(new_Q(new))
return PARTIAL, True
# If there are 2 partial elements, we take care of both
# ends. We also know it will not be possible to align the
# interval of sets containing v to the right
else:
new = []
# The second partial element is aligned to the right
# while, as we want to put it at the end of the
# interval, it should be aligned to the left
set_PARTIAL_ALIGNED[1].reverse()
# 1/3
# Left partial subtree
subtree = set_PARTIAL_ALIGNED[0]
new.extend(subtree.simplify(v, right = ALIGNED))
# 2/3
# Center (Full elements, in a P-tree, as they can be
# permuted)
if n_FULL > 0:
new.append(new_P(set_FULL))
# 3/3
# Right partial subtree
subtree = set_PARTIAL_ALIGNED[1]
new.extend(subtree.simplify(v, left= ALIGNED))
# We add all of it, locked in a Q-Tree
self._children.append(new_Q(new))
return PARTIAL, False
def cardinality(self):
r"""
Return the number of orderings allowed by the structure.
.. SEEALSO::
:meth:`orderings` -- iterate over all admissible orderings
EXAMPLES::
sage: from sage.graphs.pq_trees import P, Q
sage: p = P([[0,3], [1,2], [2,3], [2,4], [4,0],[2,8], [2,9]])
sage: p.cardinality()
5040
sage: p.set_contiguous(3)
(1, True)
sage: p.cardinality()
1440
"""
from math import factorial
n = factorial(self.number_of_children())
for c in self._children:
if isinstance(c,PQ):
n = n*c.cardinality()
return n
def orderings(self):
r"""
Iterate over all orderings of the sets allowed by the structure.
.. SEEALSO::
:meth:`cardinality` -- return the number of orderings
EXAMPLES::
sage: from sage.graphs.pq_trees import P, Q
sage: p = P([[2,4], [1,2], [0,8], [0,5]])
sage: for o in p.orderings():
....: print(o)
({2, 4}, {1, 2}, {0, 8}, {0, 5})
({2, 4}, {1, 2}, {0, 5}, {0, 8})
({2, 4}, {0, 8}, {1, 2}, {0, 5})
({2, 4}, {0, 8}, {0, 5}, {1, 2})
...
"""
from itertools import permutations, product
for p in permutations(self._children):
for o in product(*[x.orderings() if isinstance(x,PQ) else [x]
for x in p]):
yield o
class Q(PQ):
r"""
A Q-Tree is a PQ-Tree whose children are ordered up to reversal
For more information, see the documentation of :mod:`sage.graphs.pq_trees`.
"""
def set_contiguous(self, v):
r"""
Updates ``self`` so that the sets containing ``v`` are
contiguous for any admissible permutation of its subtrees.
INPUT:
- ``v`` -- an element of the ground set
OUTPUT:
According to the cases:
* ``(EMPTY, ALIGNED)`` if no set of the tree contains
an occurrence of ``v``
* ``(FULL, ALIGNED)`` if all the sets of the tree contain
``v``
* ``(PARTIAL, ALIGNED)`` if some (but not all) of the sets
contain ``v``, all of which are aligned
to the right of the ordering when the function ends
* ``(PARTIAL, UNALIGNED)`` if some (but not all) of the
sets contain ``v``, though it is impossible to align them
all to the right
In any case, the sets containing ``v`` are contiguous when this
function ends. If there is no possibility of doing so, the function
raises a ``ValueError`` exception.
EXAMPLES:
Ensuring the sets containing ``0`` are contiguous::
sage: from sage.graphs.pq_trees import P, Q
sage: q = Q([[2,3], Q([[3,0],[3,1]]), Q([[4,0],[4,5]])])
sage: q.set_contiguous(0)
(1, False)
sage: print(q)
('Q', [{2, 3}, {1, 3}, {0, 3}, {0, 4}, {4, 5}])
Impossible situation::
sage: p = Q([[0,1], [1,2], [2,0]])
sage: p.set_contiguous(0)
Traceback (most recent call last):
...
ValueError: Impossible
"""
#################################################################
# Guidelines : #
# #
# As the tree is a Q-Tree, we can only reverse the order in    #
# which the elements appear. It means that we can only check that #
# the elements containing v are already contiguous (even #
# though we have to take special care of partial elements -- #
# the endpoints of the interval), and answer accordingly #
# (partial, full, empty, aligned..). We also want to align the #
# elements containing v to the right if possible. #
################################################################
###############################################################
# Defining Variables : #
# #
# Collecting the information of which children are FULL of v, #
# which ones are EMPTY, PARTIAL_ALIGNED and PARTIAL_UNALIGNED #
# #
# Defining variables for their cardinalities, just to make the #
# code slightly more readable :-) #
###############################################################
for x in self:
set_contiguous(x, v)
self.flatten()
seq = [set_contiguous(x, v) for x in self]
f_seq = dict(zip(self, seq))
set_FULL = []
set_EMPTY = []
set_PARTIAL_ALIGNED = []
set_PARTIAL_UNALIGNED = []
sorting = {
(FULL, ALIGNED) : set_FULL,
(EMPTY, ALIGNED) : set_EMPTY,
(PARTIAL, ALIGNED) : set_PARTIAL_ALIGNED,
(PARTIAL, UNALIGNED) : set_PARTIAL_UNALIGNED
}
for i in self:
sorting[f_seq[i]].append(i)
n_FULL = len(set_FULL)
n_EMPTY = len(set_EMPTY)
n_PARTIAL_ALIGNED = len(set_PARTIAL_ALIGNED)
n_PARTIAL_UNALIGNED = len(set_PARTIAL_UNALIGNED)
###################################################################
# #
# Picking the good ordering for the children : #
# #
# #
# There is a possibility of aligning to the right iif #
# the vector can assume the form (as a regular expression) : #
# #
# (EMPTY *) PARTIAL (FULL *) Of course, each of these three #
# members could be empty #
# #
# Hence, in the following case we reverse the vector : #
# #
# * if the last element is empty (as we checked the whole #
# vector is not empty)                                          #
# #
# * if the last element is partial, aligned, and all the #
# others are full #
###################################################################
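# A few illustrative (hypothetical) sequences of child states, read left to
# right, and what the rule above does with them:
#   EMPTY EMPTY PARTIAL FULL  -> kept as-is (already of the right form)
#   FULL FULL PARTIAL EMPTY   -> reversed below into EMPTY PARTIAL FULL FULL
#   FULL EMPTY PARTIAL        -> no admissible ordering, an error is raised
#                                further down in this method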
if (f_seq[self._children[-1]] == (EMPTY, ALIGNED) or
(f_seq[self._children[-1]] == (PARTIAL, ALIGNED) and n_FULL == self.number_of_children() - 1)):
# We only reverse the order of the children (not the children themselves), which means that they are still aligned to the right!
self._children.reverse()
#########################################################
# 1/2 #
# #
# Several easy cases where we can decide without paying #
# attention #
#########################################################
# Excludes the situation where there is no solution.
# read next comment for more explanations
if (n_PARTIAL_ALIGNED > 2 or
(n_PARTIAL_UNALIGNED >= 1 and n_EMPTY != self.number_of_children() -1)):
raise ValueError(impossible_msg)
# From now on, there are at most two pq-trees which are partially filled
# If there is one which is not aligned to the right, all the others are empty
# First trivial case, no checking needed
elif n_FULL == self.number_of_children():
return FULL, True
# Second trivial case, no checking needed
elif n_EMPTY == self.number_of_children():
return EMPTY, True
# Third trivial case, no checking needed
elif n_PARTIAL_UNALIGNED == 1:
return (PARTIAL, UNALIGNED)
# If there is just one partial element
# and all the others are empty, we just reorder
# the set to put it at the right end
elif (n_PARTIAL_ALIGNED == 1 and
n_EMPTY == self.number_of_children()-1):
if set_PARTIAL_ALIGNED[0] == self._children[-1]:
return (PARTIAL, ALIGNED)
else:
return (PARTIAL, UNALIGNED)
##############################################################
# 2/2 #
# #
# We iteratively consider all the children, and check #
# that the elements containing v are indeed #
# located on an interval. #
# #
# We are also interested in knowing whether this interval is #
# aligned to the right #
# #
# Because of the previous tests, we can assume there are at #
# most two partial pq-trees and all of them are aligned to #
# their right #
##############################################################
else:
new_children = []
# Two variables to remember where we are
# according to the interval
seen_nonempty = False
seen_right_end = False
for i in self:
type, aligned = f_seq[i]
# We met an empty element
if type == EMPTY:
# 2 possibilities :
#
# * we have NOT met a non-empty element before
# and it just means we are looking at the
# leading empty elements
#
# * we have met a non-empty element before and it
# means we will never meet another non-empty
# element again => we have seen the right end
# of the interval
new_children.append(i)
if seen_nonempty:
seen_right_end = True
# We met a non-empty element
else:
if seen_right_end:
raise ValueError(impossible_msg)
if type == PARTIAL:
# if we see an ALIGNED partial tree after
# having seen a nonempty element then the
# partial tree must be aligned to the left and
# so we have seen the right end
if seen_nonempty and aligned:
i.reverse()
seen_right_end = True
# right partial subtree
subtree = i
new_children.extend(subtree.simplify(v, left = True))
# If we see an UNALIGNED partial element after
# having met a nonempty element, there is no
# solution to the alignment problem
elif seen_nonempty and not aligned:
raise ValueError(impossible_msg)
# If we see an unaligned element but no non-empty
# element since the beginning, we are witnessing both the
# left and right end
elif not seen_nonempty and not aligned:
raise ValueError("Well, this case actually happens O_o")
seen_right_end = True
elif not seen_nonempty and aligned:
# left partial subtree
subtree = i
new_children.extend(subtree.simplify(v, right = True))
else:
new_children.append(i)
seen_nonempty = True
# Setting the updated sequence of children
self._children = new_children
# Whether we achieved an alignment to the right is the
# complement of whether we have seen the right end
return (PARTIAL, not seen_right_end)
def cardinality(self):
r"""
Return the number of orderings allowed by the structure.
.. SEEALSO::
:meth:`orderings` -- iterate over all admissible orderings
EXAMPLES::
sage: from sage.graphs.pq_trees import P, Q
sage: q = Q([[0,3], [1,2], [2,3], [2,4], [4,0],[2,8], [2,9]])
sage: q.cardinality()
2
"""
n = 1
for c in self._children:
if isinstance(c,PQ):
n = n*c.cardinality()
return n if (self.number_of_children() == 1) else 2*n
def orderings(self):
r"""
Iterate over all orderings of the sets allowed by the structure.
.. SEEALSO::
:meth:`cardinality` -- return the number of orderings
EXAMPLES::
sage: from sage.graphs.pq_trees import P, Q
sage: q = Q([[2,4], [1,2], [0,8], [0,5]])
sage: for o in q.orderings():
....: print(o)
({2, 4}, {1, 2}, {0, 8}, {0, 5})
({0, 5}, {0, 8}, {1, 2}, {2, 4})
"""
if len(self._children) == 1:
c = self._children[0]
for o in (c.orderings() if isinstance(c, PQ) else [c]):
yield o
else:
from itertools import product
for o in product(*[x.orderings() if isinstance(x, PQ) else [x]
for x in self._children]):
yield o
yield o[::-1]
|
py | 7dfbe79cc2c26feb3a8a83db90eaa66519fdbd75 | from pyomo.environ import Block, Expression, Var, Param, NonNegativeReals, units as pyunits
from watertap3.utils import financials
from watertap3.wt_units.wt_unit import WT3UnitProcess
## REFERENCE
## CAPITAL:
# McGiveney & Kawamura
## ELECTRICITY:
#
module_name = 'rapid_mix'
basis_year = 2007
tpec_or_tic = 'TPEC'
class UnitProcess(WT3UnitProcess):
def rapid_mix_setup(self, unit_params):
time = self.flowsheet().config.time
t = time.first()
self.flow_in = pyunits.convert(self.flow_vol_in[t], to_units=pyunits.m ** 3 / pyunits.hr)
self.flow_in_gps = pyunits.convert(self.flow_vol_in[t], to_units=pyunits.gallon / pyunits.second)
self.chem_dict = {}
self.residence_time = Var(time, initialize=5, domain=NonNegativeReals, units=pyunits.second, bounds=(5, 60), doc='Rapid mix residence time [sec]')
try:
self.vel_gradient = unit_params['vel_gradient']
if self.vel_gradient not in [300, 600, 900]:
self.vel_gradient = 900
except (KeyError, TypeError) as e:
self.vel_gradient = 900
try:
self.residence_time.fix(unit_params['residence_time'])
except (KeyError, TypeError) as e:
self.residence_time.fix(5)
try:
self.motor_eff = unit_params['motor_eff']
except (KeyError, TypeError) as e:
self.motor_eff = 0.75
self.mixer_volume_gal = self.flow_in_gps * self.residence_time[t]
def fixed_cap(self):
'''
Fixed capital cost of the rapid mix basin, from volume-based cost curves
for the selected velocity gradient (300, 600 or 900 1/s); the 1E-6 factor
converts the result to $MM.
:return: fixed capital investment
'''
if self.vel_gradient == 300:
self.rapid_mix_cap = (3.2559 * self.mixer_volume_gal + 31023) * 1E-6 * self.tpec_tic
elif self.vel_gradient == 600:
self.rapid_mix_cap = (4.0668 * self.mixer_volume_gal + 33040) * 1E-6 * self.tpec_tic
else:
self.rapid_mix_cap = (7.0814 * self.mixer_volume_gal + 33269) * 1E-6 * self.tpec_tic
return self.rapid_mix_cap
def elect(self):
'''
Electricity intensity [kWh/m3] for rapid mixing, computed from the power
needed to sustain the velocity gradient G in the basin, divided by the
motor efficiency.
:return: electricity intensity
'''
self.g = self.vel_gradient * pyunits.second ** -1
self.basin_volume_m3 = pyunits.convert(self.mixer_volume_gal, to_units=pyunits.m ** 3)
self.viscosity = 1E-3 * (pyunits.kilogram / (pyunits.second * pyunits.meter))
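# Mixing power from the velocity-gradient definition G = sqrt(P / (mu * V)),
# rearranged to P = G^2 * V * mu, with mu the dynamic viscosity of water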
self.power_needed = self.g ** 2 * self.basin_volume_m3 * self.viscosity
self.power_required = pyunits.convert(self.power_needed, to_units=pyunits.kilowatt) / self.motor_eff
self.rapid_mix_ei = self.power_required / self.flow_in
return self.rapid_mix_ei
def get_costing(self, unit_params=None, year=None):
'''
Initialize the unit in WaterTAP3.
'''
if not isinstance(unit_params, float):
self.rapid_mix_setup(unit_params)
else:
self.rapid_mix_setup({})
financials.create_costing_block(self, basis_year, tpec_or_tic)
self.costing.fixed_cap_inv_unadjusted = Expression(expr=self.fixed_cap(),
doc='Unadjusted fixed capital investment')
self.electricity = Expression(expr=self.elect(),
doc='Electricity intensity [kwh/m3]')
financials.get_complete_costing(self.costing) |
py | 7dfbeb2b4043411ebf1cbf68ffd05d0c6bf9f470 | # This program is free software: you can redistribute it and/or modify it under the
# terms of the Apache License (v2.0) as published by the Apache Software Foundation.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the Apache License for more details.
#
# You should have received a copy of the Apache License along with this program.
# If not, see <https://www.apache.org/licenses/LICENSE-2.0>.
"""Entry-point for resource-monitor."""
# standard libs
import sys
import platform
# ignore broken pipes
if platform.system() == 'Windows':
# FIXME: how do we ignore broken pipes on windows?
pass
else:
from signal import signal, SIGPIPE, SIG_DFL
signal(SIGPIPE, SIG_DFL)
# external libs
from cmdkit.app import Application
from cmdkit.cli import Interface, ArgumentError
# internal libs
from ..core.logging import Logger
from ..core.exceptions import CompletedCommand
from .. import (__appname__, __version__, __description__,
__copyright__, __license__, __website__)
# resource commands
from .cpu import CPUDevice
from .gpu import GPUDevice
# public interface
__all__ = ['ResourceMonitor', 'main', ]
DEVICES = {
'cpu': CPUDevice,
'gpu': GPUDevice,
}
PROGRAM = __appname__
USAGE = f"""\
usage: {PROGRAM} [-h] [-v] <device> <resource> [<args>...]
{__description__}\
"""
EPILOG = f"""\
Documentation and issue tracking at:
{__website__}
Copyright {__copyright__}
{__license__}.\
"""
HELP = f"""\
{USAGE}
devices:
cpu {CPUDevice.__doc__}
gpu {GPUDevice.__doc__}
options:
-h, --help Show this message and exit.
-v, --version Show the version and exit.
Use the -h/--help flag with the above resource groups to
learn more about their usage.
{EPILOG}\
"""
log = Logger()
class ResourceMonitor(Application):
"""Application class for resource-monitor."""
interface = Interface(PROGRAM, USAGE, HELP)
interface.add_argument('-v', '--version', version=__version__, action='version')
device: str = None
interface.add_argument('device')
exceptions = {
CompletedCommand: (lambda exc: int(exc.args[0])),
}
def run(self) -> None:
"""Show usage/help/version or defer to group."""
if self.device in DEVICES:
status = DEVICES[self.device].main(sys.argv[2:3])
raise CompletedCommand(status)
else:
raise ArgumentError(f'"{self.device}" is not a device.')
def main() -> int:
"""Entry-point for resource-monitor command-line interface."""
return ResourceMonitor.main(sys.argv[1:2])
|
py | 7dfbeb5be341d0b690b6dd0239a93031b0ffe67c | import math
import random
import sys
import os
from itertools import chain
from pyglet.gl import *
from pyglet.window import key
import pybario
script_dir = os.path.dirname(__file__)
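# Map a pixel index (col, row) on the 80 x 336 pixel matrix to x/y coordinates
# on the detector sprite, centred on the sprite (scale/offset factors are tuned
# for the sprite image)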
def pix_idx_to_pos(col, row, detector):
height_scale = 0.35
return (col + 3.) / 80 * detector.width * 0.965 - detector.width / 2, (row + 1.) / 336 * detector.height * height_scale - detector.height / 2
_MAX_HITS = 10 # maximum hits to visualize, new hits delete old
_MAX_TRACKS = 3  # maximum tracks to visualize, new tracks are only drawn when old ones have faded out
_COMBINE_N_READOUTS = 20
_CLEAR_COLOR = (0.87, 0.87, 0.87, 1)
class Hit(object):
dx, dy = 1.5, 1.5
def __init__(self, x, y):
self.x = x - self.dx
self.y = y + self.dy
self.transparency = 100
def update(self, dt):
# Fade out hit
self.transparency += dt * 50
if self.transparency > 255:
return False
return True
def draw(self):
X, Y, Z = self.x + self.dx, self.y + self.dy, 3.
alpha = 255 - int(self.transparency)
pyglet.graphics.draw(4, GL_QUADS, ('v3f', (self.x, self.y, Z, X, self.y, Z, X, Y, Z, self.x, Y, Z)),
('c4B', (255, 0, 0, alpha,
255, 0, 0, alpha,
255, 0, 0, alpha,
255, 0, 0, alpha)))
class Track(object):
dx, dy = 1.5, 1.5
def __init__(self, p1, p2):
self.p1 = p1
self.p2 = p2
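# Extend the straight line through the two hit positions far beyond the
# modules so the drawn track visibly crosses the whole scene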
direction = (p1[0] - p2[0], p1[1] - p2[1], p1[2] - p2[2])
position = p1
self.track_start = (position[0] - 1000 * direction[0], position[1] - 1000 * direction[1], position[2] - 1000 * direction[2])
self.track_stop = (position[0] + 1000 * direction[0], position[1] + 1000 * direction[1], position[2] + 1000 * direction[2])
self.transparency = 100
def update(self, dt):
# Fade out hit
self.transparency += dt * 1
if self.transparency > 200:
self.transparency = 200
if self.transparency > 255:
return False
return True
def draw(self):
alpha = 255 - int(self.transparency)
# Show track
pyglet.graphics.draw(2, GL_LINES, ('v3f', (self.track_start[0], self.track_start[1], self.track_start[2],
self.track_stop[0], self.track_stop[1], self.track_stop[2])),
('c4B', (0, 128, 187, alpha, 0, 128, 187, alpha)))
# Show track hits too
alpha = 255
x, y = self.p1[0], self.p1[1]
X, Y, Z = self.p1[0] + self.dx, self.p1[1] + self.dy, self.p1[2] + 3.
pyglet.graphics.draw(4, GL_QUADS, ('v3f', (x, y, Z, X, y, Z, X, Y, Z, x, Y, Z)),
('c4B', (255, 0, 0, alpha,
255, 0, 0, alpha,
255, 0, 0, alpha,
255, 0, 0, alpha)))
x, y = self.p2[0], self.p2[1]
X, Y, Z = self.p2[0] + self.dx, self.p2[1] + self.dy, self.p2[2] + 3.
pyglet.graphics.draw(4, GL_QUADS, ('v3f', (x, y, Z, X, y, Z, X, Y, Z, x, Y, Z)),
('c4B', (255, 0, 0, alpha,
255, 0, 0, alpha,
255, 0, 0, alpha,
255, 0, 0, alpha)))
class Module(object):
''' Single module of the telescope '''
def __init__(self, x, y, z):
detector_image = pyglet.image.load(os.path.join(script_dir, 'media', 'SC.png'))
detector = pyglet.sprite.Sprite(detector_image, x=x, y=y, subpixel=True)
detector.scale = 0.1
detector.rotation = 180
detector.x += detector.width / 2.
detector.y += detector.height / 2.
detector.z = z
self.detector = detector
self.hits = []
pix_idc = [(0, 0), (0, 335), (79, 0), (79, 336)]
for col, row in pix_idc:
x, y = pix_idx_to_pos(col, row, detector)
self.hits.append(Hit(x, y))
def add_hits(self, hits):
if not hits:
return False
added_hits = False
for i, (col, row) in enumerate(hits):
x, y = pix_idx_to_pos(col, row, self.detector)
# Do not add existing hits
for hit in self.hits:
if abs(x - hit.x - hit.dx) < 0.1 and abs(y - (hit.y - hit.dy)) < 0.1:
break
else:
if len(self.hits) < _MAX_HITS:
self.hits.append(Hit(x, y))
added_hits = True
elif i < _MAX_HITS:
self.hits.pop(0)
self.hits.append(Hit(x, y))
added_hits = True
else:
break
return added_hits
def update(self, dt):
for i in range(len(self.hits) - 1, -1, -1):
if not self.hits[i].update(dt):
del self.hits[i]
def draw(self):
glTranslatef(0., 0., self.detector.z)
self.detector.draw()
for hit in self.hits:
hit.draw()
glTranslatef(0., 0., -self.detector.z)
class Telescope(object):
''' Visualization of a pixel telescope '''
def __init__(self, x=0, y=0, z=0):
self.rotation = 0 # telescope rotation
self.rot_speed = 20
self.modules = []
self.modules.append(Module(x, y, 0))
self.modules.append(Module(x, y, 40))
self.tracks = []
self.hit_sound = pyglet.media.load(os.path.join(script_dir, 'media', 'hit.wav'), streaming=False)
self.track_sound = pyglet.media.load(os.path.join(script_dir, 'media', 'track.wav'), streaming=False)
self.play_sounds = 0
def add_module_hits(self, module_hits):
has_hits = []
for i, one_module_hits in enumerate(module_hits):
if one_module_hits is not None:
has_hits.append(self.modules[i].add_hits(one_module_hits))
else:
has_hits.append(False)
if self.play_sounds > 1 and any(has_hits):
self.hit_sound.play()
# Draw a track if all modules are hit in this readout
# Only draw one track candidate if many tracks are possible
try:
if all(has_hits):
hit_1 = (self.modules[0].hits[-1].x, self.modules[0].hits[-1].y, self.modules[0].detector.z)
hit_2 = (self.modules[1].hits[-1].x, self.modules[1].hits[-1].y, self.modules[1].detector.z)
if len(self.tracks) < _MAX_TRACKS:
self.tracks.append(Track(hit_1, hit_2))
else:
self.tracks.pop(0)
self.tracks.append(Track(hit_1, hit_2))
glClearColor(0.95, 0.95, 0.95, 1)
def reset_background(_):
glClearColor(*_CLEAR_COLOR)
pyglet.clock.schedule_once(reset_background, 0.1)
if self.play_sounds:
self.track_sound.play()
except IndexError:
pass
def update(self, dt):
self.rotation += dt * self.rot_speed
if self.rotation > 360:
self.rotation -= 360
for m in self.modules:
m.update(dt)
for i in range(len(self.tracks) - 1, -1, -1):
if not self.tracks[i].update(dt):
del self.tracks[i]
def draw(self):
''' Called for every frame '''
glRotatef(self.rotation, 0, 0, 1) # rotate telescope
for m in self.modules:
m.draw()
for track in self.tracks:
track.draw()
glRotatef(-self.rotation, 0, 0, 1)
def reset(self):
self.tracks = []
for m in self.modules:
m.hits = []
def add_mc_track(self):
for m in self.modules:
m.add_hits([(random.randint(1, 80), random.randint(1, 336))])
hit_1 = (self.modules[0].hits[-1].x, self.modules[0].hits[-1].y, self.modules[0].detector.z)
hit_2 = (self.modules[1].hits[-1].x, self.modules[1].hits[-1].y, self.modules[1].detector.z)
self.tracks.append(Track(hit_1, hit_2))
class Camera(object):
''' 3d camera movements '''
def __init__(self, pos=(6, -120, 160), rot=(40, 0)):
self.init_pos = pos
self.init_rot = rot
self.pos = list(self.init_pos)
self.rot = list(self.init_rot)
def reset(self):
self.pos = list(self.init_pos)
self.rot = list(self.init_rot)
def mouse_motion(self, dx, dy):
dx /= 8
dy /= 8
self.rot[0] += dy
self.rot[1] -= dx
if self.rot[0] > 90:
self.rot[0] = 90
elif self.rot[0] < -90:
self.rot[0] = -90
def update(self, dt, keys):
s = dt * 10
rotY = -self.rot[1] / 180 * math.pi
dx, dz = s * math.sin(rotY), s * math.cos(rotY)
if keys[key.Q]:
self.pos[0] += dx
self.pos[2] -= dz
if keys[key.E]:
self.pos[0] -= dx
self.pos[2] += dz
if keys[key.A]:
self.pos[0] -= dz
self.pos[2] -= dx
if keys[key.D]:
self.pos[0] += dz
self.pos[2] += dx
if keys[key.S]:
self.pos[1] -= s
if keys[key.W]:
self.pos[1] += s
if keys[key.SPACE]:
self.reset()
class App(pyglet.window.Window):
''' 3d application window'''
def __init__(self, *args, **kwargs):
if sys.version_info[0] < 3:
super(App, self).__init__(*args, **kwargs)
else:
super().__init__(*args, **kwargs)
self.keys = key.KeyStateHandler()
self.push_handlers(self.keys)
pyglet.clock.schedule(self.update)
self.telescope = Telescope()
self.camera = Camera()
self.io = pybario.IO(addresses=['tcp://127.0.0.1:5678', 'tcp://127.0.0.1:5679'], max_hits=_MAX_HITS)
# Interface
self.fps = pyglet.window.FPSDisplay(window=self)
self.fps.label.font_size = 12
# Legend
self.text = pyglet.text.Label("Pixeltreffer", font_name="Arial", font_size=40, width=0.1 * self.width, x=self.width + 50, y=0.85*self.height,
anchor_x='left', anchor_y='center', color=(255, 0, 0, 220))
self.text_2 = pyglet.text.Label("Teilchenspuren", font_name="Arial", font_size=40, width=0.1 * self.width, x=self.width + 50, y=0.85*self.height - 100,
anchor_x='left', anchor_y='center', color=(0, 128, 187, 220))
self.logo = pyglet.sprite.Sprite(pyglet.image.load(os.path.join(script_dir, 'media', 'Silab.png')), x=self.width * 0.98, y=self.height * 0.98, subpixel=True)
self.logo.scale = 0.2
self.sound_logo = pyglet.sprite.Sprite(pyglet.image.load(os.path.join(script_dir, 'media', 'sound_off.png')), x=self.width * 0.98, y=self.height * 0.02, subpixel=True)
self.sound_logo.scale = 0.2
#self.sound_logo.x -= self.sound_logo.width
#self.sound_logo.y += self.sound_logo.height
self.logo.x = self.width * 0.98 - self.logo.width
self.logo.y = self.height * 0.98 - self.logo.height
self.text.x = self.width * 0.6
self.text_2.x = self.width * 0.6
self.sound_logo.x = self.width * 0.98 - self.sound_logo.width
self.sound_logo.y = self.sound_logo.height
# Options
self.show_logo = True
self.pause = False
self.n_ro = 0
self.mh = [None, None]
def push(self, pos, rot):
glPushMatrix()
glRotatef(-rot[0], 1, 0, 0)
glRotatef(-rot[1], 0, 1, 0)
glTranslatef(-pos[0], -pos[1], -pos[2],)
def Projection(self):
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
def Model(self):
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
def set2d(self):
self.Projection()
gluOrtho2D(0, self.width, 0, self.height)
self.Model()
def set3d(self):
self.Projection()
gluPerspective(70, self.width / self.height, 0.05, 1000)
self.Model()
def setLock(self, state):
self.lock = state
self.set_exclusive_mouse(state)
lock = False
mouse_lock = property(lambda self: self.lock, setLock)
def on_mouse_motion(self, x, y, dx, dy):
if self.mouse_lock:
self.camera.mouse_motion(dx, dy)
def on_key_press(self, KEY, MOD):
if KEY == key.ESCAPE:
self.close()
elif KEY == key.M:
self.mouse_lock = not self.mouse_lock
elif KEY == key.PLUS:
self.telescope.rot_speed += 10
elif KEY == key.MINUS:
self.telescope.rot_speed -= 10
elif KEY == key.F or (KEY == key.ENTER and MOD == key.MOD_CTRL):
self.set_fullscreen(not self._fullscreen)
elif KEY == key.L:
self.logo.visible = not self.logo.visible
self.sound_logo.visible = self.logo.visible
elif KEY == key.X:
self.telescope.play_sounds += 1
if self.telescope.play_sounds > 2:
self.telescope.play_sounds = 0
if self.telescope.play_sounds:
self.sound_logo.image = pyglet.image.load(os.path.join(script_dir, 'media', 'sound.png') if self.telescope.play_sounds > 1 else os.path.join(script_dir, 'media', 'sound_silent.png'))
else:
self.sound_logo.image = pyglet.image.load(os.path.join(script_dir, 'media', 'sound_off.png'))
elif KEY == key.P:
self.pause = not self.pause
elif KEY == key.R:
self.telescope.reset()
elif KEY == key.SPACE:
self.telescope.add_mc_track()
def update(self, dt):
self.n_ro = self.n_ro + 1
mh = self.io.get_module_hits()
if not self.pause:
if mh[0]:
if self.mh[0] is None:
self.mh[0] = mh[0]
else:
self.mh[0].extend(mh[0])
if mh[1]:
if self.mh[1] is None:
self.mh[1] = mh[1]
else:
self.mh[1].extend(mh[1])
if self.n_ro >= _COMBINE_N_READOUTS:
self.n_ro = 0
self.telescope.add_module_hits(self.mh)
self.mh = [None, None]
self.camera.update(dt, self.keys)
self.telescope.update(dt)
def draw_legend(self):
glMatrixMode(GL_MODELVIEW)
glPushMatrix()
glLoadIdentity()
glMatrixMode(GL_PROJECTION)
glPushMatrix()
glLoadIdentity()
glOrtho(0, self.width, 0, self.height, -1, 1)
self.logo.draw()
self.text.draw()
self.text_2.draw()
self.sound_logo.draw()
glPopMatrix()
glMatrixMode(GL_MODELVIEW)
glPopMatrix()
def on_draw(self):
self.clear()
self.set3d()
self.draw_legend()
self.fps.draw()
self.push(self.camera.pos, self.camera.rot)
self.telescope.draw()
glPopMatrix()
if __name__ == '__main__':
window = App(caption='Pixel detector model', resizable=True, fullscreen=True)
# 3d settings
glClearColor(*_CLEAR_COLOR)
glEnable(GL_DEPTH_TEST)
glEnable(GL_LINE_SMOOTH)
glHint(GL_LINE_SMOOTH_HINT, GL_DONT_CARE)
glLineWidth(5)
glEnable(GL_BLEND) # transparency
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA) # transparency
glEnable(GL_CULL_FACE)
pyglet.app.run()
|
py | 7dfbec1f9b42d14c606bf3b910735a31cc3d40d5 | from pathlib import Path
import logging
from erebos import prep
if __name__ == "__main__":
logging.basicConfig(format="%(asctime)s %(levelno)s %(message)s", level="INFO")
calipso_dir = Path("/storage/projects/goes_alg/calipso/west/1km_cloud/")
goes_dir = Path("/storage/projects/goes_alg/goes_data/west/combined/")
save_dir = Path("/storage/projects/goes_alg/combined/west/daytime")
save_dir.mkdir(parents=True, exist_ok=True)
goes_glob = "*.nc"
calipso_glob = "*D_Sub*.hdf"
prep.combine_calipso_goes_files(
calipso_dir, goes_dir, save_dir, goes_glob, calipso_glob
)
|
py | 7dfbecb0fae1b720db8255e8c5542c1801ff9eaf | from functools import partial
from itertools import chain
import pathlib
import shutil
import jinja2
import markdown
from inkblot import converters
from inkblot.document import Document
from inkblot.document_loader import DocumentLoader
def generate(directory: pathlib.Path, config):
md = markdown.Markdown()
outputs = {}
supports = {}
source_dir = directory / config["source_dir"]
for f in source_dir.rglob("*.*"):
doc = Document(f, base=source_dir)
if any(part.startswith("_") for part in doc.path.parts):
supports[doc.path.as_posix()] = doc
else:
outputs[doc.path.as_posix()] = doc
support_loader = DocumentLoader(supports)
output_loader = DocumentLoader(outputs)
loader = jinja2.ChoiceLoader([support_loader, output_loader])
env = jinja2.Environment(
loader=loader, autoescape=jinja2.select_autoescape(["html", "xml"])
)
@converters.converter
def jinjafy(doc):
template = env.get_template(doc.path.as_posix())
try:
doc.body = template.render(doc.attributes)
except jinja2.exceptions.TemplateAssertionError:
print("FAILED:\n\n" + doc.body)
return doc
output_path = directory / config["build_dir"]
if output_path.exists():
shutil.rmtree(output_path)
output_path.mkdir()
config["conversions"].update(config["extra_conversions"])
for doc in outputs.values():
for name in config["conversions"].get(doc.suffix, []):
converters.conversions[name](doc)
path = output_path / doc.path.with_suffix(doc.suffix)
if not path.parent.exists():
path.parent.mkdir(parents=True)
if doc.attributes.get("binary", False):
path.write_bytes(doc.body)
else:
path.write_text(doc.body)
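# Minimal usage sketch (not part of the original module): the config keys below
# mirror how generate() reads them above; mapping ".html" to the "jinjafy"
# converter assumes @converters.converter registers the function under its name.
if __name__ == "__main__":
    example_config = {
        "source_dir": "site",                    # hypothetical source directory
        "build_dir": "build",                    # recreated from scratch on each run
        "conversions": {".html": ["jinjafy"]},   # suffix -> converter names
        "extra_conversions": {},                 # merged into "conversions" above
    }
    generate(pathlib.Path("."), example_config)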
|
py | 7dfbed4afcb79b97564ecabb5073816ccbe0772a |
# https://github.com/implus/PytorchInsight/blob/master/classification/models/imagenet/resnet_sge.py
# Imports added so this snippet is self-contained (they were omitted in the original)
import torch
import torch.nn as nn
from torch.nn import Parameter
class SpatialGroupEnhance(nn.Module):
def __init__(self, groups=64):
super(SpatialGroupEnhance, self).__init__()
self.groups = groups  # number of groups
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.weight = Parameter(torch.zeros(1, groups, 1, 1))
self.bias = Parameter(torch.ones(1, groups, 1, 1))
self.sig = nn.Sigmoid()
def forward(self, x): # (b, c, h, w)
b, c, h, w = x.size()
x = x.view(b * self.groups, -1, h, w)
xn = x * self.avg_pool(x)  # multiply directly with the group-wise global average
# normalize the attention map per sample and group
xn = xn.sum(dim=1, keepdim=True)
t = xn.view(b * self.groups, -1)
t = t - t.mean(dim=1, keepdim=True)
std = t.std(dim=1, keepdim=True) + 1e-5
t = t / std
t = t.view(b, self.groups, h, w)
# learn an additional per-group scale and shift
t = t * self.weight + self.bias
t = t.view(b * self.groups, 1, h, w)
# sigmoid gating (activation)
x = x * self.sig(t)
x = x.view(b, c, h, w)
return x
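# Minimal usage sketch (not from the referenced repository): apply the module to
# an NCHW feature map whose channel count is divisible by `groups`.
if __name__ == "__main__":
    sge = SpatialGroupEnhance(groups=8)
    feats = torch.randn(2, 64, 32, 32)   # (batch, channels, height, width)
    out = sge(feats)
    print(out.shape)                      # torch.Size([2, 64, 32, 32])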
|
py | 7dfbed6ccd43f551c39a9d6d1fe14ef3b860235d | import numpy as np
import matplotlib.pyplot as plt
import matplotlib.ticker as mtick
from matplotlib.font_manager import FontProperties
from matplotlib.patches import Patch
import pandas as pd
def make_bar(data, por, title, name, file_name, drop_df=False, range=False, ylabel=None, xlabel=None):
df = pd.DataFrame({
'Label': data,
'Value': por
})
if drop_df:
df = df.drop(labels=0, axis=0)
fig, ax = plt.subplots()
pps = ax.bar('Label', 'Value', data=df, color=['tab:blue', 'tab:cyan', 'tab:orange', 'tab:red', 'tab:gray', 'tab:green', 'tab:purple', 'tab:pink', 'm', 'tab:olive','tab:brown', 'mediumseagreen','k','#c6fc03'])
if ylabel is not None:
plt.ylabel(ylabel)
if xlabel is not None:
plt.xlabel(xlabel)
plt.tick_params(
axis='x',
which='both',
bottom=False,
top=False,
labelbottom=False)
plt.title(title)
fontP = FontProperties()
fontP.set_size('small')
cmap = dict(zip(df['Label'].tolist(), ['tab:blue', 'tab:cyan', 'tab:orange', 'tab:red', 'tab:gray', 'tab:green', 'tab:purple', 'tab:pink', 'm', 'tab:olive','tab:brown', 'mediumseagreen','k','#c6fc03']))
patches = [Patch(color=v, label=k) for k, v in cmap.items()]
if not range:
ax.yaxis.set_major_formatter(mtick.PercentFormatter())
for p in pps:
height = p.get_height()
ax.text(x=p.get_x() + p.get_width() / 2, y=height+.10,
s="{0:.1f}%".format(height),
ha='center')
else:
plt.ylim([0,5])
for p in pps:
height = p.get_height()
ax.text(x=p.get_x() + p.get_width() / 2, y=height+.10,
s="{0:.1f}".format(height),
ha='center')
plt.legend(labels=df['Label'].tolist(), handles=patches, title=name, bbox_to_anchor=(1.05, 1), loc='upper left', prop=fontP)
plt.savefig(file_name, dpi=None, facecolor='w', edgecolor='w',
orientation='portrait', format=None,
transparent=False, bbox_inches='tight', pad_inches=0.1,
metadata=None)
plt.clf()
plt.close()
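# Illustrative call with hypothetical data, showing the expected inputs:
# category labels, matching percentage values, and an output image path.
if __name__ == '__main__':
    labels = ['Cats', 'Dogs', 'Birds']
    shares = [52.5, 30.0, 17.5]   # percentages; plotted with a percent axis
    make_bar(labels, shares, title='Pet ownership (example)', name='Species',
             file_name='example_bar.png', ylabel='Share of households')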
|
py | 7dfbedfb7f7b020fe3f709ab7782b6459842fdd1 | #
# This source file is part of the EdgeDB open source project.
#
# Copyright 2016-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import os
import uuid
import edgedb
from edb.testbase import http as tb
from edb.tools import test
class TestGraphQLFunctional(tb.GraphQLTestCase):
SCHEMA_DEFAULT = os.path.join(os.path.dirname(__file__), 'schemas',
'graphql.esdl')
SCHEMA_OTHER = os.path.join(os.path.dirname(__file__), 'schemas',
'graphql_other.esdl')
SETUP = os.path.join(os.path.dirname(__file__), 'schemas',
'graphql_setup.edgeql')
# GraphQL queries cannot run in a transaction
TRANSACTION_ISOLATION = False
def test_graphql_http_keepalive_01(self):
with self.http_con() as con:
for _ in range(3):
req1_data = {
'query': '''
{
Setting(order: {value: {dir: ASC}}) {
value
}
}
'''
}
data, headers, status = self.http_con_request(con, req1_data)
self.assertEqual(status, 200)
self.assertNotIn('connection', headers)
self.assertEqual(
headers.get('content-type'),
'application/json')
self.assertEqual(
json.loads(data)['data'],
{'Setting': [{'value': 'blue'}, {'value': 'full'},
{'value': 'none'}]})
req2_data = {
'query': '''
{
NON_EXISTING_TYPE {
name
}
}
'''
}
data, headers, status = self.http_con_request(con, req2_data)
self.assertEqual(status, 200)
self.assertNotIn('connection', headers)
self.assertEqual(
headers.get('content-type'),
'application/json')
self.assertIn(
'QueryError:',
json.loads(data)['errors'][0]['message'])
def test_graphql_http_errors_01(self):
with self.http_con() as con:
data, headers, status = self.http_con_request(
con, {}, path='non-existant')
self.assertEqual(status, 404)
self.assertEqual(headers['connection'], 'close')
self.assertIn(b'Unknown path', data)
with self.assertRaises(OSError):
self.http_con_request(con, {}, path='non-existant2')
def test_graphql_http_errors_02(self):
with self.http_con() as con:
data, headers, status = self.http_con_request(con, {})
self.assertEqual(status, 400)
self.assertEqual(headers['connection'], 'close')
self.assertIn(b'query is missing', data)
with self.assertRaises(OSError):
self.http_con_request(con, {}, path='non-existant')
def test_graphql_http_errors_03(self):
with self.http_con() as con:
data, headers, status = self.http_con_request(
con, {'query': 'blah', 'variables': 'bazz'})
self.assertEqual(status, 400)
self.assertEqual(headers['connection'], 'close')
self.assertIn(b'must be a JSON object', data)
with self.assertRaises(OSError):
self.http_con_request(con, {}, path='non-existant')
def test_graphql_http_errors_04(self):
with self.http_con() as con:
con.send(b'blah\r\n\r\n\r\n\r\n')
data, headers, status = self.http_con_request(
con, {'query': 'blah', 'variables': 'bazz'})
self.assertEqual(status, 400)
self.assertEqual(headers['connection'], 'close')
self.assertIn(b'HttpParserInvalidMethodError', data)
with self.assertRaises(OSError):
self.http_con_request(con, {}, path='non-existant')
def test_graphql_functional_query_01(self):
for _ in range(10): # repeat to test prepared pgcon statements
self.assert_graphql_query_result(r"""
query {
Setting {
name
value
}
}
""", {
'Setting': [{
'name': 'template',
'value': 'blue',
}, {
'name': 'perks',
'value': 'full',
}, {
'name': 'template',
'value': 'none',
}],
}, sort=lambda x: x['value'])
def test_graphql_functional_query_02(self):
self.assert_graphql_query_result(r"""
query {
User(order: {name: {dir: ASC}}) {
name
age
groups {
id
name
}
}
}
""", {
'User': [{
'name': 'Alice',
'age': 27,
'groups': []
}, {
'name': 'Bob',
'age': 21,
'groups': []
}, {
'name': 'Jane',
'age': 25,
'groups': [{
'id': uuid.UUID,
'name': 'upgraded',
}]
}, {
'name': 'John',
'age': 25,
'groups': [{
'id': uuid.UUID,
'name': 'basic',
}]
}],
})
def test_graphql_functional_query_03(self):
self.assert_graphql_query_result(r"""
query mixed {
User {
name
}
Setting {
name
}
}
""", {
'User': [{
'name': 'Alice',
}, {
'name': 'Bob',
}, {
'name': 'Jane',
}, {
'name': 'John',
}],
'Setting': [{
'name': 'perks',
}, {
'name': 'template',
}, {
'name': 'template',
}],
}, sort=lambda x: x['name'])
def test_graphql_functional_query_04(self):
self.assert_graphql_query_result(r"""
query {
User(filter: {name: {eq: "John"}}) {
name
age
groups {
id
name
}
}
}
""", {
'User': [{
'name': 'John',
'age': 25,
'groups': [{
'id': uuid.UUID,
'name': 'basic',
}]
}],
})
def test_graphql_functional_query_05(self):
with self.assertRaisesRegex(
edgedb.QueryError,
r"Cannot query field 'Bogus' on type 'Query'",
_line=3, _col=21):
self.graphql_query(r"""
query {
Bogus {
name,
groups {
id
name
}
}
}
""")
def test_graphql_functional_query_06(self):
with self.assertRaisesRegex(
edgedb.QueryError,
r"Cannot query field 'bogus' on type 'User'",
_line=5, _col=25):
self.graphql_query(r"""
query {
User {
name,
bogus,
groups {
id
name
}
}
}
""")
def test_graphql_functional_query_07(self):
with self.assertRaisesRegex(
edgedb.QueryError,
r"Cannot query field 'age' on type 'NamedObject'",
_line=5, _col=25):
self.graphql_query(r"""
query {
NamedObject {
name,
age,
groups {
id
name
}
}
}
""")
def test_graphql_functional_query_08(self):
self.assert_graphql_query_result(
r"""
query names {
Setting {
name
}
}
query values {
Setting {
value
}
}
""",
{
'Setting': [{
'name': 'perks',
}, {
'name': 'template',
}, {
'name': 'template',
}],
},
sort=lambda x: x['name'],
operation_name='names'
)
self.assert_graphql_query_result(
r"""
query names {
Setting {
name
}
}
query values {
Setting {
value
}
}
""",
{
'Setting': [{
'value': 'blue',
}, {
'value': 'full',
}, {
'value': 'none',
}],
},
sort=lambda x: x['value'],
operation_name='values',
use_http_post=False
)
def test_graphql_functional_query_09(self):
with self.assertRaisesRegex(edgedb.QueryError,
r'provide operation name'):
self.graphql_query('''
query names {
Setting {
name
}
}
query values {
Setting {
value
}
}
''')
def test_graphql_functional_query_10(self):
with self.assertRaisesRegex(edgedb.QueryError,
r'unknown operation named "foo"'):
self.graphql_query('''
query names {
Setting {
name
}
}
query values {
Setting {
value
}
}
''', operation_name='foo')
def test_graphql_functional_query_11(self):
# Test that parse errors marshal from the compiler correctly.
with self.assertRaisesRegex(edgedb.QueryError,
r"Expected Name, found '}'",
_line=4, _col=21):
self.graphql_query(r"""
query {
Setting {
}
}
""")
def test_graphql_functional_query_12(self):
# Regression test: variable names were shadowing query names.
self.assert_graphql_query_result(
r"""
query users($name: String, $age: Int64) {
User(filter: {or: [{name: {eq: $name}},
{age: {gt: $age}}]},
order: {name: {dir: ASC}})
{
name
age
}
}
query settings {
Setting {
name
}
}
""",
{
'User': [{
'name': 'Alice',
'age': 27
}],
},
variables={'age': 25, 'name': 'Alice'},
operation_name='users'
)
def test_graphql_functional_query_13(self):
# Test special case errors.
with self.assertRaisesRegex(
edgedb.QueryError,
r"Cannot query field 'gibberish' on type 'Query'\. "
r"There's no corresponding type or alias \"gibberish\" "
r"exposed in EdgeDB\. Please check the configuration settings "
r"for this port to make sure that you're connecting to the "
r"right database\.",
_line=3, _col=21):
self.graphql_query(r"""
query {
gibberish
}
""")
def test_graphql_functional_query_14(self):
# Test special case errors.
with self.assertRaisesRegex(
edgedb.QueryError,
r"Cannot query field 'more__gibberish' on type 'Query'\. "
r"There's no corresponding type or alias \"more::gibberish\" "
r"exposed in EdgeDB\. Please check the configuration settings "
r"for this port to make sure that you're connecting to the "
r"right database\.",
_line=3, _col=21):
self.graphql_query(r"""
query {
more__gibberish
}
""")
def test_graphql_functional_query_15(self):
# Test special case errors.
with self.assertRaisesRegex(
edgedb.QueryError,
r"Cannot query field 'Uxer' on type 'Query'\. "
r"Did you mean 'User'\?",
_line=3, _col=21):
self.graphql_query(r"""
query {
Uxer
}
""")
def test_graphql_functional_query_16(self):
# test filtering by nested object
self.assert_graphql_query_result(r"""
query {
User(filter: {groups: {name: {eq: "basic"}}}) {
name
age
groups {
id
name
}
}
}
""", {
'User': [{
'name': 'John',
'age': 25,
'groups': [{
'id': uuid.UUID,
'name': 'basic',
}]
}],
})
def test_graphql_functional_query_17(self):
# Test unused & null variables
self.assert_graphql_query_result(
r"""
query Person {
Person
{
name
}
}
""",
{
'Person': [{
'name': 'Bob',
}],
},
variables={'name': None},
)
self.assert_graphql_query_result(
r"""
query Person($name: String) {
Person(filter: {name: {eq: $name}})
{
name
}
}
""",
{
'Person': [],
},
variables={'name': None},
)
def test_graphql_functional_query_18(self):
# test filtering by nested object
self.assert_graphql_query_result(r"""
query {
User(filter: {name: {eq: "Alice"}}) {
name
favorites(order: {name: {dir: ASC}}) {
name
}
}
}
""", {
'User': [{
'name': 'Alice',
'favorites': [
{'name': 'basic'},
{'name': 'perks'},
{'name': 'template'},
{'name': 'template'},
{'name': 'unused'},
{'name': 'upgraded'},
]
}],
})
def test_graphql_functional_query_19(self):
# Test built-in object types, by making sure we can query them
# and get some results.
res = self.graphql_query(r"""
{
Object {id}
}
""")
self.assertTrue(len(res) > 0,
'querying "Object" returned no results')
def test_graphql_functional_query_20(self):
# Test built-in object types, by making sure we can query them
# and get some results.
res = self.graphql_query(r"""
{
BaseObject {id}
}
""")
self.assertTrue(len(res) > 0,
'querying "BaseObject" returned no results')
def test_graphql_functional_alias_01(self):
self.assert_graphql_query_result(
r"""
{
SettingAlias {
__typename
name
value
}
Setting {
__typename
name
value
}
}
""",
{
"SettingAlias": [
{
"__typename": "SettingAlias",
"name": "template",
"value": "blue",
},
{
"__typename": "SettingAlias",
"name": "perks",
"value": "full",
},
{
"__typename": "SettingAlias",
"name": "template",
"value": "none",
},
],
"Setting": [
{
"__typename": "Setting_Type",
"name": "template",
"value": "blue",
},
{
"__typename": "Setting_Type",
"name": "perks",
"value": "full",
},
{
"__typename": "Setting_Type",
"name": "template",
"value": "none",
},
],
},
sort=lambda x: x['value']
)
def test_graphql_functional_alias_02(self):
self.assert_graphql_query_result(
r"""
{
SettingAlias {
__typename
name
value
of_group {
__typename
name
}
}
}
""",
{
"SettingAlias": [
{
"__typename": "SettingAlias",
"name": "template",
"value": "blue",
"of_group": {
"__typename": "UserGroup_Type",
"name": "upgraded",
}
},
{
"__typename": "SettingAlias",
"name": "perks",
"value": "full",
"of_group": {
"__typename": "UserGroup_Type",
"name": "upgraded",
}
},
{
"__typename": "SettingAlias",
"name": "template",
"value": "none",
"of_group": {
"__typename": "UserGroup_Type",
"name": "unused",
}
},
],
},
sort=lambda x: x['value']
)
def test_graphql_functional_alias_03(self):
self.assert_graphql_query_result(
r"""
{
SettingAliasAugmented {
__typename
name
value
of_group {
__typename
name
name_upper
}
}
}
""",
{
"SettingAliasAugmented": [
{
"__typename": "SettingAliasAugmented",
"name": "template",
"value": "blue",
"of_group": {
"__typename":
"__SettingAliasAugmented__of_group",
"name": "upgraded",
"name_upper": "UPGRADED",
}
},
{
"__typename": "SettingAliasAugmented",
"name": "perks",
"value": "full",
"of_group": {
"__typename":
"__SettingAliasAugmented__of_group",
"name": "upgraded",
"name_upper": "UPGRADED",
}
},
{
"__typename": "SettingAliasAugmented",
"name": "template",
"value": "none",
"of_group": {
"__typename":
"__SettingAliasAugmented__of_group",
"name": "unused",
"name_upper": "UNUSED",
}
},
],
},
sort=lambda x: x['value']
)
def test_graphql_functional_alias_04(self):
self.assert_graphql_query_result(
r"""
{
ProfileAlias {
__typename
name
value
owner {
__typename
id
}
}
}
""",
{
"ProfileAlias": [
{
"__typename": "ProfileAlias",
"name": "Alice profile",
"value": "special",
"owner": [
{
"__typename": "User_Type",
"id": uuid.UUID,
}
]
},
{
"__typename": "ProfileAlias",
"name": "Bob profile",
"value": "special",
"owner": [
{
"__typename": "Person_Type",
"id": uuid.UUID,
}
]
}
]
},
)
result = self.graphql_query(r"""
query {
ProfileAlias {
owner {
id
}
}
}
""")
user_id = result['ProfileAlias'][0]['owner'][0]['id']
self.assert_graphql_query_result(f"""
query {{
User(filter: {{id: {{eq: "{user_id}"}}}}) {{
name
}}
}}
""", {
'User': [{'name': 'Alice'}]
})
def test_graphql_functional_alias_05(self):
self.assert_graphql_query_result(
r"""
{
SettingAliasAugmented(
filter: {of_group: {name_upper: {eq: "UPGRADED"}}}
) {
name
of_group {
name
name_upper
}
}
}
""",
{
"SettingAliasAugmented": [
{
"name": "perks",
"of_group": {
"name": "upgraded",
"name_upper": "UPGRADED",
}
},
{
"name": "template",
"of_group": {
"name": "upgraded",
"name_upper": "UPGRADED",
}
},
],
},
sort=lambda x: x['name']
)
def test_graphql_functional_alias_06(self):
self.assert_graphql_query_result(
r"""
{
SettingAliasAugmented(
filter: {name: {eq: "perks"}}
) {
name
of_group(
filter: {name_upper: {gt: "U"}}
) {
name
name_upper
}
}
}
""",
{
"SettingAliasAugmented": [
{
"name": "perks",
"of_group": {
"name": "upgraded",
"name_upper": "UPGRADED",
}
},
],
},
)
def test_graphql_functional_arguments_01(self):
result = self.graphql_query(r"""
query {
User {
id
name
age
}
}
""")
alice = [res for res in result['User']
if res['name'] == 'Alice'][0]
self.assert_graphql_query_result(f"""
query {{
User(filter: {{id: {{eq: "{alice['id']}"}}}}) {{
id
name
age
}}
}}
""", {
'User': [alice]
})
def test_graphql_functional_arguments_02(self):
self.assert_graphql_query_result(r"""
query {
User(filter: {
name: {eq: "Bob"},
active: {eq: true},
age: {eq: 21}
}) {
name
age
groups {
id
name
}
}
}
""", {
'User': [{
'name': 'Bob',
'age': 21,
'groups': [],
}],
})
def test_graphql_functional_arguments_03(self):
self.assert_graphql_query_result(r"""
query {
User(filter: {
and: [{name: {eq: "Bob"}}, {active: {eq: true}}],
age: {eq: 21}
}) {
name
score
}
}
""", {
'User': [{
'name': 'Bob',
'score': 4.2,
}],
})
def test_graphql_functional_arguments_04(self):
self.assert_graphql_query_result(r"""
query {
User(filter: {
not: {name: {eq: "Bob"}},
age: {eq: 21}
}) {
name
score
}
}
""", {
'User': [],
})
def test_graphql_functional_arguments_05(self):
self.assert_graphql_query_result(r"""
query {
User(
filter: {
or: [
{not: {name: {eq: "Bob"}}},
{age: {eq: 20}}
]
},
order: {name: {dir: ASC}}
) {
name
score
}
}
""", {
'User': [
{'name': 'Alice', 'score': 5},
{'name': 'Jane', 'score': 1.23},
{'name': 'John', 'score': 3.14},
],
})
def test_graphql_functional_arguments_06(self):
self.assert_graphql_query_result(r"""
query {
User(
filter: {
or: [
{name: {neq: "Bob"}},
{age: {eq: 20}}
]
},
order: {name: {dir: ASC}}
) {
name
score
}
}
""", {
'User': [
{'name': 'Alice', 'score': 5},
{'name': 'Jane', 'score': 1.23},
{'name': 'John', 'score': 3.14},
],
})
def test_graphql_functional_arguments_07(self):
self.assert_graphql_query_result(r"""
query {
User(filter: {
name: {ilike: "%o%"},
age: {gt: 22}
}) {
name
age
}
}
""", {
'User': [
{'name': 'John', 'age': 25},
],
}, sort=lambda x: x['name'])
def test_graphql_functional_arguments_08(self):
self.assert_graphql_query_result(r"""
query {
User(filter: {
name: {like: "J%"},
score: {
gte: 3
lt: 4.5
}
}) {
name
score
}
}
""", {
'User': [
{'name': 'John', 'score': 3.14},
],
}, sort=lambda x: x['name'])
def test_graphql_functional_arguments_09(self):
self.assert_graphql_query_result(r"""
query {
User(filter: {
name: {ilike: "%e"},
age: {lte: 25}
}) {
name
age
}
}
""", {
'User': [
{'name': 'Jane', 'age': 25},
],
}, sort=lambda x: x['name'])
def test_graphql_functional_arguments_10(self):
self.assert_graphql_query_result(r"""
query {
User(
order: {
age: {dir: DESC}
name: {dir: ASC}
}
) {
name
age
}
}
""", {
'User': [
{'age': 27, 'name': 'Alice'},
{'age': 25, 'name': 'Jane'},
{'age': 25, 'name': 'John'},
{'age': 21, 'name': 'Bob'},
],
})
def test_graphql_functional_arguments_11(self):
self.assert_graphql_query_result(r"""
query {
User(
order: {
name: {dir: ASC}
age: {dir: DESC}
}
) {
name
age
}
}
""", {
'User': [
{'age': 27, 'name': 'Alice'},
{'age': 21, 'name': 'Bob'},
{'age': 25, 'name': 'Jane'},
{'age': 25, 'name': 'John'},
],
})
def test_graphql_functional_arguments_12(self):
self.assert_graphql_query_result(r"""
query {
other__Foo(
order: {
select: {dir: ASC, nulls: BIGGEST}
}
) {
after
select
}
}
""", {
'other__Foo': [
{'after': None, 'select': 'a'},
{'after': 'w', 'select': 'b'},
{'after': 'q', 'select': None},
],
})
def test_graphql_functional_arguments_13(self):
self.assert_graphql_query_result(r"""
query {
other__Foo(
order: {
select: {dir: DESC, nulls: SMALLEST}
}
) {
after
select
}
}
""", {
'other__Foo': [
{'after': 'w', 'select': 'b'},
{'after': None, 'select': 'a'},
{'after': 'q', 'select': None},
],
})
def test_graphql_functional_arguments_14(self):
self.assert_graphql_query_result(r"""
query {
User(
order: {name: {dir: ASC}},
first: 2
) {
name
age
}
}
""", {
'User': [
{'age': 27, 'name': 'Alice'},
{'age': 21, 'name': 'Bob'},
],
})
def test_graphql_functional_arguments_15(self):
self.assert_graphql_query_result(r"""
query {
u0: User(
order: {name: {dir: ASC}},
after: "0",
first: 2
) {
name
}
u1: User(
order: {name: {dir: ASC}},
first: 2
) {
name
}
u2: User(
order: {name: {dir: ASC}},
after: "0",
before: "2"
) {
name
}
u3: User(
order: {name: {dir: ASC}},
before: "2",
last: 1
) {
name
}
}
""", {
'u0': [
{'name': 'Bob'},
{'name': 'Jane'},
],
'u1': [
{'name': 'Alice'},
{'name': 'Bob'},
],
'u2': [
{'name': 'Bob'},
],
'u3': [
{'name': 'Bob'},
],
})
@test.xfail('''
'last' is not fully implemented in all cases and ideally
requires negative OFFSET to be implemented
''')
def test_graphql_functional_arguments_16(self):
self.assert_graphql_query_result(r"""
query {
u4: User(
order: {name: {dir: ASC}},
after: "2",
last: 2
) {
name
}
u5: User(
order: {name: {dir: ASC}},
after: "0",
last: 2
) {
name
}
u6: User(
order: {name: {dir: ASC}},
after: "0",
before: "3",
first: 2,
last: 1
) {
name
}
}
""", {
'u4': [
{'name': 'John'},
],
'u5': [
{'name': 'Jane'},
{'name': 'John'},
],
'u6': [
{'name': 'Jane'},
],
})
def test_graphql_functional_arguments_17(self):
self.assert_graphql_query_result(r"""
query {
User(filter: {name: {eq: "Jane"}}) {
name
groups {
name
settings(
order: {name: {dir: ASC}},
first: 1
) {
name
}
}
}
}
""", {
'User': [{
'name': 'Jane',
'groups': [{
'name': 'upgraded',
'settings': [{
'name': 'perks'
}]
}]
}]
})
def test_graphql_functional_arguments_18(self):
with self.assertRaisesRegex(
edgedb.QueryError,
r'Expected type String, found 42',
_line=3, _col=46):
self.graphql_query(r"""
query {
User(filter: {name: {eq: 42}}) {
id,
}
}
""")
def test_graphql_functional_arguments_19(self):
with self.assertRaisesRegex(
edgedb.QueryError,
r'Expected type String, found 20\.5',
_line=3, _col=46):
self.graphql_query(r"""
query {
User(filter: {name: {eq: 20.5}}) {
id,
}
}
""")
def test_graphql_functional_arguments_20(self):
with self.assertRaisesRegex(
edgedb.QueryError,
r'Expected type Float, found "3\.5"',
_line=3, _col=47):
self.graphql_query(r"""
query {
User(filter: {score: {eq: "3.5"}}) {
id,
}
}
""")
def test_graphql_functional_arguments_21(self):
with self.assertRaisesRegex(
edgedb.QueryError,
r'Expected type Boolean, found 0',
_line=3, _col=48):
self.graphql_query(r"""
query {
User(filter: {active: {eq: 0}}) {
id,
}
}
""")
def test_graphql_functional_arguments_22(self):
with self.assertRaisesRegex(
edgedb.QueryError,
# this error message is subpar, but this is what we get
# from postgres, because we transfer bigint values to postgres
# as strings
r'invalid input syntax for type bigint: "aaaaa"',
# _line=5, _col=32,
):
self.graphql_query(r"""
query {
u0: User(
order: {name: {dir: ASC}},
after: "aaaaa",
first: 2
) {
name
}
}
""")
def test_graphql_functional_arguments_23(self):
self.assert_graphql_query_result(r"""
query {
User(
order: {name: {dir: ASC}},
first: 1
) {
name
}
}
""", {
'User': [{
'name': 'Alice',
}]
})
def test_graphql_functional_enums_01(self):
self.assert_graphql_query_result(r"""
query {
other__Foo(
order: {color: {dir: DESC}},
first: 1
) {
select
color
}
}
""", {
'other__Foo': [{
'select': None,
'color': "BLUE",
}]
})
def test_graphql_functional_enums_02(self):
self.assert_graphql_query_result(r"""
query {
other__Foo(
order: {color: {dir: ASC}},
after: "0"
) {
select
color
}
}
""", {
"other__Foo": [{
"select": "b",
"color": "GREEN",
}, {
"select": None,
"color": "BLUE",
}]
})
def test_graphql_functional_enums_03(self):
self.assert_graphql_query_result(r"""
query {
other__Foo(
filter: {color: {eq: RED}},
) {
select
color
}
}
""", {
"other__Foo": [{
"select": "a",
"color": "RED",
}]
})
def test_graphql_functional_enums_04(self):
with self.assertRaisesRegex(
edgedb.QueryError,
r'String cannot represent a non string value: admin',
_line=4, _col=51):
self.graphql_query(r"""
query {
# enum supplied instead of a string
UserGroup(filter: {name: {eq: admin}}) {
id,
name,
}
}
""")
def test_graphql_functional_fragment_01(self):
self.assert_graphql_query_result(r"""
fragment groupFrag on UserGroup {
id
name
}
query {
User(filter: {name: {eq: "Jane"}}) {
name,
groups {
... groupFrag
}
}
}
""", {
'User': [{
'name': 'Jane',
'groups': [{
'id': uuid.UUID,
'name': 'upgraded',
}]
}],
})
def test_graphql_functional_fragment_02(self):
self.assert_graphql_query_result(r"""
fragment userFrag1 on User {
name
... userFrag2
}
fragment userFrag2 on User {
groups {
... groupFrag
}
}
fragment groupFrag on UserGroup {
id
name
}
query {
User(filter: {name: {eq: "Jane"}}) {
... userFrag1
}
}
""", {
'User': [{
'name': 'Jane',
'groups': [{
'id': uuid.UUID,
'name': 'upgraded',
}]
}],
})
def test_graphql_functional_fragment_03(self):
self.assert_graphql_query_result(r"""
fragment userFrag2 on User {
groups {
... groupFrag
}
}
fragment groupFrag on UserGroup {
id
name
}
query {
User(filter: {name: {eq: "Jane"}}) {
... on User {
name
... userFrag2
}
}
}
""", {
'User': [{
'name': 'Jane',
'groups': [{
'id': uuid.UUID,
'name': 'upgraded',
}]
}],
})
def test_graphql_functional_fragment_04(self):
self.assert_graphql_query_result(r"""
fragment userFrag1 on User {
name
... {
groups {
... groupFrag
}
}
}
fragment groupFrag on UserGroup {
id
name
}
query {
User(filter: {name: {eq: "Jane"}}) {
... userFrag1
}
}
""", {
'User': [{
'name': 'Jane',
'groups': [{
'id': uuid.UUID,
'name': 'upgraded',
}]
}],
})
def test_graphql_functional_fragment_type_01(self):
self.assert_graphql_query_result(r"""
fragment userFrag on User {
id,
name,
}
query {
User(filter: {name: {eq: "Alice"}}) {
... userFrag
}
}
""", {
'User': [{
'id': uuid.UUID,
'name': 'Alice',
}],
})
def test_graphql_functional_fragment_type_02(self):
self.assert_graphql_query_result(r"""
fragment namedFrag on NamedObject {
id,
name,
}
query {
User(filter: {name: {eq: "Alice"}}) {
... namedFrag
}
}
""", {
'User': [{
'id': uuid.UUID,
'name': 'Alice',
}],
})
def test_graphql_functional_fragment_type_03(self):
self.assert_graphql_query_result(r"""
fragment namedFrag on NamedObject {
id,
name,
}
fragment userFrag on User {
... namedFrag
age
}
query {
User(filter: {name: {eq: "Alice"}}) {
... userFrag
}
}
""", {
'User': [{
'id': uuid.UUID,
'name': 'Alice',
'age': 27,
}],
})
def test_graphql_functional_fragment_type_04(self):
with self.assertRaisesRegex(
edgedb.QueryError,
r"Fragment 'userFrag' cannot be spread here "
r"as objects of type 'UserGroup' can never be of type 'User'.",
_line=9, _col=25):
self.graphql_query(r"""
fragment userFrag on User {
id,
name,
}
query {
UserGroup {
... userFrag
}
}
""")
def test_graphql_functional_fragment_type_05(self):
with self.assertRaisesRegex(
edgedb.QueryError,
r"Fragment 'userFrag' cannot be spread here "
r"as objects of type 'UserGroup' can never be of type 'User'.",
_line=8, _col=21):
self.graphql_query(r"""
fragment userFrag on User {
id,
name,
}
fragment groupFrag on UserGroup {
... userFrag
}
query {
User {
... userFrag
groups {
... groupFrag
}
}
}
""")
def test_graphql_functional_fragment_type_06(self):
self.assert_graphql_query_result(r"""
fragment userFrag on User {
age
score
}
query {
NamedObject {
name
... userFrag
}
}
""", {
"NamedObject": [
{"age": None, "name": "1st", "score": None},
{"age": None, "name": "2nd", "score": None},
{"age": None, "name": "3rd", "score": None},
{"age": None, "name": "4th", "score": None},
{"age": 27, "name": "Alice", "score": 5},
{"age": None, "name": "Alice profile", "score": None},
{"age": 21, "name": "Bob", "score": 4.2},
{"age": None, "name": "Bob profile", "score": None},
{"age": 25, "name": "Jane", "score": 1.23},
{"age": 25, "name": "John", "score": 3.14},
{"age": None, "name": "basic", "score": None},
{"age": None, "name": "perks", "score": None},
{"age": None, "name": "template", "score": None},
{"age": None, "name": "template", "score": None},
{"age": None, "name": "unused", "score": None},
{"age": None, "name": "upgraded", "score": None},
]
}, sort=lambda x: x['name'])
def test_graphql_functional_fragment_type_07(self):
self.assert_graphql_query_result(r"""
fragment frag on NamedObject {
id,
name,
}
query {
NamedObject {
... frag
}
}
""", {
"NamedObject": [
{"id": uuid.UUID, "name": "1st"},
{"id": uuid.UUID, "name": "2nd"},
{"id": uuid.UUID, "name": "3rd"},
{"id": uuid.UUID, "name": "4th"},
{"id": uuid.UUID, "name": "Alice"},
{"id": uuid.UUID, "name": "Alice profile"},
{"id": uuid.UUID, "name": "Bob"},
{"id": uuid.UUID, "name": "Bob profile"},
{"id": uuid.UUID, "name": "Jane"},
{"id": uuid.UUID, "name": "John"},
{"id": uuid.UUID, "name": "basic"},
{"id": uuid.UUID, "name": "perks"},
{"id": uuid.UUID, "name": "template"},
{"id": uuid.UUID, "name": "template"},
{"id": uuid.UUID, "name": "unused"},
{"id": uuid.UUID, "name": "upgraded"},
]
}, sort=lambda x: x['name'])
def test_graphql_functional_fragment_type_08(self):
with self.assertRaisesRegex(
edgedb.QueryError,
"Cannot query field 'age' on type 'NamedObject'",
_line=5, _col=21):
self.graphql_query(r"""
fragment frag on NamedObject {
id,
name,
age,
}
query {
User {
... frag
}
}
""")
def test_graphql_functional_fragment_type_09(self):
with self.assertRaisesRegex(
edgedb.QueryError,
"Cannot query field 'age' on type 'NamedObject'",
_line=7, _col=29):
self.graphql_query(r"""
query {
User {
... on NamedObject {
id,
name,
age,
}
}
}
""")
def test_graphql_functional_fragment_type_10(self):
self.assert_graphql_query_result(r"""
fragment namedFrag on NamedObject {
id,
name,
... userFrag
}
fragment userFrag on User {
age
}
query {
NamedObject {
... namedFrag
}
}
""", {
"NamedObject": [
{"id": uuid.UUID, "name": "1st", "age": None},
{"id": uuid.UUID, "name": "2nd", "age": None},
{"id": uuid.UUID, "name": "3rd", "age": None},
{"id": uuid.UUID, "name": "4th", "age": None},
{"id": uuid.UUID, "name": "Alice", "age": 27},
{"id": uuid.UUID, "name": "Alice profile", "age": None},
{"id": uuid.UUID, "name": "Bob", "age": 21},
{"id": uuid.UUID, "name": "Bob profile", "age": None},
{"id": uuid.UUID, "name": "Jane", "age": 25},
{"id": uuid.UUID, "name": "John", "age": 25},
{"id": uuid.UUID, "name": "basic", "age": None},
{"id": uuid.UUID, "name": "perks", "age": None},
{"id": uuid.UUID, "name": "template", "age": None},
{"id": uuid.UUID, "name": "template", "age": None},
{"id": uuid.UUID, "name": "unused", "age": None},
{"id": uuid.UUID, "name": "upgraded", "age": None},
]
}, sort=lambda x: x['name'])
def test_graphql_functional_fragment_type_11(self):
self.assert_graphql_query_result(r"""
fragment namedFrag on NamedObject {
id,
name,
... userFrag
}
fragment userFrag on User {
age
}
query {
User {
... namedFrag
}
}
""", {
"User": [
{"id": uuid.UUID, "name": "Alice", "age": 27},
{"id": uuid.UUID, "name": "Bob", "age": 21},
{"id": uuid.UUID, "name": "Jane", "age": 25},
{"id": uuid.UUID, "name": "John", "age": 25},
]
}, sort=lambda x: x['name'])
def test_graphql_functional_fragment_type_12(self):
self.assert_graphql_query_result(r"""
query {
NamedObject(order: {name: {dir: ASC}}) {
... on User {
age
}
}
}
""", {
"NamedObject": [
{"age": None},
{"age": None},
{"age": None},
{"age": None},
{"age": 27},
{"age": None},
{"age": 21},
{"age": None},
{"age": 25},
{"age": 25},
{"age": None},
{"age": None},
{"age": None},
{"age": None},
{"age": None},
{"age": None},
]
})
def test_graphql_functional_fragment_type_13(self):
# ISSUE #1800
#
# After using a typed inline fragment the nested fields or
# fields following after the fragment are erroneously using
# the type intersection.
self.assert_graphql_query_result(r"""
query {
NamedObject(filter: {name: {eq: "Alice"}}) {
... on User {
active
profile {
value
}
}
name
}
}
""", {
'NamedObject': [{
'name': 'Alice',
'active': True,
'profile': {
'value': 'special',
}
}],
})
def test_graphql_functional_fragment_type_14(self):
# ISSUE #1800
#
# After using a typed inline fragment the nested fields or
# fields following after the fragment are erroneously using
# the type intersection.
self.assert_graphql_query_result(r"""
fragment userFrag on User {
active
profile {
value
}
}
query {
NamedObject(filter: {name: {eq: "Alice"}}) {
... userFrag
name
}
}
""", {
'NamedObject': [{
'name': 'Alice',
'active': True,
'profile': {
'value': 'special',
}
}],
})
def test_graphql_functional_fragment_type_15(self):
# ISSUE #1800
#
# After using a typed inline fragment the nested fields or
# fields following after the fragment are erroneously using
# the type intersection.
self.assert_graphql_query_result(r"""
query {
NamedObject(filter: {name: {eq: "Alice"}}) {
... on User {
active
profile(filter: {name: {eq: "Alice profile"}}) {
value
}
}
name
}
}
""", {
'NamedObject': [{
'name': 'Alice',
'active': True,
'profile': {
'value': 'special',
}
}],
})
self.assert_graphql_query_result(r"""
query {
NamedObject(filter: {name: {eq: "Alice"}}) {
... on User {
active
profile(filter: {name: {eq: "no such profile"}}) {
value
}
}
name
}
}
""", {
'NamedObject': [{
'name': 'Alice',
'active': True,
'profile': None,
}],
})
def test_graphql_functional_fragment_type_16(self):
# ISSUE #1800
#
# After using a typed inline fragment the nested fields or
# fields following after the fragment are erroneously using
# the type intersection.
self.assert_graphql_query_result(r"""
fragment userFrag on User {
active
profile(filter: {name: {eq: "Alice profile"}}) {
value
}
}
query {
NamedObject(filter: {name: {eq: "Alice"}}) {
... userFrag
name
}
}
""", {
'NamedObject': [{
'name': 'Alice',
'active': True,
'profile': {
'value': 'special',
}
}],
})
self.assert_graphql_query_result(r"""
fragment userFrag on User {
active
profile(filter: {name: {eq: "no such profile"}}) {
value
}
}
query {
NamedObject(filter: {name: {eq: "Alice"}}) {
... userFrag
name
}
}
""", {
'NamedObject': [{
'name': 'Alice',
'active': True,
'profile': None,
}],
})
def test_graphql_functional_fragment_type_17(self):
# ISSUE #1800
#
# After using a typed inline fragment the nested fields or
# fields following after the fragment are erroneously using
# the type intersection.
self.assert_graphql_query_result(r"""
query {
NamedObject(filter: {name: {eq: "Alice"}}) {
... on User {
... {
active
profile {
value
}
}
}
name
}
}
""", {
'NamedObject': [{
'name': 'Alice',
'active': True,
'profile': {
'value': 'special',
}
}],
})
def test_graphql_functional_directives_01(self):
self.assert_graphql_query_result(r"""
query {
User(order: {name: {dir: ASC}}) {
name @include(if: true),
groups @include(if: false) {
id
name
}
}
}
""", {
"User": [
{"name": "Alice"},
{"name": "Bob"},
{"name": "Jane"},
{"name": "John"},
]
})
def test_graphql_functional_directives_02(self):
self.assert_graphql_query_result(r"""
query {
User(order: {name: {dir: ASC}}) {
name @skip(if: true),
groups @skip(if: false) {
id @skip(if: true)
name @skip(if: false)
}
}
}
""", {
"User": [
{"groups": []},
{"groups": []},
{"groups": [{"name": "upgraded"}]},
{"groups": [{"name": "basic"}]},
]
})
def test_graphql_functional_directives_03(self):
self.assert_graphql_query_result(r"""
query {
User(order: {name: {dir: ASC}}) {
name @skip(if: true), @include(if: true),
groups @skip(if: false), @include(if: true) {
id @skip(if: true), @include(if: false)
name @skip(if: false), @include(if: true)
}
}
}
""", {
"User": [
{"groups": []},
{"groups": []},
{"groups": [{"name": "upgraded"}]},
{"groups": [{"name": "basic"}]},
]
})
def test_graphql_functional_directives_04(self):
self.assert_graphql_query_result(r"""
fragment userFrag1 on User {
name
... {
groups @include(if: false) {
... groupFrag
}
}
}
fragment groupFrag on UserGroup {
id
name
}
query {
User(order: {name: {dir: ASC}}) {
... userFrag1
}
}
""", {
"User": [
{"name": "Alice"},
{"name": "Bob"},
{"name": "Jane"},
{"name": "John"},
]
})
def test_graphql_functional_directives_05(self):
self.assert_graphql_query_result(r"""
fragment userFrag1 on User {
name
... @skip(if: true) {
groups {
... groupFrag
}
}
}
fragment groupFrag on UserGroup {
id
name
}
query {
User(order: {name: {dir: ASC}}) {
... userFrag1
}
}
""", {
"User": [
{"name": "Alice"},
{"name": "Bob"},
{"name": "Jane"},
{"name": "John"},
]
})
def test_graphql_functional_directives_06(self):
self.assert_graphql_query_result(r"""
fragment userFrag1 on User {
name
... {
groups {
... groupFrag @skip(if: true)
name
}
}
}
fragment groupFrag on UserGroup {
id
}
query {
User(order: {name: {dir: ASC}}) {
... userFrag1
}
}
""", {
"User": [
{"name": "Alice", "groups": []},
{"name": "Bob", "groups": []},
{"name": "Jane", "groups": [{"name": "upgraded"}]},
{"name": "John", "groups": [{"name": "basic"}]},
]
})
def test_graphql_functional_directives_07(self):
with self.assertRaisesRegex(
edgedb.QueryError,
'Expected type Boolean!, found "true".',
_line=4, _col=43):
self.graphql_query(r"""
query {
User {
name @include(if: "true"),
id
}
}
""")
def test_graphql_functional_typename_01(self):
self.assert_graphql_query_result(r"""
query {
User {
name
__typename
groups {
id
name
__typename
}
}
}
""", {
'User': [{
'name': 'Alice',
'__typename': 'User_Type',
'groups': []
}, {
'name': 'Bob',
'__typename': 'Person_Type',
'groups': []
}, {
'name': 'Jane',
'__typename': 'User_Type',
'groups': [{
'id': uuid.UUID,
'name': 'upgraded',
'__typename': 'UserGroup_Type',
}]
}, {
'name': 'John',
'__typename': 'User_Type',
'groups': [{
'id': uuid.UUID,
'name': 'basic',
'__typename': 'UserGroup_Type',
}]
}],
}, sort=lambda x: x['name'])
def test_graphql_functional_typename_02(self):
self.assert_graphql_query_result(r"""
query {
__typename
__schema {
__typename
}
}
""", {
'__typename': 'Query',
'__schema': {
'__typename': '__Schema',
},
})
def test_graphql_functional_typename_03(self):
self.assert_graphql_query_result(r"""
query {
foo: __typename
User(order: {name: {dir: ASC}}) {
name
bar: __typename
}
}
""", {
"foo": "Query",
"User": [
{"bar": "User_Type", "name": "Alice"},
{"bar": "Person_Type", "name": "Bob"},
{"bar": "User_Type", "name": "Jane"},
{"bar": "User_Type", "name": "John"},
]
})
def test_graphql_functional_scalars_01(self):
self.assert_graphql_query_result(r"""
query {
ScalarTest {
p_bool
p_str
p_datetime
p_local_datetime
p_local_date
p_local_time
p_duration
p_int16
p_int32
p_int64
p_bigint
p_float32
p_float64
p_decimal
}
}
""", {
"ScalarTest": [{
'p_bool': True,
'p_str': 'Hello',
'p_datetime': '2018-05-07T20:01:22.306916+00:00',
'p_local_datetime': '2018-05-07T20:01:22.306916',
'p_local_date': '2018-05-07',
'p_local_time': '20:01:22.306916',
'p_duration': 'PT20H',
'p_int16': 12345,
'p_int32': 1234567890,
'p_int64': 1234567890123,
'p_bigint': 123456789123456789123456789,
'p_float32': 2.5,
'p_float64': 2.5,
'p_decimal':
123456789123456789123456789.123456789123456789123456789,
}]
})
def test_graphql_functional_scalars_02(self):
# JSON is special since it has to be serialized into its
# string representation
self.assert_graphql_query_result(r"""
query {
ScalarTest {
p_json
}
}
""", {
"ScalarTest": [{
'p_json': '{"foo": [1, null, "bar"]}',
}]
})
def test_graphql_functional_scalars_03(self):
with self.assertRaisesRegex(
edgedb.QueryError,
r"Cannot query field 'p_bytes' on type 'ScalarTest'",
_line=4, _col=25):
self.graphql_query(r"""
query {
ScalarTest {
p_bytes
}
}
""")
def test_graphql_functional_scalars_04(self):
with self.assertRaisesRegex(
edgedb.QueryError,
r"Cannot query field 'p_array_json' on type 'ScalarTest'",
_line=4, _col=25):
self.graphql_query(r"""
query {
ScalarTest {
p_array_json
}
}
""")
def test_graphql_functional_scalars_05(self):
with self.assertRaisesRegex(
edgedb.QueryError,
r"Cannot query field 'p_array_bytes' on type 'ScalarTest'",
_line=4, _col=25):
self.graphql_query(r"""
query {
ScalarTest {
p_array_bytes
}
}
""")
def test_graphql_functional_scalars_06(self):
        # A custom scalar type (p_posint here) is queried just like its
        # underlying base scalar.
self.assert_graphql_query_result(r"""
query {
ScalarTest {
p_posint
}
}
""", {
"ScalarTest": [{
'p_posint': 42,
}]
})
def test_graphql_functional_scalars_07(self):
self.assert_graphql_query_result(r"""
query {
ScalarTest {
p_array_str
}
}
""", {
"ScalarTest": [{
'p_array_str': ['hello', 'world'],
}]
})
def test_graphql_functional_duplicates_01(self):
self.assert_graphql_query_result(r"""
query {
User(order: {name: {dir: ASC}}) {
name
name
name
age
}
}
""", {
'User': [
{"age": 27, "name": "Alice"},
{"age": 21, "name": "Bob"},
{"age": 25, "name": "Jane"},
{"age": 25, "name": "John"},
]
})
def test_graphql_functional_duplicates_02(self):
self.assert_graphql_query_result(r"""
query {
User(order: {name: {dir: ASC}}) {
name @include(if: true)
age
name @include(if: true)
}
}
""", {
'User': [
{"age": 27, "name": "Alice"},
{"age": 21, "name": "Bob"},
{"age": 25, "name": "Jane"},
{"age": 25, "name": "John"},
]
})
def test_graphql_functional_duplicates_03(self):
self.assert_graphql_query_result(r"""
query {
User(order: {name: {dir: ASC}}) {
... on User @skip(if: false) {
name @include(if: true)
}
age
name @include(if: true)
}
}
""", {
'User': [
{"age": 27, "name": "Alice"},
{"age": 21, "name": "Bob"},
{"age": 25, "name": "Jane"},
{"age": 25, "name": "John"},
]
})
def test_graphql_functional_duplicates_04(self):
self.assert_graphql_query_result(r"""
fragment f1 on User {
name @include(if: true)
}
fragment f2 on User {
age
name @include(if: true)
... f1
}
query {
User(order: {name: {dir: ASC}}) {
... f2
age
name @include(if: true)
}
}
""", {
'User': [
{"age": 27, "name": "Alice"},
{"age": 21, "name": "Bob"},
{"age": 25, "name": "Jane"},
{"age": 25, "name": "John"},
]
})
def test_graphql_functional_duplicates_05(self):
self.assert_graphql_query_result(r"""
query {
User(order: {name: {dir: ASC}}) {
age
name
name @include(if: true)
name @skip(if: false)
}
}
""", {
'User': [
{"age": 27, "name": "Alice"},
{"age": 21, "name": "Bob"},
{"age": 25, "name": "Jane"},
{"age": 25, "name": "John"},
]
})
def test_graphql_functional_duplicates_06(self):
self.assert_graphql_query_result(r"""
query {
User(order: {name: {dir: ASC}}) {
... @skip(if: false) {
name @include(if: true)
}
age
name
}
}
""", {
'User': [
{"age": 27, "name": "Alice"},
{"age": 21, "name": "Bob"},
{"age": 25, "name": "Jane"},
{"age": 25, "name": "John"},
]
})
def test_graphql_functional_duplicates_07(self):
self.assert_graphql_query_result(r"""
fragment f1 on User {
name @skip(if: false)
}
fragment f2 on User {
age
name @include(if: true)
... f1
}
query {
User(order: {name: {dir: ASC}}) {
... f2
age
name @include(if: true)
}
}
""", {
'User': [
{"age": 27, "name": "Alice"},
{"age": 21, "name": "Bob"},
{"age": 25, "name": "Jane"},
{"age": 25, "name": "John"},
]
})
def test_graphql_functional_variables_01(self):
query = r"""
query($name: String) {
User(filter: {name: {eq: $name}}) {
name,
groups {
name
}
}
}
"""
expected_result = {
'User': [{
'name': 'John',
'groups': [{
'name': 'basic',
}]
}],
}
self.assert_graphql_query_result(
query,
expected_result,
variables={'name': 'John'},
use_http_post=True
)
self.assert_graphql_query_result(
query,
expected_result,
variables={'name': 'John'},
use_http_post=False
)
def test_graphql_functional_variables_02(self):
self.assert_graphql_query_result(
r"""
query($name: String, $age: Int64) {
User(filter: {or: [{name: {eq: $name}},
{age: {gt: $age}}]},
order: {name: {dir: ASC}})
{
name
age
}
}
""",
{
"User": [
{
"name": "Alice",
"age": 27,
},
{
"name": "Jane",
"age": 25,
},
{
"name": "John",
"age": 25,
},
]
},
variables={
"age": 24,
"name": "Alice"
}
)
def test_graphql_functional_variables_03(self):
self.assert_graphql_query_result(r"""
query($val: Int = 3) {
User(filter: {score: {eq: $val}}) {
id,
}
}
""", {
'User': [],
})
def test_graphql_functional_variables_04(self):
self.assert_graphql_query_result(r"""
query($val: Boolean = true) {
User(order: {name: {dir: ASC}}) {
name @include(if: $val),
groups @skip(if: $val) {
name
}
}
}
""", {
"User": [
{"name": "Alice"},
{"name": "Bob"},
{"name": "Jane"},
{"name": "John"},
]
})
def test_graphql_functional_variables_05(self):
self.assert_graphql_query_result(r"""
query($val: Boolean! = true) {
User(order: {name: {dir: ASC}}) {
name @include(if: $val),
id
}
}
""", {
"User": [
{"name": "Alice"},
{"name": "Bob"},
{"name": "Jane"},
{"name": "John"},
]
})
def test_graphql_functional_variables_06(self):
with self.assertRaisesRegex(
edgedb.QueryError,
r"no value for the 'val' variable",
_line=4, _col=31):
self.graphql_query(r"""
query($val: Boolean!) {
User {
name @include(if: $val),
id
}
}
""")
def test_graphql_functional_variables_07(self):
self.assert_graphql_query_result(r"""
query($val: String = "John") {
User(filter: {name: {eq: $val}}) {
age,
}
}
""", {
"User": [
{"age": 25},
]
})
def test_graphql_functional_variables_08(self):
self.assert_graphql_query_result(r"""
query($val: Int64 = 20) {
User(filter: {age: {eq: $val}}) {
name,
}
}
""", {
"User": []
})
def test_graphql_functional_variables_09(self):
self.assert_graphql_query_result(r"""
query($val: Float = 3.5) {
User(filter: {score: {eq: $val}}) {
name,
}
}
""", {
"User": []
})
def test_graphql_functional_variables_10(self):
self.assert_graphql_query_result(r"""
query($val: Int = 3) {
User(filter: {score: {eq: $val}}) {
id,
}
}
""", {
"User": []
})
def test_graphql_functional_variables_11(self):
self.assert_graphql_query_result(r"""
query($val: Float = 3) {
User(filter: {score: {eq: $val}}) {
id,
}
}
""", {
"User": []
})
def test_graphql_functional_variables_12(self):
with self.assertRaisesRegex(
edgedb.QueryError,
r'Boolean cannot represent a non boolean value: 1',
_line=2, _col=39):
self.graphql_query(r"""
query($val: Boolean = 1) {
User {
name @include(if: $val),
id
}
}
""")
def test_graphql_functional_variables_13(self):
with self.assertRaisesRegex(
edgedb.QueryError,
r'Boolean cannot represent a non boolean value: "1"',
_line=2, _col=39):
self.graphql_query(r"""
query($val: Boolean = "1") {
User {
name @include(if: $val),
id
}
}
""")
def test_graphql_functional_variables_14(self):
with self.assertRaisesRegex(
edgedb.QueryError,
r'Boolean cannot represent a non boolean value: 1\.3',
_line=2, _col=39):
self.graphql_query(r"""
query($val: Boolean = 1.3) {
User {
name @include(if: $val),
id
}
}
""")
def test_graphql_functional_variables_15(self):
with self.assertRaisesRegex(
edgedb.QueryError,
r'String cannot represent a non string value: 1',
_line=2, _col=38):
self.graphql_query(r"""
query($val: String = 1) {
User(filter: {name: {eq: $val}}) {
id
}
}
""")
def test_graphql_functional_variables_16(self):
with self.assertRaisesRegex(
edgedb.QueryError,
r'String cannot represent a non string value: 1\.1',
_line=2, _col=38):
self.graphql_query(r"""
query($val: String = 1.1) {
User(filter: {name: {eq: $val}}) {
id
}
}
""")
def test_graphql_functional_variables_17(self):
with self.assertRaisesRegex(
edgedb.QueryError,
r'String cannot represent a non string value: true',
_line=2, _col=38):
self.graphql_query(r"""
query($val: String = true) {
User(filter: {name: {eq: $val}}) {
id
}
}
""")
def test_graphql_functional_variables_18(self):
with self.assertRaisesRegex(
edgedb.QueryError,
r'Int cannot represent non-integer value: 1\.1',
_line=2, _col=35):
self.graphql_query(r"""
query($val: Int = 1.1) {
User(filter: {age: {eq: $val}}) {
id
}
}
""")
def test_graphql_functional_variables_19(self):
with self.assertRaisesRegex(
edgedb.QueryError,
r'Int cannot represent non-integer value: "1"',
_line=2, _col=35):
self.graphql_query(r"""
query($val: Int = "1") {
User(filter: {age: {eq: $val}}) {
id
}
}
""")
def test_graphql_functional_variables_20(self):
with self.assertRaisesRegex(
edgedb.QueryError,
r'Int cannot represent non-integer value: true',
_line=2, _col=35):
self.graphql_query(r"""
query($val: Int = true) {
User(filter: {age: {eq: $val}}) {
id
}
}
""")
def test_graphql_functional_variables_21(self):
with self.assertRaisesRegex(
edgedb.QueryError,
r'Float cannot represent non numeric value: "1"',
_line=2, _col=37):
self.graphql_query(r"""
query($val: Float = "1") {
User(filter: {score: {eq: $val}}) {
id
}
}
""")
def test_graphql_functional_variables_22(self):
with self.assertRaisesRegex(
edgedb.QueryError,
r'Float cannot represent non numeric value: true',
_line=2, _col=37):
self.graphql_query(r"""
query($val: Float = true) {
User(filter: {score: {eq: $val}}) {
id
}
}
""")
def test_graphql_functional_variables_23(self):
self.assert_graphql_query_result(r"""
query($val: ID = "00000000-3576-11e9-8723-cf18c8790091") {
User(filter: {id: {eq: $val}}) {
name
}
}
""", {
"User": []
})
def test_graphql_functional_variables_25(self):
with self.assertRaisesRegex(
edgedb.QueryError,
r'ID cannot represent a non-string and non-integer.+: 1\.1',
_line=2, _col=34):
self.graphql_query(r"""
query($val: ID = 1.1) {
User(filter: {id: {eq: $val}}) {
name
}
}
""")
def test_graphql_functional_variables_26(self):
with self.assertRaisesRegex(
edgedb.QueryError,
r'ID cannot represent a non-string and non-integer.+: true',
_line=2, _col=34):
self.graphql_query(r"""
query($val: ID = true) {
User(filter: {id: {eq: $val}}) {
name
}
}
""")
def test_graphql_functional_variables_27(self):
with self.assertRaisesRegex(
edgedb.QueryError,
r"Variable '\$val' of type '\[String\]' used in position "
r"expecting type 'String'\."):
self.graphql_query(r"""
query($val: [String] = "Foo") {
User(filter: {name: {eq: $val}}) {
id
}
}
""")
def test_graphql_functional_variables_28(self):
with self.assertRaisesRegex(
edgedb.QueryError,
r"Variable '\$val' of type '\[String\]' used in position "
r"expecting type 'String'\."):
self.graphql_query(r"""
query($val: [String]) {
User(filter: {name: {eq: $val}}) {
id
}
}
""")
def test_graphql_functional_variables_29(self):
with self.assertRaisesRegex(
edgedb.QueryError,
r"Variable '\$val' of type '\[String\]!' used in position "
r"expecting type 'String'."):
self.graphql_query(r"""
query($val: [String]!) {
User(filter: {name: {eq: $val}}) {
id
}
}
""")
def test_graphql_functional_variables_30(self):
with self.assertRaisesRegex(
edgedb.QueryError,
r"no value for the 'val' variable"):
self.graphql_query(r"""
query($val: String!) {
User(filter: {name: {eq: $val}}) {
id
}
}
""")
def test_graphql_functional_variables_31(self):
with self.assertRaisesRegex(
edgedb.QueryError,
r"String cannot represent a non string value: 123",
_line=2, _col=48):
self.graphql_query(r"""
query($val: [String] = ["Foo", 123]) {
User(filter: {name: {eq: $val}}) {
id
}
}
""")
def test_graphql_functional_variables_32(self):
with self.assertRaisesRegex(
edgedb.QueryError,
r"Variable '\$val' of type '\[String\]' used in position "
r"expecting type 'String'\."):
self.graphql_query(r"""
query($val: [String]) {
User(filter: {name: {eq: $val}}) {
id
}
}
""")
def test_graphql_functional_variables_33(self):
with self.assertRaisesRegex(
edgedb.QueryError,
r'expected json string'):
self.graphql_query(
r"""
query($name: String) {
User(filter: {name: {eq: $name}}) {
name,
groups {
name
}
}
}
""",
variables={'name': 11})
def test_graphql_functional_variables_34(self):
# Test multiple requests to make sure that caching works correctly
for _ in range(2):
for _ in range(2):
self.assert_graphql_query_result(
r"""
query($val: Boolean!, $min_age: Int64!) {
User(filter: {age: {gt: $min_age}}) {
name @include(if: $val),
age
}
}
""",
{'User': [{'age': 27, 'name': 'Alice'}]},
variables={'val': True, 'min_age': 26}
)
self.assert_graphql_query_result(
r"""
query($val: Boolean!, $min_age: Int64!) {
User(filter: {age: {gt: $min_age}}) {
name @include(if: $val),
age
}
}
""",
{'User': [{'age': 27}]},
variables={'val': False, 'min_age': 26}
)
def test_graphql_functional_variables_35(self):
self.assert_graphql_query_result(
r"""
query($limit: Int!) {
User(
order: {name: {dir: ASC}},
first: $limit
) {
name
}
}
""",
{
'User': [{
'name': 'Alice',
}]
},
variables={'limit': 1},
)
def test_graphql_functional_variables_36(self):
self.assert_graphql_query_result(
r"""
query($idx: String!) {
User(
order: {name: {dir: ASC}},
# this is actually equivalent to OFFSET 2,
# since 'after' doesn't include the value
# referenced by the index
after: $idx
) {
name
}
}
""",
{
'User': [{
'name': 'Jane',
}, {
'name': 'John',
}]
},
variables={'idx': '1'},
)
def test_graphql_functional_variables_37(self):
self.assert_graphql_query_result(
r"""
query($idx: String!, $num: Int!) {
User(
order: {name: {dir: ASC}},
# this is actually equivalent to OFFSET 2,
# since 'after' doesn't include the value
# referenced by the index
after: $idx,
first: $num
) {
name
}
}
""",
{
'User': [{
'name': 'Jane',
}]
},
variables={'idx': '1', 'num': 1},
)
def test_graphql_functional_variables_38(self):
with self.assertRaisesRegex(
edgedb.QueryError,
r"Variable '\$limit' of type 'String!' used in position "
r"expecting type 'Int'."):
self.graphql_query(
r"""
query($limit: String!) {
User(
order: {name: {dir: ASC}},
first: $limit
) {
name
}
}
""",
variables={'limit': '1'},
)
# FIXME: the error here comes all the way from Postgres and as
# such refers to Postgres types, ideally we'd like to have an
# error message expressed in terms of GraphQL types.
def test_graphql_functional_variables_39(self):
with self.assertRaisesRegex(
edgedb.QueryError,
r'expected json number.+got json string'):
self.graphql_query(
r"""
query($limit: Int!) {
User(
order: {name: {dir: ASC}},
first: $limit
) {
name
}
}
""",
variables={'limit': '1'},
)
def test_graphql_functional_variables_40(self):
with self.assertRaisesRegex(
edgedb.QueryError,
r"Only scalar defaults are allowed\. "
r"Variable 'val' has non-scalar default value\."):
self.graphql_query(r"""
query($val: FilterFloat = {eq: 3.0}) {
User(filter: {score: $val}) {
id,
}
}
""")
def test_graphql_functional_variables_41(self):
with self.assertRaisesRegex(
edgedb.QueryError,
r"Variables starting with '_edb_arg__' are prohibited"):
self.graphql_query(r"""
query($_edb_arg__1: Int!) {
User(limit: $_edb_arg__1) {
id,
}
}
""", variables={'_edb_arg__1': 1})
def test_graphql_functional_variables_42(self):
with self.assertRaisesRegex(
edgedb.QueryError,
r"Variables starting with '_edb_arg__' are prohibited"):
self.graphql_query(r"""
query($_edb_arg__1: Int = 1) {
User(limit: $_edb_arg__1) {
id,
}
}
""")
def test_graphql_functional_variables_43(self):
with self.assertRaisesRegex(
edgedb.QueryError,
r"Only scalar input variables are allowed\. "
r"Variable 'f' has non-scalar value\."):
self.graphql_query(r"""
query user($f: FilterUser!) {
User(filter: $f) {
name
}
}
""", variables={"f": {"name": {"eq": "Alice"}}})
def test_graphql_functional_variables_44(self):
self.assert_graphql_query_result(
r"""
query foo($color: other__ColorEnum!) {
other__Foo(
filter: {color: {eq: $color}},
) {
select
color
}
}
""", {
"other__Foo": [{
"select": "a",
"color": "RED",
}]
},
variables={"color": "RED"},
)
def test_graphql_functional_variables_45(self):
self.assert_graphql_query_result(
r"""
query foo($color: other__ColorEnum! = GREEN) {
other__Foo(
filter: {color: {eq: $color}},
) {
select
color
}
}
""", {
"other__Foo": [{
"select": "b",
"color": "GREEN",
}]
},
)
def test_graphql_functional_inheritance_01(self):
# ISSUE: #709
#
# Testing type and sub-type.
self.assert_graphql_query_result(r"""
query {
Bar {
__typename
q
}
}
""", {
'Bar': [{
'__typename': 'Bar_Type',
'q': 'bar',
}, {
'__typename': 'Bar2_Type',
'q': 'bar2',
}],
}, sort=lambda x: x['q'])
def test_graphql_functional_inheritance_02(self):
# ISSUE: #709
#
        # Testing type and sub-type, with a covariant link target.
self.assert_graphql_query_result(r"""
query {
Rab {
__typename
blah {
__typename
q
}
}
}
""", {
'Rab': [{
'__typename': 'Rab_Type',
'blah': {
'__typename': 'Bar_Type',
'q': 'bar',
}
}, {
'__typename': 'Rab2_Type',
'blah': {
'__typename': 'Bar2_Type',
'q': 'bar2',
}
}],
}, sort=lambda x: x['blah']['q'])
def test_graphql_functional_inheritance_03(self):
# ISSUE: #709
#
        # Testing type and sub-type, with a covariant link target.
#
        # Rab2 must keep the target type of the link the same as the base
# type, due to limitations of GraphQL inheritance. But as long
# as the actual target type is known, it can be explicitly
# referenced.
self.assert_graphql_query_result(r"""
query {
Rab2 {
blah {
__typename
... on Bar2 {
q
w
}
}
}
}
""", {
'Rab2': [{
'blah': {
'__typename': 'Bar2_Type',
'q': 'bar2',
'w': 'special'
}
}],
})
def test_graphql_functional_order_01(self):
# Test order by nested objects
self.assert_graphql_query_result(r"""
query {
Rab(order: {blah: {q: {dir: DESC}}}) {
blah {
q
}
}
}
""", {
"Rab": [
{
"blah": {
"q": "bar2"
}
},
{
"blah": {
"q": "bar"
},
}
]
})
def test_graphql_functional_order_02(self):
# Test order by nested objects
self.assert_graphql_query_result(r"""
query {
SettingAliasAugmented(
order: {
of_group: {name_upper: {dir: ASC}},
name: {dir: DESC}
}
) {
name
of_group {
name_upper
}
}
}
""", {
"SettingAliasAugmented": [
{
"name": "template",
"of_group": {
"name_upper": "UNUSED"
},
},
{
"name": "template",
"of_group": {
"name_upper": "UPGRADED"
},
},
{
"name": "perks",
"of_group": {
"name_upper": "UPGRADED"
},
},
]
})
def test_graphql_functional_order_03(self):
# Test order by nested objects
self.assert_graphql_query_result(r"""
query {
LinkedList(order: {
next: {next: {name: {dir: DESC, nulls: SMALLEST}}},
name: {dir: ASC}
}) {
name
}
}
""", {
"LinkedList": [
{
"name": "2nd"
},
{
"name": "1st"
},
{
"name": "3rd"
},
{
"name": "4th"
}
]
})
def test_graphql_functional_order_04(self):
# Test order by nested objects
self.assert_graphql_query_result(r"""
query {
User(order: {
profile: {
value: {dir: ASC},
name: {dir: DESC}
}
}) {
name
profile {
name
value
}
}
}
""", {
"User": [
{
"name": "John",
"profile": None,
},
{
"name": "Jane",
"profile": None,
},
{
"name": "Bob",
"profile": {
"name": "Bob profile",
"value": "special",
},
},
{
"name": "Alice",
"profile": {
"name": "Alice profile",
"value": "special",
},
}
]
})
def test_graphql_functional_exists_01(self):
self.assert_graphql_query_result(r"""
query {
User(
filter: {profile: {exists: true}},
order: {name: {dir: ASC}}
) {
name
profile {
name
}
}
}
""", {
"User": [
{
"name": "Alice",
"profile": {
"name": "Alice profile",
},
},
{
"name": "Bob",
"profile": {
"name": "Bob profile",
},
},
]
})
def test_graphql_functional_exists_02(self):
self.assert_graphql_query_result(r"""
query {
User(
filter: {profile: {exists: false}},
order: {name: {dir: ASC}}
) {
name
profile {
name
}
}
}
""", {
"User": [
{
"name": "Jane",
"profile": None,
},
{
"name": "John",
"profile": None,
},
]
})
def test_graphql_functional_exists_03(self):
self.assert_graphql_query_result(r"""
query {
User(
filter: {groups: {settings: {exists: false}}},
order: {name: {dir: ASC}}
) {
name
groups {
name
settings {
name
}
}
}
}
""", {
"User": [
{
"name": "Alice",
"groups": [],
},
{
"name": "Bob",
"groups": [],
},
{
"name": "John",
"groups": [
{
"name": "basic",
"settings": [],
}
],
},
]
})
def test_graphql_functional_exists_04(self):
self.assert_graphql_query_result(r"""
query {
User(
filter: {groups: {settings: {exists: true}}}
) {
name
groups {
name
settings(order: {name: {dir: ASC}}) {
name
}
}
}
}
""", {
"User": [
{
"name": "Jane",
"groups": [
{
"name": "upgraded",
"settings": [
{
"name": "perks",
},
{
"name": "template",
},
]
}
]
}
]
})
def test_graphql_functional_exists_05(self):
self.assert_graphql_query_result(r"""
query {
User(
filter: {groups: {settings: {id: {exists: false}}}},
order: {name: {dir: ASC}}
) {
name
groups {
name
settings {
name
}
}
}
}
""", {
"User": [
{
"name": "Alice",
"groups": [],
},
{
"name": "Bob",
"groups": [],
},
{
"name": "John",
"groups": [
{
"name": "basic",
"settings": [],
}
],
},
]
})
def test_graphql_functional_exists_06(self):
self.assert_graphql_query_result(r"""
query {
User(
filter: {groups: {settings: {id: {exists: true}}}}
) {
name
groups {
name
settings(order: {name: {dir: ASC}}) {
name
}
}
}
}
""", {
"User": [
{
"name": "Jane",
"groups": [
{
"name": "upgraded",
"settings": [
{
"name": "perks",
},
{
"name": "template",
},
]
}
]
}
]
})
class TestGraphQLInit(tb.GraphQLTestCase):
"""Test GraphQL initialization on an empty database."""
# GraphQL queries cannot run in a transaction
TRANSACTION_ISOLATION = False
def test_graphql_init_type_01(self):
# An empty database should still have an "Object" interface.
self.assert_graphql_query_result(r"""
query {
__type(name: "Object") {
__typename
name
kind
}
}
""", {
"__type": {
"kind": "INTERFACE",
"name": "Object",
"__typename": "__Type"
}
})
|
py | 7dfbef5fdf6f84142ccfad6953a54fcfb5206492 | #!/usr/bin/env python3
# Copyright (c) 2014-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
ZMQ example using python3's asyncio
CounosX should be started with the command line arguments:
counosxd -testnet -daemon \
-zmqpubrawtx=tcp://127.0.0.1:28332 \
-zmqpubrawblock=tcp://127.0.0.1:28332 \
-zmqpubhashtx=tcp://127.0.0.1:28332 \
-zmqpubhashblock=tcp://127.0.0.1:28332
We use the asyncio library here. `self.handle()` installs itself as a
future at the end of the function. Since it never returns with the event
loop having an empty stack of futures, this creates an infinite loop. An
alternative is to wrap the contents of `handle` inside `while True`.
The `@asyncio.coroutine` decorator and the `yield from` syntax found here
was introduced in python 3.4 and has been deprecated in favor of the `async`
and `await` keywords respectively.
A blocking example using python 2.7 can be obtained from the git history:
https://github.com/bitcoin/bitcoin/blob/37a7fe9e440b83e2364d5498931253937abe9294/contrib/zmq/zmq_sub.py
"""
import binascii
import asyncio
import zmq
import zmq.asyncio
import signal
import struct
import sys
if (sys.version_info.major, sys.version_info.minor) < (3, 4):
print("This example only works with Python 3.4 and greater")
sys.exit(1)
port = 28332
class ZMQHandler():
def __init__(self):
self.loop = asyncio.get_event_loop()
self.zmqContext = zmq.asyncio.Context()
self.zmqSubSocket = self.zmqContext.socket(zmq.SUB)
self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "hashblock")
self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "hashtx")
self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "rawblock")
self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "rawtx")
self.zmqSubSocket.connect("tcp://127.0.0.1:%i" % port)
@asyncio.coroutine
def handle(self) :
msg = yield from self.zmqSubSocket.recv_multipart()
topic = msg[0]
body = msg[1]
sequence = "Unknown"
if len(msg[-1]) == 4:
msgSequence = struct.unpack('<I', msg[-1])[-1]
sequence = str(msgSequence)
if topic == b"hashblock":
print('- HASH BLOCK ('+sequence+') -')
print(binascii.hexlify(body))
elif topic == b"hashtx":
print('- HASH TX ('+sequence+') -')
print(binascii.hexlify(body))
elif topic == b"rawblock":
print('- RAW BLOCK HEADER ('+sequence+') -')
print(binascii.hexlify(body[:80]))
elif topic == b"rawtx":
print('- RAW TX ('+sequence+') -')
print(binascii.hexlify(body))
# schedule ourselves to receive the next message
asyncio.ensure_future(self.handle())
def start(self):
self.loop.add_signal_handler(signal.SIGINT, self.stop)
self.loop.create_task(self.handle())
self.loop.run_forever()
def stop(self):
self.loop.stop()
self.zmqContext.destroy()
daemon = ZMQHandler()
daemon.start()
|
py | 7dfbf039e49b61a019200e6f5294d24a7feeb09d | #! /usr/bin/env python
# coding=utf-8
#================================================================
# Copyright (C) 2019 * Ltd. All rights reserved.
#
# Editor : VIM
# File name : dataset.py
# Author : YunYang1994
# Created date: 2019-03-15 18:05:03
# Description :
#
#================================================================
import os
import cv2
import random
import numpy as np
import tensorflow as tf
import core.utils as utils
from core.config import cfg
class Dataset(object):
"""implement Dataset here"""
def __init__(self, dataset_type):
self.annot_path = cfg.TRAIN.ANNOT_PATH if dataset_type == 'train' else cfg.TEST.ANNOT_PATH
self.input_sizes = cfg.TRAIN.INPUT_SIZE if dataset_type == 'train' else cfg.TEST.INPUT_SIZE
self.batch_size = cfg.TRAIN.BATCH_SIZE if dataset_type == 'train' else cfg.TEST.BATCH_SIZE
self.data_aug = cfg.TRAIN.DATA_AUG if dataset_type == 'train' else cfg.TEST.DATA_AUG
self.train_input_sizes = cfg.TRAIN.INPUT_SIZE
self.strides = np.array(cfg.YOLO.STRIDES)
self.classes = utils.read_class_names(cfg.YOLO.CLASSES)
self.num_classes = len(self.classes)
self.anchors = np.array(utils.get_anchors(cfg.YOLO.ANCHORS))
self.anchor_per_scale = cfg.YOLO.ANCHOR_PER_SCALE
self.max_bbox_per_scale = 150
self.annotations = self.load_annotations(dataset_type)
self.num_samples = len(self.annotations)
self.num_batchs = int(np.ceil(self.num_samples / self.batch_size))
self.batch_count = 0
def load_annotations(self, dataset_type):
annotations_txt=[]
with open(self.annot_path, 'r') as f:
txt = f.readlines()
#annotations = [line.strip() for line in txt if len(line.strip().split()[1:]) != 0]
            for t in txt[:10]:  # NOTE: only the first 10 annotation lines are converted here
annotations_txt.append(get_bb_list(t,training=True))
annotations = [line.strip() for line in annotations_txt if len(line.strip().split()[1:]) != 0]
np.random.shuffle(annotations)
return annotations
def __iter__(self):
return self
def __next__(self):
with tf.device('/cpu:0'):
# self.train_input_size = random.choice(self.train_input_sizes)
self.train_input_size = cfg.TRAIN.INPUT_SIZE
self.train_output_sizes = self.train_input_size // self.strides
batch_image = np.zeros((self.batch_size, self.train_input_size, self.train_input_size, 3), dtype=np.float32)
batch_label_sbbox = np.zeros((self.batch_size, self.train_output_sizes[0], self.train_output_sizes[0],
self.anchor_per_scale, 5 + self.num_classes), dtype=np.float32)
batch_label_mbbox = np.zeros((self.batch_size, self.train_output_sizes[1], self.train_output_sizes[1],
self.anchor_per_scale, 5 + self.num_classes), dtype=np.float32)
batch_label_lbbox = np.zeros((self.batch_size, self.train_output_sizes[2], self.train_output_sizes[2],
self.anchor_per_scale, 5 + self.num_classes), dtype=np.float32)
batch_sbboxes = np.zeros((self.batch_size, self.max_bbox_per_scale, 4), dtype=np.float32)
batch_mbboxes = np.zeros((self.batch_size, self.max_bbox_per_scale, 4), dtype=np.float32)
batch_lbboxes = np.zeros((self.batch_size, self.max_bbox_per_scale, 4), dtype=np.float32)
num = 0
if self.batch_count < self.num_batchs:
while num < self.batch_size:
index = self.batch_count * self.batch_size + num
if index >= self.num_samples: index -= self.num_samples
annotation = self.annotations[index]
image, bboxes = self.parse_annotation(annotation)
label_sbbox, label_mbbox, label_lbbox, sbboxes, mbboxes, lbboxes = self.preprocess_true_boxes(bboxes)
batch_image[num, :, :, :] = image
batch_label_sbbox[num, :, :, :, :] = label_sbbox
batch_label_mbbox[num, :, :, :, :] = label_mbbox
batch_label_lbbox[num, :, :, :, :] = label_lbbox
batch_sbboxes[num, :, :] = sbboxes
batch_mbboxes[num, :, :] = mbboxes
batch_lbboxes[num, :, :] = lbboxes
num += 1
self.batch_count += 1
batch_smaller_target = batch_label_sbbox, batch_sbboxes
batch_medium_target = batch_label_mbbox, batch_mbboxes
batch_larger_target = batch_label_lbbox, batch_lbboxes
return batch_image, (batch_smaller_target, batch_medium_target, batch_larger_target)
else:
self.batch_count = 0
np.random.shuffle(self.annotations)
raise StopIteration
def random_horizontal_flip(self, image, bboxes):
if random.random() < 0.5:
_, w, _ = image.shape
image = image[:, ::-1, :]
bboxes[:, [0,2]] = w - bboxes[:, [2,0]]
return image, bboxes
def random_crop(self, image, bboxes):
if random.random() < 0.5:
h, w, _ = image.shape
max_bbox = np.concatenate([np.min(bboxes[:, 0:2], axis=0), np.max(bboxes[:, 2:4], axis=0)], axis=-1)
max_l_trans = max_bbox[0]
max_u_trans = max_bbox[1]
max_r_trans = w - max_bbox[2]
max_d_trans = h - max_bbox[3]
crop_xmin = max(0, int(max_bbox[0] - random.uniform(0, max_l_trans)))
crop_ymin = max(0, int(max_bbox[1] - random.uniform(0, max_u_trans)))
            # Clamp to the image bounds (mirrors the max(0, ...) clamps above).
            crop_xmax = min(w, int(max_bbox[2] + random.uniform(0, max_r_trans)))
            crop_ymax = min(h, int(max_bbox[3] + random.uniform(0, max_d_trans)))
image = image[crop_ymin : crop_ymax, crop_xmin : crop_xmax]
bboxes[:, [0, 2]] = bboxes[:, [0, 2]] - crop_xmin
bboxes[:, [1, 3]] = bboxes[:, [1, 3]] - crop_ymin
return image, bboxes
def random_translate(self, image, bboxes):
if random.random() < 0.5:
h, w, _ = image.shape
max_bbox = np.concatenate([np.min(bboxes[:, 0:2], axis=0), np.max(bboxes[:, 2:4], axis=0)], axis=-1)
max_l_trans = max_bbox[0]
max_u_trans = max_bbox[1]
max_r_trans = w - max_bbox[2]
max_d_trans = h - max_bbox[3]
tx = random.uniform(-(max_l_trans - 1), (max_r_trans - 1))
ty = random.uniform(-(max_u_trans - 1), (max_d_trans - 1))
M = np.array([[1, 0, tx], [0, 1, ty]])
image = cv2.warpAffine(image, M, (w, h))
bboxes[:, [0, 2]] = bboxes[:, [0, 2]] + tx
bboxes[:, [1, 3]] = bboxes[:, [1, 3]] + ty
return image, bboxes
def parse_annotation(self, annotation):
line = annotation.split()
image_path = line[0]
if not os.path.exists(image_path):
raise KeyError("%s does not exist ... " %image_path)
image = cv2.imread(image_path)
bboxes = np.array([list(map(int, box.split(','))) for box in line[1:]])
if self.data_aug:
image, bboxes = self.random_horizontal_flip(np.copy(image), np.copy(bboxes))
image, bboxes = self.random_crop(np.copy(image), np.copy(bboxes))
image, bboxes = self.random_translate(np.copy(image), np.copy(bboxes))
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image, bboxes = utils.image_preporcess(np.copy(image), [self.train_input_size, self.train_input_size], np.copy(bboxes))
return image, bboxes
def bbox_iou(self, boxes1, boxes2):
boxes1 = np.array(boxes1)
boxes2 = np.array(boxes2)
boxes1_area = boxes1[..., 2] * boxes1[..., 3]
boxes2_area = boxes2[..., 2] * boxes2[..., 3]
boxes1 = np.concatenate([boxes1[..., :2] - boxes1[..., 2:] * 0.5,
boxes1[..., :2] + boxes1[..., 2:] * 0.5], axis=-1)
boxes2 = np.concatenate([boxes2[..., :2] - boxes2[..., 2:] * 0.5,
boxes2[..., :2] + boxes2[..., 2:] * 0.5], axis=-1)
left_up = np.maximum(boxes1[..., :2], boxes2[..., :2])
right_down = np.minimum(boxes1[..., 2:], boxes2[..., 2:])
inter_section = np.maximum(right_down - left_up, 0.0)
inter_area = inter_section[..., 0] * inter_section[..., 1]
union_area = boxes1_area + boxes2_area - inter_area
return inter_area / union_area
def preprocess_true_boxes(self, bboxes):
label = [np.zeros((self.train_output_sizes[i], self.train_output_sizes[i], self.anchor_per_scale,
5 + self.num_classes)) for i in range(3)]
bboxes_xywh = [np.zeros((self.max_bbox_per_scale, 4)) for _ in range(3)]
bbox_count = np.zeros((3,))
for bbox in bboxes:
bbox_coor = bbox[:4]
bbox_class_ind = bbox[4]
            onehot = np.zeros(self.num_classes, dtype=float)  # np.float alias removed in newer NumPy
onehot[bbox_class_ind] = 1.0
uniform_distribution = np.full(self.num_classes, 1.0 / self.num_classes)
deta = 0.01
smooth_onehot = onehot * (1 - deta) + deta * uniform_distribution
bbox_xywh = np.concatenate([(bbox_coor[2:] + bbox_coor[:2]) * 0.5, bbox_coor[2:] - bbox_coor[:2]], axis=-1)
bbox_xywh_scaled = 1.0 * bbox_xywh[np.newaxis, :] / self.strides[:, np.newaxis]
iou = []
exist_positive = False
for i in range(3):
anchors_xywh = np.zeros((self.anchor_per_scale, 4))
anchors_xywh[:, 0:2] = np.floor(bbox_xywh_scaled[i, 0:2]).astype(np.int32) + 0.5
anchors_xywh[:, 2:4] = self.anchors[i]
iou_scale = self.bbox_iou(bbox_xywh_scaled[i][np.newaxis, :], anchors_xywh)
iou.append(iou_scale)
iou_mask = iou_scale > 0.3
if np.any(iou_mask):
xind, yind = np.floor(bbox_xywh_scaled[i, 0:2]).astype(np.int32)
label[i][yind, xind, iou_mask, :] = 0
label[i][yind, xind, iou_mask, 0:4] = bbox_xywh
label[i][yind, xind, iou_mask, 4:5] = 1.0
label[i][yind, xind, iou_mask, 5:] = smooth_onehot
bbox_ind = int(bbox_count[i] % self.max_bbox_per_scale)
bboxes_xywh[i][bbox_ind, :4] = bbox_xywh
bbox_count[i] += 1
exist_positive = True
if not exist_positive:
best_anchor_ind = np.argmax(np.array(iou).reshape(-1), axis=-1)
best_detect = int(best_anchor_ind / self.anchor_per_scale)
best_anchor = int(best_anchor_ind % self.anchor_per_scale)
xind, yind = np.floor(bbox_xywh_scaled[best_detect, 0:2]).astype(np.int32)
label[best_detect][yind, xind, best_anchor, :] = 0
label[best_detect][yind, xind, best_anchor, 0:4] = bbox_xywh
label[best_detect][yind, xind, best_anchor, 4:5] = 1.0
label[best_detect][yind, xind, best_anchor, 5:] = smooth_onehot
bbox_ind = int(bbox_count[best_detect] % self.max_bbox_per_scale)
bboxes_xywh[best_detect][bbox_ind, :4] = bbox_xywh
bbox_count[best_detect] += 1
label_sbbox, label_mbbox, label_lbbox = label
sbboxes, mbboxes, lbboxes = bboxes_xywh
return label_sbbox, label_mbbox, label_lbbox, sbboxes, mbboxes, lbboxes
def __len__(self):
return self.num_batchs
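# Illustrative usage of the Dataset class above (editor's sketch; assumes the
# cfg.TRAIN.* paths point at a prepared annotation file):
#
#   trainset = Dataset('train')
#   for batch_image, (small_target, medium_target, large_target) in trainset:
#       ...  # run one training step on the batch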
def get_bb_list(image_path, training=False):
    """Convert a YOLO label file (class cx cy w h, normalized) for `image_path`
    into the annotation string expected by `load_annotations`:
    '<image path> x1,y1,x2,y2,label x1,y1,x2,y2,label ...'."""
    if "combined" not in image_path:
        # Read the image only to recover its size for de-normalizing coordinates.
        image = np.array(cv2.imread(image_path.replace("\n", "")))
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        height, width, _ = image.shape
    else:
        # "combined" images are assumed to have a fixed size.
        height = 2560
        width = 1440
    annotation = image_path.replace(".txt", ".jpg").replace("\n", "")
    with open(image_path.replace(".jpg", ".txt").replace("\n", ""), 'r') as f:
        for line in f.readlines():
            annotation += " "
            row = line.replace("\n", "").split(" ")
            label = row[0]
            centerx = float(row[1]) * width
            centery = float(row[2]) * height
            width_bb = float(row[3]) * width
            height_bb = float(row[4]) * height
            x1 = int(centerx - (width_bb / 2))
            x2 = int(centerx + (width_bb / 2))
            y1 = int(centery - (height_bb / 2))
            y2 = int(centery + (height_bb / 2))
            annotation += ",".join([str(x1), str(y1), str(x2), str(y2), str(label)])
    return annotation
|
py | 7dfbf16a15ac248394ff768ea4ab4dcd1625c0f8 | #AUTOGENERATED! DO NOT EDIT! File to edit: dev/08_vision_core.ipynb (unless otherwise specified).
__all__ = ['Image', 'ToTensor', 'imagenet_stats', 'cifar_stats', 'mnist_stats', 'n_px', 'shape', 'aspect', 'load_image',
'PILBase', 'PILImage', 'PILImageBW', 'PILMask', 'OpenMask', 'TensorPoint', 'get_annotations', 'TensorBBox',
'LabeledBBox', 'image2tensor', 'encodes', 'encodes', 'PointScaler', 'BBoxLabels', 'BBoxLabeler', 'decodes',
'encodes', 'decodes']
#Cell
from ..test import *
from ..torch_basics import *
from ..data.all import *
from PIL import Image
#Cell
imagenet_stats = ([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
cifar_stats = ([0.491, 0.482, 0.447], [0.247, 0.243, 0.261])
mnist_stats = ([0.131], [0.308])
#Cell
if not hasattr(Image,'_patched'):
_old_sz = Image.Image.size.fget
@patch_property
def size(x:Image.Image): return Tuple(_old_sz(x))
Image._patched = True
#Cell
@patch_property
def n_px(x: Image.Image): return x.size[0] * x.size[1]
#Cell
@patch_property
def shape(x: Image.Image): return x.size[1],x.size[0]
#Cell
@patch_property
def aspect(x: Image.Image): return x.size[0]/x.size[1]
#Cell
@patch
def reshape(x: Image.Image, h, w, resample=0):
"`resize` `x` to `(w,h)`"
return x.resize((w,h), resample=resample)
#Cell
@patch
def resize_max(x: Image.Image, resample=0, max_px=None, max_h=None, max_w=None):
"`resize` `x` to `max_px`, or `max_h`, or `max_w`"
h,w = x.shape
if max_px and x.n_px>max_px: h,w = Tuple(h,w).mul(math.sqrt(max_px/x.n_px))
if max_h and h>max_h: h,w = (max_h ,max_h*w/h)
if max_w and w>max_w: h,w = (max_w*h/w,max_w )
return x.reshape(round(h), round(w), resample=resample)
#Cell
def load_image(fn, mode=None, **kwargs):
"Open and load a `PIL.Image` and convert to `mode`"
im = Image.open(fn, **kwargs)
im.load()
im = im._new(im.im)
return im.convert(mode) if mode else im
#Cell
class PILBase(Image.Image, metaclass=BypassNewMeta):
_bypass_type=Image.Image
default_batch_tfms = IntToFloatTensor
_show_args = {'cmap':'viridis'}
_open_args = {'mode': 'RGB'}
@classmethod
def create(cls, fn, **kwargs)->None:
"Open an `Image` from path `fn`"
if isinstance(fn,Tensor): fn = fn.numpy()
if isinstance(fn,ndarray): return cls(Image.fromarray(fn))
return cls(load_image(fn, **merge(cls._open_args, kwargs)))
def show(self, ctx=None, **kwargs):
"Show image using `merge(self._show_args, kwargs)`"
return show_image(self, ctx=ctx, **merge(self._show_args, kwargs))
#Cell
class PILImage(PILBase): pass
#Cell
class PILImageBW(PILImage): _show_args,_open_args = {'cmap':'Greys'},{'mode': 'L'}
#Cell
class PILMask(PILBase): _open_args,_show_args = {'mode':'L'},{'alpha':0.5, 'cmap':'tab20'}
#Cell
OpenMask = Transform(PILMask.create)
OpenMask.loss_func = CrossEntropyLossFlat(axis=1)
PILMask.create = OpenMask
#Cell
class TensorPoint(TensorBase):
"Basic type for points in an image"
_show_args = dict(s=10, marker='.', c='r')
@classmethod
def create(cls, t, sz=None)->None:
"Convert an array or a list of points `t` to a `Tensor`"
return cls(tensor(t).view(-1, 2).float(), sz=sz)
def show(self, ctx=None, **kwargs):
if 'figsize' in kwargs: del kwargs['figsize']
x = self.view(-1,2)
ctx.scatter(x[:, 0], x[:, 1], **{**self._show_args, **kwargs})
return ctx
#Cell
def get_annotations(fname, prefix=None):
"Open a COCO style json in `fname` and returns the lists of filenames (with maybe `prefix`) and labelled bboxes."
annot_dict = json.load(open(fname))
id2images, id2bboxes, id2cats = {}, collections.defaultdict(list), collections.defaultdict(list)
classes = {o['id']:o['name'] for o in annot_dict['categories']}
for o in annot_dict['annotations']:
bb = o['bbox']
id2bboxes[o['image_id']].append([bb[0],bb[1], bb[0]+bb[2], bb[1]+bb[3]])
id2cats[o['image_id']].append(classes[o['category_id']])
id2images = {o['id']:ifnone(prefix, '') + o['file_name'] for o in annot_dict['images'] if o['id'] in id2bboxes}
ids = list(id2images.keys())
return [id2images[k] for k in ids], [(id2bboxes[k], id2cats[k]) for k in ids]
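# Illustrative call (editor's sketch; the json path, prefix and file name below
# are hypothetical and not shipped with this notebook):
#
#   images, lbl_bbox = get_annotations('coco_sample/train.json', prefix='coco_sample/')
#   images[0]    # -> 'coco_sample/000000000001.jpg'
#   lbl_bbox[0]  # -> ([[x0, y0, x1, y1], ...], ['category_name', ...])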
#Cell
from matplotlib import patches, patheffects
def _draw_outline(o, lw):
o.set_path_effects([patheffects.Stroke(linewidth=lw, foreground='black'), patheffects.Normal()])
def _draw_rect(ax, b, color='white', text=None, text_size=14, hw=True, rev=False):
lx,ly,w,h = b
if rev: lx,ly,w,h = ly,lx,h,w
if not hw: w,h = w-lx,h-ly
patch = ax.add_patch(patches.Rectangle((lx,ly), w, h, fill=False, edgecolor=color, lw=2))
_draw_outline(patch, 4)
if text is not None:
patch = ax.text(lx,ly, text, verticalalignment='top', color=color, fontsize=text_size, weight='bold')
_draw_outline(patch,1)
#Cell
class TensorBBox(TensorPoint):
"Basic type for a tensor of bounding boxes in an image"
@classmethod
def create(cls, x, sz=None)->None: return cls(tensor(x).view(-1, 4).float(), sz=sz)
def show(self, ctx=None, **kwargs):
x = self.view(-1,4)
for b in x: _draw_rect(ctx, b, hw=False, **kwargs)
return ctx
#Cell
class LabeledBBox(Tuple):
"Basic type for a list of bounding boxes in an image"
def show(self, ctx=None, **kwargs):
for b,l in zip(self.bbox, self.lbl):
if l != '#na#': ctx = retain_type(b, self.bbox).show(ctx=ctx, text=l)
return ctx
@classmethod
def create(cls, x): return cls(x)
bbox,lbl = add_props(lambda i,self: self[i])
#Cell
def image2tensor(img):
"Transform image to byte tensor in `c*h*w` dim order."
res = tensor(img)
if res.dim()==2: res = res.unsqueeze(-1)
return res.permute(2,0,1)
#Cell
PILImage ._tensor_cls = TensorImage
PILImageBW._tensor_cls = TensorImageBW
PILMask ._tensor_cls = TensorMask
#Cell
@ToTensor
def encodes(self, o:PILBase): return o._tensor_cls(image2tensor(o))
@ToTensor
def encodes(self, o:PILMask): return o._tensor_cls(image2tensor(o)[0])
#Cell
def _scale_pnts(y, sz, do_scale=True, y_first=False):
if y_first: y = y.flip(1)
res = y * 2/tensor(sz).float() - 1 if do_scale else y
return TensorPoint(res, sz=sz)
def _unscale_pnts(y, sz): return TensorPoint((y+1) * tensor(sz).float()/2, sz=sz)
#Cell
class PointScaler(Transform):
"Scale a tensor representing points"
order,loss_func = 1,MSELossFlat()
def __init__(self, do_scale=True, y_first=False): self.do_scale,self.y_first = do_scale,y_first
def _grab_sz(self, x):
self.sz = [x.shape[-1], x.shape[-2]] if isinstance(x, Tensor) else x.size
return x
def _get_sz(self, x):
sz = getattr(x, '_meta', {}).get('sz', None)
assert sz is not None or self.sz is not None, "Size could not be inferred, pass it in the init of your TensorPoint with `sz=...`"
return self.sz if sz is None else sz
def setup(self, dl):
its = dl.do_item(0)
for t in its:
if isinstance(t, TensorPoint): self.c = t.numel()
def encodes(self, x:(PILBase,TensorImageBase)): return self._grab_sz(x)
def decodes(self, x:(PILBase,TensorImageBase)): return self._grab_sz(x)
def encodes(self, x:TensorPoint): return _scale_pnts(x, self._get_sz(x), self.do_scale, self.y_first)
def decodes(self, x:TensorPoint): return _unscale_pnts(x, self._get_sz(x))
TensorPoint.default_item_tfms = PointScaler
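# Editor's note on the scaling convention above: `PointScaler` (via _scale_pnts)
# maps pixel coordinates to [-1, 1], so for an image of size (w, h) the centre
# point (w/2, h/2) encodes to (0., 0.) and `decodes` maps it back to (w/2, h/2).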
#Cell
class BBoxLabels(MultiCategory):
create = MultiCategorize(add_na=True)
default_type_tfms = None
#Cell
class BBoxLabeler(Transform):
def setup(self, dl): self.vocab = dl.vocab
def before_call(self): self.bbox,self.lbls = None,None
def decode (self, x, **kwargs):
self.bbox,self.lbls = None,None
return self._call('decodes', x, **kwargs)
def decodes(self, x:TensorMultiCategory):
self.lbls = [self.vocab[a] for a in x]
return x if self.bbox is None else LabeledBBox(self.bbox, self.lbls)
def decodes(self, x:TensorBBox):
self.bbox = x
return self.bbox if self.lbls is None else LabeledBBox(self.bbox, self.lbls)
#Cell
BBoxLabels.default_item_tfms = BBoxLabeler
#Cell
#LabeledBBox can be sent in a tl with MultiCategorize (depending on the order of the tls) but it is already decoded.
@MultiCategorize
def decodes(self, x:LabeledBBox): return x
#Cell
@PointScaler
def encodes(self, x:TensorBBox):
pnts = self.encodes(TensorPoint(x.view(-1,2), sz=x._meta.get('sz', None)))
return TensorBBox(pnts.view(-1, 4), sz=x._meta.get('sz', None))
@PointScaler
def decodes(self, x:TensorBBox):
pnts = self.decodes(TensorPoint(x.view(-1,2), sz=x._meta.get('sz', None)))
return TensorBBox(pnts.view(-1, 4), sz=x._meta.get('sz', None)) |
py | 7dfbf16bf60b38ac43cbbeb87327ba7a353ab126 | class News:
    '''
    News class to define News objects
    '''
    def __init__(self, id, name, Author, title, description, publishedAt):
        self.id = id
        self.name = name
        self.Author = Author
        self.title = title
        self.description = description
        self.publishedAt = publishedAt
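# Minimal usage sketch (the field values below are hypothetical):
def _example_news():
    return News(1, "Daily Planet", "C. Kent", "Sample headline",
                "Short description of the article", "2021-01-01T00:00:00Z")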
|
py | 7dfbf229b6a61d2796db1f726173151cf18aa289 | # Copyright (c) Yuta Saito, Yusuke Narita, and ZOZO Technologies, Inc. All rights reserved.
# Licensed under the Apache 2.0 License.
"""Useful Tools."""
from inspect import isclass
from typing import Dict, Optional, Union
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.utils import check_random_state
from sklearn.utils.validation import _deprecate_positional_args
def estimate_confidence_interval_by_bootstrap(
samples: np.ndarray,
alpha: float = 0.05,
n_bootstrap_samples: int = 10000,
random_state: Optional[int] = None,
) -> Dict[str, float]:
"""Estimate confidence interval by nonparametric bootstrap-like procedure.
Parameters
----------
samples: array-like
Empirical observed samples to be used to estimate cumulative distribution function.
    alpha: float, default=0.05
        Significance level (the resulting interval has 100 * (1 - alpha)% coverage).
n_bootstrap_samples: int, default=10000
Number of resampling performed in the bootstrap procedure.
random_state: int, default=None
Controls the random seed in bootstrap sampling.
Returns
----------
estimated_confidence_interval: Dict[str, float]
Dictionary storing the estimated mean and upper-lower confidence bounds.
"""
assert (0.0 < alpha < 1.0) and isinstance(
alpha, float
), f"alpha must be a positive float, but {alpha} is given"
assert (n_bootstrap_samples > 0) and isinstance(
n_bootstrap_samples, int
), f"n_bootstrap_samples must be a positive integer, but {n_bootstrap_samples} is given"
boot_samples = list()
random_ = check_random_state(random_state)
for _ in np.arange(n_bootstrap_samples):
boot_samples.append(np.mean(random_.choice(samples, size=samples.shape[0])))
lower_bound = np.percentile(boot_samples, 100 * (alpha / 2))
upper_bound = np.percentile(boot_samples, 100 * (1.0 - alpha / 2))
return {
"mean": np.mean(boot_samples),
f"{100 * (1. - alpha)}% CI (lower)": lower_bound,
f"{100 * (1. - alpha)}% CI (upper)": upper_bound,
}
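# Minimal usage sketch (the samples below are synthetic, for illustration only):
# estimate a 95% confidence interval of the mean of observed samples.
def _example_estimate_confidence_interval() -> Dict[str, float]:
    random_ = check_random_state(12345)
    samples = random_.normal(loc=0.3, scale=1.0, size=1000)
    # returned keys follow the f-strings above, e.g. "95.0% CI (lower)"
    return estimate_confidence_interval_by_bootstrap(
        samples=samples, alpha=0.05, n_bootstrap_samples=1000, random_state=12345
    )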
def convert_to_action_dist(
n_actions: int,
selected_actions: np.ndarray,
) -> np.ndarray:
"""Convert selected actions (output of `run_bandit_simulation`) to distribution over actions.
Parameters
----------
n_actions: int
Number of actions.
selected_actions: array-like, shape (n_rounds, len_list)
Sequence of actions selected by evaluation policy
at each round in offline bandit simulation.
Returns
----------
action_dist: array-like, shape (n_rounds, n_actions, len_list)
Action choice probabilities (can be deterministic).
"""
n_rounds, len_list = selected_actions.shape
action_dist = np.zeros((n_rounds, n_actions, len_list))
for pos in np.arange(len_list):
selected_actions_ = selected_actions[:, pos]
action_dist[
np.arange(n_rounds),
selected_actions_,
pos * np.ones(n_rounds, int),
] = 1
return action_dist
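# Minimal sketch: with two rounds, three actions, and len_list=1, each selected
# action becomes a one-hot slice along the action axis.
def _example_convert_to_action_dist() -> np.ndarray:
    selected_actions = np.array([[0], [2]])  # shape (n_rounds=2, len_list=1)
    action_dist = convert_to_action_dist(n_actions=3, selected_actions=selected_actions)
    assert action_dist.shape == (2, 3, 1)
    assert action_dist[0, 0, 0] == 1 and action_dist[1, 2, 0] == 1
    return action_dist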
@_deprecate_positional_args
def check_is_fitted(
estimator: BaseEstimator, attributes=None, *, msg: str = None, all_or_any=all
) -> bool:
"""Perform is_fitted validation for estimator.
Note
----
    Checks if the estimator is fitted by verifying the presence of
    fitted attributes (ending with a trailing underscore). Unlike
    scikit-learn's version, it returns the result as a boolean rather than
    raising a NotFittedError.
This utility is meant to be used internally by estimators themselves,
typically in their own predict / transform methods.
Parameters
----------
estimator : estimator instance.
estimator instance for which the check is performed.
attributes : str, list or tuple of str, default=None
Attribute name(s) given as string or a list/tuple of strings
Eg.: ``["coef_", "estimator_", ...], "coef_"``
        If `None`, `estimator` is considered fitted if there exists an
        attribute that ends with an underscore and does not start with a
        double underscore.
msg : string
The default error message is, "This %(name)s instance is not fitted
yet. Call 'fit' with appropriate arguments before using this
estimator."
For custom messages if "%(name)s" is present in the message string,
it is substituted for the estimator name.
Eg. : "Estimator, %(name)s, must be fitted before sparsifying".
all_or_any : callable, {all, any}, default all
Specify whether all or any of the given attributes must exist.
Returns
-------
is_fitted: bool
Whether the given estimator is fitted or not.
References
-------
https://scikit-learn.org/stable/modules/generated/sklearn.utils.validation.check_is_fitted.html
"""
if isclass(estimator):
raise TypeError("{} is a class, not an instance.".format(estimator))
if msg is None:
msg = (
"This %(name)s instance is not fitted yet. Call 'fit' with "
"appropriate arguments before using this estimator."
)
if not hasattr(estimator, "fit"):
raise TypeError("%s is not an estimator instance." % (estimator))
if attributes is not None:
if not isinstance(attributes, (list, tuple)):
attributes = [attributes]
attrs = all_or_any([hasattr(estimator, attr) for attr in attributes])
else:
attrs = [
v for v in vars(estimator) if v.endswith("_") and not v.startswith("__")
]
is_fitted = len(attrs) != 0
return is_fitted
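# Minimal sketch (uses scikit-learn, which this module already depends on):
# an unfitted estimator has no trailing-underscore attributes; a fitted one does.
def _example_check_is_fitted() -> bool:
    from sklearn.linear_model import LinearRegression
    est = LinearRegression()
    assert not check_is_fitted(est)
    est.fit(np.arange(6).reshape(3, 2), np.arange(3))
    return check_is_fitted(est)  # True, e.g. `coef_` now exists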
def check_bandit_feedback_inputs(
context: np.ndarray,
action: np.ndarray,
reward: np.ndarray,
position: Optional[np.ndarray] = None,
pscore: Optional[np.ndarray] = None,
action_context: Optional[np.ndarray] = None,
) -> Optional[AssertionError]:
"""Check inputs for bandit learning or simulation.
Parameters
-----------
context: array-like, shape (n_rounds, dim_context)
Context vectors in each round, i.e., :math:`x_t`.
action: array-like, shape (n_rounds,)
Action sampled by a behavior policy in each round of the logged bandit feedback, i.e., :math:`a_t`.
reward: array-like, shape (n_rounds,)
Observed rewards (or outcome) in each round, i.e., :math:`r_t`.
position: array-like, shape (n_rounds,), default=None
Positions of each round in the given logged bandit feedback.
pscore: array-like, shape (n_rounds,), default=None
Propensity scores, the probability of selecting each action by behavior policy,
in the given logged bandit feedback.
action_context: array-like, shape (n_actions, dim_action_context)
Context vectors characterizing each action.
"""
assert isinstance(context, np.ndarray), "context must be ndarray"
assert context.ndim == 2, "context must be 2-dimensional"
assert isinstance(action, np.ndarray), "action must be ndarray"
assert action.ndim == 1, "action must be 1-dimensional"
assert isinstance(reward, np.ndarray), "reward must be ndarray"
assert reward.ndim == 1, "reward must be 1-dimensional"
if pscore is not None:
assert isinstance(pscore, np.ndarray), "pscore must be ndarray"
assert pscore.ndim == 1, "pscore must be 1-dimensional"
assert (
context.shape[0] == action.shape[0] == reward.shape[0] == pscore.shape[0]
), "context, action, reward, and pscore must be the same size."
if position is not None:
assert isinstance(position, np.ndarray), "position must be ndarray"
assert position.ndim == 1, "position must be 1-dimensional"
assert (
context.shape[0] == action.shape[0] == reward.shape[0] == position.shape[0]
), "context, action, reward, and position must be the same size."
else:
assert (
context.shape[0] == action.shape[0] == reward.shape[0]
), "context, action, and reward must be the same size."
if action_context is not None:
assert isinstance(action_context, np.ndarray), "action_context must be ndarray"
assert action_context.ndim == 2, "action_context must be 2-dimensional"
        assert (action.max() + 1) == action_context.shape[
            0
        ], "the number of actions and the size of the first dimension of action_context must be the same."
def sigmoid(x: Union[float, np.ndarray]) -> Union[float, np.ndarray]:
"""Calculate sigmoid function."""
return 1.0 / (1.0 + np.exp(-x))
def softmax(x: Union[float, np.ndarray]) -> Union[float, np.ndarray]:
"""Calculate softmax function."""
b = np.expand_dims(np.max(x, axis=1), 1)
numerator = np.exp(x - b)
denominator = np.expand_dims(np.sum(numerator, axis=1), 1)
return numerator / denominator
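# Minimal sketch: softmax is applied row-wise, so every row of the output sums
# to 1 and a constant row maps to the uniform distribution.
def _example_softmax() -> np.ndarray:
    logits = np.array([[1.0, 2.0, 3.0], [0.0, 0.0, 0.0]])
    probs = softmax(logits)
    assert np.allclose(probs.sum(axis=1), 1.0)
    assert np.allclose(probs[1], 1.0 / 3.0)
    return probs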
|
py | 7dfbf47eef0924a2d154bc99705ec2dc57755bab | # Generated by Django 3.1.2 on 2020-10-17 19:58
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('products', '0006_auto_20201015_1941'),
]
operations = [
migrations.AddField(
model_name='product',
name='image',
field=models.ImageField(blank=True, null=True, upload_to='products/'),
),
]
|
py | 7dfbf4b017bc85ac8afb4173737963bb2c743402 | import logging
# Create the Logger
loggers = logging.getLogger(__name__)
|
py | 7dfbf4de405a2d6495d865be23af272443c258e5 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ChaosContextMapItem(Model):
"""Describes an item in the ChaosContextMap in ChaosParameters.
.
:param key: The key for a ChaosContextMapItem.
:type key: str
:param value: The value for a ChaosContextMapItem.
:type value: str
"""
_validation = {
'key': {'required': True},
'value': {'required': True},
}
_attribute_map = {
'key': {'key': 'Key', 'type': 'str'},
'value': {'key': 'Value', 'type': 'str'},
}
def __init__(self, key, value):
self.key = key
self.value = value
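# Minimal usage sketch (the key/value strings below are hypothetical): both
# fields are required, as declared in `_validation` above.
def _example_chaos_context_map_item():
    return ChaosContextMapItem(key="Scenario", value="NodeRestart")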
|
py | 7dfbf50ffe70a34fb2a22e946a7d48efc09428d5 | # start-snippet
from pathlib import Path
from dagster import graph, make_python_type_usable_as_dagster_type, op, repository
from dagster.core.definitions.no_step_launcher import no_step_launcher
from dagster_aws.emr import emr_pyspark_step_launcher
from dagster_aws.s3 import s3_pickle_io_manager, s3_resource
from dagster_pyspark import DataFrame as DagsterPySparkDataFrame
from dagster_pyspark import pyspark_resource
from pyspark.sql import DataFrame, Row
from pyspark.sql.types import IntegerType, StringType, StructField, StructType
# Make pyspark.sql.DataFrame map to dagster_pyspark.DataFrame
make_python_type_usable_as_dagster_type(python_type=DataFrame, dagster_type=DagsterPySparkDataFrame)
@op(required_resource_keys={"pyspark", "pyspark_step_launcher"})
def make_people(context) -> DataFrame:
schema = StructType([StructField("name", StringType()), StructField("age", IntegerType())])
rows = [Row(name="Thom", age=51), Row(name="Jonny", age=48), Row(name="Nigel", age=49)]
return context.resources.pyspark.spark_session.createDataFrame(rows, schema)
@op(required_resource_keys={"pyspark_step_launcher"})
def filter_over_50(people: DataFrame) -> DataFrame:
return people.filter(people["age"] > 50)
@op(required_resource_keys={"pyspark_step_launcher"})
def count_people(people: DataFrame) -> int:
return people.count()
emr_resource_defs = {
"pyspark_step_launcher": emr_pyspark_step_launcher.configured(
{
"cluster_id": {"env": "EMR_CLUSTER_ID"},
"local_pipeline_package_path": str(Path(__file__).parent),
"deploy_local_pipeline_package": True,
"region_name": "us-west-1",
"staging_bucket": "my_staging_bucket",
"wait_for_logs": True,
}
),
"pyspark": pyspark_resource.configured({"spark_conf": {"spark.executor.memory": "2g"}}),
"s3": s3_resource,
"io_manager": s3_pickle_io_manager.configured(
{"s3_bucket": "my_staging_bucket", "s3_prefix": "simple-pyspark"}
),
}
local_resource_defs = {
"pyspark_step_launcher": no_step_launcher,
"pyspark": pyspark_resource.configured({"spark_conf": {"spark.default.parallelism": 1}}),
}
@graph
def count_people_over_50():
count_people(filter_over_50(make_people()))
count_people_over_50_local = count_people_over_50.to_job(
name="local", resource_defs=local_resource_defs
)
count_people_over_50_emr = count_people_over_50.to_job(name="prod", resource_defs=emr_resource_defs)
# end-snippet
@repository
def emr_pyspark_example():
return [count_people_over_50_emr, count_people_over_50_local]
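# Minimal run sketch (assumes a local PySpark installation; the EMR variant
# additionally needs the AWS resources configured above).
def run_local_example():
    result = count_people_over_50_local.execute_in_process()
    assert result.success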
|
py | 7dfbf7a25214bde43869a6732903cbd9c980accc | #!/usr/bin/env python
from __future__ import print_function
# TODO: i18n
print("Hello world!")
def foo():
# todo: i18n
# nothing to see here
print("Happy foo day!")
if __name__ == '__main__':
foo()
|
py | 7dfbf8a034eed94ea974150f5d6054136c23a239 | #!/usr/bin/python3
from setuptools import setup
version="0.2.5"
setup(name='aioblescan',
packages=['aioblescan', 'aioblescan.plugins'],
version=version,
author='Lee Bussy',
author_email='[email protected]',
description='Scan BLEacons for Tilt in support of BrewPi.',
url='https://github.com/brewpi-remix/aioblescan',
download_url='https://github.com/brewpi-remix/aioblescan.git',
keywords = ['bluetooth', 'advertising', 'hci', 'ble'],
license='MIT',
install_requires=[],
classifiers=[
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.7'
])
|
py | 7dfbf8fa608fec96a677c3332466732d9a60b250 | """
This is the example config file
"""
# More one-char representations will be added in order to support
# other objects.
# The following `a = 10` is an example, although it does not work yet
# because a '10' object has not been included.
a = 10
# This is the map array that represents the map.
# You have to fill the array into an (m x n) matrix with all elements
# not None. A strangely shaped array may cause malfunctions (a basic shape
# check follows the array below).
# The currently available object indices are listed below; each index can
# fill more than one element in the array.
# 0: nothing
# 1: wall
# 2: ladder
# 3: coin
# 4: spike
# 5: triangle -------source
# 6: square ------ source
# 7: coin -------- target
# 8: princess -------source
# 9: player  (cells marked 9 are candidate spawn points; one is selected randomly to place the player)
# unsupported indices will work as 0: nothing
map_array = [
[1, 1, 1, 1, 1, 1, 1, 1],
[1, 9, 2, 9, 9, 2, 9, 1],
[1, 9, 2, 9, 9, 2, 9, 1],
[1, 9, 2, 9, 9, 2, 9, 1],
[1, 9, 1, 1, 2, 1, 9, 1],
[1, 9, 9, 9, 2, 9, 9, 1],
[1, 9, 9, 9, 2, 9, 9, 1],
[1, 1, 1, 1, 1, 1, 1, 1],
]
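# Minimal sanity-check sketch for the requirement above: the map must be a
# rectangular (m x n) matrix, so every row needs the same length.
assert all(len(row) == len(map_array[0]) for row in map_array), \
    "map_array must be rectangular (all rows the same length)"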
# set to true -> win when touching the object
# 0, 1, 2, 3, 4, 9 are not possible
end_game = {
6: True,
}
rewards = {
"positive": 5, # when collecting a coin
"win": 1, # endgame (win)
"negative": -25, # endgame (die)
"tick": 0 # living
}
map_config = {
'map_array': map_array,
'rewards': rewards,
'end_game': end_game,
'init_score': 0,
    'init_lives': 1,  # please don't change; other values are not going to work
    # works automatically only for the aigym-wrapped version
'fps': 30,
'frame_skip': 1,
'force_fps': True, # set to true to make the game run as fast as possible
'display_screen': False,
'episode_length': 100,
'episode_end_sleep': 0, # sec
} |
py | 7dfbf90d5f8df83f602b39d8424e7e799165ff56 | # pylint: disable=C0114
# pylint: disable=C0301
# pylint: disable=E0401
# pylint: disable=R1710
# Copyright 2021 - 2022, Alvin Kuruvilla <[email protected]>, Dr. Rajesh Kumar <[email protected]>
# Use of this source code is governed by an MIT-style
# license that can be found in the LICENSE file or at
# https://opensource.org/licenses/MIT.
import os
import yaml
from prettytable import PrettyTable
from base.log import Logger
def path_is_valid(path: str) -> bool:
"""Given a path, return True if it is a valid file path"""
log = Logger()
is_file = os.path.isfile(path)
if is_file:
# Now check that the extension is YAML
if path.lower().endswith(".yaml"):
return True
log.km_fatal("The provided path is not a YAML file")
return False
log.km_fatal("The provided path was not a file")
return False
def get_value_from_key(filepath: str, key: str):
"""This function looks for the value associated with a provided key in a provided filepath"""
if path_is_valid(filepath):
with open(filepath, "r", encoding="utf8") as stream:
try:
data = yaml.safe_load(stream)
except yaml.YAMLError as exc:
print(exc)
if key in data:
return data[key]
    # NOTE: For now we assume that every field in the YAML file is filled in
    # correctly (i.e. there are no empty fields), so returning an empty string
    # means "no result" rather than a potentially valid value. Since we cannot
    # guarantee correctness, we should consider alternatives to the empty
    # string that give the same indication.
return ""
def get_yaml_values_from_file(filepath) -> list:
"""From a path to a YAML file load the file and return its values as a list"""
if path_is_valid(filepath):
with open(filepath, "r", encoding="utf8") as stream:
try:
data = yaml.safe_load(stream)
except yaml.YAMLError as exc:
print(exc)
return list(data.values())
def get_yaml_keys_from_file(filepath) -> list:
"""From a path to a YAML file load the file and return its keys as a list"""
if path_is_valid(filepath):
with open(filepath, "r", encoding="utf8") as stream:
try:
data = yaml.safe_load(stream)
except yaml.YAMLError as exc:
print(exc)
return list(data.keys())
def get_all_associated_values(key: str):
"""A helper function to retrieve all values for a given key across all yaml files stored in the 'users' directory"""
store = []
directory = os.path.join(os.getcwd(), "users")
for file in os.scandir(directory):
if file.path.endswith(".yaml") and file.is_file():
value = get_value_from_key(file.path, key)
# Type cast is required here to make sure str to int comparisons don't fail
store.append(str(value))
return store
def write_to_yaml_file(filename: str, data: dict) -> None:
"""This function enables the YAMLParser to write a data dictionary as a
YAML file
Note: This function will always store resulting yaml files in
the 'users' folder. A convention we may stick to is to use the first letter
of their first and last name and their user_id (because that will be
    unique to each user) or just the user_id. If the file does not exist
    when calling this function, it will also be created.
"""
file_path = os.path.join(os.getcwd(), "users", filename + ".yaml")
with open(file_path, "w+", encoding="utf8") as file:
yaml.dump(data, file, sort_keys=False)
def print_as_table(filepath):
"""Take in a path to a YAML file and display its contents as a table"""
keys = get_yaml_keys_from_file(filepath)
values = get_yaml_values_from_file(filepath)
assert len(keys) == len(
values
), "Cannot create table if the number of rows (keys) != number of columns (values)!"
out = PrettyTable()
out.field_names = keys
out.add_row(values)
print(out)
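# Minimal round-trip sketch (the filename and data below are hypothetical, and
# a 'users' directory must already exist under the current working directory,
# as the helpers above expect).
def _example_round_trip() -> str:
    write_to_yaml_file("ak0001", {"first_name": "Alvin", "user_id": "0001"})
    file_path = os.path.join(os.getcwd(), "users", "ak0001.yaml")
    return get_value_from_key(file_path, "first_name")  # -> "Alvin"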
|
py | 7dfbfa5a03ea49a90665a5cfaadbf9a95ddf1995 | # Copyright (c) 2020 fortiss GmbH
#
# Authors: Patrick Hart, Julian Bernhard, Klemens Esterle, and
# Tobias Kessler, Mansoor Nasir
#
# This software is released under the MIT License.
# https://opensource.org/licenses/MIT
# The code is adapted from opensource implementation - https://github.com/ku2482/fqf-iqn-qrdqn.pytorch
# MIT License -Copyright (c) 2020 Toshiki Watanabe
import torch
from torch.optim import Adam, RMSprop
from bark_ml.library_wrappers.lib_fqf_iqn_qrdqn.model import FQF
from bark_ml.library_wrappers.lib_fqf_iqn_qrdqn.utils \
import disable_gradients, update_params, \
calculate_quantile_huber_loss, evaluate_quantile_at_action
from .base_agent import BaseAgent
class FQFAgent(BaseAgent):
def __init__(self, *args, **kwargs):
super(FQFAgent, self).__init__(*args, **kwargs)
def reset_params(self, params):
super(FQFAgent, self).reset_params(params)
# NOTE: The author said the training of Fraction Proposal Net is
# unstable and value distribution degenerates into a deterministic
# one rarely (e.g. 1 out of 20 seeds). So you can use entropy of value
# distribution as a regularizer to stabilize (but possibly slow down)
# training.
self.ent_coef = self._params["ML"]["FQFAgent"]["Ent_coefs", "", 0]
self.N = self._params["ML"]["FQFAgent"]["N", "", 32]
self.num_cosines = self._params["ML"]["FQFAgent"]["NumCosines", "", 64]
self.kappa = self._params["ML"]["FQFAgent"]["Kappa", "", 1.0]
self.fractional_learning_rate = self._params["ML"]["FQFAgent"]["FractionalLearningRate", "",
2.5e-9]
def init_always(self):
super(FQFAgent, self).init_always()
# Online network.
self.online_net = FQF(num_channels=self.observer.observation_space.shape[0],
num_actions=self.num_actions,
N=self.N,
num_cosines=self.num_cosines,
dueling_net=self.dueling_net,
noisy_net=self.noisy_net,
params=self._params).to(self.device)
# Target network.
self.target_net = FQF(num_channels=self.observer.observation_space.shape[0],
num_actions=self.num_actions,
N=self.N,
num_cosines=self.num_cosines,
dueling_net=self.dueling_net,
noisy_net=self.noisy_net,
target=True,
params=self._params).to(self.device)
# Copy parameters of the learning network to the target network.
self.update_target()
# Disable calculations of gradients of the target network.
disable_gradients(self.target_net)
self.fraction_optim = RMSprop(
self.online_net.fraction_net.parameters(),
lr=self.fractional_learning_rate,
alpha=0.95,
eps=0.00001)
self.quantile_optim = Adam(
list(self.online_net.dqn_net.parameters()) +
list(self.online_net.cosine_net.parameters()) +
list(self.online_net.quantile_net.parameters()),
lr=self._params["ML"]["FQFAgent"]["QuantileLearningRate", "", 5e-5],
eps=1e-2 / self.batch_size)
def clean_pickables(self, pickables):
super(FQFAgent, self).clean_pickables(pickables)
del pickables["fraction_optim"]
del pickables["quantile_optim"]
def update_target(self):
self.target_net.dqn_net.load_state_dict(
self.online_net.dqn_net.state_dict())
self.target_net.quantile_net.load_state_dict(
self.online_net.quantile_net.state_dict())
self.target_net.cosine_net.load_state_dict(
self.online_net.cosine_net.state_dict())
def learn(self):
self.learning_steps += 1
self.online_net.sample_noise()
self.target_net.sample_noise()
if self.use_per:
(states, actions, rewards, next_states, dones), weights = \
self.memory.sample(self.batch_size)
else:
states, actions, rewards, next_states, dones = \
self.memory.sample(self.batch_size)
weights = None
# Calculate embeddings of current states.
state_embeddings = self.online_net.calculate_state_embeddings(states)
# Calculate fractions of current states and entropies.
taus, tau_hats, entropies = \
self.online_net.calculate_fractions(
state_embeddings=state_embeddings.detach())
# Calculate quantile values of current states and actions at tau_hats.
current_sa_quantile_hats = evaluate_quantile_at_action(
self.online_net.calculate_quantiles(tau_hats,
state_embeddings=state_embeddings),
actions)
assert current_sa_quantile_hats.shape == (self.batch_size, self.N, 1)
# NOTE: Detach state_embeddings not to update convolution layers. Also,
# detach current_sa_quantile_hats because I calculate gradients of taus
# explicitly, not by backpropagation.
fraction_loss = self.calculate_fraction_loss(
state_embeddings.detach(), current_sa_quantile_hats.detach(), taus,
actions, weights)
quantile_loss, mean_q, errors = self.calculate_quantile_loss(
state_embeddings, tau_hats, current_sa_quantile_hats, actions, rewards,
next_states, dones, weights)
entropy_loss = -self.ent_coef * entropies.mean()
update_params(self.fraction_optim,
fraction_loss + entropy_loss,
networks=[self.online_net.fraction_net],
retain_graph=True,
grad_cliping=self.grad_cliping)
update_params(self.quantile_optim,
quantile_loss,
networks=[
self.online_net.dqn_net, self.online_net.cosine_net,
self.online_net.quantile_net
],
retain_graph=False,
grad_cliping=self.grad_cliping)
if self.use_per:
self.memory.update_priority(errors)
if self.learning_steps % self.summary_log_interval == 0:
self.writer.add_scalar('loss/fraction_loss',
fraction_loss.detach().item(), 4 * self.steps)
self.writer.add_scalar('loss/quantile_loss',
quantile_loss.detach().item(), 4 * self.steps)
if self.ent_coef > 0.0:
self.writer.add_scalar('loss/entropy_loss',
entropy_loss.detach().item(), 4 * self.steps)
self.writer.add_scalar('stats/mean_Q', mean_q, 4 * self.steps)
self.writer.add_scalar('stats/mean_entropy_of_value_distribution',
entropies.mean().detach().item(), 4 * self.steps)
def calculate_fraction_loss(self, state_embeddings, sa_quantile_hats, taus,
actions, weights):
assert not state_embeddings.requires_grad
assert not sa_quantile_hats.requires_grad
batch_size = state_embeddings.shape[0]
with torch.no_grad():
sa_quantiles = evaluate_quantile_at_action(
self.online_net.calculate_quantiles(
taus=taus[:, 1:-1], state_embeddings=state_embeddings), actions)
assert sa_quantiles.shape == (batch_size, self.N - 1, 1)
# NOTE: Proposition 1 in the paper requires F^{-1} is non-decreasing.
# I relax this requirements and calculate gradients of taus even when
# F^{-1} is not non-decreasing.
values_1 = sa_quantiles - sa_quantile_hats[:, :-1]
signs_1 = sa_quantiles > torch.cat(
[sa_quantile_hats[:, :1], sa_quantiles[:, :-1]], dim=1)
assert values_1.shape == signs_1.shape
values_2 = sa_quantiles - sa_quantile_hats[:, 1:]
signs_2 = sa_quantiles < torch.cat(
[sa_quantiles[:, 1:], sa_quantile_hats[:, -1:]], dim=1)
assert values_2.shape == signs_2.shape
gradient_of_taus = (torch.where(signs_1, values_1, -values_1) +
torch.where(signs_2, values_2, -values_2)).view(
batch_size, self.N - 1)
assert not gradient_of_taus.requires_grad
assert gradient_of_taus.shape == taus[:, 1:-1].shape
# Gradients of the network parameters and corresponding loss
# are calculated using chain rule.
if weights is not None:
fraction_loss = ((
(gradient_of_taus * taus[:, 1:-1]).sum(dim=1, keepdim=True)) *
weights).mean()
else:
fraction_loss = \
(gradient_of_taus * taus[:, 1:-1]).sum(dim=1).mean()
return fraction_loss
def calculate_quantile_loss(self, state_embeddings, tau_hats,
current_sa_quantile_hats, actions, rewards,
next_states, dones, weights):
assert not tau_hats.requires_grad
with torch.no_grad():
# NOTE: Current and target quantiles share the same proposed
# fractions to reduce computations. (i.e. next_tau_hats = tau_hats)
# Calculate Q values of next states.
if self.double_q_learning:
# Sample the noise of online network to decorrelate between
# the action selection and the quantile calculation.
self.online_net.sample_noise()
next_q = self.online_net.calculate_q(states=next_states)
else:
next_state_embeddings = \
self.target_net.calculate_state_embeddings(next_states)
next_q = \
self.target_net.calculate_q(
state_embeddings=next_state_embeddings)
# Calculate greedy actions.
next_actions = torch.argmax(next_q, dim=1, keepdim=True)
assert next_actions.shape == (self.batch_size, 1)
# Calculate features of next states.
if self.double_q_learning:
next_state_embeddings = \
self.target_net.calculate_state_embeddings(next_states)
# Calculate quantile values of next states and actions at tau_hats.
next_sa_quantile_hats = evaluate_quantile_at_action(
self.target_net.calculate_quantiles(
taus=tau_hats, state_embeddings=next_state_embeddings),
next_actions).transpose(1, 2)
assert next_sa_quantile_hats.shape == (self.batch_size, 1, self.N)
# Calculate target quantile values.
target_sa_quantile_hats = rewards[..., None] + (
1.0 - dones[..., None]) * self.gamma_n * next_sa_quantile_hats
assert target_sa_quantile_hats.shape == (self.batch_size, 1, self.N)
td_errors = target_sa_quantile_hats - current_sa_quantile_hats
assert td_errors.shape == (self.batch_size, self.N, self.N)
quantile_huber_loss = calculate_quantile_huber_loss(
td_errors, tau_hats, weights, self.kappa)
return quantile_huber_loss, next_q.detach().mean().item(), \
td_errors.detach().abs()
|
py | 7dfbfb5b397a5a7609dfef8003d275de41392766 | # -*- coding: utf-8 -*-
#
# PySceneDetect: Python-Based Video Scene Detector
# ---------------------------------------------------------------
# [ Site: http://www.bcastell.com/projects/PySceneDetect/ ]
# [ Github: https://github.com/Breakthrough/PySceneDetect/ ]
# [ Documentation: http://pyscenedetect.readthedocs.org/ ]
#
# Copyright (C) 2014-2021 Brandon Castellano <http://www.bcastell.com>.
#
# PySceneDetect is licensed under the BSD 3-Clause License; see the included
# LICENSE file, or visit one of the following pages for details:
# - https://github.com/Breakthrough/PySceneDetect/
# - http://www.bcastell.com/projects/PySceneDetect/
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
""" ``scenedetect.frame_timecode`` Module
This module contains the :py:class:`FrameTimecode` object, which is used as a way for
PySceneDetect to store frame-accurate timestamps of each cut. This is done by also
specifying the video framerate with the timecode, allowing a frame number to be
converted to/from a floating-point number of seconds, or string in the form
`"HH:MM:SS[.nnn]"` where the `[.nnn]` part is optional.
See the following examples, or the :py:class:`FrameTimecode constructor <FrameTimecode>`.
Unit tests for the FrameTimecode object can be found in `tests/test_timecode.py`.
"""
# Standard Library Imports
import math
# PySceneDetect Library Imports
from scenedetect.platform import STRING_TYPE
MINIMUM_FRAMES_PER_SECOND_FLOAT = 1.0 / 1000.0
MINIMUM_FRAMES_PER_SECOND_DELTA_FLOAT = 1.0 / 100000
class FrameTimecode(object):
""" Object for frame-based timecodes, using the video framerate
to compute back and forth between frame number and second/timecode formats.
The timecode argument is valid only if it complies with one of the following
three types/formats:
1) string: standard timecode HH:MM:SS[.nnn]:
`str` in form 'HH:MM:SS' or 'HH:MM:SS.nnn', or
`list`/`tuple` in form [HH, MM, SS] or [HH, MM, SS.nnn]
2) float: number of seconds S[.SSS], where S >= 0.0:
`float` in form S.SSS, or
`str` in form 'Ss' or 'S.SSSs' (e.g. '5s', '1.234s')
3) int: Exact number of frames N, where N >= 0:
`int` in form `N`, or
`str` in form 'N'
Arguments:
timecode (str, float, int, or FrameTimecode): A timecode or frame
number, given in any of the above valid formats/types. This
argument is always required.
fps (float, or FrameTimecode, conditionally required): The framerate
to base all frame to time arithmetic on (if FrameTimecode, copied
from the passed framerate), to allow frame-accurate arithmetic. The
framerate must be the same when combining FrameTimecode objects
in operations. This argument is always required, unless **timecode**
is a FrameTimecode.
Raises:
TypeError: Thrown if timecode is wrong type/format, or if fps is None
or a type other than int or float.
ValueError: Thrown when specifying a negative timecode or framerate.
"""
def __init__(self, timecode=None, fps=None):
# type: (Union[int, float, str, FrameTimecode], float,
# Union[int, float, str, FrameTimecode])
# The following two properties are what is used to keep track of time
# in a frame-specific manner. Note that once the framerate is set,
# the value should never be modified (only read if required).
self.framerate = None
self.frame_num = None
# Copy constructor. Only the timecode argument is used in this case.
if isinstance(timecode, FrameTimecode):
self.framerate = timecode.framerate
self.frame_num = timecode.frame_num
if fps is not None:
raise TypeError('Framerate cannot be overwritten when copying a FrameTimecode.')
else:
# Ensure other arguments are consistent with API.
if fps is None:
raise TypeError('Framerate (fps) is a required argument.')
if isinstance(fps, FrameTimecode):
fps = fps.framerate
# Process the given framerate, if it was not already set.
if not isinstance(fps, (int, float)):
raise TypeError('Framerate must be of type int/float.')
elif (isinstance(fps, int) and not fps > 0) or (
isinstance(fps, float) and not fps >= MINIMUM_FRAMES_PER_SECOND_FLOAT):
raise ValueError('Framerate must be positive and greater than zero.')
self.framerate = float(fps)
# Process the timecode value, storing it as an exact number of frames.
if isinstance(timecode, (str, STRING_TYPE)):
self.frame_num = self._parse_timecode_string(timecode)
else:
self.frame_num = self._parse_timecode_number(timecode)
# Alternative formats under consideration (require unit tests before adding):
# Standard timecode in list format [HH, MM, SS.nnn]
#elif isinstance(timecode, (list, tuple)) and len(timecode) == 3:
# if any(not isinstance(x, (int, float)) for x in timecode):
# raise ValueError('Timecode components must be of type int/float.')
# hrs, mins, secs = timecode
# if not (hrs >= 0 and mins >= 0 and secs >= 0 and mins < 60
# and secs < 60):
# raise ValueError('Timecode components must be positive.')
# secs += (((hrs * 60.0) + mins) * 60.0)
# self.frame_num = int(secs * self.framerate)
def get_frames(self):
# type: () -> int
""" Get the current time/position in number of frames. This is the
equivalent of accessing the self.frame_num property (which, along
with the specified framerate, forms the base for all of the other
time measurement calculations, e.g. the :py:meth:`get_seconds` method).
If using to compare a :py:class:`FrameTimecode` with a frame number,
you can do so directly against the object (e.g. ``FrameTimecode(10, 10.0) <= 10``).
Returns:
int: The current time in frames (the current frame number).
"""
return int(self.frame_num)
def get_framerate(self):
# type: () -> float
""" Get Framerate: Returns the framerate used by the FrameTimecode object.
Returns:
float: Framerate of the current FrameTimecode object, in frames per second.
"""
return self.framerate
def equal_framerate(self, fps):
# type: (float) -> bool
""" Equal Framerate: Determines if the passed framerate is equal to that of the
FrameTimecode object.
Arguments:
fps: Framerate (float) to compare against within the precision constant
MINIMUM_FRAMES_PER_SECOND_DELTA_FLOAT defined in this module.
Returns:
bool: True if passed fps matches the FrameTimecode object's framerate, False otherwise.
"""
return math.fabs(self.framerate - fps) < MINIMUM_FRAMES_PER_SECOND_DELTA_FLOAT
def get_seconds(self):
# type: () -> float
""" Get the frame's position in number of seconds.
If using to compare a :py:class:`FrameTimecode` with a frame number,
you can do so directly against the object (e.g. ``FrameTimecode(10, 10.0) <= 1.0``).
Returns:
float: The current time/position in seconds.
"""
return float(self.frame_num) / self.framerate
def get_smpte_timecode(self):
# type: () -> str
""" Get a formatted timecode string of the form HH:MM:SS:FF.
Returns:
str: The current time in the form ``"HH:MM:SS:FF"``.
"""
fps = int(round(self.framerate))
frames_per_hour = int(round(fps * 60 * 60))
frames_per_24_hours = frames_per_hour * 24
frames_per_10_minutes = int(round(fps * 60 * 10))
frames_per_minute = int(round(fps) * 60)
frames = self.frame_num % fps
secs = int((self.frame_num // fps) % 60)
mins = int(((self.frame_num // fps) // 60) % 60)
hrs = int((((self.frame_num // fps) // 60) // 60))
return '%02d:%02d:%02d:%02d' % (hrs, mins, secs, frames)
def get_timecode(self, precision=3, use_rounding=True):
# type: (int, bool) -> str
""" Get a formatted timecode string of the form HH:MM:SS[.nnn].
Args:
precision: The number of decimal places to include in the output ``[.nnn]``.
use_rounding: True (default) to round the output to the desired precision.
Returns:
str: The current time in the form ``"HH:MM:SS[.nnn]"``.
"""
# Compute hours and minutes based off of seconds, and update seconds.
secs = self.get_seconds()
base = 60.0 * 60.0
hrs = int(secs / base)
secs -= (hrs * base)
base = 60.0
mins = int(secs / base)
secs -= (mins * base)
# Convert seconds into string based on required precision.
if precision > 0:
if use_rounding:
secs = round(secs, precision)
#secs = math.ceil(secs * (10**precision)) / float(10**precision)
msec = format(secs, '.%df' % precision)[-precision:]
secs = '%02d.%s' % (int(secs), msec)
else:
secs = '%02d' % int(round(secs, 0)) if use_rounding else '%02d' % int(secs)
# Return hours, minutes, and seconds as a formatted timecode string.
return '%02d:%02d:%s' % (hrs, mins, secs)
def previous_frame(self):
# type: () -> FrameTimecode
"""
Returns a new FrameTimecode for the frame before this one.
:return: New FrameTimeCode object, one frame earlier
"""
new_timecode = FrameTimecode(self)
new_timecode.frame_num -= 1
return new_timecode
def _seconds_to_frames(self, seconds):
# type: (float) -> int
""" Converts the passed value seconds to the nearest number of frames using
the current FrameTimecode object's FPS (self.framerate).
Returns:
Integer number of frames the passed number of seconds represents using
the current FrameTimecode's framerate property.
"""
return int(seconds * self.framerate)
def _parse_timecode_number(self, timecode):
# type: (Union[int, float]) -> int
""" Parses a timecode number, storing it as the exact number of frames.
Can be passed as frame number (int), seconds (float)
Raises:
TypeError, ValueError
"""
# Process the timecode value, storing it as an exact number of frames.
# Exact number of frames N
if isinstance(timecode, int):
if timecode < 0:
raise ValueError('Timecode frame number must be positive and greater than zero.')
return timecode
# Number of seconds S
elif isinstance(timecode, float):
if timecode < 0.0:
raise ValueError('Timecode value must be positive and greater than zero.')
return self._seconds_to_frames(timecode)
# FrameTimecode
elif isinstance(timecode, FrameTimecode):
return timecode.frame_num
elif timecode is None:
raise TypeError('Timecode/frame number must be specified!')
else:
raise TypeError('Timecode format/type unrecognized.')
def _parse_timecode_string(self, timecode_string):
# type: (str) -> int
""" Parses a string based on the three possible forms (in timecode format,
as an integer number of frames, or floating-point seconds, ending with 's').
Requires that the framerate property is set before calling this method.
Assuming a framerate of 30.0 FPS, the strings '00:05:00.000', '00:05:00',
'9000', '300s', and '300.0s' are all possible valid values, all representing
a period of time equal to 5 minutes, 300 seconds, or 9000 frames (at 30 FPS).
Raises:
TypeError, ValueError
"""
if self.framerate is None:
raise TypeError('self.framerate must be set before calling _parse_timecode_string.')
# Number of seconds S
if timecode_string.endswith('s'):
secs = timecode_string[:-1]
if not secs.replace('.', '').isdigit():
raise ValueError('All characters in timecode seconds string must be digits.')
secs = float(secs)
if secs < 0.0:
raise ValueError('Timecode seconds value must be positive.')
return int(secs * self.framerate)
# Exact number of frames N
elif timecode_string.isdigit():
timecode = int(timecode_string)
if timecode < 0:
raise ValueError('Timecode frame number must be positive.')
return timecode
# Standard timecode in string format 'HH:MM:SS[.nnn]'
else:
tc_val = timecode_string.split(':')
if not (len(tc_val) == 3 and tc_val[0].isdigit() and tc_val[1].isdigit()
and tc_val[2].replace('.', '').isdigit()):
raise ValueError('Unrecognized or improperly formatted timecode string.')
hrs, mins = int(tc_val[0]), int(tc_val[1])
secs = float(tc_val[2]) if '.' in tc_val[2] else int(tc_val[2])
if not (hrs >= 0 and mins >= 0 and secs >= 0 and mins < 60 and secs < 60):
raise ValueError('Invalid timecode range (values outside allowed range).')
secs += (((hrs * 60.0) + mins) * 60.0)
return int(secs * self.framerate)
def __iadd__(self, other):
# type: (Union[int, float, str, FrameTimecode]) -> FrameTimecode
if isinstance(other, int):
self.frame_num += other
elif isinstance(other, FrameTimecode):
if self.equal_framerate(other.framerate):
self.frame_num += other.frame_num
else:
raise ValueError('FrameTimecode instances require equal framerate for addition.')
# Check if value to add is in number of seconds.
elif isinstance(other, float):
self.frame_num += self._seconds_to_frames(other)
else:
raise TypeError('Unsupported type for performing addition with FrameTimecode.')
if self.frame_num < 0: # Required to allow adding negative seconds/frames.
self.frame_num = 0
return self
def __add__(self, other):
# type: (Union[int, float, str, FrameTimecode]) -> FrameTimecode
to_return = FrameTimecode(timecode=self)
to_return += other
return to_return
def __isub__(self, other):
# type: (Union[int, float, str, FrameTimecode]) -> FrameTimecode
if isinstance(other, int):
self.frame_num -= other
elif isinstance(other, FrameTimecode):
if self.equal_framerate(other.framerate):
self.frame_num -= other.frame_num
else:
raise ValueError('FrameTimecode instances require equal framerate for subtraction.')
# Check if value to add is in number of seconds.
elif isinstance(other, float):
self.frame_num -= self._seconds_to_frames(other)
else:
raise TypeError('Unsupported type for performing subtraction with FrameTimecode.')
if self.frame_num < 0:
self.frame_num = 0
return self
def __sub__(self, other):
# type: (Union[int, float, str, FrameTimecode]) -> FrameTimecode
to_return = FrameTimecode(timecode=self)
to_return -= other
return to_return
def __eq__(self, other):
# type: (Union[int, float, str, FrameTimecode]) -> bool
if isinstance(other, int):
return self.frame_num == other
elif isinstance(other, float):
return self.get_seconds() == other
elif isinstance(other, str):
return self.frame_num == self._parse_timecode_string(other)
elif isinstance(other, FrameTimecode):
if self.equal_framerate(other.framerate):
return self.frame_num == other.frame_num
else:
raise TypeError(
'FrameTimecode objects must have the same framerate to be compared.')
elif other is None:
return False
else:
raise TypeError('Unsupported type for performing == with FrameTimecode.')
def __ne__(self, other):
# type: (Union[int, float, str, FrameTimecode]) -> bool
return not self == other
def __lt__(self, other):
# type: (Union[int, float, str, FrameTimecode]) -> bool
if isinstance(other, int):
return self.frame_num < other
elif isinstance(other, float):
return self.get_seconds() < other
elif isinstance(other, str):
return self.frame_num < self._parse_timecode_string(other)
elif isinstance(other, FrameTimecode):
if self.equal_framerate(other.framerate):
return self.frame_num < other.frame_num
else:
raise TypeError(
'FrameTimecode objects must have the same framerate to be compared.')
#elif other is None:
# return False
else:
raise TypeError('Unsupported type for performing < with FrameTimecode.')
def __le__(self, other):
# type: (Union[int, float, str, FrameTimecode]) -> bool
if isinstance(other, int):
return self.frame_num <= other
elif isinstance(other, float):
return self.get_seconds() <= other
elif isinstance(other, str):
return self.frame_num <= self._parse_timecode_string(other)
elif isinstance(other, FrameTimecode):
if self.equal_framerate(other.framerate):
return self.frame_num <= other.frame_num
else:
raise TypeError(
'FrameTimecode objects must have the same framerate to be compared.')
#elif other is None:
# return False
else:
raise TypeError('Unsupported type for performing <= with FrameTimecode.')
def __gt__(self, other):
# type: (Union[int, float, str, FrameTimecode]) -> bool
if isinstance(other, int):
return self.frame_num > other
elif isinstance(other, float):
return self.get_seconds() > other
elif isinstance(other, str):
return self.frame_num > self._parse_timecode_string(other)
elif isinstance(other, FrameTimecode):
if self.equal_framerate(other.framerate):
return self.frame_num > other.frame_num
else:
raise TypeError(
'FrameTimecode objects must have the same framerate to be compared.')
#elif other is None:
# return False
else:
raise TypeError('Unsupported type (%s) for performing > with FrameTimecode.' %
type(other).__name__)
def __ge__(self, other):
# type: (Union[int, float, str, FrameTimecode]) -> bool
if isinstance(other, int):
return self.frame_num >= other
elif isinstance(other, float):
return self.get_seconds() >= other
elif isinstance(other, str):
return self.frame_num >= self._parse_timecode_string(other)
elif isinstance(other, FrameTimecode):
if self.equal_framerate(other.framerate):
return self.frame_num >= other.frame_num
else:
raise TypeError(
'FrameTimecode objects must have the same framerate to be compared.')
#elif other is None:
# return False
else:
raise TypeError('Unsupported type for performing >= with FrameTimecode.')
def __int__(self):
return self.frame_num
def __float__(self):
return self.get_seconds()
def __str__(self):
return self.get_timecode()
def __repr__(self):
return 'FrameTimecode(frame=%d, fps=%f)' % (self.frame_num, self.framerate)
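# Minimal usage sketch: the three accepted timecode forms described in the
# class docstring all represent the same position at 30 FPS.
def _example_frame_timecode():
    by_string = FrameTimecode('00:05:00.000', fps=30.0)
    by_seconds = FrameTimecode(300.0, fps=30.0)
    by_frames = FrameTimecode(9000, fps=30.0)
    assert by_string == by_seconds == by_frames == 9000
    return by_string.get_timecode()  # -> '00:05:00.000'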
|
py | 7dfbfbe31df9cf2f596f7f23a35611f3a63e220e | # coding=utf-8
# Copyright 2020 The TensorFlow GAN Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for mnist.util."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow_gan.examples.mnist import util
mock = tf.test.mock
# pylint: disable=line-too-long
# This is a real digit `5` from MNIST.
REAL_DIGIT = [[-1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0], [-1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0], [-1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0], [-1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0], [-1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0], [-1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -0.9765625, -0.859375, -0.859375, -0.859375, -0.015625, 0.0625, 0.3671875, -0.796875, 0.296875, 0.9921875, 0.9296875, -0.0078125, -1.0, -1.0, -1.0, -1.0], [-1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -0.765625, -0.71875, -0.265625, 0.203125, 0.328125, 0.9765625, 0.9765625, 0.9765625, 0.9765625, 0.9765625, 0.7578125, 0.34375, 0.9765625, 0.890625, 0.5234375, -0.5, -1.0, -1.0, -1.0, -1.0], [-1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -0.6171875, 0.859375, 0.9765625, 0.9765625, 0.9765625, 0.9765625, 0.9765625, 0.9765625, 0.9765625, 0.9765625, 0.9609375, -0.2734375, -0.359375, -0.359375, -0.5625, -0.6953125, -1.0, -1.0, -1.0, -1.0, -1.0], [-1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -0.859375, 0.7109375, 0.9765625, 0.9765625, 0.9765625, 0.9765625, 0.9765625, 0.546875, 0.421875, 0.9296875, 0.8828125, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0], [-1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -0.375, 0.21875, -0.1640625, 0.9765625, 0.9765625, 0.6015625, -0.9140625, -1.0, -0.6640625, 0.203125, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0], [-1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -0.890625, -0.9921875, 0.203125, 0.9765625, -0.296875, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0], [-1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, 0.0859375, 0.9765625, 0.484375, -0.984375, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0], [-1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -0.9140625, 0.484375, 0.9765625, -0.453125, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0], [-1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -0.7265625, 0.8828125, 0.7578125, 0.25, -0.15625, -0.9921875, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0], [-1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -0.3671875, 0.875, 0.9765625, 0.9765625, -0.0703125, -0.8046875, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0], [-1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -0.6484375, 0.453125, 0.9765625, 0.9765625, 0.171875, -0.7890625, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0], [-1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -0.875, -0.2734375, 0.96875, 0.9765625, 0.4609375, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0], [-1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, 0.9453125, 0.9765625, 0.9453125, -0.5, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, 
-1.0], [-1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -0.640625, 0.015625, 0.4296875, 0.9765625, 0.9765625, 0.6171875, -0.984375, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0], [-1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -0.6953125, 0.15625, 0.7890625, 0.9765625, 0.9765625, 0.9765625, 0.953125, 0.421875, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0], [-1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -0.8125, -0.109375, 0.7265625, 0.9765625, 0.9765625, 0.9765625, 0.9765625, 0.5703125, -0.390625, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0], [-1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -0.8203125, -0.484375, 0.6640625, 0.9765625, 0.9765625, 0.9765625, 0.9765625, 0.546875, -0.3671875, -0.984375, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0], [-1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -0.859375, 0.3359375, 0.7109375, 0.9765625, 0.9765625, 0.9765625, 0.9765625, 0.5234375, -0.375, -0.9296875, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0], [-1.0, -1.0, -1.0, -1.0, -0.5703125, 0.34375, 0.765625, 0.9765625, 0.9765625, 0.9765625, 0.9765625, 0.90625, 0.0390625, -0.9140625, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0], [-1.0, -1.0, -1.0, -1.0, 0.0625, 0.9765625, 0.9765625, 0.9765625, 0.65625, 0.0546875, 0.03125, -0.875, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0], [-1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0], [-1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0], [-1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0]]
ONE_HOT = [[0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0]]
# Uniform noise in [-1, 1].
FAKE_DIGIT = [[0.778958797454834, 0.8792028427124023, 0.07099628448486328, 0.8518857955932617, -0.3541288375854492, -0.7431280612945557, -0.12607860565185547, 0.17328786849975586, 0.6749839782714844, -0.5402040481567383, 0.9034252166748047, 0.2420203685760498, 0.3455841541290283, 0.1937558650970459, 0.9989571571350098, 0.9039363861083984, -0.955411434173584, 0.6228537559509277, -0.33131909370422363, 0.9653763771057129, 0.864208459854126, -0.05056142807006836, 0.12686634063720703, -0.09225749969482422, 0.49758028984069824, 0.08698725700378418, 0.5533185005187988, 0.20227980613708496], [0.8400616645812988, 0.7409703731536865, -0.6215496063232422, -0.53228759765625, -0.20184636116027832, -0.8568699359893799, -0.8662903308868408, -0.8735041618347168, -0.11022663116455078, -0.8418543338775635, 0.8193502426147461, -0.901512622833252, -0.7680232524871826, 0.6209826469421387, 0.06459426879882812, 0.5341305732727051, -0.4078702926635742, -0.13658642768859863, 0.6602437496185303, 0.848508358001709, -0.23431801795959473, 0.5995683670043945, -0.9807922840118408, 0.2657158374786377, -0.8068397045135498, 0.2438051700592041, -0.2116842269897461, 0.011460304260253906], [0.00040912628173828125, -0.058798789978027344, 0.3239307403564453, 0.5040378570556641, -0.03192305564880371, -0.4816470146179199, -0.14559340476989746, -0.9231269359588623, -0.6602556705474854, -0.2537086009979248, -0.11059761047363281, -0.8174862861633301, 0.6180260181427002, 0.7245023250579834, 0.5007762908935547, -0.1575303077697754, -0.0167086124420166, 0.7173266410827637, 0.1126704216003418, -0.9878268241882324, 0.4538843631744385, -0.4422755241394043, -0.7899672985076904, 0.7349567413330078, -0.4448075294494629, -0.7548923492431641, -0.5739786624908447, 0.30504918098449707], [-0.8488152027130127, 0.43424463272094727, 0.7724254131317139, 0.43314504623413086, 0.7352848052978516, -0.26010799407958984, 0.43951940536499023, -0.7642686367034912, -0.657184362411499, -0.9933960437774658, -0.47258639335632324, 0.10390830039978027, 0.11454653739929199, -0.6156411170959473, -0.23431062698364258, -0.6897118091583252, -0.5721850395202637, -0.3574075698852539, 0.13927006721496582, -0.6530766487121582, 0.32231855392456055, 0.6294634342193604, 0.5507853031158447, -0.4867420196533203, 0.4329197406768799, 0.6168341636657715, -0.8720219135284424, 0.8639121055603027], [0.02407360076904297, -0.11185193061828613, -0.38637852668762207, -0.8244953155517578, -0.648916482925415, 0.44907593727111816, -0.368192195892334, 0.0190126895904541, -0.9450500011444092, 0.41033458709716797, -0.7877917289733887, 0.617938756942749, 0.551692008972168, -0.48288512229919434, 0.019921541213989258, -0.8765170574188232, 0.5651748180389404, 0.850874662399292, -0.5792787075042725, 0.1748213768005371, -0.6905481815338135, -0.521310567855835, 0.062479496002197266, 0.17763280868530273, -0.4628307819366455, 0.8870463371276855, -0.8685822486877441, -0.29169774055480957], [-0.14687561988830566, 0.8801963329315186, 0.11353135108947754, -0.6009430885314941, 0.8818719387054443, -0.8621203899383545, -0.48749589920043945, 0.5224916934967041, 0.7050364017486572, -0.9968757629394531, 0.7235188484191895, 0.662771463394165, 0.588390588760376, -0.9624209403991699, 0.39203453063964844, -0.2210233211517334, 0.9266352653503418, -0.9132544994354248, -0.5175333023071289, 0.7251780033111572, 0.3030557632446289, 0.5743863582611084, 0.14350247383117676, -0.3735086917877197, -0.6299927234649658, 0.5088682174682617, -0.6953752040863037, 0.23583650588989258], [0.25864362716674805, 0.5736973285675049, 
-0.3975222110748291, -0.6369199752807617, 0.5376672744750977, -0.19951462745666504, 0.6843924522399902, 0.6296660900115967, 0.36865997314453125, -0.7243289947509766, 0.5768749713897705, -0.5493001937866211, 0.31238412857055664, 0.21019506454467773, -0.368206262588501, -0.33148622512817383, -0.3421964645385742, -0.15616083145141602, 0.6617193222045898, 0.4400820732116699, 0.7893157005310059, -0.2935945987701416, 0.6241741180419922, -0.26036930084228516, -0.6958446502685547, 0.27047157287597656, -0.9095940589904785, 0.9525108337402344], [-0.5233585834503174, -0.45003342628479004, 0.15099048614501953, 0.6257956027984619, 0.9017877578735352, -0.18155455589294434, -0.20237135887145996, -0.014468908309936523, 0.01797318458557129, -0.5453977584838867, 0.21428155899047852, -0.9678947925567627, 0.5137600898742676, -0.1094369888305664, -0.13572359085083008, -0.1704423427581787, -0.9122319221496582, 0.8274900913238525, -0.11746454238891602, -0.8701446056365967, -0.9545385837554932, -0.6735866069793701, 0.9445557594299316, -0.3842940330505371, -0.6240942478179932, -0.1673595905303955, -0.3959221839904785, -0.05602693557739258], [0.8833198547363281, 0.14288711547851562, -0.9623878002166748, -0.26968836784362793, 0.9689288139343262, -0.3792128562927246, 0.2520296573638916, 0.1477947235107422, -0.24453139305114746, 0.94329833984375, 0.8014910221099854, 0.5443501472473145, 0.8486857414245605, -0.0795745849609375, -0.30250096321105957, -0.909733772277832, 0.8387842178344727, -0.41989898681640625, -0.8364224433898926, 0.04792976379394531, -0.38036274909973145, 0.12747883796691895, -0.5356688499450684, -0.04269552230834961, -0.2070469856262207, -0.6911153793334961, 0.33954668045043945, 0.25260138511657715], [0.3128373622894287, 0.36142778396606445, -0.2512378692626953, -0.6497259140014648, -0.21787405014038086, -0.9972476959228516, -0.026904821395874023, -0.4214746952056885, -0.3354766368865967, -0.6112637519836426, -0.7594058513641357, 0.09093379974365234, -0.7331845760345459, 0.5222046375274658, -0.8997514247894287, -0.3749384880065918, -0.0775001049041748, -0.26296448707580566, 0.404325008392334, -0.25776195526123047, 0.9136955738067627, 0.2623283863067627, -0.6411356925964355, 0.6646602153778076, -0.12833356857299805, -0.4184732437133789, -0.3663449287414551, -0.5468103885650635], [0.3770618438720703, 0.5572817325592041, -0.3657073974609375, -0.5056321620941162, 0.6555137634277344, 0.9557509422302246, -0.6900768280029297, -0.4980638027191162, 0.05510354042053223, -0.610318660736084, 0.9753992557525635, 0.7569930553436279, 0.4011664390563965, 0.8439173698425293, -0.5921270847320557, -0.2775266170501709, -0.061129093170166016, -0.49707984924316406, 0.5820951461791992, 0.008175849914550781, 0.06372833251953125, 0.3061811923980713, -0.5091361999511719, 0.9751057624816895, 0.4571402072906494, 0.6769094467163086, -0.46695923805236816, 0.44080281257629395], [-0.6510493755340576, -0.032715559005737305, 0.3482983112335205, -0.8135421276092529, 0.1506943702697754, 0.5220685005187988, -0.8834004402160645, -0.908900260925293, 0.3211519718170166, 0.896381139755249, -0.9448244571685791, -0.6193962097167969, -0.009401559829711914, 0.38227057456970215, -0.9219558238983154, -0.029483318328857422, -0.3889012336730957, 0.5242419242858887, 0.7338912487030029, -0.8713808059692383, -0.04948568344116211, -0.797940731048584, -0.9933724403381348, -0.262890100479126, -0.7165846824645996, -0.9763388633728027, -0.4105076789855957, 0.5907857418060303], [-0.6091890335083008, 0.7921168804168701, -0.033307790756225586, 
-0.9177074432373047, 0.4553513526916504, 0.5754055976867676, 0.6747269630432129, 0.0015664100646972656, -0.36865878105163574, -0.7999486923217773, 0.993431568145752, -0.7310445308685303, -0.49965715408325195, -0.028263330459594727, -0.20190834999084473, -0.7398116588592529, 0.10513901710510254, 0.22136950492858887, 0.42579007148742676, -0.4703383445739746, -0.8729751110076904, 0.28951215744018555, -0.3110074996948242, 0.9935362339019775, -0.29533815383911133, -0.3384673595428467, -0.07292437553405762, 0.8471579551696777], [-0.04648447036743164, -0.6876633167266846, -0.4921104907989502, -0.1925184726715088, -0.5843420028686523, -0.2852492332458496, 0.1251826286315918, -0.5709969997406006, 0.6744673252105713, 0.17812824249267578, 0.675086259841919, 0.1219022274017334, 0.6496286392211914, 0.05892682075500488, 0.33709263801574707, -0.9087743759155273, -0.4785141944885254, 0.2689247131347656, 0.3600578308105469, -0.6822943687438965, -0.0801839828491211, 0.9234893321990967, 0.5037875175476074, -0.8148105144500732, 0.6820621490478516, -0.8345451354980469, 0.9400079250335693, -0.752265453338623], [0.4599113464355469, 0.0292510986328125, -0.7475054264068604, -0.4074263572692871, 0.8800950050354004, 0.41760849952697754, 0.3225588798522949, 0.8359887599945068, -0.8655965328216553, 0.17258906364440918, 0.752063512802124, -0.48968982696533203, -0.9149389266967773, -0.46224403381347656, -0.26379919052124023, -0.9342350959777832, -0.5444085597991943, -0.4576752185821533, -0.12544608116149902, -0.3810274600982666, -0.5277583599090576, 0.3267025947570801, 0.4762594699859619, 0.09010529518127441, -0.2434844970703125, -0.8013439178466797, -0.23540687561035156, 0.8844473361968994], [-0.8922028541564941, -0.7912418842315674, -0.14429497718811035, -0.3925011157989502, 0.1870102882385254, -0.28865933418273926, 0.11481523513793945, -0.9174098968505859, -0.5264348983764648, -0.7296302318572998, 0.44644737243652344, 0.11140275001525879, 0.08105874061584473, 0.3871574401855469, -0.5030152797698975, -0.9560322761535645, -0.07085466384887695, 0.7949709892272949, 0.37600183486938477, -0.8664641380310059, -0.08428549766540527, 0.9169652462005615, 0.9479010105133057, 0.3576698303222656, 0.6677501201629639, -0.34919166564941406, -0.11635351181030273, -0.05966901779174805], [-0.9716870784759521, -0.7901370525360107, 0.9152195453643799, -0.42171406745910645, 0.20472931861877441, -0.9847748279571533, 0.9935972690582275, -0.5975158214569092, 0.9557573795318604, 0.41234254837036133, -0.26670074462890625, 0.869682788848877, 0.4443938732147217, -0.13968205451965332, -0.7745835781097412, -0.5692052841186523, 0.321591854095459, -0.3146944046020508, -0.3921670913696289, -0.36029601097106934, 0.33528995513916016, 0.029220104217529297, -0.8788590431213379, 0.6883881092071533, -0.8260040283203125, -0.43041515350341797, 0.42810845375061035, 0.40050792694091797], [0.6319584846496582, 0.32369279861450195, -0.6308813095092773, 0.07861590385437012, 0.5387494564056396, -0.902024507522583, -0.5346510410308838, 0.10787153244018555, 0.36048197746276855, 0.7801573276519775, -0.39187049865722656, 0.409869909286499, 0.5972449779510498, -0.7578165531158447, 0.30403685569763184, -0.7885205745697021, 0.01341390609741211, -0.023469209671020508, -0.11588907241821289, -0.941727876663208, -0.09618496894836426, 0.8042230606079102, 0.4683551788330078, -0.6497313976287842, 0.7443571090698242, 0.7150907516479492, -0.5759801864624023, -0.6523513793945312], [0.3150167465209961, -0.1853046417236328, 0.1839759349822998, -0.9234504699707031, -0.666614294052124, 
-0.740748405456543, 0.5700008869171143, 0.4091987609863281, -0.8912513256072998, 0.027419567108154297, 0.07219696044921875, -0.3128829002380371, 0.9465951919555664, 0.8715424537658691, -0.4559173583984375, -0.5862705707550049, -0.6734106540679932, 0.8419013023376465, -0.5523068904876709, -0.5019669532775879, -0.4969966411590576, -0.030994892120361328, 0.7572557926177979, 0.3824281692504883, 0.6366133689880371, -0.13271117210388184, 0.632627010345459, -0.9462625980377197], [-0.3923935890197754, 0.5466675758361816, -0.9815363883972168, -0.12867975234985352, 0.9932572841644287, -0.712486743927002, -0.17107725143432617, -0.41582536697387695, 0.10990118980407715, 0.11867499351501465, -0.7774863243103027, -0.9382553100585938, 0.4290587902069092, -0.1412363052368164, -0.498490571975708, 0.7793624401092529, 0.9015710353851318, -0.5107312202453613, 0.12394595146179199, 0.4988982677459717, -0.5731511116027832, -0.8105146884918213, 0.19758391380310059, 0.4081540107727051, -0.20432400703430176, 0.9924988746643066, -0.16952204704284668, 0.45574188232421875], [-0.023319005966186523, -0.3514130115509033, 0.4652390480041504, 0.7690563201904297, 0.7906622886657715, 0.9922001361846924, -0.6670932769775391, -0.5720524787902832, 0.3491201400756836, -0.18706512451171875, 0.4899415969848633, -0.5637645721435547, 0.8986928462982178, -0.7359066009521484, -0.6342356204986572, 0.6608924865722656, 0.6014213562011719, 0.7906208038330078, 0.19034814834594727, 0.16884255409240723, -0.8803558349609375, 0.4060337543487549, 0.9862797260284424, -0.48758840560913086, 0.3894319534301758, 0.4226539134979248, 0.00042724609375, -0.3623659610748291], [0.7065057754516602, -0.4103825092315674, 0.3343489170074463, -0.9341757297515869, -0.20749878883361816, -0.3589310646057129, -0.9470062255859375, 0.7851138114929199, -0.74678635597229, -0.05102086067199707, 0.16727972030639648, 0.12385678291320801, -0.41132497787475586, 0.5856695175170898, 0.06311273574829102, 0.8238284587860107, 0.2257852554321289, -0.6452250480651855, -0.6373245716094971, -0.1247873306274414, 0.0021851062774658203, -0.5648598670959473, 0.8261771202087402, 0.3713812828063965, -0.7185893058776855, 0.45911359786987305, 0.6722264289855957, -0.31804966926574707], [-0.2952606678009033, -0.12052011489868164, 0.9074821472167969, -0.9850583076477051, 0.6732273101806641, 0.7954013347625732, 0.938248872756958, -0.35915136337280273, -0.8291504383087158, 0.6020793914794922, -0.30096912384033203, 0.6136975288391113, 0.46443629264831543, -0.9057025909423828, 0.43993186950683594, -0.11653375625610352, 0.9514555931091309, 0.7985148429870605, -0.6911783218383789, -0.931948184967041, 0.06829452514648438, 0.711458683013916, 0.17953252792358398, 0.19076848030090332, 0.6339912414550781, -0.38822221755981445, 0.09893226623535156, -0.7663829326629639], [0.2640511989593506, 0.16821074485778809, 0.9845118522644043, 0.13789963722229004, -0.1097862720489502, 0.24889636039733887, 0.12135696411132812, 0.4419589042663574, -0.022361040115356445, 0.3793671131134033, 0.7053709030151367, 0.5814387798309326, 0.24962759017944336, -0.40136122703552246, -0.364804744720459, -0.2518625259399414, 0.25757312774658203, 0.6828348636627197, -0.08237361907958984, -0.07745933532714844, -0.9299273490905762, -0.3066704273223877, 0.22127842903137207, -0.6690163612365723, 0.7477891445159912, 0.5738682746887207, -0.9027633666992188, 0.33333301544189453], [0.44417595863342285, 0.5279359817504883, 0.36589932441711426, -0.049490928649902344, -0.3003063201904297, 0.8447706699371338, 0.8139019012451172, 
-0.4958505630493164, -0.3640005588531494, -0.7731854915618896, 0.4138674736022949, 0.3080577850341797, -0.5615301132202148, -0.4247474670410156, -0.25992918014526367, -0.7983508110046387, -0.4926283359527588, 0.08595061302185059, 0.9205574989318848, 0.6025278568267822, 0.37128734588623047, -0.1856670379638672, 0.9658422470092773, -0.6652145385742188, -0.2217710018157959, 0.8311929702758789, 0.9117603302001953, 0.4660191535949707], [-0.6164810657501221, -0.9509942531585693, 0.30228447914123535, 0.39792585372924805, -0.45194506645202637, -0.3315877914428711, 0.5393261909484863, 0.030223369598388672, 0.6013507843017578, 0.020318031311035156, 0.17352747917175293, -0.25470709800720215, -0.7904071807861328, 0.5383470058441162, -0.43855953216552734, -0.350492000579834, -0.5038156509399414, 0.0511777400970459, 0.9628784656524658, -0.520085334777832, 0.33083176612854004, 0.3698580265045166, 0.9121549129486084, -0.664802074432373, -0.45629024505615234, -0.44208550453186035, 0.6502475738525391, -0.597346305847168], [-0.5405080318450928, 0.2640984058380127, -0.2119138240814209, 0.04504036903381348, -0.10604047775268555, -0.8093295097351074, 0.20915007591247559, 0.08994936943054199, 0.5347979068756104, -0.550915002822876, 0.3008608818054199, 0.2416989803314209, 0.025292158126831055, -0.2790646553039551, 0.5850257873535156, 0.7020430564880371, -0.9527721405029297, 0.8273317813873291, -0.8028199672698975, 0.3686351776123047, -0.9391682147979736, -0.4261181354522705, 0.1674947738647461, 0.27993154525756836, 0.7567532062530518, 0.9245085716247559, 0.9245762825012207, -0.296356201171875], [-0.7747671604156494, 0.7632861137390137, 0.10236740112304688, 0.14044547080993652, 0.9318621158599854, -0.5622165203094482, -0.6605725288391113, -0.8286299705505371, 0.8717818260192871, -0.22177672386169434, -0.6030778884887695, 0.20917797088623047, 0.31551361083984375, 0.7741527557373047, -0.3320643901824951, -0.9014863967895508, 0.44268250465393066, 0.25649309158325195, -0.5621528625488281, -0.6077632904052734, 0.21485304832458496, -0.658627986907959, -0.9116294384002686, -0.294114351272583, 0.0452420711517334, 0.8542745113372803, 0.7148771286010742, 0.3244490623474121]]
# pylint: enable=line-too-long
def real_digit():
return tf.expand_dims(tf.expand_dims(REAL_DIGIT, 0), -1)
def fake_digit():
return tf.expand_dims(tf.expand_dims(FAKE_DIGIT, 0), -1)
def one_hot_real():
return tf.constant(ONE_HOT)
def one_hot1():
return tf.constant([[1.0] + [0.0] * 9])
def fake_logit_fn(tensor):
batch_dim = tf.shape(tensor)[0]
return tf.zeros([batch_dim, 10])
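# Note: with all-zero logits the mocked classifier predicts a uniform
# distribution over the 10 classes, so the MNIST classifier score of any single
# image comes out as 1.0 (see test_single_example_correct below).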
class MnistScoreTest(tf.test.TestCase):
@mock.patch.object(util.tfhub, 'load', autospec=True)
def test_any_batch_size(self, mock_tfhub_load):
mock_tfhub_load.return_value = fake_logit_fn
# Create a graph since placeholders don't work in eager execution mode.
with tf.Graph().as_default():
inputs = tf.placeholder(tf.float32, shape=[None, 28, 28, 1])
mscore = util.mnist_score(inputs)
for batch_size in [4, 16, 30]:
with self.cached_session() as sess:
sess.run(
mscore, feed_dict={inputs: np.zeros([batch_size, 28, 28, 1])})
@mock.patch.object(util.tfhub, 'load', autospec=True)
def test_deterministic(self, mock_tfhub_load):
mock_tfhub_load.return_value = fake_logit_fn
m_score = util.mnist_score(real_digit())
with self.cached_session() as sess:
m_score1 = sess.run(m_score)
m_score2 = sess.run(m_score)
self.assertEqual(m_score1, m_score2)
with self.cached_session() as sess:
m_score3 = sess.run(m_score)
self.assertEqual(m_score1, m_score3)
@mock.patch.object(util.tfhub, 'load', autospec=True)
def test_single_example_correct(self, mock_tfhub_load):
mock_tfhub_load.return_value = fake_logit_fn
real_score = util.mnist_score(real_digit())
fake_score = util.mnist_score(fake_digit())
with self.cached_session() as sess:
self.assertNear(1.0, sess.run(real_score), 1e-6)
self.assertNear(1.0, sess.run(fake_score), 1e-6)
def _disabled_test_minibatch_correct(self):
"""Tests the correctness of the mnist_score function."""
# Disabled since it requires loading the tfhub MNIST module.
mscore = util.mnist_score(
tf.concat([real_digit(), real_digit(), fake_digit()], 0))
with self.cached_session() as sess:
self.assertNear(1.612828, sess.run(mscore), 1e-6)
def _disabled_test_batch_splitting_doesnt_change_value(self):
"""Tests the correctness of mnist_score function over different batches."""
# Disabled since it requires loading the tfhub MNIST module.
for num_batches in [1, 2, 4, 8]:
mscore = util.mnist_score(
tf.concat([real_digit()] * 4 + [fake_digit()] * 4, 0),
num_batches=num_batches)
with self.cached_session() as sess:
self.assertNear(1.649209, sess.run(mscore), 1e-6)
class MnistFrechetDistanceTest(tf.test.TestCase):
@mock.patch.object(util.tfhub, 'load', autospec=True)
def test_any_batch_size(self, mock_tfhub_load):
mock_tfhub_load.return_value = fake_logit_fn
# Create a graph since placeholders don't work in eager execution mode.
with tf.Graph().as_default():
inputs = tf.placeholder(tf.float32, shape=[None, 28, 28, 1])
fdistance = util.mnist_frechet_distance(inputs, inputs)
for batch_size in [4, 16, 30]:
with self.cached_session() as sess:
sess.run(fdistance,
feed_dict={inputs: np.zeros([batch_size, 28, 28, 1])})
@mock.patch.object(util.tfhub, 'load', autospec=True)
def test_deterministic(self, mock_tfhub_load):
mock_tfhub_load.return_value = fake_logit_fn
fdistance = util.mnist_frechet_distance(
tf.concat([real_digit()] * 2, 0),
tf.concat([fake_digit()] * 2, 0))
with self.cached_session() as sess:
fdistance1 = sess.run(fdistance)
fdistance2 = sess.run(fdistance)
self.assertNear(fdistance1, fdistance2, 2e-1)
with self.cached_session() as sess:
fdistance3 = sess.run(fdistance)
self.assertNear(fdistance1, fdistance3, 2e-1)
@mock.patch.object(util.tfhub, 'load', autospec=True)
def test_single_example_correct(self, mock_tfhub_load):
mock_tfhub_load.return_value = fake_logit_fn
fdistance = util.mnist_frechet_distance(
tf.concat([real_digit()] * 2, 0),
tf.concat([real_digit()] * 2, 0))
with self.cached_session() as sess:
self.assertNear(0.0, sess.run(fdistance), 2e-1)
def _disabled_test_minibatch_correct(self):
"""Tests the correctness of the mnist_frechet_distance function."""
# Disabled since it requires loading the tfhub MNIST module.
fdistance = util.mnist_frechet_distance(
tf.concat([real_digit(), real_digit(), fake_digit()], 0),
tf.concat([real_digit(), fake_digit(), fake_digit()], 0))
with self.cached_session() as sess:
self.assertNear(43.5, sess.run(fdistance), 2e-1)
def _disabled_test_batch_splitting_doesnt_change_value(self):
"""Tests correctness of mnist_frechet_distance function with batch sizes."""
# Disabled since it requires loading the tfhub MNIST module.
with tf.Graph().as_default():
for num_batches in [1, 2, 4, 8]:
fdistance = util.mnist_frechet_distance(
tf.concat([real_digit()] * 6 + [fake_digit()] * 2, 0),
tf.concat([real_digit()] * 2 + [fake_digit()] * 6, 0),
num_batches=num_batches)
with self.cached_session() as sess:
self.assertNear(97.8, sess.run(fdistance), 2e-1)
class MnistCrossEntropyTest(tf.test.TestCase):
@mock.patch.object(util.tfhub, 'load', autospec=True)
def test_any_batch_size(self, mock_tfhub_load):
mock_tfhub_load.return_value = fake_logit_fn
# Create a graph since placeholders don't work in eager execution mode.
with tf.Graph().as_default():
num_classes = 10
one_label = np.array([[1] + [0] * (num_classes - 1)])
inputs = tf.placeholder(tf.float32, shape=[None, 28, 28, 1])
one_hot_label = tf.placeholder(tf.int32, shape=[None, num_classes])
entropy = util.mnist_cross_entropy(inputs, one_hot_label)
for batch_size in [4, 16, 30]:
with self.cached_session() as sess:
sess.run(entropy, feed_dict={
inputs: np.zeros([batch_size, 28, 28, 1]),
one_hot_label: np.concatenate([one_label] * batch_size)})
@mock.patch.object(util.tfhub, 'load', autospec=True)
def test_deterministic(self, mock_tfhub_load):
mock_tfhub_load.return_value = fake_logit_fn
xent = util.mnist_cross_entropy(real_digit(), one_hot_real())
with self.cached_session() as sess:
ent1 = sess.run(xent)
ent2 = sess.run(xent)
self.assertEqual(ent1, ent2)
with self.cached_session() as sess:
ent3 = sess.run(xent)
self.assertEqual(ent1, ent3)
def _disabled_test_single_example_correct(self):
"""Tests correctness of the mnist_cross_entropy function."""
# Disabled since it requires loading the tfhub MNIST module.
# The correct label should have low cross entropy.
correct_xent = util.mnist_cross_entropy(real_digit(), one_hot_real())
# The incorrect label should have high cross entropy.
wrong_xent = util.mnist_cross_entropy(real_digit(), one_hot1())
# A random digit should have medium cross entropy for any label.
fake_xent1 = util.mnist_cross_entropy(fake_digit(), one_hot_real())
fake_xent6 = util.mnist_cross_entropy(fake_digit(), one_hot1())
with self.cached_session() as sess:
self.assertNear(0.00996, sess.run(correct_xent), 1e-5)
self.assertNear(18.63073, sess.run(wrong_xent), 1e-5)
self.assertNear(2.2, sess.run(fake_xent1), 1e-1)
self.assertNear(2.2, sess.run(fake_xent6), 1e-1)
def _disabled_test_minibatch_correct(self):
"""Tests correctness of the mnist_cross_entropy function with batches."""
# Disabled since it requires loading the tfhub MNIST module.
    # Reordered minibatches should have the same value.
xent1 = util.mnist_cross_entropy(
tf.concat([real_digit(), real_digit(), fake_digit()], 0),
tf.concat([one_hot_real(), one_hot1(), one_hot1()], 0))
xent2 = util.mnist_cross_entropy(
tf.concat([real_digit(), fake_digit(), real_digit()], 0),
tf.concat([one_hot_real(), one_hot1(), one_hot1()], 0))
with self.cached_session() as sess:
self.assertNear(6.972539, sess.run(xent1), 1e-5)
self.assertNear(sess.run(xent1), sess.run(xent2), 1e-5)
class GetNoiseTest(tf.test.TestCase):
def test_get_noise_categorical_syntax(self):
util.get_eval_noise_categorical(
noise_samples=4,
categorical_sample_points=np.arange(0, 10),
continuous_sample_points=np.linspace(-2.0, 2.0, 10),
unstructured_noise_dims=62,
continuous_noise_dims=2)
def test_get_noise_continuous_dim1_syntax(self):
util.get_eval_noise_continuous_dim1(
noise_samples=4,
categorical_sample_points=np.arange(0, 10),
continuous_sample_points=np.linspace(-2.0, 2.0, 10),
unstructured_noise_dims=62,
continuous_noise_dims=2)
def test_get_noise_continuous_dim2_syntax(self):
util.get_eval_noise_continuous_dim2(
noise_samples=4,
categorical_sample_points=np.arange(0, 10),
continuous_sample_points=np.linspace(-2.0, 2.0, 10),
unstructured_noise_dims=62,
continuous_noise_dims=2)
def test_get_infogan_syntax(self):
util.get_infogan_noise(
batch_size=4,
categorical_dim=10,
structured_continuous_dim=3,
total_continuous_noise_dims=62)
if __name__ == '__main__':
tf.test.main()
|
py | 7dfbfc6594919be5aa12f47119429e8f90dc2ea9 | def longest_slide_down(pyramid):
pyramid.reverse()
for i in range(1, len(pyramid)):
sums = []
for j in range(len(pyramid[i])):
sums.append(max(pyramid[i][j] + pyramid[i-1][j],
pyramid[i][j] + pyramid[i-1][j+1]))
pyramid[i] = sums
return pyramid[-1][0]
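# Usage sketch (hypothetical input, not part of the original file): the classic
# 4-row triangle gives 3 + 7 + 4 + 9 = 23. Note the function mutates its
# argument, reversing the pyramid and overwriting rows with running maxima.
#     longest_slide_down([[3], [7, 4], [2, 4, 6], [8, 5, 9, 3]])  # -> 23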
|
py | 7dfbfd03d19816567cdc4998b360ec3b93b0e62d | import os
from typing import Dict, List
import uvicorn
from dotenv import load_dotenv
from extractor import extractor
from fastapi import FastAPI, Security, Depends, HTTPException
from fastapi.security.api_key import APIKeyHeader, APIKey
from pydantic import BaseModel
from starlette.status import HTTP_400_BAD_REQUEST, HTTP_403_FORBIDDEN
load_dotenv()
app = FastAPI()
class Values(BaseModel):
values: List = []
class Value(Values):
recordId: str
data: Dict[str, str] = None
API_KEY = os.environ['KEY']
API_KEY_NAME = "Ocp-Apim-Subscription-Key"
api_key_header = APIKeyHeader(name=API_KEY_NAME, auto_error=False)
async def get_api_key(
api_key_header: str = Security(api_key_header),
):
if api_key_header == API_KEY:
return api_key_header
else:
raise HTTPException(
status_code=HTTP_403_FORBIDDEN, detail="Translation CogSvc Key not present"
)
@app.post('/api/extraction')
def extract(values: Values, api_key: APIKey = Depends(get_api_key)):
body = values.dict()
    if not body:
        raise HTTPException(
            status_code=HTTP_400_BAD_REQUEST,
            detail='Expected text within body of request. No text found.'
        )
else:
return extractor.go_extract(body)
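# Illustrative request (the key value and payload contents are placeholders):
#   curl -X POST http://localhost:5000/api/extraction \
#        -H "Ocp-Apim-Subscription-Key: <KEY>" \
#        -H "Content-Type: application/json" \
#        -d '{"values": [{"recordId": "1", "data": {"text": "..."}}]}'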
if __name__ == "__main__":
uvicorn.run(app, host="0.0.0.0", port=5000)
|
py | 7dfbfd3f72952e0601703cec81ce7a9cbd16a355 | # coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 4
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from isi_sdk_8_0_1.models.antivirus_policy import AntivirusPolicy # noqa: F401,E501
class AntivirusPolicyCreateParams(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'description': 'str',
'enabled': 'bool',
'force_run': 'bool',
'impact': 'str',
'name': 'str',
'paths': 'list[str]',
'recursion_depth': 'int',
'schedule': 'str'
}
attribute_map = {
'description': 'description',
'enabled': 'enabled',
'force_run': 'force_run',
'impact': 'impact',
'name': 'name',
'paths': 'paths',
'recursion_depth': 'recursion_depth',
'schedule': 'schedule'
}
def __init__(self, description=None, enabled=None, force_run=None, impact=None, name=None, paths=None, recursion_depth=None, schedule=None): # noqa: E501
"""AntivirusPolicyCreateParams - a model defined in Swagger""" # noqa: E501
self._description = None
self._enabled = None
self._force_run = None
self._impact = None
self._name = None
self._paths = None
self._recursion_depth = None
self._schedule = None
self.discriminator = None
if description is not None:
self.description = description
if enabled is not None:
self.enabled = enabled
if force_run is not None:
self.force_run = force_run
if impact is not None:
self.impact = impact
self.name = name
if paths is not None:
self.paths = paths
if recursion_depth is not None:
self.recursion_depth = recursion_depth
if schedule is not None:
self.schedule = schedule
@property
def description(self):
"""Gets the description of this AntivirusPolicyCreateParams. # noqa: E501
A description for the policy. # noqa: E501
:return: The description of this AntivirusPolicyCreateParams. # noqa: E501
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this AntivirusPolicyCreateParams.
A description for the policy. # noqa: E501
:param description: The description of this AntivirusPolicyCreateParams. # noqa: E501
:type: str
"""
self._description = description
@property
def enabled(self):
"""Gets the enabled of this AntivirusPolicyCreateParams. # noqa: E501
Whether the policy is enabled. # noqa: E501
:return: The enabled of this AntivirusPolicyCreateParams. # noqa: E501
:rtype: bool
"""
return self._enabled
@enabled.setter
def enabled(self, enabled):
"""Sets the enabled of this AntivirusPolicyCreateParams.
Whether the policy is enabled. # noqa: E501
:param enabled: The enabled of this AntivirusPolicyCreateParams. # noqa: E501
:type: bool
"""
self._enabled = enabled
@property
def force_run(self):
"""Gets the force_run of this AntivirusPolicyCreateParams. # noqa: E501
Forces the scan to run regardless of whether the files were recently scanned. # noqa: E501
:return: The force_run of this AntivirusPolicyCreateParams. # noqa: E501
:rtype: bool
"""
return self._force_run
@force_run.setter
def force_run(self, force_run):
"""Sets the force_run of this AntivirusPolicyCreateParams.
Forces the scan to run regardless of whether the files were recently scanned. # noqa: E501
:param force_run: The force_run of this AntivirusPolicyCreateParams. # noqa: E501
:type: bool
"""
self._force_run = force_run
@property
def impact(self):
"""Gets the impact of this AntivirusPolicyCreateParams. # noqa: E501
The priority of the antivirus scan job. Must be a valid job engine impact policy, or null to use the default impact. # noqa: E501
:return: The impact of this AntivirusPolicyCreateParams. # noqa: E501
:rtype: str
"""
return self._impact
@impact.setter
def impact(self, impact):
"""Sets the impact of this AntivirusPolicyCreateParams.
The priority of the antivirus scan job. Must be a valid job engine impact policy, or null to use the default impact. # noqa: E501
:param impact: The impact of this AntivirusPolicyCreateParams. # noqa: E501
:type: str
"""
self._impact = impact
@property
def name(self):
"""Gets the name of this AntivirusPolicyCreateParams. # noqa: E501
The name of the policy. # noqa: E501
:return: The name of this AntivirusPolicyCreateParams. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this AntivirusPolicyCreateParams.
The name of the policy. # noqa: E501
:param name: The name of this AntivirusPolicyCreateParams. # noqa: E501
:type: str
"""
if name is None:
raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
self._name = name
@property
def paths(self):
"""Gets the paths of this AntivirusPolicyCreateParams. # noqa: E501
Paths to include in the scan. # noqa: E501
:return: The paths of this AntivirusPolicyCreateParams. # noqa: E501
:rtype: list[str]
"""
return self._paths
@paths.setter
def paths(self, paths):
"""Sets the paths of this AntivirusPolicyCreateParams.
Paths to include in the scan. # noqa: E501
:param paths: The paths of this AntivirusPolicyCreateParams. # noqa: E501
:type: list[str]
"""
self._paths = paths
@property
def recursion_depth(self):
"""Gets the recursion_depth of this AntivirusPolicyCreateParams. # noqa: E501
The depth to recurse in directories. The default of -1 gives unlimited recursion. # noqa: E501
:return: The recursion_depth of this AntivirusPolicyCreateParams. # noqa: E501
:rtype: int
"""
return self._recursion_depth
@recursion_depth.setter
def recursion_depth(self, recursion_depth):
"""Sets the recursion_depth of this AntivirusPolicyCreateParams.
The depth to recurse in directories. The default of -1 gives unlimited recursion. # noqa: E501
:param recursion_depth: The recursion_depth of this AntivirusPolicyCreateParams. # noqa: E501
:type: int
"""
self._recursion_depth = recursion_depth
@property
def schedule(self):
"""Gets the schedule of this AntivirusPolicyCreateParams. # noqa: E501
The schedule for running scans in isi date format. Examples include: 'every Friday' or 'every day at 4:00'. A null value means the policy is manually scheduled. # noqa: E501
:return: The schedule of this AntivirusPolicyCreateParams. # noqa: E501
:rtype: str
"""
return self._schedule
@schedule.setter
def schedule(self, schedule):
"""Sets the schedule of this AntivirusPolicyCreateParams.
The schedule for running scans in isi date format. Examples include: 'every Friday' or 'every day at 4:00'. A null value means the policy is manually scheduled. # noqa: E501
:param schedule: The schedule of this AntivirusPolicyCreateParams. # noqa: E501
:type: str
"""
self._schedule = schedule
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, AntivirusPolicyCreateParams):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
py | 7dfbfda0527048bddb0ca259c3b4aa77ca37b675 | import io
from enum import IntEnum
from uuid import UUID
from .nettypecode import NetTypeCode
from . import utils
from . import parsers
BlockFactories = {}
class EventTag(IntEnum):
NULL_REFERENCE = 0x01
BEGIN_PRIVATE_OBJECT = 0x05
END_OBJECT = 0x06
class EventObject:
def __init_subclass__(cls, is_factory=True, factory_name=None, **kwargs):
super().__init_subclass__(**kwargs)
if not is_factory: return
factory_name = factory_name or cls.__name__
if factory_name not in BlockFactories.keys(): BlockFactories[factory_name] = cls
def __init__(self, name=None, version=0, min_reader_ver=0):
self.name = name or self.__class__.__name__
self.version = version
self.min_reader_ver = min_reader_ver
class TraceObject(EventObject, factory_name='Trace'):
def __init__(self, name=None, version=0, min_reader_ver=0):
super().__init__(name, version, min_reader_ver)
self.year = 0
self.month = 0
self.day_of_week = 0
self.day = 0
self.hour = 0
self.minute = 0
self.second = 0
self.millisecond = 0
self.sync_time_qpc = 0
self.qpc_freq = 0
self.pointer_size = 0
self.process_id = 0
self.number_of_proc = 0
self.expected_cpu_sampling_rate = 0
def read(self, buf):
self.year = int.from_bytes(buf.read(2), byteorder='little')
self.month = int.from_bytes(buf.read(2), byteorder='little')
self.day_of_week = int.from_bytes(buf.read(2), byteorder='little')
self.day = int.from_bytes(buf.read(2), byteorder='little')
self.hour = int.from_bytes(buf.read(2), byteorder='little')
self.minute = int.from_bytes(buf.read(2), byteorder='little')
self.second = int.from_bytes(buf.read(2), byteorder='little')
self.millisecond = int.from_bytes(buf.read(2), byteorder='little')
self.sync_time_qpc = int.from_bytes(buf.read(8), byteorder='little')
self.qpc_freq = int.from_bytes(buf.read(8), byteorder='little')
self.pointer_size = int.from_bytes(buf.read(4), byteorder='little')
self.process_id = int.from_bytes(buf.read(4), byteorder='little')
self.number_of_proc = int.from_bytes(buf.read(4), byteorder='little')
self.expected_cpu_sampling_rate = int.from_bytes(buf.read(4), byteorder='little')
class Block(EventObject, is_factory=False):
def __init__(self, name=None, version=0, min_reader_ver=0):
super().__init__(name, version, min_reader_ver)
def read(self, buf):
self.block_size = int.from_bytes(buf.read(4), byteorder='little')
self.align(buf, 4)
self.end_of_block = buf.tell() + self.block_size
def align(self, buf, bound):
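        # Skip padding bytes so the next read starts on a `bound`-byte boundary.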
align = buf.tell() % bound
if align != 0: buf.seek(bound - align, io.SEEK_CUR)
def decode_payload(self, payload):
pass
@staticmethod
def read_var_int(buf):
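        # ULEB128-style varint: each byte contributes its 7 low-order bits
        # (least-significant group first) and the high bit flags continuation.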
ret = 0
shift = 0
while True:
b = buf.read(1)[0]
ret |= ((b & 0x7F) << shift)
shift += 7
if not (b & 0x80): break
return ret
class EventBlob:
def __init__(self):
self.event_size = 0
self.metadata_id = 0
self.seq = 0
self.thread_id = 0
self.capture_thread_id = 0
self.processor_num = 0
self.stack_id = 0
self.timestamp = 0
self.activity_id = UUID(int=0)
self.related_activity_id = UUID(int=0)
self.is_sorted = False
self.payload_size = 0
self.payload = None
self.payload_decoded = False
class EventBlock(Block):
def __init__(self, name=None, version=0, min_reader_ver=0):
super().__init__(name, version, min_reader_ver)
self.flags = 0x00
self.min_timestamp = 0
self.max_timestamp = 0
self.events = []
def read(self, buf):
super().read(buf)
header_size = int.from_bytes(buf.read(2), byteorder='little')
end_of_header = (buf.tell() - 2) + header_size
self.flags = int.from_bytes(buf.read(2), byteorder='little')
        self.header_compressed = (self.flags & 0x01) == 0x01
self.min_timestamp = int.from_bytes(buf.read(8), byteorder='little')
self.max_timestamp = int.from_bytes(buf.read(8), byteorder='little')
buf.seek(end_of_header, io.SEEK_SET)
while buf.tell() < self.end_of_block:
prev_event = self.events[-1] if len(self.events) != 0 else EventBlob()
if self.header_compressed:
self.events.append(self.read_compressed_event(buf, prev_event))
else:
self.events.append(self.read_event(buf))
def read_event(self, buf):
event = EventBlob()
event.event_size = int.from_bytes(buf.read(4), byteorder='little')
event.metadata_id = int.from_bytes(buf.read(4), byteorder='little')
event.is_sorted = event.metadata_id & 0x80000000
event.metadata_id &= 0x7FFFFFFF
event.seq = int.from_bytes(buf.read(4), byteorder='little')
event.thread_id = int.from_bytes(buf.read(8), byteorder='little')
event.capture_thread_id = int.from_bytes(buf.read(8), byteorder='little')
event.processor_num = int.from_bytes(buf.read(4), byteorder='little')
event.stack_id = int.from_bytes(buf.read(4), byteorder='little')
event.timestamp = int.from_bytes(buf.read(8), byteorder='little')
event.activity_id = UUID(bytes_le=buf.read(16))
event.related_activity_id = UUID(bytes_le=buf.read(16))
event.payload_size = int.from_bytes(buf.read(4), byteorder='little')
event.payload = self.decode_payload(buf.read(event.payload_size))
        self.align(buf, 4)
return event
def read_compressed_event(self, buf, prev_event):
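        # Each bit of the flags byte marks a field that is explicitly present in
        # the stream; unset fields are carried over from (or delta-decoded
        # against) the previous event, and the timestamp is always a delta.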
event = EventBlob()
flags = buf.read(1)[0]
event.metadata_id = Block.read_var_int(buf) if flags & 0x01 else prev_event.metadata_id
event.seq = prev_event.seq + Block.read_var_int(buf) if flags & 0x02 else prev_event.seq
if not flags & 0x02 and event.metadata_id != 0: event.seq += 1
event.capture_thread_id = Block.read_var_int(buf) if flags & 0x02 else prev_event.capture_thread_id
event.processor_num = Block.read_var_int(buf) if flags & 0x02 else prev_event.processor_num
event.thread_id = Block.read_var_int(buf) if flags & 0x04 else prev_event.thread_id
event.stack_id = Block.read_var_int(buf) if flags & 0x08 else prev_event.stack_id
event.timestamp = prev_event.timestamp + Block.read_var_int(buf)
event.activity_id = UUID(bytes_le=buf.read(16)) if flags & 0x10 else prev_event.activity_id
event.related_activity_id = UUID(bytes_le=buf.read(16)) if flags & 0x20 else prev_event.related_activity_id
if flags & 0x40: event.is_sorted = True
event.payload_size = Block.read_var_int(buf) if flags & 0x80 else prev_event.payload_size
event.payload = self.decode_payload(buf.read(event.payload_size))
return event
def decode_payload(self, buf):
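        # Base implementation keeps the raw payload bytes; subclasses such as
        # MetadataBlock override this to parse the payload.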
return buf
class Metadata:
def __init__(self):
self.id = 0
self.provider_name = None
self.event_id = 0
self.event_name = None
self.keywords = 0
self.version = 0
self.level = 0
self.field_count = 0
self.fields = []
class MetadataField:
def __init__(self):
self.type_code = None
self.name = None
self.field_count = 0
self.fields = []
def read(self, buf):
self.type_code = int.from_bytes(buf.read(4), byteorder='little')
if self.type_code == NetTypeCode.OBJECT:
self.read_fields(buf)
self.name = utils.bytes_to_nuluni(buf)
def read_fields(self, buf):
self.field_count = int.from_bytes(buf.read(4), byteorder='little')
for i in range(self.field_count):
            self.fields.append(MetadataField())
            self.fields[-1].read(buf)
class MetadataBlock(EventBlock):
def __init__(self, name=None, version=0, min_reader_ver=0):
super().__init__(name, version, min_reader_ver)
def read(self, buf):
super().read(buf)
def decode_payload(self, payload):
metadata = Metadata()
buf = io.BytesIO(payload)
metadata.id = int.from_bytes(buf.read(4), byteorder='little')
metadata.provider_name = utils.bytes_to_nuluni(buf)
metadata.event_id = int.from_bytes(buf.read(4), byteorder='little')
metadata.event_name = utils.bytes_to_nuluni(buf)
metadata.keywords = int.from_bytes(buf.read(8), byteorder='little')
metadata.version = int.from_bytes(buf.read(4), byteorder='little')
metadata.level = int.from_bytes(buf.read(4), byteorder='little')
metadata.field_count = int.from_bytes(buf.read(4), byteorder='little')
for i in range(metadata.field_count):
field = MetadataField()
field.read(buf)
metadata.fields.append(field)
if metadata.field_count > 0:
            # Add a dynamically created parser function
code = f"def temp_meth(buf):\n"
code += "\tret = {}\n"
for field in metadata.fields:
code += self.get_metadata_field_code(field)
code += "\treturn ret\n"
code += f"parsers.read_{metadata.provider_name.replace('-', '_')}_{metadata.event_id}_payload = temp_meth"
exec(code)
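            # Illustrative sketch of the generated code (provider name, event id
            # and field name here are hypothetical):
            #     def temp_meth(buf):
            #         ret = {}
            #         ret['Count'] = utils.read_type(<type_code>, buf)
            #         return ret
            #     parsers.read_My_Provider_1_payload = temp_meth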
self.payload_decoded = True
return metadata
def get_metadata_field_code(self, field, in_object=False):
code = ""
if field.type_code == NetTypeCode.OBJECT:
code += f"\tret['{field.name}'] = {{"
for subfield in field.fields:
code += self.get_metadata_field_code(subfield, in_object=True) + ","
code = code[:-1] + "}\n"
elif in_object:
code += f"'{field.name}': utils.read_type({field.type_code}, buf)"
else:
code += f"\tret['{field.name}'] = utils.read_type({field.type_code}, buf)\n"
return code
class StackBlock(Block):
def __init__(self, name=None, version=0, min_reader_ver=0):
super().__init__(name, version, min_reader_ver)
self.first_id = 0
self.stack_count = 0
self.stacks = []
def read(self, buf):
super().read(buf)
self.first_id = int.from_bytes(buf.read(4), byteorder='little')
self.stack_count = int.from_bytes(buf.read(4), byteorder='little')
for i in range(self.stack_count):
stack_size = int.from_bytes(buf.read(4), byteorder='little')
self.stacks.append(buf.read(stack_size))
buf.seek(self.end_of_block, io.SEEK_SET)
class SPThread:
def __init__(self, thread_id, seq_num):
self.thread_id = thread_id
self.seq_num = seq_num
class SequencePointBlock(Block, factory_name='SPBlock'):
def __init__(self, name=None, version=0, min_reader_ver=0):
super().__init__(name, version, min_reader_ver)
self.timestamp = 0
self.thread_count = 0
self.threads = []
def read(self, buf):
super().read(buf)
self.timestamp = int.from_bytes(buf.read(8), byteorder='little')
self.thread_count = int.from_bytes(buf.read(4), byteorder='little')
for t in range(self.thread_count):
self.threads.append(SPThread(int.from_bytes(buf.read(8), byteorder='little'), int.from_bytes(buf.read(4), byteorder='little')))
|
py | 7dfbfe2bd6f71f6438610c3ccb835f1a79972498 | from src.algorithms.runner import AlgorithmRunner
def main():
ar = AlgorithmRunner()
ar.run()
if __name__ == "__main__":
main() |
py | 7dfbfff599ee2204980bd09772e7272c081126c2 | from datetime import datetime
from calendar import timegm
from rest_framework_jwt.settings import api_settings
def jwt_payload_handler(user):
"""
Custom payload handler
    The JWT encodes the dictionary returned by this function; it can be decoded by rest_framework_jwt.utils.jwt_decode_handler
:param user: user instance to create a JWT token
:return: create JWT token dict
"""
return dict(
username=user.username,
email=user.email,
admin=(user.is_staff or user.is_superuser),
exp=datetime.utcnow() + api_settings.JWT_EXPIRATION_DELTA,
orig_iat=timegm(datetime.utcnow().utctimetuple())
)
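# Example wiring (a sketch; the dotted paths are placeholders for this project's
# actual module) -- register this handler and jwt_response_payload_handler below
# via the rest_framework_jwt settings:
#   JWT_AUTH = {
#       'JWT_PAYLOAD_HANDLER': 'myapp.utils.jwt_payload_handler',
#       'JWT_RESPONSE_PAYLOAD_HANDLER': 'myapp.utils.jwt_response_payload_handler',
#   }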
def jwt_response_payload_handler(token, user=None, request=None):
"""
Custom response payload handler.
This function controls the custom payload after login or token refresh. This data is returned through the web API.
:param token: JWT token to validate
:param user: user the token is created
:param request: request instance
:return:
"""
return {
'token': token,
'user': {
'username': user.username,
}
} |
py | 7dfc000943f408837ee35480790dd185d50a8694 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'image_window_ui.ui'
#
# Created by: PyQt5 UI code generator 5.10.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_ImageDialog(object):
def setupUi(self, ImageDialog):
ImageDialog.setObjectName("ImageDialog")
ImageDialog.resize(791, 641)
font = QtGui.QFont()
font.setFamily("Arial")
ImageDialog.setFont(font)
self.image = QtWidgets.QWidget(ImageDialog)
self.image.setGeometry(QtCore.QRect(5, 130, 781, 506))
self.image.setAutoFillBackground(True)
self.image.setObjectName("image")
self.btn_expose = QtWidgets.QPushButton(ImageDialog)
self.btn_expose.setGeometry(QtCore.QRect(15, 15, 76, 26))
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Ignored, QtWidgets.QSizePolicy.Ignored)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.btn_expose.sizePolicy().hasHeightForWidth())
self.btn_expose.setSizePolicy(sizePolicy)
self.btn_expose.setMinimumSize(QtCore.QSize(76, 0))
font = QtGui.QFont()
font.setPointSize(10)
self.btn_expose.setFont(font)
self.btn_expose.setObjectName("btn_expose")
self.groupBox_2 = QtWidgets.QGroupBox(ImageDialog)
self.groupBox_2.setGeometry(QtCore.QRect(710, 10, 71, 106))
self.groupBox_2.setObjectName("groupBox_2")
self.btn_strechLow = QtWidgets.QRadioButton(self.groupBox_2)
self.btn_strechLow.setGeometry(QtCore.QRect(10, 20, 61, 21))
self.btn_strechLow.setObjectName("btn_strechLow")
self.btn_strechMid = QtWidgets.QRadioButton(self.groupBox_2)
self.btn_strechMid.setGeometry(QtCore.QRect(10, 40, 61, 21))
self.btn_strechMid.setObjectName("btn_strechMid")
self.btn_strechHigh = QtWidgets.QRadioButton(self.groupBox_2)
self.btn_strechHigh.setGeometry(QtCore.QRect(10, 60, 61, 21))
self.btn_strechHigh.setObjectName("btn_strechHigh")
self.btn_strechSuper = QtWidgets.QRadioButton(self.groupBox_2)
self.btn_strechSuper.setGeometry(QtCore.QRect(10, 80, 61, 21))
self.btn_strechSuper.setObjectName("btn_strechSuper")
self.groupBox_3 = QtWidgets.QGroupBox(ImageDialog)
self.groupBox_3.setGeometry(QtCore.QRect(630, 10, 76, 106))
self.groupBox_3.setObjectName("groupBox_3")
self.btn_size25 = QtWidgets.QRadioButton(self.groupBox_3)
self.btn_size25.setGeometry(QtCore.QRect(10, 40, 61, 21))
self.btn_size25.setObjectName("btn_size25")
self.btn_size50 = QtWidgets.QRadioButton(self.groupBox_3)
self.btn_size50.setGeometry(QtCore.QRect(10, 60, 61, 21))
self.btn_size50.setObjectName("btn_size50")
self.btn_size100 = QtWidgets.QRadioButton(self.groupBox_3)
self.btn_size100.setGeometry(QtCore.QRect(10, 80, 61, 21))
self.btn_size100.setObjectName("btn_size100")
self.btn_size12 = QtWidgets.QRadioButton(self.groupBox_3)
self.btn_size12.setGeometry(QtCore.QRect(10, 20, 61, 21))
self.btn_size12.setObjectName("btn_size12")
self.btn_solve = QtWidgets.QPushButton(ImageDialog)
self.btn_solve.setGeometry(QtCore.QRect(175, 15, 76, 26))
font = QtGui.QFont()
font.setPointSize(10)
self.btn_solve.setFont(font)
self.btn_solve.setObjectName("btn_solve")
self.groupBox_4 = QtWidgets.QGroupBox(ImageDialog)
self.groupBox_4.setGeometry(QtCore.QRect(530, 10, 96, 106))
self.groupBox_4.setObjectName("groupBox_4")
self.btn_colorGrey = QtWidgets.QRadioButton(self.groupBox_4)
self.btn_colorGrey.setGeometry(QtCore.QRect(10, 20, 71, 21))
self.btn_colorGrey.setObjectName("btn_colorGrey")
self.btn_colorCool = QtWidgets.QRadioButton(self.groupBox_4)
self.btn_colorCool.setGeometry(QtCore.QRect(10, 40, 71, 21))
self.btn_colorCool.setObjectName("btn_colorCool")
self.btn_colorRainbow = QtWidgets.QRadioButton(self.groupBox_4)
self.btn_colorRainbow.setGeometry(QtCore.QRect(10, 60, 81, 21))
self.btn_colorRainbow.setObjectName("btn_colorRainbow")
self.btn_colorSpectral = QtWidgets.QRadioButton(self.groupBox_4)
self.btn_colorSpectral.setGeometry(QtCore.QRect(10, 80, 81, 21))
self.btn_colorSpectral.setObjectName("btn_colorSpectral")
self.imageBackground = QtWidgets.QLabel(ImageDialog)
self.imageBackground.setGeometry(QtCore.QRect(0, 0, 790, 126))
self.imageBackground.setText("")
self.imageBackground.setObjectName("imageBackground")
self.btn_loadFits = QtWidgets.QPushButton(ImageDialog)
self.btn_loadFits.setEnabled(True)
self.btn_loadFits.setGeometry(QtCore.QRect(95, 15, 76, 26))
font = QtGui.QFont()
font.setPointSize(10)
self.btn_loadFits.setFont(font)
self.btn_loadFits.setObjectName("btn_loadFits")
self.btn_cancel = QtWidgets.QPushButton(ImageDialog)
self.btn_cancel.setGeometry(QtCore.QRect(15, 85, 76, 26))
font = QtGui.QFont()
font.setPointSize(10)
self.btn_cancel.setFont(font)
self.btn_cancel.setObjectName("btn_cancel")
self.le_imageFile = QtWidgets.QLineEdit(ImageDialog)
self.le_imageFile.setEnabled(False)
self.le_imageFile.setGeometry(QtCore.QRect(95, 50, 156, 26))
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(10)
font.setBold(False)
font.setWeight(50)
self.le_imageFile.setFont(font)
self.le_imageFile.setMouseTracking(False)
self.le_imageFile.setFocusPolicy(QtCore.Qt.NoFocus)
self.le_imageFile.setAcceptDrops(False)
self.le_imageFile.setText("")
self.le_imageFile.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.le_imageFile.setReadOnly(True)
self.le_imageFile.setObjectName("le_imageFile")
self.btn_exposeCont = QtWidgets.QPushButton(ImageDialog)
self.btn_exposeCont.setGeometry(QtCore.QRect(15, 50, 76, 26))
font = QtGui.QFont()
font.setPointSize(10)
self.btn_exposeCont.setFont(font)
self.btn_exposeCont.setObjectName("btn_exposeCont")
self.checkShowCrosshairs = QtWidgets.QCheckBox(ImageDialog)
self.checkShowCrosshairs.setGeometry(QtCore.QRect(105, 90, 131, 16))
font = QtGui.QFont()
font.setPointSize(10)
self.checkShowCrosshairs.setFont(font)
self.checkShowCrosshairs.setChecked(False)
self.checkShowCrosshairs.setObjectName("checkShowCrosshairs")
self.imageMarker = QtWidgets.QWidget(ImageDialog)
self.imageMarker.setGeometry(QtCore.QRect(5, 130, 781, 506))
self.imageMarker.setAutoFillBackground(True)
self.imageMarker.setObjectName("imageMarker")
self.groupBox = QtWidgets.QGroupBox(ImageDialog)
self.groupBox.setGeometry(QtCore.QRect(260, 10, 141, 106))
self.groupBox.setObjectName("groupBox")
self.le_astrometrySolvingTime = QtWidgets.QLineEdit(self.groupBox)
self.le_astrometrySolvingTime.setEnabled(False)
self.le_astrometrySolvingTime.setGeometry(QtCore.QRect(95, 70, 36, 21))
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(10)
font.setBold(False)
font.setWeight(50)
self.le_astrometrySolvingTime.setFont(font)
self.le_astrometrySolvingTime.setMouseTracking(False)
self.le_astrometrySolvingTime.setAcceptDrops(False)
self.le_astrometrySolvingTime.setLayoutDirection(QtCore.Qt.RightToLeft)
self.le_astrometrySolvingTime.setText("")
self.le_astrometrySolvingTime.setMaxLength(15)
self.le_astrometrySolvingTime.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.le_astrometrySolvingTime.setReadOnly(False)
self.le_astrometrySolvingTime.setObjectName("le_astrometrySolvingTime")
self.le_astrometryStatusText = QtWidgets.QLineEdit(self.groupBox)
self.le_astrometryStatusText.setEnabled(False)
self.le_astrometryStatusText.setGeometry(QtCore.QRect(10, 70, 81, 21))
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(8)
font.setBold(False)
font.setWeight(50)
self.le_astrometryStatusText.setFont(font)
self.le_astrometryStatusText.setMouseTracking(False)
self.le_astrometryStatusText.setFocusPolicy(QtCore.Qt.NoFocus)
self.le_astrometryStatusText.setAcceptDrops(False)
self.le_astrometryStatusText.setText("")
self.le_astrometryStatusText.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.le_astrometryStatusText.setReadOnly(True)
self.le_astrometryStatusText.setObjectName("le_astrometryStatusText")
self.le_cameraExposureTime = QtWidgets.QLineEdit(self.groupBox)
self.le_cameraExposureTime.setEnabled(False)
self.le_cameraExposureTime.setGeometry(QtCore.QRect(95, 30, 36, 21))
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(10)
font.setBold(False)
font.setWeight(50)
self.le_cameraExposureTime.setFont(font)
self.le_cameraExposureTime.setMouseTracking(False)
self.le_cameraExposureTime.setAcceptDrops(False)
self.le_cameraExposureTime.setLayoutDirection(QtCore.Qt.RightToLeft)
self.le_cameraExposureTime.setText("")
self.le_cameraExposureTime.setMaxLength(15)
self.le_cameraExposureTime.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.le_cameraExposureTime.setReadOnly(False)
self.le_cameraExposureTime.setObjectName("le_cameraExposureTime")
self.label_84 = QtWidgets.QLabel(self.groupBox)
self.label_84.setGeometry(QtCore.QRect(10, 15, 81, 16))
font = QtGui.QFont()
font.setPointSize(10)
self.label_84.setFont(font)
self.label_84.setObjectName("label_84")
self.label_83 = QtWidgets.QLabel(self.groupBox)
self.label_83.setGeometry(QtCore.QRect(10, 55, 81, 16))
font = QtGui.QFont()
font.setPointSize(10)
self.label_83.setFont(font)
self.label_83.setObjectName("label_83")
self.le_cameraStatusText = QtWidgets.QLineEdit(self.groupBox)
self.le_cameraStatusText.setEnabled(False)
self.le_cameraStatusText.setGeometry(QtCore.QRect(10, 30, 81, 21))
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(8)
font.setBold(False)
font.setWeight(50)
self.le_cameraStatusText.setFont(font)
self.le_cameraStatusText.setMouseTracking(False)
self.le_cameraStatusText.setFocusPolicy(QtCore.Qt.NoFocus)
self.le_cameraStatusText.setAcceptDrops(False)
self.le_cameraStatusText.setText("")
self.le_cameraStatusText.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.le_cameraStatusText.setReadOnly(True)
self.le_cameraStatusText.setObjectName("le_cameraStatusText")
self.groupBox_5 = QtWidgets.QGroupBox(ImageDialog)
self.groupBox_5.setGeometry(QtCore.QRect(405, 10, 120, 106))
self.groupBox_5.setObjectName("groupBox_5")
self.label_86 = QtWidgets.QLabel(self.groupBox_5)
self.label_86.setGeometry(QtCore.QRect(10, 45, 26, 21))
font = QtGui.QFont()
font.setPointSize(10)
self.label_86.setFont(font)
self.label_86.setObjectName("label_86")
self.le_AngleJ2000 = QtWidgets.QLineEdit(self.groupBox_5)
self.le_AngleJ2000.setEnabled(False)
self.le_AngleJ2000.setGeometry(QtCore.QRect(55, 70, 51, 21))
font = QtGui.QFont()
font.setPointSize(10)
font.setBold(False)
font.setWeight(50)
self.le_AngleJ2000.setFont(font)
self.le_AngleJ2000.setMouseTracking(False)
self.le_AngleJ2000.setFocusPolicy(QtCore.Qt.NoFocus)
self.le_AngleJ2000.setAcceptDrops(False)
self.le_AngleJ2000.setText("")
self.le_AngleJ2000.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.le_AngleJ2000.setReadOnly(True)
self.le_AngleJ2000.setObjectName("le_AngleJ2000")
self.label_87 = QtWidgets.QLabel(self.groupBox_5)
self.label_87.setGeometry(QtCore.QRect(10, 70, 36, 21))
font = QtGui.QFont()
font.setPointSize(10)
self.label_87.setFont(font)
self.label_87.setObjectName("label_87")
self.le_RaJ2000 = QtWidgets.QLineEdit(self.groupBox_5)
self.le_RaJ2000.setEnabled(False)
self.le_RaJ2000.setGeometry(QtCore.QRect(40, 20, 66, 21))
font = QtGui.QFont()
font.setPointSize(10)
font.setBold(False)
font.setWeight(50)
self.le_RaJ2000.setFont(font)
self.le_RaJ2000.setMouseTracking(False)
self.le_RaJ2000.setFocusPolicy(QtCore.Qt.NoFocus)
self.le_RaJ2000.setAcceptDrops(False)
self.le_RaJ2000.setText("")
self.le_RaJ2000.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.le_RaJ2000.setReadOnly(True)
self.le_RaJ2000.setObjectName("le_RaJ2000")
self.le_DecJ2000 = QtWidgets.QLineEdit(self.groupBox_5)
self.le_DecJ2000.setEnabled(False)
self.le_DecJ2000.setGeometry(QtCore.QRect(40, 45, 66, 21))
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(10)
font.setBold(False)
font.setWeight(50)
self.le_DecJ2000.setFont(font)
self.le_DecJ2000.setMouseTracking(False)
self.le_DecJ2000.setFocusPolicy(QtCore.Qt.NoFocus)
self.le_DecJ2000.setAcceptDrops(False)
self.le_DecJ2000.setText("")
self.le_DecJ2000.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.le_DecJ2000.setReadOnly(True)
self.le_DecJ2000.setObjectName("le_DecJ2000")
self.label_85 = QtWidgets.QLabel(self.groupBox_5)
self.label_85.setGeometry(QtCore.QRect(10, 20, 21, 21))
font = QtGui.QFont()
font.setPointSize(10)
self.label_85.setFont(font)
self.label_85.setObjectName("label_85")
self.imageBackground.raise_()
self.image.raise_()
self.btn_expose.raise_()
self.groupBox_2.raise_()
self.groupBox_3.raise_()
self.btn_solve.raise_()
self.groupBox_4.raise_()
self.btn_loadFits.raise_()
self.btn_cancel.raise_()
self.le_imageFile.raise_()
self.btn_exposeCont.raise_()
self.checkShowCrosshairs.raise_()
self.imageMarker.raise_()
self.groupBox.raise_()
self.groupBox_5.raise_()
self.retranslateUi(ImageDialog)
QtCore.QMetaObject.connectSlotsByName(ImageDialog)
def retranslateUi(self, ImageDialog):
_translate = QtCore.QCoreApplication.translate
ImageDialog.setWindowTitle(_translate("ImageDialog", "Imaging"))
self.btn_expose.setToolTip(_translate("ImageDialog", "<html><head/><body><p>Single exposure</p></body></html>"))
self.btn_expose.setText(_translate("ImageDialog", "Expose 1"))
        self.groupBox_2.setTitle(_translate("ImageDialog", "Stretch"))
self.btn_strechLow.setText(_translate("ImageDialog", "Low"))
self.btn_strechMid.setText(_translate("ImageDialog", "Mid"))
self.btn_strechHigh.setText(_translate("ImageDialog", "High"))
self.btn_strechSuper.setText(_translate("ImageDialog", "Super"))
self.groupBox_3.setTitle(_translate("ImageDialog", "Zoom"))
self.btn_size25.setText(_translate("ImageDialog", "4x"))
self.btn_size50.setText(_translate("ImageDialog", "2x"))
self.btn_size100.setText(_translate("ImageDialog", "1x"))
self.btn_size12.setText(_translate("ImageDialog", "8x"))
self.btn_solve.setToolTip(_translate("ImageDialog", "<html><head/><body><p>Single plate solve of the actual image</p></body></html>"))
self.btn_solve.setText(_translate("ImageDialog", "Solve"))
self.groupBox_4.setTitle(_translate("ImageDialog", "Colors"))
self.btn_colorGrey.setToolTip(_translate("ImageDialog", "<html><head/><body><p>Color scheme black /white</p></body></html>"))
self.btn_colorGrey.setText(_translate("ImageDialog", "Grey"))
self.btn_colorCool.setToolTip(_translate("ImageDialog", "<html><head/><body><p>Color scheme red/blue</p></body></html>"))
self.btn_colorCool.setText(_translate("ImageDialog", "Cool"))
self.btn_colorRainbow.setToolTip(_translate("ImageDialog", "<html><head/><body><p>Color scheme rainbow</p></body></html>"))
self.btn_colorRainbow.setText(_translate("ImageDialog", "Rainbow"))
self.btn_colorSpectral.setToolTip(_translate("ImageDialog", "<html><head/><body><p>Color scheme rainbow</p></body></html>"))
self.btn_colorSpectral.setText(_translate("ImageDialog", "Spectral"))
self.imageBackground.setProperty("color", _translate("ImageDialog", "blue"))
        self.btn_loadFits.setToolTip(_translate("ImageDialog", "<html><head/><body><p><span style=\" font-size:10pt;\">Load a FITS file and display it</span></p></body></html>"))
self.btn_loadFits.setText(_translate("ImageDialog", "Load FITS"))
        self.btn_cancel.setToolTip(_translate("ImageDialog", "<html><head/><body><p>Cancels an imaging or plate solving action or stops continuous exposures</p></body></html>"))
self.btn_cancel.setText(_translate("ImageDialog", "Cancel"))
self.le_imageFile.setToolTip(_translate("ImageDialog", "<html><head/><body><p>name of image which is shown</p></body></html>"))
        self.btn_exposeCont.setToolTip(_translate("ImageDialog", "<html><head/><body><p><span style=\" font-size:10pt;\">Continuous exposures</span></p></body></html>"))
self.btn_exposeCont.setText(_translate("ImageDialog", "Expose N"))
self.checkShowCrosshairs.setToolTip(_translate("ImageDialog", "<html><head/><body><p>Show crosshairs on image</p></body></html>"))
self.checkShowCrosshairs.setText(_translate("ImageDialog", "Show crosshairs"))
self.groupBox.setTitle(_translate("ImageDialog", "Device Stats"))
self.le_astrometrySolvingTime.setToolTip(_translate("ImageDialog", "<html><head/><body><p>Time elapsed for plate solving</p></body></html>"))
self.le_astrometryStatusText.setToolTip(_translate("ImageDialog", "<html><head/><body><p><span style=\" font-size:10pt;\">Status feedback from astrometry</span></p></body></html>"))
self.le_cameraExposureTime.setToolTip(_translate("ImageDialog", "<html><head/><body><p>Time left for image integration</p></body></html>"))
self.label_84.setText(_translate("ImageDialog", "Camera"))
self.label_83.setText(_translate("ImageDialog", "Astrometry"))
self.le_cameraStatusText.setToolTip(_translate("ImageDialog", "<html><head/><body><p><span style=\" font-size:10pt;\">Status feedback from camera</span></p></body></html>"))
self.groupBox_5.setTitle(_translate("ImageDialog", "Solve Result"))
self.label_86.setText(_translate("ImageDialog", "DEC"))
self.le_AngleJ2000.setToolTip(_translate("ImageDialog", "<html><head/><body><p>Shows the solved angle of image in degrees</p></body></html>"))
self.label_87.setText(_translate("ImageDialog", "Angle"))
self.le_RaJ2000.setToolTip(_translate("ImageDialog", "<html><head/><body><p><span style=\" font-weight:400;\">Shows the solved RA of image in J2000 coordinates</span></p></body></html>"))
self.le_DecJ2000.setToolTip(_translate("ImageDialog", "<html><head/><body><p><span style=\" font-weight:400;\">Shows the solved DEC of image in J2000 coordinates</span></p></body></html>"))
self.label_85.setText(_translate("ImageDialog", "RA"))
|
py | 7dfc00a14d846c1baf78c143ee4583fedc706c9b | from from_3b1b.old.sphere_area import *
from manimlib.imports import *
class MadAtMathologer(PiCreatureScene):
def create_pi_creature(self):
return Mortimer().to_corner(DR)
def construct(self):
morty = self.pi_creature
self.play(morty.change, "angry")
self.wait(3)
        self.play(morty.change, "hesitant")
self.wait(2)
self.play(morty.change, "shruggie")
self.wait(3)
class JustTheIntegral(Scene):
def construct(self):
tex = TexMobject("\\int_0^{\\pi / 2} \\cos(\\theta)d\\theta")
tex.scale(2)
self.add(tex)
class SphereVideoWrapper(Scene):
def construct(self):
title = TextMobject("Surface area of a sphere")
title.scale(1.5)
title.to_edge(UP)
rect = ScreenRectangle(height=6)
rect.next_to(title, DOWN)
self.add(title)
self.play(ShowCreation(rect))
self.wait()
class SphereRings(SecondProof):
CONFIG = {
"sphere_config": {
"resolution": (60, 60),
},
}
def construct(self):
self.setup_shapes()
self.grow_rings()
self.show_one_ring()
self.show_radial_line()
self.show_thickness()
self.flash_through_rings()
def grow_rings(self):
sphere = self.sphere
rings = self.rings
north_rings = rings[:len(rings) // 2]
sphere.set_fill(opacity=0)
sphere.set_stroke(WHITE, 0.5, opacity=0.5)
southern_mesh = VGroup(*[
face.copy() for face in sphere
if face.get_center()[2] < 0
])
southern_mesh.set_stroke(WHITE, 0.1, 0.5)
self.play(Write(sphere))
self.wait()
self.play(
FadeOut(sphere),
FadeIn(southern_mesh),
FadeIn(north_rings),
)
self.wait(4)
self.north_rings = north_rings
self.southern_mesh = southern_mesh
def show_one_ring(self):
north_rings = self.north_rings
index = len(north_rings) // 2
ring = north_rings[index]
to_fade = VGroup(*[
nr for nr in north_rings
if nr is not ring
])
north_rings.save_state()
circle = Circle()
circle.set_stroke(PINK, 5)
circle.set_width(ring.get_width())
circle.move_to(ring, IN)
thickness = ring.get_depth() * np.sqrt(2)
brace = Brace(Line(ORIGIN, 0.2 * RIGHT), UP)
brace.set_width(thickness)
brace.rotate(90 * DEGREES, RIGHT)
brace.rotate(45 * DEGREES, UP)
brace.move_to(1.5 * (RIGHT + OUT))
brace.set_stroke(WHITE, 1)
word = TextMobject("Thickness")
word.rotate(90 * DEGREES, RIGHT)
word.next_to(brace, RIGHT + OUT, buff=0)
self.play(
to_fade.set_fill, {"opacity": 0.2},
to_fade.set_stroke, {"opacity": 0.0},
)
self.move_camera(
phi=0, theta=-90 * DEGREES,
run_time=2,
)
self.stop_ambient_camera_rotation()
self.play(ShowCreation(circle))
self.play(FadeOut(circle))
self.move_camera(
phi=70 * DEGREES,
theta=-100 * DEGREES,
run_time=2,
)
self.begin_ambient_camera_rotation(0.02)
self.play(
GrowFromCenter(brace),
Write(word),
)
self.wait(2)
self.play(FadeOut(VGroup(brace, word)))
self.circum_circle = circle
self.thickness_label = VGroup(brace, word)
self.ring = ring
def show_radial_line(self):
ring = self.ring
point = ring.get_corner(RIGHT + IN)
R_line = Line(ORIGIN, point)
xy_line = Line(ORIGIN, self.sphere.get_right())
theta = np.arccos(np.dot(
normalize(R_line.get_vector()),
normalize(xy_line.get_vector())
))
arc = Arc(angle=theta, radius=0.5)
arc.rotate(90 * DEGREES, RIGHT, about_point=ORIGIN)
theta = TexMobject("\\theta")
theta.rotate(90 * DEGREES, RIGHT)
theta.next_to(arc, RIGHT)
theta.shift(SMALL_BUFF * (LEFT + OUT))
R_label = TexMobject("R")
R_label.rotate(90 * DEGREES, RIGHT)
R_label.next_to(
R_line.get_center(), OUT + LEFT,
buff=SMALL_BUFF
)
VGroup(R_label, R_line).set_color(YELLOW)
z_axis_point = np.array(point)
z_axis_point[:2] = 0
r_line = DashedLine(z_axis_point, point)
r_line.set_color(RED)
r_label = TexMobject("R\\cos(\\theta)")
r_label.rotate(90 * DEGREES, RIGHT)
r_label.scale(0.7)
r_label.match_color(r_line)
r_label.set_stroke(width=0, background=True)
r_label.next_to(r_line, OUT, 0.5 * SMALL_BUFF)
VGroup(
R_label, xy_line, arc, R_label,
r_line, r_label,
).set_shade_in_3d(True)
# self.stop_ambient_camera_rotation()
self.move_camera(
phi=85 * DEGREES,
theta=-100 * DEGREES,
added_anims=[
ring.set_fill, {"opacity": 0.5},
ring.set_stroke, {"opacity": 0.1},
ShowCreation(R_line),
FadeInFrom(R_label, IN),
]
)
self.wait()
self.play(
FadeIn(xy_line),
ShowCreation(arc),
Write(theta),
)
self.wait()
self.play(
ShowCreation(r_line),
FadeInFrom(r_label, IN),
)
self.wait()
self.move_camera(
phi=70 * DEGREES,
theta=-110 * DEGREES,
run_time=3
)
self.wait(2)
def show_thickness(self):
brace, word = self.thickness_label
R_dtheta = TexMobject("R \\, d\\theta")
R_dtheta.rotate(90 * DEGREES, RIGHT)
R_dtheta.move_to(word, LEFT)
self.play(
GrowFromCenter(brace),
Write(R_dtheta)
)
self.wait(3)
def flash_through_rings(self):
rings = self.north_rings.copy()
rings.fade(1)
rings.sort(lambda p: p[2])
for x in range(8):
self.play(LaggedStartMap(
ApplyMethod, rings,
lambda m: (m.set_fill, PINK, 0.5),
rate_func=there_and_back,
lag_ratio=0.1,
run_time=2,
))
class IntegralSymbols(Scene):
def construct(self):
int_sign = TexMobject("\\displaystyle \\int")
int_sign.set_height(1.5)
int_sign.move_to(5 * LEFT)
circumference, times, thickness = ctt = TextMobject(
"circumference", "$\\times$", "thickness"
)
circumference.set_color(MAROON_B)
ctt.next_to(int_sign, RIGHT, SMALL_BUFF)
area_brace = Brace(ctt, DOWN)
area_text = area_brace.get_text("Area of a ring")
all_rings = TextMobject("All rings")
all_rings.scale(0.5)
all_rings.next_to(int_sign, DOWN, SMALL_BUFF)
all_rings.shift(SMALL_BUFF * LEFT)
circum_formula = TexMobject(
"2\\pi", "R\\cos(\\theta)",
)
circum_formula[1].set_color(RED)
circum_formula.move_to(circumference)
circum_brace = Brace(circum_formula, UP)
R_dtheta = TexMobject("R \\, d\\theta")
R_dtheta.move_to(thickness, LEFT)
R_dtheta_brace = Brace(R_dtheta, UP)
zero, pi_halves = bounds = TexMobject("0", "\\pi / 2")
bounds.scale(0.5)
zero.move_to(all_rings)
pi_halves.next_to(int_sign, UP, SMALL_BUFF)
pi_halves.shift(SMALL_BUFF * RIGHT)
self.add(int_sign)
self.play(
GrowFromCenter(area_brace),
FadeInFrom(area_text, UP),
)
self.wait()
self.play(FadeInFromDown(circumference))
self.play(
FadeInFromDown(thickness),
Write(times)
)
self.play(Write(all_rings))
self.wait()
self.play(
circumference.next_to, circum_brace, UP, MED_SMALL_BUFF,
circumference.shift, SMALL_BUFF * UR,
GrowFromCenter(circum_brace),
)
self.play(FadeInFrom(circum_formula, UP))
self.wait()
self.play(
thickness.next_to, circumference, RIGHT, MED_SMALL_BUFF,
GrowFromCenter(R_dtheta_brace),
area_brace.stretch, 0.84, 0, {"about_edge": LEFT},
MaintainPositionRelativeTo(area_text, area_brace),
)
self.play(FadeInFrom(R_dtheta, UP))
self.wait()
self.play(ReplacementTransform(all_rings, bounds))
self.wait()
# RHS
rhs = TexMobject(
"\\displaystyle =", "2\\pi R^2", "\\int_0^{\\pi / 2}",
"\\cos(\\theta)", "d\\theta",
)
rhs.set_color_by_tex("cos", RED)
rhs.next_to(R_dtheta, RIGHT)
int_brace = Brace(rhs[2:], DOWN)
q_marks = int_brace.get_text("???")
one = TexMobject("1")
one.move_to(q_marks)
self.play(FadeInFrom(rhs, 4 * LEFT))
self.wait()
self.play(ShowCreationThenFadeAround(rhs[1]))
self.wait()
self.play(ShowCreationThenFadeAround(rhs[2:]))
self.wait()
self.play(
GrowFromCenter(int_brace),
LaggedStartMap(
FadeInFrom, q_marks,
lambda m: (m, UP),
)
)
self.wait()
self.play(ReplacementTransform(q_marks, one))
self.wait()
class ShamelessPlug(TeacherStudentsScene):
def construct(self):
self.student_says(
"But why $4\\pi R^2$?",
target_mode="maybe"
)
self.change_student_modes(
"erm", "maybe", "happy",
added_anims=[self.teacher.change, "happy"]
)
self.wait(3)
|
py | 7dfc00e9ffa11bdd15a62c8db92d33beeaf504f6 | # Copyright © 2020 Interplanetary Database Association e.V.,
# Planetmint and IPDB software contributors.
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
# Code is Apache-2.0 and docs are CC-BY-4.0
import pytest
BLOCKS_ENDPOINT = '/api/v1/blocks/'
@pytest.mark.bdb
@pytest.mark.usefixtures('inputs')
def test_get_block_returns_404_if_not_found(client):
res = client.get(BLOCKS_ENDPOINT + '123')
assert res.status_code == 404
res = client.get(BLOCKS_ENDPOINT + '123/')
assert res.status_code == 404
res = client.get(BLOCKS_ENDPOINT + 'latest')
assert res.status_code == 200
res = client.get(BLOCKS_ENDPOINT + 'latest/')
assert res.status_code == 200
@pytest.mark.bdb
def test_get_blocks_by_txid_endpoint_returns_empty_list_not_found(client):
res = client.get(BLOCKS_ENDPOINT + '?transaction_id=')
assert res.status_code == 200
assert len(res.json) == 0
res = client.get(BLOCKS_ENDPOINT + '?transaction_id=123')
assert res.status_code == 200
assert len(res.json) == 0
@pytest.mark.bdb
def test_get_blocks_by_txid_endpoint_returns_400_bad_query_params(client):
res = client.get(BLOCKS_ENDPOINT)
assert res.status_code == 400
res = client.get(BLOCKS_ENDPOINT + '?ts_id=123')
assert res.status_code == 400
assert res.json == {
'message': {
'transaction_id': 'Missing required parameter in the JSON body or the post body or the query string'
}
}
res = client.get(BLOCKS_ENDPOINT + '?transaction_id=123&foo=123')
assert res.status_code == 400
assert res.json == {
'message': 'Unknown arguments: foo'
}
res = client.get(BLOCKS_ENDPOINT + '?transaction_id=123&status=123')
assert res.status_code == 400
assert res.json == {
'message': 'Unknown arguments: status'
}
|
py | 7dfc03014282edf013d86d7d47b3ea735e434edc | from pathlib import Path
from vnpy.trader.app import BaseApp
from .base import APP_NAME
# Option CTA strategy engine
from .engine import CtaOptionEngine
from .template import (
Direction,
Offset,
Exchange,
Status,
Color,
ContractData,
HistoryRequest,
TickData,
BarData,
TradeData,
OrderData,
CtaTemplate,
CtaOptionTemplate,
CtaOptionPolicy
) # noqa
from vnpy.trader.utility import BarGenerator, ArrayManager # noqa
class CtaOptionApp(BaseApp):
"""期权引擎App"""
app_name = APP_NAME
app_module = __module__
app_path = Path(__file__).parent
display_name = "CTA期权策略"
engine_class = CtaOptionEngine
widget_name = "CtaOption"
icon_name = "cta.ico"
|
py | 7dfc034f70a55921c38738b20881ea4a45bb495d | {
"CDPCQ04700": "계좌 거래내역", # order / 5 / 현물
"CEXAQ21100": "유렉스 주문체결내역조회", # order / 5 / 유렉스
"CEXAQ21200": "유렉스 주문가능 수량/금액 조회", # order / 5 / 유렉스
"CEXAQ31100": "유렉스 야간장잔고및 평가현황", # order / 4 / 유렉스
"CEXAQ31200": "유렉스 예탁금 및 통합잔고조회", # order / 4 / 유렉스
"CEXAQ44200": "EUREX 야간옵션 기간주문체결조회", # order / 3 / 유렉스
"CEXAT11100": "유렉스 매수/매도주문", # order / 5 / 유렉스
"CEXAT11200": "유렉스 정정주문", # order / 4 / 유렉스
"CEXAT11300": "유렉스 취소주문", # order / 4 / 유렉스
"CFOAQ00600": "선물옵션 계좌주문체결내역조회", # order / 5 / 선물
"CFOAQ10100": "선물옵션 주문가능수량조회", # order / 4 / 선물
"CFOAT00100": "선물옵션 정상주문", # order / 5 / 선물
"CFOAT00200": "선물옵션 정정주문", # order / 3 / 선물
"CFOAT00300": "선물옵션 취소주문", # order / 3 / 선물
"CFOBQ10500": "선물옵션 계좌예탁금증거금조회", # order / 2 / 선물
"CFOBQ10800": "선물옵션 옵션매도시 주문증거금조회", # order / 2 / 선물
"CFOEQ11100": "선물옵션가정산예탁금상세", # order / 2 / 선물
"CFOEQ82600": "선물옵션 일별 계좌손익내역", # order / 3 / 선물
"CFOFQ02400": "계좌 미결제 약정현황(평균가)", # order / 3 / 현물
"ChartExcel": "챠트엑셀데이터조회", # data / 2 / 현물
"ChartIndex": "챠트지표데이터조회", # data / 2 / 현물
"CLNAQ00100": "예탁담보융자가능종목현황조회", # order / 1 / 현물
"CSPAQ00600": "계좌별신용한도조회", # order / 3 / 현물
"CSPAQ12200": "현물계좌예수금 주문가능금액 총평가 조회", # order / 3 / 현물
"CSPAQ12300": "BEP단가조회", # order / 3 / 현물
"CSPAQ13700": "현물계좌주문체결내역조회", # order / 3 / 현물
"CSPAQ22200": "현물계좌예수금 주문가능금액 총평가2", # order / 3 / 현물
"CSPAT00600": "현물주문", # order / 5 / 현물
"CSPAT00700": "현물정정주문", # order / 4 / 현물
"CSPAT00800": "현물취소주문", # order / 4 / 현물
"CSPBQ00200": "현물계좌증거금률별주문가능수량조회", # order / 2 / 현물
"FOCCQ33600": "주식계좌 기간별수익률 상세", # order / 4 / 현물
"FOCCQ33700": "선물옵션 기간별 계좌 수익률 현황", # order / 3 / 선물
"MMDAQ91200": "파생상품증거금율조회", # order / 3 / 파생
"t0150": "주식당일매매일지/수수료", # basic / 4 / 현물
"t0151": "주식당일매매일지/수수료(전일)", # basic / 4 / 현물
"t0167": "서버시간조회", # data / 2 / 현물
"t0424": "주식잔고2", # order / 5 / 현물
"t0425": "주식체결/미체결", # order / 5 / 현물
"t0434": "선물/옵션체결/미체결", # order / 5 / 선물
"t0441": "선물/옵션잔고평가(이동평균)", # order / 4 / 선물
"t1101": "주식현재가호가조회", # data / 5 / 현물
"t1102": "주식현재가(시세)조회", # data / 5 / 현물
"t1104": "주식현재가시세메모", # data / 3 / 현물
"t1105": "주식피못/디마크조회", # data / 3 / 현물
"t1301": "주식시간대별체결조회", # data / 4 / 현물
"t1302": "주식분별주가조회", # data / 4 / 현물
"t1305": "기간별주가", # basic / 3 / 현물
"t1308": "주식시간대별체결조회챠트", # data / 4 / 현물
"t1310": "주식당일전일분틱조회", # data / 4 / 현물
"t1403": "신규상장종목조회", # basic / 3 / 현물
"t1404": "관리/불성실/투자유의조회", # basic / 4 / 현물
"t1405": "투자경고/매매정지/정리매매조회", # basic / 4 / 현물
"t1410": "초저유동성조회", # data / 3 / 현물
"t1411": "증거금율별종목조회", # data / 3 / 현물
"t1422": "상/하한", # data / 4 / 현물
"t1427": "상/하한가직전", # data / 4 / 현물
"t1442": "신고/신저가", # data / 4 / 현물
"t1444": "시가총액상위", # basic / 4 / 현물
"t1449": "가격대별매매비중조회", # data / 4 / 현물
"t1452": "거래량상위", # basic / 4 / 현물
"t1463": "거래대금상위", # basic / 4 / 현물
"t1471": "시간대별호가잔량추이", # data / 4 / 현물
"t1475": "체결강도추이", # data / 4 / 현물
"t1485": "예상지수", # data / 4 / 현물
"t1486": "시간별예상체결가", # data / 3 / 현물
"t1488": "예상체결가등락율상위조회", # data / 3 / 현물
"t1489": "예상체결량상위조회", # data / 3 / 현물
"t1511": "업종현재가", # data / 4 / 현물
"t1514": "업종기간별추이", # data / 3 / 현물
"t1516": "업종별종목시세", # data / 3 / 현물
"t1531": "테마별종목", # basic / 5 / 현물
"t1532": "종목별테마", # basic / 5 / 현물
"t1533": "특이테마", # basic / 5 / 현물
"t1537": "테마종목별시세조회", # data / 5 / 현물
"t1601": "투자자별종합", # data / 4 / 현물
"t1602": "시간대별투자자매매추이", # data / 4 / 현물
"t1603": "시간대별투자자매매추이상세", # data / 4 / 현물
"t1615": "투자자매매종합1", # data / 3 / 현물
"t1617": "투자자매매종합2", # data / 3 / 현물
"t1621": "업종별분별투자자매매동향(챠트용)", # data / 4 / 현물
"t1631": "프로그램매매종합조회", # data / 3 / 현물
"t1632": "시간대별프로그램매매추이", # data / 3 / 현물
"t1633": "기간별프로그램매매추이", # data / 3 / 현물
"t1636": "종목별프로그램매매동향", # data / 3 / 현물
"t1637": "종목별프로그램매매추이", # data / 3 / 현물
"t1638": "종목별잔량/사전공시", # data / 3 / 현물
"t1640": "프로그램매매종합조회(미니)", # data / 3 / 현물
"t1662": "시간대별프로그램매매추이(차트)", # data / 3 / 현물
"t1664": "투자자매매종합(챠트)", # data / 3 / 현물
"t1665": "기간별투자자매매추이(챠트)", # data / 3 / 현물
"t1701": "외인기관종목별동향", # data / 4 / 현물
"t1702": "외인기관종목별동향", # data / 4 / 현물
"t1717": "외인기관종목별동향", # data / 4 / 현물
"t1752": "종목별상위회원사", # data / 3 / 현물
"t1764": "회원사리스트", # basic / 3 / 현물
"t1771": "종목별회원사추이", # data / 3 / 현물
"t1809": "신호조회", # data / 3 / 현물
"t1825": "종목Q클릭검색(씽큐스마트)", # Qclick / 3 / 현물
"t1826": "종목Q클릭검색리스트조회(씽큐스마트)", # Qclick / 3 / 현물
"t1857": "e종목검색(신버전API용)", # Catch / 3 / 현물
"t1866": "서버저장조건리스트조회(API)", # basic / 3 / 현물
"t1901": "ETF현재가(시세)조회", # data / 3 / ETF
"t1902": "ETF시간별추이", # data / 3 / ETF
"t1903": "ETF일별추이", # data / 3 / ETF
"t1904": "ETF구성종목조회", # data / 3 / ETF
"t1906": "ETFLP호가", # data / 3 / ETF
"t1921": "신용거래동향", # basic / 3 / 현물
"t1926": "종목별신용정보", # data / 3 / 현물
"t1927": "공매도일별추이", # data / 3 / 현물
"t1941": "종목별대차거래일간추이", # data / 3 / 현물
"t1950": "ELW현재가(시세)조회", # data / 3 / ELW
"t1951": "ELW시간대별체결조회", # data / 3 / ELW
"t1954": "ELW일별주가", # data / 3 / ELW
"t1955": "ELW지표검색", # data / 3 / ELW
"t1956": "ELW현재가(확정지급액)조회", # data / 3 / ELW
"t1958": "ELW종목비교", # data / 3 / ELW
"t1959": "LP대상종목정보조회", # data / 2 / ELW
"t1960": "ELW등락율상위", # data / 3 / ELW
"t1961": "ELW거래량상위", # data / 3 / ELW
"t1964": "ELW전광판", # data / 3 / ELW
"t1966": "ELW거래대금상위", # data / 3 / ELW
"t1971": "ELW현재가호가조회", # data / 3 / ELW
"t1972": "ELW현재가(거래원)조회", # data / 3 / ELW
"t1973": "ELW시간대별예상체결조회", # data / 3 / ELW
"t1974": "ELW기초자산동일종목", # data / 3 / ELW
"t1981": "기초자산리스트조회", # data / 3 / ELW
"t2101": "선물/옵션현재가(시세)조회", # data / 3 / 선물
"t2105": "선물/옵션현재가호가조회", # data / 3 / 선물
"t2106": "선물/옵션현재가시세메모", # data / 3 / 선물
"t2201": "선물옵션시간대별체결조회", # data / 3 / 선물
"t2203": "기간별주가", # data / 3 / 선물
"t2209": "선물옵션틱분별체결조회챠트", # data / 3 / 선물
"t2210": "선물옵션시간대별체결조회(단일출력용)", # data / 3 / 선물
"t2301": "옵션전광판", # data / 3 / 선물
"t2405": "선물옵션호가잔량비율챠트", # data / 3 / 선물
"t2421": "미결제약정추이", # data / 3 / 선물
"t2541": "상품선물투자자매매동향(실시간)", # data / 3 / 선물
"t2545": "상품선물투자자매매동향(챠트용)", # data / 3 / 선물
"t2830": "EUREXKOSPI200옵션선물현재가(시세)조회", # data / 3 / 유렉스
"t2831": "EUREXKOSPI200옵션선물호가조회", # data / 3 / 유렉스
"t2832": "EUREX야간옵션선물시간대별체결조회", # data / 3 / 유렉스
"t2833": "EUREX야간옵션선물기간별추이", # data / 3 / 유렉스
"t2835": "EUREX옵션선물시세전광판", # data / 3 / 유렉스
"t3102": "뉴스본문", # data / 2 / 현물
"t3202": "종목별증시일정", # basic / 3 / 현물
"t3320": "FNG_요약", # basic / 3 / 현물
"t3341": "재무순위종합", # basic / 3 / 현물
"t3401": "투자의견", # basic / 3 / 현물
"t3518": "해외실시간지수", # data / 3 / 해외
"t3521": "해외지수조회(API용)", # basic / 3 / 해외
"t4201": "주식챠트(종합)", # data / 5 / 현물
"t4203": "업종챠트(종합)", # data / 5 / 현물
"t8401": "주식선물마스터조회(API용)", # data / 3 / 선물
"t8402": "주식선물현재가조회(API용)", # data / 3 / 선물
"t8403": "주식선물호가조회(API용)", # data / 3 / 선물
"t8404": "주식선물시간대별체결조회(API용)", # data / 3 / 선물
"t8405": "주식선물기간별주가(API용)", # data / 3 / 선물
"t8406": "주식선물틱분별체결조회(API용)", # data / 3 / 선물
"t8407": "API용주식멀티현재가조회", # data / 3 / 현물
"t8411": "주식챠트(틱/n틱)", # data / 5 / 현물
"t8412": "주식챠트(N분)", # data / 5 / 현물
"t8413": "주식챠트(일주월)", # data / 5 / 현물
"t8414": "선물옵션차트(틱/n틱)", # data / 3 / 선물
"t8415": "선물/옵션챠트(N분)", # data / 3 / 선물
"t8416": "선물/옵션챠트(일주월)", # data / 3 / 선물
"t8417": "업종차트(틱/n틱)", # data / 5 / 현물
"t8418": "업종챠트(N분)", # data / 5 / 현물
"t8419": "업종챠트(일주월)", # data / 5 / 현물
"t8424": "전체업종", # basic / 5 / 현물
"t8425": "전체테마", # basic / 4 / 현물
"t8426": "상품선물마스터조회(API용)", # data / 3 / 선물
"t8427": "과거데이터시간대별조회", # data / 3 / 선물
"t8428": "증시주변자금추이", # basic / 3 / 현물
"t8429": "EUREX야간옵션선물틱분별체결조회챠트", # data / 3 / 유렉스
"t8430": "주식종목조회", # basic / 5 / 현물
"t8431": "ELW종목조회", # data / 3 / ELW
"t8432": "지수선물마스터조회API용", # data / 3 / 선물
"t8433": "지수옵션마스터조회API용", # data / 3 / 선물
"t8434": "선물/옵션멀티현재가조회", # data / 3 / 선물
"t8435": "파생종목마스터조회API용", # data / 3 / 파생
"t8436": "주식종목조회 API용", # basic / 5 / 현물
"t8437": "CME/EUREX마스터조회(API용)", # data / 3 / 유렉스
"t9905": "기초자산리스트조회", # data / 3 / ELW
"t9907": "만기월조회", # data / 3 / ELW
"t9942": "ELW마스터조회API용", # data / 3 / ELW
"t9943": "지수선물마스터조회API용", # data / 3 / 선물
"t9944": "지수옵션마스터조회API용", # data / 3 / 선물
"t9945": "주식마스터조회API용-종목명40bytes" # data / 5 / 현물
} |
py | 7dfc03c3ba4b24d7cc92a455d2054ed66eaba723 | import numpy as np
class Mesh:
def __init__(self, x_start, x_end, n_cells):
self.x_start = x_start
self.x_end = x_end
self.n_cells = n_cells
self.n_faces = n_cells + 1
self.face_x = np.linspace(x_start, x_end, self.n_faces)
self.cell_x = 0.5 * (self.face_x[:-1] + self.face_x[1:])
self.cell_distance = np.diff(self.cell_x)
self.cell_size = np.diff(self.face_x)
def __str__(self):
cell_x = self.cell_x
face_x = self.face_x
return f"Mesh Information: \n" \
f"X start: {self.x_start} \nX end: {self.x_end} \nCells no.: {self.n_cells} \n" \
f"Cells co.: {cell_x} \nFaces co.: {face_x} \n"
|
py | 7dfc047123204578bac29c5cbfa18b7e3484ee78 | #!/usr/bin/env python2
import argparse
import collections
import hashlib
import os
import random
import re
import string
import time
import cPickle as pickle
import arxiv
import tqdm
INDEX_FOLDER = 'indices'
BATCH_SIZE = 100
CATEGORIES = {
'cs.AI': "Artificial Intelligence",
'cs.CL': "Computation and Language",
'cs.CC': "Computational Complexity",
'cs.CE': "Computational Engineering, Finance, and Science",
'cs.CG': "Computational Geometry",
'cs.GT': "Computer Science and Game Theory",
'cs.CV': "Computer Vision and Pattern Recognition",
'cs.CY': "Computers and Society",
'cs.CR': "Cryptography and Security",
'cs.DS': "Data Structures and Algorithms",
'cs.DB': "Databases",
'cs.DL': "Digital Libraries",
'cs.DM': "Discrete Mathematics",
'cs.DC': "Distributed, Parallel, and Cluster Computing",
'cs.ET': "Emerging Technologies",
'cs.FL': "Formal Languages and Automata Theory",
'cs.GL': "General Literature",
'cs.GR': "Graphics",
'cs.AR': "Hardware Architecture",
'cs.HC': "Human-Computer Interaction",
'cs.IR': "Information Retrieval",
'cs.IT': "Information Theory",
'cs.LG': "Learning",
'cs.LO': "Logic in Computer Science",
'cs.MS': "Mathematical Software",
'cs.MA': "Multiagent Systems",
'cs.MM': "Multimedia",
'cs.NI': "Networking and Internet Architecture",
'cs.NE': "Neural and Evolutionary Computing",
'cs.NA': "Numerical Analysis",
'cs.OS': "Operating Systems",
'cs.OH': "Other Computer Science",
'cs.PF': "Performance",
'cs.PL': "Programming Languages",
'cs.RO': "Robotics",
'cs.SI': "Social and Information Networks",
'cs.SE': "Software Engineering",
'cs.SD': "Sound",
'cs.SC': "Symbolic Computation",
'cs.SY': "Systems and Control"
}
class Index(object):
def __init__(self, query=None, max_results=None):
if query is None:
query = ''
if max_results is None:
max_results=1000
self.query = query
self.max_results = max_results
# maps letters to word frequencies
self.letter_map = collections.defaultdict(collections.Counter)
# maps lowercase acronym to (acronym, expansion) tuples
self.acronyms = collections.defaultdict(set)
# counts frequencies of pairs of words TODO: lowercase
self.word_pair_map = collections.defaultdict(collections.Counter)
# Utils for saving and loading to a file
@property
def _file_name(self):
return os.path.join(
INDEX_FOLDER, '%s.index' % hashlib.md5(
'%s#%s' % (self.query, self.max_results)).hexdigest())
def save(self):
if not os.path.exists(INDEX_FOLDER):
os.mkdir(INDEX_FOLDER)
pickle.dump(self, open(self._file_name, 'wb'))
def already_saved(self):
"""Return whether or not this index already exists on disk."""
return os.path.isfile(self._file_name)
def load(self):
"""Return a new Index from file."""
return pickle.load(open(self._file_name, 'rb'))
def _query_results(self):
# TODO: generator?
q_string = ' OR '.join(["cat:%s" % c for c in CATEGORIES.keys()])
if self.query:
q_string = "%s AND (%s)" % (self.query, q_string)
results = []
prev_results = []
with tqdm.tqdm(desc="Fetching results from arXiv",
total=self.max_results) as pbar:
for i in range(self.max_results / BATCH_SIZE + (
1 if self.max_results % BATCH_SIZE else 0)):
start = i * BATCH_SIZE
num = min(BATCH_SIZE, self.max_results - start)
failed_attempts = 0
new_results = []
max_failed_attempts = 2
while failed_attempts < max_failed_attempts:
new_results = arxiv.query(
search_query=q_string, start=start, max_results=num)
# Check to see if we found all results
if len(new_results) == num and new_results != prev_results:
prev_results = new_results
break
failed_attempts += 1
time.sleep(1)
results += new_results
pbar.update(len(new_results))
if failed_attempts >= max_failed_attempts:
break
return results
@staticmethod
def _words(s):
return re.findall(r'\w+', s)
@staticmethod
def _one_line(s):
return ' '.join(s.splitlines())
@staticmethod
def _title(result):
return Index._one_line(result['title'])
@staticmethod
def _abstract(result):
return Index._one_line(result['summary'])
@staticmethod
def _is_acronym(s, acr):
if not acr or not s:
return False
if not acr[0].isupper():
return False
if acr[0] != s[0].upper():
return False
return True
@staticmethod
def _acronyms(s):
results = []
# First, grab all phrases in parentheses
parens = re.finditer(r'\([^\)]+\)', s)
parens = [(m.start(), m.group()) for m in (parens if parens else [])]
for start, term in parens:
term = term[1:-1] # strip the parentheses
ws = Index._words(term)
            # First, check if the parenthetical is an expansion of the preceding word
if len(ws) > 1:
m = re.search(r'(\w+)[^\w]+$', s[:start])
if not m:
continue
preceeding_word = m.group(1)
if Index._is_acronym(term, preceeding_word):
results.append((preceeding_word, term))
# Next, check if this word is the acronym
if len(ws) == 1:
acr = ws[0]
fl = acr[0] # first letter
                # Grab the preceding 2x words, check each of them
preceeding_words = re.finditer(r'\w+', s[:start])
if not preceeding_words: # TODO: is this necessary?
continue
preceeding_words = list(preceeding_words)[::-1][:len(acr) * 2]
if not preceeding_words:
continue
last_word_end = (preceeding_words[0].start() +
len(preceeding_words[0].group()))
for m in preceeding_words:
w = m.group(0)
if not w or w[0].upper() != fl.upper():
continue
phrase = s[m.start():last_word_end]
# TODO: maybe break early or check all possibilities and
# break ties with scores, potentially try to see if
# unmatched letters are prefix of previous word
if Index._is_acronym(phrase, acr):
results.append((acr, phrase))
return results
@staticmethod
def _all_acronyms(query_results):
acrs = []
for result in query_results:
acrs += Index._acronyms(Index._title(result))
acrs += Index._acronyms(Index._abstract(result))
return acrs
def _add_word_pairs(self, s):
ws = self._words(s)
for i, w1 in enumerate(ws):
if i + 1 < len(ws):
w2 = ws[i+1]
self.word_pair_map[w1][w2] += 1
def _add_acronym(self, acr, exp):
# First, add the acronym to the map
self.acronyms[acr.lower()].add((acr, exp))
ws = self._words(exp)
taken = 0
for l in acr:
for w in ws[taken:]:
if w and w[0].upper() == l.upper():
self.letter_map[l][w] += 1
taken += 1
def build(self):
"""Queries the arXiv and builds the Index."""
results = self._query_results()
for a, e in self._all_acronyms(results):
self._add_acronym(a, e)
for r in results:
self._add_word_pairs(self._abstract(r))
def find_acronyms(self, acronym):
"""Finds all instances of acronym in the data."""
return self.acronyms[acronym.lower()]
@staticmethod
def _sample(counter):
return random.sample(list(counter.elements()), 1)[0]
@staticmethod
def _cap_words(words):
for i in range(len(words)):
w = words[i]
if w == w.lower() and (i == 0 or len(w) > 3):
words[i] = string.capwords(w)
return ' '.join(words)
def gen_acronym(self, acronym):
"""Randomly generates the given acronym using the Index."""
words = []
previous = None
for l in acronym:
possibilities = self.letter_map[l]
# Delete all the already used words
for w in words:
del possibilities[w]
if not possibilities:
continue
if previous is not None:
# TODO: intersect better ??? normalize and add ???
seeded = self.word_pair_map[previous] & possibilities
if seeded:
possibilities = seeded
previous = self._sample(possibilities)
words.append(previous)
return self._cap_words(words)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-q', '--query', type=str, nargs='?', default=None,
help="the keywords to search the arXiv for")
parser.add_argument('-r', '--max-results', type=int, nargs='?',
default=None,
help="maximum results to fetch from arXiv")
parser.add_argument('-p', '--print-all', action='store_true',
help="print all acronyms found")
parser.add_argument('-f', '--find', action='store_true',
help="finds instances of the acronym")
parser.add_argument('acronym', type=str, nargs='?', default=None,
metavar='A', help="the acronym to create")
args = parser.parse_args()
index = Index(query=args.query, max_results=args.max_results)
if index.already_saved():
index = index.load()
else:
index.build()
index.save()
if args.find:
for acr, exp in index.find_acronyms(args.acronym):
print acr, exp
elif args.print_all:
acrs = list(reduce(lambda a, b: a | b, index.acronyms.values(), set()))
acrs.sort(key=lambda x: x[0].lower())
for acr, exp in acrs:
print acr, exp
else:
print index.gen_acronym(args.acronym)
if __name__ == '__main__':
main()
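# Example invocations (illustrative only; assumes the script is saved as
# acronym.py -- the actual file name may differ, and the queries/acronyms
# below are made up):
#   python2 acronym.py CNN                                # generate an expansion for "CNN"
#   python2 acronym.py -q "reinforcement learning" -r 500 POMDP
#   python2 acronym.py -f GAN                             # list expansions of "GAN" found on arXiv
#   python2 acronym.py -p                                 # print every acronym in the index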
|
py | 7dfc050e4eb03e1f267d27d412a9c682563c0afb | from abc import ABC, abstractmethod
from collections import OrderedDict
from threading import Lock
from copy import deepcopy
from ..distributed import RpcGroup, debug_with_process
class OrderedServerBase(ABC): # pragma: no cover
"""
    Descendant classes of OrderedServer do not have to guarantee strong
    consistency; that is, even if :meth:`.OrderedServerBase.push` has
    returned True, the acknowledged push may still be discarded.
"""
@abstractmethod
def push(self, key, value, version, prev_version):
"""
Push a new ``version`` of ``value`` in ``key`` to the ordered server.
Note:
If ``version = prev_version`` then there is no order guarantee. But
you may exploit this feature.
Args:
key: Key.
value: value.
version: New version.
prev_version: Previous version.
Returns:
``True`` if success, and ``False`` if not.
"""
pass
@abstractmethod
def pull(self, key, version=None):
"""
Pull a value with the specified ``version`` in ``key``.
Args:
key: Key.
version: Target version, if ``None``, then the newest version
of value of key will be pulled.
Returns:
            ``None`` if the version is not found, has been auto-deleted, or the
            key is not found; otherwise a tuple of the value stored under the
            specified ``version`` in ``key`` and that ``version``.
"""
pass
class OrderedServerSimple(OrderedServerBase):
def __init__(self, server_name: str, group: RpcGroup):
self._push_service = server_name + "/_push_service"
self._pull_service = server_name + "/_pull_service"
self.group = group
def push(self, key, value, version, prev_version):
# DOC INHERITED
debug_with_process(
f"calling push service {self._push_service} "
f"on group [{self.group.group_name}]"
)
return self.group.registered_sync(
self._push_service, args=(key, value, version, prev_version)
)
def pull(self, key, version=None):
# DOC INHERITED
debug_with_process(
f"calling pull service {self._push_service} "
f"on group [{self.group.group_name}]"
)
return self.group.registered_sync(self._pull_service, args=(key, version))
class OrderedServerSimpleImpl:
"""
A simple key-value server, with strict ordered update
"""
def __init__(self, server_name: str, group: RpcGroup, version_depth: int = 1, **__):
"""
This init function must be only invoked on the runner process,
and the runner process must be a member process of ``group``.
Args:
server_name: Name of this server, used to registered
the server as a paired class of ``group``.
group: Rpc group where server locates.
server_runner: Name of the process serving the ordered server.
By default is the first member of the group.
version_depth: Storage depth of old versions of the same
key. If ``depth = 1``, then only the newest version
of the key will be saved.
"""
assert group.is_member()
assert version_depth > 0 and isinstance(version_depth, int)
self.server_name = server_name
self.group = group
self.storage = {}
self.lock = Lock()
self.version_depth = version_depth
# pair an accessor to group
self.group.pair(server_name, OrderedServerSimple(self.server_name, self.group))
self.group.register(server_name + "/_push_service", self._push_service)
self.group.register(server_name + "/_pull_service", self._pull_service)
def _push_service(self, key, value, version, prev_version):
success = False
with self.lock:
if key in self.storage:
ref = self.storage[key]
# Check previous version consistency.
if next(reversed(ref)) == prev_version:
ref[version] = value
success = True
if len(ref) > self.version_depth + 1:
ref.popitem(last=False)
else:
# Create a new key.
ref = self.storage[key] = OrderedDict()
ref[version] = value
success = True
return success
def _pull_service(self, key, version=None):
result = None
with self.lock:
if key in self.storage:
ref = self.storage[key]
# Try to find the target version.
if version is not None and version in ref:
result = (deepcopy(ref[version]), version)
# Find the newest version.
elif version is None:
version = next(reversed(ref))
result = (deepcopy(ref[version]), version)
return result
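# Illustrative sketch (not part of the library): the ordered-update rule that
# ``OrderedServerSimpleImpl._push_service`` enforces, replayed on a plain
# OrderedDict so it runs without an RpcGroup. A push is accepted only when
# ``prev_version`` matches the newest stored version of the key (or the key
# is brand new); ``version_depth`` trimming is omitted for brevity.
if __name__ == "__main__":
    history = OrderedDict()  # version -> value for a single key
    def try_push(value, version, prev_version):
        if history and next(reversed(history)) != prev_version:
            return False  # stale prev_version: reject, preserving the order guarantee
        history[version] = value
        return True
    print(try_push("a", version=1, prev_version=0))  # True  (new key)
    print(try_push("b", version=2, prev_version=1))  # True  (in-order update)
    print(try_push("c", version=5, prev_version=3))  # False (out-of-order update)
    print(next(reversed(history)))                   # 2 -> newest accepted version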
|
py | 7dfc05113f8f7805f49d82930b3132425394ab0b | import json
import pandas as pd
from pyfmpcloud import settings
from urllib.request import urlopen
def rss_feed():
"""RSS Feed API from https://fmpcloud.io/documentation#rssFeed
Returns:
Returns any filings of the day over the last week
"""
urlroot = settings.get_urlroot()
apikey = settings.get_apikey()
localurl = "rss_feed?apikey="
url = urlroot + localurl + apikey
response = urlopen(url)
data = response.read().decode("utf-8")
return json.loads(data)
def balance_sheet(ticker, period = 'annual', ftype = 'full'):
"""Balance sheet API from https://fmpcloud.io/documentation#balanceSheet
Input:
ticker : ticker for which we need the balance sheet values
period : 'annual', 'quarter'. Periodicity of requested balance sheet. Defaults to annual
ftype : 'full', 'growth'. Defines input sheet type. Defaults to full.
Returns:
Balance sheet info for selected ticker
"""
urlroot = settings.get_urlroot()
apikey = settings.get_apikey()
typeurl = ''
try:
if ftype == 'full':
typeurl = 'balance-sheet-statement/'
elif ftype == 'growth':
typeurl = 'balance-sheet-statement-growth/'
# elif ftype == 'short':
# typeurl = 'balance-sheet-statement-shorten/'
# elif ftype == 'growth-short':
# typeurl = 'balance-sheet-statement-growth-shorten/'
except KeyError:
print('Balance sheet type not correct')
    url = urlroot + typeurl + ticker.upper() + "?" + "period=" + period + "&apikey=" + apikey
data = safe_read_json(url)
return data
def income_statement(ticker, period = 'annual', ftype = 'full'):
"""Income statement API from https://fmpcloud.io/documentation#incomeStatement
Input:
ticker : ticker for which we need the income statement
period : 'annual', 'quarter'. Periodicity of requested balance sheet. Defaults to annual
ftype : 'full', 'growth'. Defines input sheet type. Defaults to full.
Returns:
Income statement info for selected ticker
"""
urlroot = settings.get_urlroot()
apikey = settings.get_apikey()
typeurl = ''
try:
if ftype == 'full':
typeurl = 'income-statement/'
elif ftype == 'growth':
typeurl = 'income-statement-growth/'
# elif bstype == 'short':
# typeurl = 'income-statement-shorten/'
# elif bstype == 'growth-short':
# typeurl = 'income-statement-growth-shorten/'
except KeyError:
raise KeyError('Income statement type not correct')
url = urlroot + typeurl + ticker.upper() + "?" + "period=" + period + "&apikey=" + apikey
return safe_read_json(url)
def cash_flow_statement(ticker, period = 'annual', ftype = 'full'):
"""Cash Flow Statement API from https://fmpcloud.io/documentation#cashFlowStatement
Input:
ticker : ticker for which we need the cash flow statement
period : 'annual', 'quarter'. Periodicity of requested balance sheet. Defaults to annual
ftype : 'full', 'growth'. Defines input sheet type. Defaults to full.
Returns:
Income statement info for selected ticker
"""
urlroot = settings.get_urlroot()
apikey = settings.get_apikey()
typeurl = ''
try:
if ftype == 'full':
typeurl = 'cash-flow-statement/'
elif ftype == 'growth':
typeurl = 'cash-flow-statement-growth/'
# elif bstype == 'short':
# typeurl = 'income-statement-shorten/'
# elif bstype == 'growth-short':
# typeurl = 'income-statement-growth-shorten/'
except KeyError:
raise KeyError('Cash Flow Statement type not correct')
url = urlroot + typeurl + ticker.upper() + "?" + "period=" + period + "&apikey=" + apikey
return safe_read_json(url)
def financial_ratios(ticker, period = 'annual', ttm = False):
"""Financial Ratios API from https://fmpcloud.io/documentation#financialRatios
Input:
ticker : ticker for which we need the financial ratios
period : 'annual', 'quarter'. Periodicity of requested balance sheet. Defaults to annual
ttm: trailing twelve months financial ratios. Default is False
Returns:
Financial ratios info for selected ticker
"""
urlroot = settings.get_urlroot()
apikey = settings.get_apikey()
if ttm:
typeurl = "ratios-ttm/"
else:
typeurl = "ratios/"
url = urlroot + typeurl + ticker.upper() + "?" + "period=" + period + "&apikey=" + apikey
return safe_read_json(url)
def key_metrics(ticker, period = 'annual'):
"""Key Metrics API from https://fmpcloud.io/documentation#keyMetrics
Input:
ticker : ticker for which we need the key metrics
period : 'annual', 'quarter'. Periodicity of requested balance sheet. Defaults to annual
Returns:
Key metrics info for selected ticker
"""
urlroot = settings.get_urlroot()
apikey = settings.get_apikey()
typeurl = "key-metrics/"
url = urlroot + typeurl + ticker.upper() + "?" + "period=" + period + "&apikey=" + apikey
response = urlopen(url)
data = response.read().decode("utf-8")
return safe_read_json(data)
def enterprise_value(ticker, period = 'annual'):
"""Enterprise value API from https://fmpcloud.io/documentation#enterpriseValue
Input:
ticker : ticker for which we need the enterprise value
period : 'annual', 'quarter'. Periodicity of requested balance sheet. Defaults to annual
Returns:
Enterprise value info for selected ticker
"""
urlroot = settings.get_urlroot()
apikey = settings.get_apikey()
typeurl = "enterprise-values/"
url = urlroot + typeurl + ticker.upper() + "?" + "period=" + period + "&apikey=" + apikey
return safe_read_json(url)
def financial_statements_growth(ticker, period = 'annual'):
"""Financial Statements Growth API from https://fmpcloud.io/documentation#financialStatementGrowth
Input:
ticker : ticker for which we need the financial growth
period : 'annual', 'quarter'. Periodicity of requested balance sheet. Defaults to annual
Returns:
Financial Statements Growth info for selected ticker
"""
urlroot = settings.get_urlroot()
apikey = settings.get_apikey()
typeurl = "financial-growth/"
url = urlroot + typeurl + ticker.upper() + "?" + "period=" + period + "&apikey=" + apikey
return safe_read_json(url)
def dcf(ticker, history = 'today'):
"""Discounted Cash Flow Valuation API from https://fmpcloud.io/documentation#dcf
Input:
ticker : ticker for which we need the dcf
history: 'today','daily', 'quarter', 'annual'. Periodicity of requested DCF valuations. Defaults to single value of today
Returns:
Discounted Cash Flow Valuation info for selected ticker
"""
urlroot = settings.get_urlroot()
apikey = settings.get_apikey()
try:
if history == 'today':
typeurl = 'discounted-cash-flow/'
url = urlroot + typeurl + ticker.upper() + "?" + "apikey=" + apikey
elif history == 'daily':
typeurl = 'historical-daily-discounted-cash-flow/'
url = urlroot + typeurl + ticker.upper() + "?" + "apikey=" + apikey
elif history == 'annual':
typeurl = 'historical-discounted-cash-flow-statement/'
url = urlroot + typeurl + ticker.upper() + "?" + "apikey=" + apikey
elif history == 'quarter':
typeurl = 'historical-discounted-cash-flow-statement/'
url = urlroot + typeurl + ticker.upper() + "?" + "period=" + history + "&apikey=" + apikey
except KeyError:
raise KeyError('Discounted Cash Flow history requested not correct. ' + history + ' is not an accepted key')
return safe_read_json(url)
def market_capitalization(ticker, history = 'today'):
"""Market Capitalization API from https://fmpcloud.io/documentation#marketCapitalization
Input:
ticker : ticker for which we need the Market Cap
history: 'today','daily'. Periodicity of requested Market Caps. Defaults to single value of today
Returns:
Market Cap info for selected ticker
"""
urlroot = settings.get_urlroot()
apikey = settings.get_apikey()
try:
if history == 'today':
typeurl = 'market-capitalization/'
elif history == 'daily':
typeurl = 'historical-market-capitalization/'
except KeyError:
print('Market Cap history requested not correct')
url = urlroot + typeurl + ticker.upper() + "?" + "apikey=" + apikey
return safe_read_json(url)
def rating(ticker, history = 'today'):
"""Rating API from https://fmpcloud.io/documentation#rating
Input:
ticker : ticker for which we need the rating info
history: 'today','daily'. Periodicity of requested ratings. Defaults to single value of today
Returns:
rating info for selected ticker
"""
urlroot = settings.get_urlroot()
apikey = settings.get_apikey()
try:
if history == 'today':
typeurl = 'rating/'
elif history == 'daily':
typeurl = 'historical-rating/'
except KeyError:
print('Rating history requested not correct')
url = urlroot + typeurl + ticker.upper() + "?" + "apikey=" + apikey
return safe_read_json(url)
def stock_screener(mcgt = None, mclt = None, bgt = None, blt = None, divgt = None, divlt = None, volgt = None, vollt = None, sector = None, limit = 100):
"""Stock Screener API from https://fmpcloud.io/documentation#rating
Input:
mcgt: stocks with market cap greater than this value
mclt: stocks with market cap less than this value
bgt: stocks with beta greater than this value
blt: stocks with beta less than this value
divgt: stock with dividends per share greater than this value
divlt: stocks with dividends per share less than this value
volgt: stocks with average trading volume greater than this value
vollt: stocks with average trading volume less than this value
        sector: stocks within this sector
limit: number of return results
Returns:
List of stocks meeting the screening criteria
"""
urlroot = settings.get_urlroot()
apikey = settings.get_apikey()
urlss = 'stock-screener?'
urlbase = urlroot + urlss
url = urlroot + urlss
if sector is not None:
urlsector = 'sector=' + sector #API call adds the %20 automatically
url = url + urlsector
if mcgt is not None:
urlmcgt = "marketCapMoreThan=" + str(mcgt)
if url == urlbase:
url = url + urlmcgt
else:
url = url + '&' + urlmcgt
if mclt is not None:
urlmclt = "marketCapLowerThan=" + str(mclt)
if url == urlbase:
url = url + urlmclt
else:
url = url + '&' + urlmclt
if bgt is not None:
urlbgt = "betaMoreThan=" + str(bgt)
if url == urlbase:
url = url + urlbgt
else:
url = url + '&' + urlbgt
if blt is not None:
urlblt = "betaLowerThan=" + str(blt)
if url == urlbase:
url = url + urlblt
else:
url = url + '&' + urlblt
if divgt is not None:
urldivgt = "dividendMoreThan=" + str(divgt)
if url == urlbase:
url = url + urldivgt
else:
url = url + '&' + urldivgt
if divlt is not None:
urldivlt = "dividendLowerThan=" + str(divlt)
if url == urlbase:
url = url + urldivlt
else:
url = url + '&' + urldivlt
if volgt is not None:
urlvolgt = "volumeMoreThan=" + str(volgt)
if url == urlbase:
url = url + urlvolgt
else:
url = url + '&' + urlvolgt
if vollt is not None:
urlvollt = "volumeLowerThan=" + str(vollt)
if url == urlbase:
url = url + urlvollt
else:
url = url + '&' + urlvollt
try:
if url != urlbase:
url = url + '&limit=' + str(limit) +'&apikey=' + apikey
    except ValueError:
        print('Please check screening values provided. Exiting')
url = "20%".join(url.split(" "))
return safe_read_json(url)
def safe_read_json(data):
if (data.find("Error Message") != -1):
raise Exception(data[20:-3])
else:
return pd.read_json(data)
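# Illustrative usage sketch (requires network access and a valid fmpcloud API
# key; `settings.set_apikey` is assumed to exist in the companion settings
# module, "AAPL" is just an example ticker, and the module is imported here
# under the name used by the published package -- adjust if the local file
# name differs):
#
#   from pyfmpcloud import settings, company_valuation as cv
#   settings.set_apikey("YOUR_API_KEY")
#   bs = cv.balance_sheet("AAPL", period="quarter")           # quarterly balance sheets
#   ratios = cv.financial_ratios("AAPL", ttm=True)            # trailing-twelve-month ratios
#   screened = cv.stock_screener(mcgt=1e9, sector="Technology", limit=20)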
|
py | 7dfc0538afad866de5f21bf662e0bfcecbcfb9b6 | #!/usr/bin/env python
#
# An example that presents CAPTCHA tests in a web environment
# and gives the user a chance to solve them. Run it, optionally
# specifying a port number on the command line, then point your web
# browser at the given URL.
#
from Captcha.Visual import Tests
from Captcha import Factory
import BaseHTTPServer, urlparse, sys
class RequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
def do_GET(self):
scheme, host, path, parameters, query, fragment = urlparse.urlparse(self.path)
# Split the path into segments
pathSegments = path.split('/')[1:]
# Split the query into key-value pairs
args = {}
for pair in query.split("&"):
if pair.find("=") >= 0:
key, value = pair.split("=", 1)
args.setdefault(key, []).append(value)
else:
args[pair] = []
# A hack so it works with a proxy configured for VHostMonster :)
if pathSegments[0] == "vhost":
pathSegments = pathSegments[3:]
if pathSegments[0] == "":
self.handleRootPage(args.get('test', Tests.__all__)[0])
elif pathSegments[0] == "images":
self.handleImagePage(pathSegments[1])
elif pathSegments[0] == "solutions":
self.handleSolutionPage(pathSegments[1], args['word'][0])
else:
self.handle404()
def handle404(self):
self.send_response(404)
self.send_header("Content-Type", "text/html")
self.end_headers()
self.wfile.write("<html><body><h1>No such resource</h1></body></html>")
def handleRootPage(self, testName):
self.send_response(200)
self.send_header("Content-Type", "text/html")
self.end_headers()
test = self.captchaFactory.new(getattr(Tests, testName))
# Make a list of tests other than the one we're using
others = []
for t in Tests.__all__:
if t != testName:
others.append('<li><a href="/?test=%s">%s</a></li>' % (t,t))
others = "\n".join(others)
self.wfile.write("""<html>
<head>
<title>PyCAPTCHA Example</title>
</head>
<body>
<h1>PyCAPTCHA Example</h1>
<p>
<b>%s</b>:
%s
</p>
<p><img src="/images/%s"/></p>
<p>
<form action="/solutions/%s" method="get">
Enter the word shown:
<input type="text" name="word"/>
</form>
</p>
<p>
Or try...
<ul>
%s
</ul>
</p>
</body>
</html>
""" % (test.__class__.__name__, test.__doc__, test.id, test.id, others))
def handleImagePage(self, id):
test = self.captchaFactory.get(id)
if not test:
return self.handle404()
self.send_response(200)
self.send_header("Content-Type", "image/jpeg")
self.end_headers()
test.render().save(self.wfile, "JPEG")
def handleSolutionPage(self, id, word):
test = self.captchaFactory.get(id)
if not test:
return self.handle404()
if not test.valid:
# Invalid tests will always return False, to prevent
# random trial-and-error attacks. This could be confusing to a user...
result = "Test invalidated, try another test"
elif test.testSolutions([word]):
result = "Correct"
else:
result = "Incorrect"
self.send_response(200)
self.send_header("Content-Type", "text/html")
self.end_headers()
self.wfile.write("""<html>
<head>
<title>PyCAPTCHA Example</title>
</head>
<body>
<h1>PyCAPTCHA Example</h1>
<h2>%s</h2>
<p><img src="/images/%s"/></p>
<p><b>%s</b></p>
<p>You guessed: %s</p>
<p>Possible solutions: %s</p>
<p><a href="/">Try again</a></p>
</body>
</html>
""" % (test.__class__.__name__, test.id, result, word, ", ".join(test.solutions)))
def main(port):
print "Starting server at http://localhost:%d/" % port
handler = RequestHandler
handler.captchaFactory = Factory()
BaseHTTPServer.HTTPServer(('', port), RequestHandler).serve_forever()
if __name__ == "__main__":
# The port number can be specified on the command line, default is 8080
if len(sys.argv) >= 2:
port = int(sys.argv[1])
else:
port = 8080
main(port)
### The End ###
|
py | 7dfc06e56faa83e42e8f636894bfe3c130e8afe9 | """
A Stage to load data from a PISA style hdf5 file into a PISA pi ContainerSet
"""
#TODO This class is becoming decreasingly "simple"! Make it into a more specific stage for our purposes and recreate a much simpler HDF5 file loader that is generic for any PISA task
from __future__ import absolute_import, print_function, division
import numpy as np
from pisa import FTYPE
from pisa.core.pi_stage import PiStage
from pisa.utils import vectorizer
from pisa.utils.profiler import profile
from pisa.core.container import Container
from pisa.core.events_pi import EventsPi
class simple_data_loader(PiStage):
"""
HDF5 file loader PISA Pi class
Parameters
----------
events_file : hdf5 file path
output from make_events, including flux weights
and Genie systematics coefficients
mc_cuts : cut expr
e.g. '(true_coszen <= 0.5) & (true_energy <= 70)'
data_dict : str of a dict
Dictionary to specify what keys from the hdf5 files to be loaded
under what name. Entries can be strings that point to the right
key in the hdf5 file or lists of keys, and the data will be
stacked into a 2d array.
neutrinos : bool
Flag indicating whether data events represent neutrinos
In this case, special handling for e.g. nu/nubar, CC vs NC, ...
fraction_events_to_keep : float
        Fraction of loaded events to use (used to downsample).
        Must be in range [0., 1.], or disable by setting to `None`.
        Default is None.
Notes
-----
Looks for `initial_weights` fields in events file, which will serve
as nominal weights for all events included.
No fields named `weights` may already be present.
"""
def __init__(self,
events_file,
mc_cuts,
data_dict,
neutrinos=True,
data=None,
params=None,
input_names=None,
output_names=None,
debug_mode=None,
input_specs=None,
calc_specs=None,
output_specs=None,
fraction_events_to_keep=None,
):
# instantiation args that should not change
self.events_file = events_file
self.mc_cuts = mc_cuts
self.data_dict = data_dict
self.neutrinos = neutrinos
self.fraction_events_to_keep = fraction_events_to_keep
# instead of adding params here, consider making them instantiation
# args so nothing external will inadvertently try to change
# their values
expected_params = ()
# created as ones if not already present
input_apply_keys = (
'initial_weights',
)
# copy of initial weights, to be modified by later stages
output_apply_keys = (
'weights',
)
# init base class
super(simple_data_loader, self).__init__(
data=data,
params=params,
expected_params=expected_params,
input_names=input_names,
output_names=output_names,
debug_mode=debug_mode,
input_specs=input_specs,
calc_specs=calc_specs,
output_specs=output_specs,
input_apply_keys=input_apply_keys,
output_apply_keys=output_apply_keys,
)
# doesn't calculate anything
if self.calc_mode is not None:
raise ValueError(
'There is nothing to calculate for this event loading service.'
' Hence, `calc_mode` must not be set.'
)
# check output names
if len(self.output_names) != len(set(self.output_names)):
raise ValueError(
'Found duplicates in `output_names`, but each name must be'
' unique.'
)
self.load_events()
self.apply_cuts_to_events()
def load_events(self):
'''Loads events from events file'''
# Create the events structure
self.evts = EventsPi(name='Events',neutrinos=self.neutrinos, fraction_events_to_keep=self.fraction_events_to_keep)
# Parse the variable mapping string if one exists
if self.data_dict is not None:
self.data_dict = eval(self.data_dict)
# Load the event file into the events structure
self.evts.load_events_file(
events_file=self.events_file,
variable_mapping=self.data_dict
)
#TODO Add option to define eventual binning here so that can cut events now that will be cut later anyway (use EventsPi.keep_inbounds)
def apply_cuts_to_events(self):
'''Just apply any cuts that the user defined'''
if self.mc_cuts:
self.evts = self.evts.apply_cut(self.mc_cuts)
def record_event_properties(self):
'''Adds fields present in events file and selected in `self.data_dict`
into containers for the specified output names. Also ensures the
presence of a set of nominal weights.
'''
# define which categories to include in the data
# user can manually specify what they want using `output_names`, or else just use everything
output_keys = self.output_names if len(self.output_names) > 0 else self.evts.keys()
# create containers from the events
for name in output_keys:
# make container
container = Container(name)
container.data_specs = 'events'
event_groups = self.evts.keys()
if name not in event_groups:
raise ValueError(
'Output name "%s" not found in events. Only found %s.'
% (name, event_groups)
)
# add the events data to the container
for key, val in self.evts[name].items():
container.add_array_data(key, val)
# create weights arrays:
# * `initial_weights` as starting point (never modified)
# * `weights` to be initialised from `initial_weights`
# and modified by the stages
# * user can also provide `initial_weights` in input file
#TODO Maybe add this directly into EventsPi
if 'weights' in container.array_data:
# raise manually to give user some helpful feedback
raise KeyError(
'Found an existing `weights` array in "%s"'
' which would be overwritten. Consider renaming it'
' to `initial_weights`.' % name
)
container.add_array_data(
'weights',
np.ones(container.size, dtype=FTYPE)
)
if 'initial_weights' not in container.array_data:
container.add_array_data(
'initial_weights',
np.ones(container.size, dtype=FTYPE)
)
# add neutrino flavor information for neutrino events
#TODO Maybe add this directly into EventsPi
if self.neutrinos :
# this determination of flavour is the worst possible coding, ToDo
nubar = -1 if 'bar' in name else 1
if name.startswith('nutau'):
flav = 2
elif name.startswith('numu'):
flav = 1
elif name.startswith('nue'):
flav = 0
else:
raise ValueError('Cannot determine flavour of %s'%name)
container.add_scalar_data('nubar', nubar)
container.add_scalar_data('flav', flav)
self.data.add_container(container)
# check created at least one container
if len(self.data.names) == 0:
raise ValueError(
'No containers created during data loading for some reason.'
)
# test
if self.output_mode == 'binned':
for container in self.data:
container.array_to_binned('weights', self.output_specs)
def setup_function(self):
'''Store event properties from events file at
service initialisation. Cf. `PiStage` docs.
'''
self.record_event_properties()
@profile
def apply_function(self):
'''Cf. `PiStage` docs.'''
# TODO: do we need following line? Isn't this handled universally
# by the base class (in PiStage's apply)?
self.data.data_specs = self.output_specs
# reset weights to initial weights prior to downstream stages running
for container in self.data:
vectorizer.set(container['initial_weights'],
out=container['weights'])
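# Illustrative sketch (not taken from the PISA sources): the kind of string
# that can be passed as `data_dict`. Keys name the fields exposed to later
# stages, values are HDF5 keys (or lists of keys to be stacked); the field
# names below are hypothetical.
#
#   data_dict = """{
#       'true_energy': 'true_energy',
#       'true_coszen': 'true_coszen',
#       'reco_energy': 'reco_energy',
#   }"""
#
# The string is eval()'d in `load_events`, and a cut expression such as
# '(true_coszen <= 0.5) & (true_energy <= 70)' (taken from the class
# docstring) is applied afterwards by `apply_cuts_to_events`.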
|
py | 7dfc06e6e01af8f50be29854079d3b9bb2d8bf40 | # -*- coding: utf-8 -*-
__author__ = "Lukas Pfeifenberger"
import numpy as np
import glob
import sys
import os
sys.path.append(os.path.abspath('../'))
from algorithms.audio_processing import *
# loader class for mono wav files, i.e. wsj0
class audio_loader(object):
# --------------------------------------------------------------------------
def __init__(self, config, set):
self.fs = config['fs']
self.wlen = config['wlen']
self.shift = config['shift']
self.samples = int(self.fs*config['duration'])
self.nfram = int(np.ceil( (self.samples-self.wlen+self.shift)/self.shift ))
self.nbin = int(self.wlen/2+1)
if set == 'train':
path = config['train_path']
elif set == 'test':
path = config['test_path']
elif set == 'eval':
path = config['eval_path']
else:
print('unknown set name: ', set)
quit(0)
self.file_list = glob.glob(path+'*.wav')
self.numof_files = len(self.file_list)
print('*** audio_loader found %d files in: %s' % (self.numof_files, path))
#-------------------------------------------------------------------------
def concatenate_random_files(self,):
x = np.zeros((self.samples,), dtype=np.float32)
n = 0
while n<self.samples:
f = np.random.choice(self.file_list)
s, fs = audioread(f)
length = s.shape[0]
n1 = min(n+length, self.samples)
x[n:n1] = s[0:n1-n]
n = n1
return x
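# Illustrative usage sketch (not part of the original file): a config dict with
# the keys this loader reads. Paths and values are made-up examples; the chosen
# path must contain mono wav files for `concatenate_random_files` to work.
#
#   example_config = {
#       'fs': 16000,        # sample rate in Hz
#       'wlen': 512,        # analysis window length in samples
#       'shift': 128,       # hop size in samples
#       'duration': 5.0,    # clip length in seconds
#       'train_path': '/data/wsj0/train/',
#       'test_path': '/data/wsj0/test/',
#       'eval_path': '/data/wsj0/eval/',
#   }
#   loader = audio_loader(example_config, set='train')
#   clip = loader.concatenate_random_files()   # float32 array of fs*duration samples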
|
py | 7dfc0764c96fc923cf03dfd76691214876e37147 | # Oxi 08/09/2021 - 05/02/2022 (CURRENT)
import math
import tkinter as tk
import tkinter.filedialog
from tkinter import ttk
from time import sleep
import time
import os
import subprocess
import sys
# Install Vector2D.py if it has not been installed.
try:
from vector2d import Vector2D
except ImportError:
    os.system(f'python -m pip install vector2d.py')
    from vector2d import Vector2D  # retry the import once the package is installed
import platform
import random
# ------------[CONFIG VARS]------------
# EXPERIMENTAL
intercollision = False
args = sys.argv
if "-configured" not in args and False:
DETACHED_PROCESS = 8
subprocess.Popen(f'python "{os.getcwd()}/options.py"', creationflags=DETACHED_PROCESS, close_fds=True)
sleep(0.5)
os._exit(1)
else:
if "-intercollision" in args or True:
intercollision = True
# ------------[CONFIG VARS]------------
window = tk.Tk()
window.title("TKinter Physics Sim - V2")
window.geometry("1920x1080")
canvas = tk.Canvas(window, width=1000, height=1000)
canvas.pack(fill="both", expand=True)
def resize(event):
global canvas
w,h = event.width, event.height-80
canvas.config(width=w, height=h)
window.bind('<Configure>', resize)
# ------------[SETTINGS]------------
# Physics
gravity = 2000
numIterations = 2
weakStickStrength = 25
# Display
circleRadius = 5
stickThickness = 3
# ------------[SETTINGS]------------
# ------------[DATA]------------
leftMouseDown = False
rightMouseDown = False
middleMouseDown = False
mouseX = 0
mouseY = 0
lastFrameTime = (time.time())
prevPoint = 0
snapResolution=10
simNow = False
currentTempStick = 0
shiftHeld = False
heldPoint = 0
grabPoint = 0
pauseSim = True
statusBar = 0
statusText = "Ready"
canClick = False
gridX=8
gridY=8
windowCollide = True
currentFile = ""
simColour = True
dragDeleting = False
lastMousePos = Vector2D.Zero()
selectedStick = 0
camPos = Vector2D.Zero()
camScale = 1
# ------------[DATA]------------
# ------------[GUI DATA]------------
grav=0
iters=0
weakstrength=0
snapresolution=0
gridx=0
gridy=0
simparampopup=0
gridparampopup=0
snapparampopup=0
controlsPopup=0
savepromptpopup=0
savepromptreturn=None
menubar=0
# ------------[GUI DATA]------------
# ------------[STORAGE]------------
pointsBeforeSim = []
objectPointsBeforeSim = []
sticksBeforeSim = []
points = []
objectPoints = []
sticks = []
# ------------[STORAGE]------------
# ------------[CLASSES]------------
class Point(object):
def __init__(self, pos, tlocked, render=True, join=True, tsave=True):
global canvas, circleRadius, points, camPos
self.position = Point.SnapPosition(pos)
self.previousPosition = Point.SnapPosition(pos)
self.locked = tlocked
self.references = []
self.save = tsave
colour = "black"
if tlocked:
colour = "pink"
if render:
self.renderObject = canvas.create_oval(self.position.x-circleRadius-camPos.x, self.position.y-circleRadius-camPos.y, self.position.x+circleRadius-camPos.x, self.position.y+circleRadius-camPos.y, fill=colour)
canvas.tag_raise(self.renderObject)
if join:
points.append(self)
def Move(self, pos):
if not self.locked:
self.position = pos
def ToggleLock(self):
global canvas
self.locked = not self.locked
colour = "black"
if self.locked:
colour = "pink"
canvas.itemconfigure(self.renderObject, fill=colour)
def Remove(self):
global canvas, points
if hasattr(self, 'renderObject'):
canvas.delete(self.renderObject)
refIndex = 0
referencesCopy = self.references.copy()
while refIndex < len(referencesCopy):
referencesCopy[refIndex].Remove()
refIndex+=1
if self in points:
points.remove(self)
def Parse(self):
global camPos
txt = ""
dataCache = [self.position.x-camPos.x, self.position.y-camPos.y, int(self.locked)]
for data in dataCache:
txt += str(data)+ ","
return txt[:-1]
@staticmethod
def SnapPosition(targetLoc):
global snapResolution
return (targetLoc//snapResolution) * snapResolution
def Simulate(self):
global gravity, windowCollide, camPos, intercollision
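        # Verlet-style integration: the offset from the previous position acts as the
        # implicit velocity, so clamping or damping positions also damps velocity.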
if not self.locked:
# Store previous position
posBefore = self.position
# Keep velocity from last update
posdelta = (self.position - self.previousPosition)
self.position = self.position + posdelta
# Calculate frame delta time
delta = max((time.time())-lastFrameTime, 1/120)
# Simulate Gravity based upon frame time
self.position.y += gravity * delta * delta
# Check for Window Collision enabled
if windowCollide:
# Apply drag if on window floor
if self.position.y > window.winfo_height()-30+camPos.y:
self.position -= posdelta / 20
if len(self.references) == 0:
# Clamp positions to window bounds
self.position.x = Clamp(self.position.x, 10+camPos.x, window.winfo_width()-10+camPos.x)
self.position.y = Clamp(self.position.y, 10+camPos.y, window.winfo_height()-30+camPos.y)
# Assign posBefore to previous position cache
self.previousPosition = posBefore
if intercollision:
self.InterCollision()
def InterCollision(self):
global circleRadius, points
raycast = Raycast(self.previousPosition, self.position)
veloA = Vector2D.Distance(self.position, self.previousPosition)
data = raycast.TracePoints(self)
if data:
veloB = Vector2D.Distance(data.obj.position, data.obj.previousPosition)
velo = veloA - veloB
velo /= 1.25
# POINT COLLISION
points[data.objIndex].Move((data.normal * -velo) + data.obj.position)
self.Move((data.normal * velo) + self.position) #((loc - data.raycast.start).getNormalised() * (circleRadius/2)) + loc
#self.previousPosition = data.loc
data = raycast.TraceSticks(self.references)
if data:
# STICK COLLISION
veloB = (Vector2D.Distance(data.obj.pointA.position, data.obj.pointA.previousPosition) + Vector2D.Distance(data.obj.pointB.position, data.obj.pointB.previousPosition))/2
velo = veloA - veloB
velo /= 1.25
self.Move(data.hitLoc + (data.normal*velo))
veloVector = self.previousPosition - self.position
projVelo = Vector2D.Project(veloVector, (data.obj.pointB.position - data.obj.pointA.position).getNormalised())
self.previousPosition = data.hitLoc+projVelo
fullForce = data.normal*velo
aAlpha = Vector2D.InverseLerp(data.obj.pointB.position, data.obj.pointA.position, data.hitLoc)
bAlpha = 1-aAlpha
data.obj.pointA.Move(data.obj.pointA.position - (fullForce*aAlpha))
data.obj.pointB.Move(data.obj.pointB.position - (fullForce*bAlpha))
#self.previousPosition = data.hitLoc
class ObjectPoint(Point):
def __init__(self, pos, tlocked, render=True, join=True, tsave=True, towner=None, tnewSpawned=False):
global canvas, circleRadius, points
self.position = pos
self.previousPosition = pos
self.locked = tlocked
self.references = []
self.owner = towner
self.save = tsave
self.newlySpawned=tnewSpawned
self.raycast = Raycast(self.previousPosition, self.position)
colour = "red"
if render:
self.renderObject = canvas.create_oval(pos.x-circleRadius, pos.y-circleRadius, pos.x+circleRadius, pos.y+circleRadius, fill=colour)
canvas.tag_raise(self.renderObject)
if join:
objectPoints.append(self)
def Remove(self, skipRefs=False):
global canvas, objectPoints
if hasattr(self, 'renderObject'):
canvas.delete(self.renderObject)
if not skipRefs:
refIndex = 0
referencesCopy = self.references.copy()
while refIndex < len(referencesCopy):
referencesCopy[refIndex].Remove()
refIndex+=1
if not skipRefs:
if hasattr(self, 'owner'):
if self.owner:
self.owner.Remove()
if self in objectPoints:
objectPoints.remove(self)
def Parse(self):
global sticks, camPos
txt = ""
dataCache = [self.position.x-camPos.x, self.position.y-camPos.y, int(self.locked), sticks.index(self.owner)]
for data in dataCache:
txt += str(data)+ ","
return txt[:-1]
class Stick:
def __init__(self, tpointA, tpointB, tlength, tbackground, render=True, standin=False, tsave=True, tstickType=0):
global canvas, sticks, stickThickness, camPos
self.pointA = tpointA
self.pointB = tpointB
self.save = tsave
self.stickType = tstickType
if not standin:
self.pointA.references.append(self)
self.pointB.references.append(self)
self.length = tlength
self.background = tbackground
colour = self.CalcColour()
if not standin:
if render:
self.renderObject = canvas.create_line(self.pointA.position.x - camPos.x, self.pointA.position.y - camPos.y, self.pointB.position.x - camPos.x, self.pointB.position.y - camPos.y, width=stickThickness, fill=colour)
canvas.tag_lower(self.renderObject)
sticks.append(self)
    def Render(self):
        # Overridden by subclasses that need per-frame colour updates.
        pass
def CalcColour(self):
colour = "black"
if self.background:
colour = "gray89"
return colour
def Remove(self):
global canvas, sticks
if hasattr(self, 'renderObject'):
canvas.delete(self.renderObject)
if self in self.pointA.references:
self.pointA.references.remove(self)
if self in self.pointB.references:
self.pointB.references.remove(self)
if self in sticks:
sticks.remove(self)
def Parse(self):
global points, objectPoints
txt = ""
dataCache = [(points+objectPoints).index(self.pointA), (points+objectPoints).index(self.pointB), self.length, int(self.background)]
for data in dataCache:
txt += str(data) + ","
return txt[:-1]
def Simulate(self, onlyClamp=False):
global windowCollide, camPos
        # onlyClamp means the stick should skip its length constraint and only clamp its points to the window
# Calculate stick data
if not onlyClamp:
stickCenter = (self.pointA.position + self.pointB.position) / 2
stickDir = (self.pointA.position - self.pointB.position).getNormalised()
# If pointA is not a fixed point
if not self.pointA.locked:
# Set pointA's position to where the stick expects it to be.
if not onlyClamp:
self.pointA.Move(stickCenter + (stickDir * self.length/2))
# Clamp pointA to the window bounds
if windowCollide:
self.pointA.Move(Vector2D(Clamp(self.pointA.position.x, 10+camPos.x, window.winfo_width()-10+camPos.x),Clamp(self.pointA.position.y, 10+camPos.y, window.winfo_height()-30+camPos.y)))
else:
if self.pointB.locked:
self.length = Vector2D.Distance(self.pointA.position, self.pointB.position)
# If pointB is not a fixed point
if not self.pointB.locked:
# Set pointB's position to where the stick expects it to be.
if not onlyClamp:
self.pointB.Move(stickCenter - (stickDir * (self.length/2)))
# Clamp pointB to the window bounds
if windowCollide:
self.pointB.Move(Vector2D(Clamp(self.pointB.position.x, 10+camPos.x, window.winfo_width()-10+camPos.x),Clamp(self.pointB.position.y, 10+camPos.y, window.winfo_height()-30+camPos.y)))
class WeakStick(Stick):
def CalcColour(self):
rgbStrong = (25,200,25)
rgbWeak = (200, 25, 255)
dist = Vector2D.Distance(self.pointA.position, self.pointB.position)
maxLength = (self.length + weakStickStrength)
minLength = (self.length - weakStickStrength)
alpha = abs(Map(dist, minLength, maxLength, -1, 1))
lerped = RGBLerp(rgbStrong, rgbWeak, alpha)
return FromRGB(lerped)
def Render(self):
global canvas
self.colour = self.CalcColour()
canvas.itemconfigure(self.renderObject, fill=self.colour)
super().Render()
def Break(self):
# stickA + (normalize(stickB-stickA) * ((Distance(stickA, stickB)/2)+-10)
dist = Vector2D.Distance(self.pointA.position, self.pointB.position)/ 2
        stickDir = (self.pointB.position - self.pointA.position).getNormalised()
newPoint = Point((self.pointA.position + (stickDir * (dist-10))), False)
Stick(self.pointA, newPoint, Vector2D.Distance(self.pointA.position, newPoint.position), False)
newPoint = Point((self.pointA.position + (stickDir * (dist+10))), False)
Stick(self.pointB, newPoint, Vector2D.Distance(self.pointB.position, newPoint.position), False)
self.Remove()
def Simulate(self):
global weakStickStrength
dist = Vector2D.Distance(self.pointA.position, self.pointB.position)
maxLength = (self.length + weakStickStrength)
minLength = (self.length - weakStickStrength)
alpha = abs(Map(dist, minLength, maxLength, -1, 1))
#print(alpha)
if alpha >= 1:
self.Break()
super().Simulate()
class RopeStick(Stick):
def CalcColour(self):
if Vector2D.Distance(self.pointA.position, self.pointB.position) > self.length and simColour:
return "Blue"
else:
return "Purple"
def Simulate(self):
global canvas
if hasattr(self, 'renderObject'):
canvas.itemconfig(self.renderObject, fill=self.CalcColour())
currentLength = Vector2D.Distance(self.pointA.position, self.pointB.position)
super().Simulate(not currentLength > self.length)
class SpringyStick(Stick):
def Simulate(self, onlyClamp=False):
global windowCollide, camPos
spring = 0.1
# Calculate stick data
if not onlyClamp:
stickCenter = (self.pointA.position + self.pointB.position) / 2
stickDir = (self.pointA.position - self.pointB.position).getNormalised()
# If pointA is not a fixed point
if not self.pointA.locked:
# Set pointA's position to where the stick expects it to be.
if not onlyClamp:
self.pointA.position = Vector2D.Lerp(self.pointA.position, stickCenter + (stickDir * self.length/2), spring)
# Clamp pointA to the window bounds
if windowCollide:
self.pointA.position.x = Clamp(self.pointA.position.x, 10+camPos.x, window.winfo_width()-10+camPos.x)
self.pointA.position.y = Clamp(self.pointA.position.y, 10+camPos.y, window.winfo_height()-30+camPos.y)
# If pointB is not a fixed point
if not self.pointB.locked:
# Set pointB's position to where the stick expects it to be.
if not onlyClamp:
self.pointB.position = Vector2D.Lerp(self.pointB.position, stickCenter - (stickDir * self.length/2), spring)
# Clamp pointB to the window bounds
if windowCollide:
self.pointB.position.x = Clamp(self.pointB.position.x, 10+camPos.x, window.winfo_width()-10+camPos.x)
self.pointB.position.y = Clamp(self.pointB.position.y, 10+camPos.y, window.winfo_height()-30+camPos.y)
class SlideStick(Stick):
def __init__(self, tpointA, tpointB, tlength, tbackground, render=True):
global canvas, sticks, stickThickness
self.middlePoint = ObjectPoint((((tpointA.position + tpointB.position) / 2)).AsInt(), False, True, True, True)
self.stick1 = RopeStick(tpointA, self.middlePoint, tlength, False, True, False, False)
self.stick2 = RopeStick(tpointB, self.middlePoint, tlength, False, True, False, False)
self.save = True
self.pointA = tpointA
self.pointB = tpointB
self.length = tlength
self.background = tbackground
self.middlePoint.owner = self
self.pointA.references.append(self)
self.pointB.references.append(self)
self.middlePoint.references.append(self)
sticks.append(self)
def Simulate(self):
newDist = Vector2D.Distance(self.pointA.position, self.pointB.position)
self.stick1.length = newDist-10
self.stick2.length = newDist-10
self.stick1.Simulate()
self.stick2.Simulate()
middlePointVect = (self.middlePoint.position - self.pointA.position)
stickVect = (self.pointA.position - self.pointB.position)
# Project
projected = Vector2D.Project(middlePointVect, stickVect)
# middlePoint = pointA + projected
self.middlePoint.position = (self.pointA.position + projected)
super().Simulate()
def Remove(self):
global canvas, sticks
if hasattr(self, 'renderObject'):
canvas.delete(self.renderObject)
if self in self.pointA.references:
self.pointA.references.remove(self)
if self in self.pointB.references:
self.pointB.references.remove(self)
if self in self.middlePoint.references:
self.middlePoint.references.remove(self)
self.middlePoint.owner = None
self.middlePoint.Remove()
self.stick1.Remove()
self.stick2.Remove()
if self in sticks:
sticks.remove(self)
def ChangeMiddlePoint(self, point):
oldPoint = self.middlePoint
self.middlePoint = point
self.stick1.pointB = point
self.stick2.pointB = point
point.owner = self
if self in oldPoint.references:
oldPoint.references.remove(self)
oldPoint.owner = None
oldPoint.Remove(True)
def CalcMiddlePoint(self):
self.middlePoint.position = (((self.pointA.position + self.pointB.position) / 2)).AsInt()
class TempStick:
def __init__(self, tpointA, mousePos, tbackground, ttype):
global canvas, sticks, currentTempStick, stickThickness, camPos
self.pointA = tpointA
colour = "black"
if tbackground:
colour = "gray89"
if ttype == 1:
colour = "purple"
self.background = tbackground
self.renderObject = canvas.create_line(self.pointA.position.x - camPos.x, self.pointA.position.y - camPos.y, mousePos.x, mousePos.y, width=stickThickness, fill=colour)
currentTempStick = self
def Cleanup(self):
global currentTempStick, canvas
canvas.delete(currentTempStick.renderObject)
currentTempStick = 0
# ------------[CLASSES]------------
# ------------[UTIL CLASSES]----------
class RaycastData(object):
def __init__(self, obj, index, loc, raycast, normal):
self.obj = obj
self.objIndex = index
self.hitLoc = loc
self.raycast = raycast
self.normal = normal
def __str__(self):
return f"Raycast Data {{obj:{str(self.obj)}, loc:{str(self.hitLoc)}, raycast:{str(self.raycast)}}}"
class Raycast(object):
def __init__(self, _start, _stop):
self.start = _start
self.stop = _stop
self.dir = (_stop - _start).getNormalised()
def TracePoints(self, ignore=None):
global points, circleRadius
ray = self.stop - self.start
i = 0
for point in points:
if not point == ignore:
# Save performance by checking if point is valid for raycast
if min(Vector2D.Distance(self.start, point.position), Vector2D.Distance(self.stop, point.position)) <= Vector2D.Distance(self.start, self.stop)/2:
# Calculate delta to point
center = (self.start + self.stop) / 2
delta = point.position - center
# Project delta to ray
projected = Vector2D.Project(delta, ray)
# Clamp distance check to the ray
projected = projected.getNormalised() * min(projected.length, ray.length/2)
# Check if point distance to point on ray is smaller than circleRadius
if Vector2D.Distance(center + projected, point.position) <= circleRadius:
# Calculate normal from actual point to ray point.
normal = ((center + projected) - point.position).getNormalised()
# Calculate hit location by multiplying normal by circle radius and adding actual point position
hitLoc = (normal * circleRadius) + point.position
return RaycastData(point, i, hitLoc, self, normal)
i += 1
return None
def TraceSticks(self, ignores=None):
global sticks, circleRadius
rayDir = (self.stop - self.start).getNormalised()
i = 0
for stick in sticks:
            if ignores is None or stick not in ignores:
rayBox = Rect(self.start.x, self.stop.x, self.start.y, self.stop.y)
stickBox = Rect(stick.pointA.position.x, stick.pointB.position.x, stick.pointA.position.y, stick.pointB.position.y)
# Save performance by doing rough intersection check
if rayBox.IsIntersecting(stickBox):
inter = Vector2D.Intersection(self.start, self.stop, stick.pointA.position, stick.pointB.position)
if inter:
stickDir = (stick.pointB.position - stick.pointA.position).getNormalised()
normal = None
stickRight = Vector2D(stickDir.y, -stickDir.x)
dot = Vector2D.DotProduct(stickRight, rayDir)
#print(dot)
if dot <= 0:
# Clockwise perpendicular
normal = stickRight
else:
# Counter-Clockwise perpendicular
normal = Vector2D(-stickDir.y, stickDir.x)
return RaycastData(stick, i, inter, self, normal)
i += 1
return None
def __str__(self):
return f"Raycast {{start:{str(self.start)}, stop:{str(self.stop)}}}"
class Rect(object):
def __init__(self, left, right, top, bottom):
self.left = min(left, right)
self.right = max(left, right)
self.top = min(top, bottom)
self.bottom = max(bottom, top)
def IsIntersecting(self, other):
return self.left <= other.right and self.right >= other.left and self.top <= other.bottom and self.bottom >= other.top
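# Illustrative sanity check of the broad-phase overlap test used by Raycast.TraceSticks():
# two axis-aligned boxes intersect when both their x and y extents overlap,
# regardless of the order the corners are passed in.
assert Rect(0, 10, 0, 10).IsIntersecting(Rect(5, 15, 5, 15))
assert not Rect(0, 10, 0, 10).IsIntersecting(Rect(20, 30, 20, 30))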
# ------------[UTIL CLASSES]----------
# ------------[UTIL FUNCTIONS]------------
def GetClosestPoint(pos):
global points, objectPoints, camPos
closest = 0
closestDist = 1000000
for point in points:
if Vector2D.Distance(pos, point.position - camPos) < closestDist:
closest = point
closestDist = Vector2D.Distance(pos, point.position - camPos)
for point in objectPoints:
if Vector2D.Distance(pos, point.position - camPos) < closestDist:
closest = point
closestDist = Vector2D.Distance(pos, point.position - camPos)
return closest
def GetClosestPointThreshold(pos, thresh):
global points, objectPoints
closest = 0
closestDist = 1000000
for point in points:
if Vector2D.Distance(pos, point.position - camPos) < closestDist and Vector2D.Distance(pos, point.position - camPos) < thresh:
closest = point
closestDist = Vector2D.Distance(pos, point.position - camPos)
for point in objectPoints:
if Vector2D.Distance(pos, point.position - camPos) < closestDist and Vector2D.Distance(pos, point.position - camPos) < thresh:
closest = point
closestDist = Vector2D.Distance(pos, point.position - camPos)
return closest
def Clear(overrideClick=False):
global statusText, canClick, camPos
if canClick or overrideClick:
canClick = False
statusText = "Clearing"
        # Iterate over a copy because Point.Remove() mutates the points list
        for point in points.copy():
            point.Remove()
sleep(0.1)
camPos = Vector2D.Zero()
for point in points:
canvas.delete(point.renderObject)
for point in objectPoints:
canvas.delete(point.renderObject)
points.clear()
objectPoints.clear()
        # Iterate over a copy because Stick.Remove() mutates the sticks list
        for stick in sticks.copy():
            stick.Remove()
sleep(0.1)
for stick in sticks:
if hasattr(stick, 'renderObject'):
canvas.delete(stick.renderObject)
sticks.clear()
statusText = "Ready"
canClick = True
def Clamp(val, mn, mx):
if val > mx:
val = mx
if val < mn:
val = mn
return val
def CalculateMainCenter(width, height):
global window
x = window.winfo_x() + (window.winfo_width()/2) - (width/2)
y = window.winfo_y() + (window.winfo_height()/2) - (height/2)
return Vector2D(x,y)
def ToggleWindowCollision():
global windowCollide
windowCollide = not windowCollide
def StickType(stick):
typ = 0
if stick.__class__.__name__ == 'RopeStick':
typ = 1
if stick.__class__.__name__ == 'SlideStick':
typ = 2
if stick.__class__.__name__ == 'WeakStick':
typ = 3
if stick.__class__.__name__ == 'SpringyStick':
typ = 4
return typ
def StickTypeClass(classNum):
stickClass = None
if classNum == 0:
stickClass = Stick
elif classNum == 1:
stickClass = RopeStick
elif classNum == 2:
stickClass = SlideStick
elif classNum == 3:
stickClass = WeakStick
elif classNum == 4:
stickClass = SpringyStick
return stickClass
def StickTypeName(num):
stickName = ""
if num == 0:
stickName = "Fixed"
elif num == 1:
stickName = "Rope"
elif num == 2:
stickName = "Slide"
elif num == 3:
stickName = "Weak"
elif num == 4:
stickName = "Springy"
return stickName
def PointType(point):
typ = 0
if point.__class__.__name__ == 'ObjectPoint':
typ = 1
return typ
def PointTypeClass(classNum):
pointClass = None
if classNum == 0:
pointClass = Point
elif classNum == 1:
pointClass = ObjectPoint
return pointClass
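# Illustrative round trip between a joint's class and the numeric type id stored in
# save files: StickType() maps an instance to its id, StickTypeClass()/StickTypeName()
# map the id back to the class and its display name.
assert StickTypeClass(1) is RopeStick
assert StickTypeName(1) == "Rope"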
def FromRGB(rgb):
return "#%02x%02x%02x" % rgb
def FLerp(a, b, t):
return a + t * (b - a)
def RGBLerp(a, b, t):
rgb = [0,0,0]
i=0
while i <= 2:
rgb[i] = Clamp(int(FLerp(a[i], b[i], t)), 0, 255)
i+=1
return (rgb[0], rgb[1], rgb[2])
def Map(x, min1, max1, min2, max2):
return (x - min1) * (max2 - min2) / (max1 - min1) + min2
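# Illustrative check of the helpers above: Map() rescales 75 from [50, 100] into
# [0, 1] giving 0.5, and RGBLerp()/FromRGB() blend black and white to mid-grey.
assert Map(75, 50, 100, 0, 1) == 0.5
assert FromRGB(RGBLerp((0, 0, 0), (255, 255, 255), 0.5)) == "#7f7f7f"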
# ------------[UTIL FUNCTIONS]------------
# ------------[INPUT HANDLERS]------------
def Mouse1DownHandler(event):
global leftMouseDown, window, prevPoint, heldPoint, simNow, grabPoint, canClick, camPos
if not leftMouseDown and canClick:
mouseX = int(window.winfo_pointerx()-window.winfo_rootx())
mouseY = int(window.winfo_pointery()-window.winfo_rooty())
closest = GetClosestPointThreshold(Vector2D(mouseX,mouseY), circleRadius * 5)
if closest == 0:
newPoint = Point(Vector2D(mouseX + camPos.x, mouseY + camPos.y), False)
prevPoint = newPoint
else:
if closest.locked == True or simNow == False:
heldPoint = closest
elif simNow == True:
grabPoint = Point(Vector2D(mouseX + camPos.x, mouseY + camPos.y), True, False, False)
Stick(grabPoint, closest, Vector2D.Distance(grabPoint.position, closest.position), False, False)
leftMouseDown = True
def Mouse1UpHandler(event):
global leftMouseDown, heldPoint, grabPoint, canClick
if canClick:
if simNow == False and not heldPoint == 0:
heldPoint.previousPosition = heldPoint.position
refIndex = 0
referencesCopy = heldPoint.references.copy()
while refIndex < len(referencesCopy):
referencesCopy[refIndex].length = Vector2D.Distance(referencesCopy[refIndex].pointA.position, referencesCopy[refIndex].pointB.position)
refIndex += 1
if not grabPoint == 0:
grabPoint.Remove()
grabPoint = 0
heldPoint = 0
leftMouseDown = False
def Mouse2DownHandler(event, shift=False, alt=False):
global rightMouseDown, window, prevPoint, shiftHeld, canClick, selectedStick, dragDeleting, lastMousePos
stickType = 0
if shift:
stickType = 1
if not rightMouseDown and canClick and not alt:
mouseX = int(window.winfo_pointerx()-window.winfo_rootx())
mouseY = int(window.winfo_pointery()-window.winfo_rooty())
closest = GetClosestPoint(Vector2D(mouseX,mouseY))
TempStick(closest, Vector2D(mouseX,mouseY), shift, selectedStick)
if not rightMouseDown and canClick and alt:
dragDeleting = True
mouseX = int(window.winfo_pointerx()-window.winfo_rootx())
mouseY = int(window.winfo_pointery()-window.winfo_rooty())
lastMousePos = Vector2D(mouseX,mouseY)
rightMouseDown = True
def Mouse2UpHandler(event, shift=False, alt=False):
global rightMouseDown, currentTempStick, shiftHeld, canClick, dragDeleting
if canClick:
mouseX = int(window.winfo_pointerx()-window.winfo_rootx())
mouseY = int(window.winfo_pointery()-window.winfo_rooty())
closest = GetClosestPoint(Vector2D(mouseX,mouseY))
if currentTempStick:
if not closest == currentTempStick.pointA:
stickClass = None
stickClass = StickTypeClass(selectedStick)
newStick = stickClass(currentTempStick.pointA, closest, Vector2D.Distance(currentTempStick.pointA.position, closest.position), currentTempStick.background)
dragDeleting = False
if currentTempStick:
currentTempStick.Cleanup()
rightMouseDown = False
def MiddleMouseDownHandler(event):
global middleMouseDown, lastMousePos
middleMouseDown = True
mouseX = int(window.winfo_pointerx()-window.winfo_rootx())
mouseY = int(window.winfo_pointery()-window.winfo_rooty())
lastMousePos = Vector2D(mouseX,mouseY)
def MiddleMouseUpHandler(event):
global middleMouseDown
middleMouseDown = False
def ShiftDownHandler(event):
Mouse2DownHandler(event, True)
def ShiftUpHandler(event):
Mouse2UpHandler(event, True)
def AltDownHandler(event):
Mouse2DownHandler(event, False, True)
def AltUpHandler(event):
Mouse2UpHandler(event, False, True)
# ----[SIMULATION RESET]----
def SpaceHandler(event=None):
    # NOTE: this handler performs several redundant clears; without them, leftover points and sticks sometimes survive a reset. The root cause has not been tracked down, so the extra clears stay for now.
global canClick
if canClick:
global simNow, pauseSim, pointsBeforeSim, points, sticksBeforeSim, sticks, canvas, pauseSim, statusText, objectPointsBeforeSim, objectPoints
simNow = not simNow
pauseSim = False
if simNow:
statusText = "Simulating"
pointsBeforeSim.clear()
pointIndex = 0
while pointIndex < len(points):
if points[pointIndex].save:
                    pointsBeforeSim.append(Point(points[pointIndex].position, points[pointIndex].locked, False, False, False))
pointIndex +=1
sticksBeforeSim.clear()
stickIndex = 0
while stickIndex < len(sticks):
if sticks[stickIndex].save:
stickType = StickType(sticks[stickIndex])
pointAIndex = (points+objectPoints).index(sticks[stickIndex].pointA)
pointBIndex = (points+objectPoints).index(sticks[stickIndex].pointB)
sticksBeforeSim.append(Stick(pointAIndex, pointBIndex, sticks[stickIndex].length, sticks[stickIndex].background, False, True, False, stickType))
stickIndex += 1
objectPointsBeforeSim.clear()
objectPointIndex = 0
while objectPointIndex < len(objectPoints):
if objectPoints[objectPointIndex].save:
objectPoint = objectPoints[objectPointIndex]
objectPointsBeforeSim.append(ObjectPoint(objectPoint.position, objectPoint.locked, False, False, False, sticks.index(objectPoint.owner)))
objectPointIndex += 1
else:
if not pauseSim:
canClick = False
statusText = "Restoring"
Render()
Clear(True)
pointBeforeIndex = 0
while pointBeforeIndex < len(pointsBeforeSim):
points.append(Point(pointsBeforeSim[pointBeforeIndex].position, pointsBeforeSim[pointBeforeIndex].locked, True, False))
pointBeforeIndex += 1
percent = ((pointBeforeIndex) / (len(pointsBeforeSim) + len(sticksBeforeSim) + len(objectPointsBeforeSim)))*100
statusText = "Restoring " + str(int(percent)) + "%"
statusBar['text'] = statusText
window.update()
objectPointBeforeIndex = 0
while objectPointBeforeIndex < len(objectPointsBeforeSim):
objectPoint = objectPointsBeforeSim[objectPointBeforeIndex]
newObjectPoint = ObjectPoint(objectPoint.position, objectPoint.locked, True, True, True, objectPoint.owner, True)
objectPointBeforeIndex += 1
percent = ((len(pointsBeforeSim) + objectPointBeforeIndex) / (len(pointsBeforeSim) + len(sticksBeforeSim) + len(objectPointsBeforeSim)))*100
statusText = "Restoring " + str(int(percent)) + "%"
statusBar['text'] = statusText
window.update()
stickBeforeIndex = 0
while stickBeforeIndex < len(sticksBeforeSim):
stickClass = None
stickType = sticksBeforeSim[stickBeforeIndex].stickType
stickClass = StickTypeClass(stickType)
combined = points+objectPoints
stickClass(combined[sticksBeforeSim[stickBeforeIndex].pointA], combined[sticksBeforeSim[stickBeforeIndex].pointB], sticksBeforeSim[stickBeforeIndex].length, sticksBeforeSim[stickBeforeIndex].background)
stickBeforeIndex += 1
percent = ((stickBeforeIndex + len(pointsBeforeSim) + len(objectPointsBeforeSim)) / (len(pointsBeforeSim) + len(sticksBeforeSim) + len(objectPointsBeforeSim)))*100
statusText = "Restoring " + str(int(percent)) + "%"
statusBar['text'] = statusText
window.update()
for objectPoint in objectPoints:
if objectPoint.newlySpawned == True:
sticks[objectPoint.owner].ChangeMiddlePoint(objectPoint)
objectPoint.newlySpawned = False
statusText = "Ready"
canClick = True
# ----[SIMULATION RESET]-----
def LockHandler(event):
global canClick
if canClick:
mouseX = int(window.winfo_pointerx()-window.winfo_rootx())
mouseY = int(window.winfo_pointery()-window.winfo_rooty())
closest = GetClosestPoint(Vector2D(mouseX,mouseY))
closest.ToggleLock()
def DeleteHandler(event):
global heldPoint, canClick
if canClick:
mouseX = int(window.winfo_pointerx()-window.winfo_rootx())
mouseY = int(window.winfo_pointery()-window.winfo_rooty())
closest = GetClosestPoint(Vector2D(mouseX,mouseY))
heldPoint = 0
closest.Remove()
def GridSpawnHandler(event):
global canClick
if canClick:
canClick = False
#Spawns a connected grid
mouseX = int(window.winfo_pointerx()-window.winfo_rootx())
mouseY = int(window.winfo_pointery()-window.winfo_rooty())
previousYPoints = []
xIndex = 0
for xIndex in range(gridX):
currentYPoints = []
yIndex = 0
for yIndex in range(gridY):
currentYPoints.append(Point(Vector2D(mouseX + (xIndex*60), mouseY + (yIndex*60)), False))
Render()
stickClass = StickTypeClass(selectedStick)
if not yIndex == 0:
stickClass(currentYPoints[yIndex], currentYPoints[yIndex-1], Vector2D.Distance(currentYPoints[yIndex].position, currentYPoints[yIndex-1].position), False)
Render()
if not xIndex == 0:
stickClass(currentYPoints[yIndex], previousYPoints[yIndex], Vector2D.Distance(currentYPoints[yIndex].position, previousYPoints[yIndex].position), False)
Render()
previousYPoints = currentYPoints.copy()
currentYPoints.clear()
canClick = True
def PauseHandler(event):
global pauseSim, simNow, statusText, canClick
if canClick:
if simNow:
pauseSim = not pauseSim
if pauseSim:
statusText = "Paused"
else:
statusText = "Simulating"
def NewFile(contin=False, prompt=False):
global currentFile, gravity, numIterations, points
if not currentFile == "":
if not contin:
if prompt:
SavePrompt(NewFile)
else:
Clear()
currentFile = ""
gravity = 2000
numIterations = 2
else:
if len(points) > 0:
if not contin:
if prompt:
SavePrompt(NewFile)
else:
Clear()
currentFile = ""
gravity = 2000
numIterations = 2
else:
Clear()
currentFile = ""
gravity = 2000
numIterations = 2
def NewFileInst():
NewFile(False, True)
def CloseSave(contin=False, prompt=False):
global currentFile, window
if not currentFile == "":
if not contin:
if prompt:
SavePrompt(CloseSave)
else:
window.destroy()
os._exit(0)
else:
if len(points) > 0:
if not contin:
if prompt:
SavePrompt(CloseSave)
else:
window.destroy()
os._exit(0)
else:
window.destroy()
os._exit(0)
else:
window.destroy()
os._exit(0)
def CloseSaveInst():
CloseSave(False, True)
# ------------[INPUT HANDLERS]------------
# ------------[LOADING]------------
def SaveToFile(event=None, useCurrent=True, returnFunc=None):
global simNow, points, sticks, statusText, canClick, gravity, numIterations, currentFile
if canClick or returnFunc:
if not simNow:
canClick = False
path = os.getcwd()+'/Maps/'
if not os.path.exists(path):
os.mkdir(path)
file = None
if currentFile == "" or not useCurrent:
file = tk.filedialog.asksaveasfile(mode="w", filetypes=[('phys', '*.phys')], defaultextension=[('*.phys')], initialdir=path)
else:
file = open(currentFile, 'w')
if file:
currentFile = file.name
statusText = "Saving"
data = []
for point in points:
if point.save:
data.append(point.Parse()+'\n')
percent = ((points.index(point)) / (len(points) + len(sticks) + len(objectPoints)))*100
statusText = "Saving " + str(int(percent)) + "%"
Render()
data.append('=\n')
for stick in sticks:
if stick.save:
data.append(stick.Parse()+ ',' + str(StickType(stick)) + '\n')
percent = ((sticks.index(stick) + len(points)) / (len(points) + len(sticks) + len(objectPoints)))*100
statusText = "Saving " + str(int(percent)) + "%"
Render()
data.append('=\n')
for objectPoint in objectPoints:
if objectPoint.save:
data.append(objectPoint.Parse() + '\n')
percent = ((objectPoints.index(objectPoint) + len(sticks) + len(points)) / (len(points) + len(sticks) + len(objectPoints)))*100
statusText = "Saving " + str(int(percent)) + "%"
Render()
data.append('=\n')
data.append(str(gravity) + "," + str(numIterations))
file.writelines(data)
file.close()
statusText = "Ready"
if returnFunc:
returnFunc()
canClick = True
def SaveToFileNoCurrent():
SaveToFile(None, False)
def LoadFromFile(event=None):
global points, sticks, simNow, pauseSim, statusText, canClick, gravity, numIterations, currentFile, objectPoints, camPos
if canClick:
simNow = False
pauseSim = False
canClick = False
Clear(True)
pointsOffset = 0 #len(points+objectPoints)
path = os.getcwd()+'/Maps/'
if not os.path.exists(path):
os.mkdir(path)
file = tk.filedialog.askopenfile(mode="r", filetypes=[('phys', '*.phys')], defaultextension=[('*.phys')], initialdir=path)
if file:
statusText = "Loading"
currentFile = file.name
data = file.read()
segments = data.split('=')
pointList = segments[0].split('\n')
stickList = segments[1].split('\n')
objectPointList = segments[2].split('\n')
total = len(pointList) + len(stickList) + len(objectPointList)
for pointDataChunk in pointList:
pointData = pointDataChunk.split(',')
#print(pointData)
if len(pointData) == 3:
Point(Vector2D(int(pointData[0]), int(pointData[1])), bool(int(pointData[2])))
percent = ((pointList.index(pointDataChunk)) / (total))*100
statusText = "Loading " + str(int(percent)) + "%"
statusBar['text'] = statusText
window.update()
for objectPointDataChunk in objectPointList:
objectPointData = objectPointDataChunk.split(',')
if len(objectPointData) >= 3:
ObjectPoint(Vector2D(int(objectPointData[0]), int(objectPointData[1])), bool(int(objectPointData[2])), True, True, True, int(objectPointData[3]), True)
percent = ((objectPointList.index(objectPointDataChunk)+len(pointList)) / (total))*100
statusText = "Loading " + str(int(percent)) + "%"
statusBar['text'] = statusText
window.update()
for stickDataChunk in stickList:
stickData = stickDataChunk.split(',')
#print(stickData)
if len(stickData) == 5:
stickClass = StickTypeClass(int(stickData[4]))
combined = points+objectPoints
stickClass(combined[int(stickData[0])+pointsOffset], combined[int(stickData[1])+pointsOffset], float(stickData[2]), bool(int(stickData[3])))
percent = ((stickList.index(stickDataChunk)+len(pointList)+len(objectPointList)) / (total))*100
statusText = "Loading " + str(int(percent)) + "%"
statusBar['text'] = statusText
window.update()
for objectPoint in objectPoints:
if objectPoint.newlySpawned == True:
sticks[objectPoint.owner].ChangeMiddlePoint(objectPoint)
objectPoint.newlySpawned = False
settings = segments[3].split(',')
gravity = float(settings[0])
numIterations = int(settings[1])
canClick = True
statusText = "Ready"
# ------------[LOADING]------------
def SelectStick1(event):
global selectedStick
selectedStick = 0
def SelectStick2(event):
global selectedStick
selectedStick = 1
def SelectStick3(event):
global selectedStick
selectedStick = 2
def SelectStick4(event):
global selectedStick
selectedStick = 3
def SelectStick5(event):
global selectedStick
selectedStick = 4
# ------------[BINDS]------------
rightClickNum = "3"
altModifier = "Alt"
onMac = False
if platform.system() == 'Darwin':
rightClickNum = "2"
altModifier = "Option"
window.bind("<Control-ButtonPress-2>", MiddleMouseDownHandler)
window.bind("<Control-ButtonRelease-2>", MiddleMouseUpHandler)
onMac = True
window.bind("<ButtonPress-1>", Mouse1DownHandler)
window.bind("<ButtonRelease-1>", Mouse1UpHandler)
window.bind("<ButtonPress-" + rightClickNum + ">", Mouse2DownHandler)
window.bind("<ButtonRelease-" + rightClickNum + ">", Mouse2UpHandler)
if not onMac:
window.bind("<ButtonPress-2>", MiddleMouseDownHandler)
window.bind("<ButtonRelease-2>", MiddleMouseUpHandler)
window.bind("<space>", SpaceHandler)
window.bind("<Return>", LockHandler)
window.bind("r", DeleteHandler)
window.bind("g", GridSpawnHandler)
window.bind("p", PauseHandler)
window.bind("<Shift-ButtonPress-3>", ShiftDownHandler)
window.bind("<Shift-ButtonRelease-3>", ShiftUpHandler)
window.bind("<" + altModifier + "-ButtonPress-" + rightClickNum + ">", AltDownHandler)
window.bind("<" + altModifier + "-ButtonRelease-" + rightClickNum + ">", AltUpHandler)
window.bind("<Control-s>", SaveToFile)
window.bind("<Control-Shift-s>", SaveToFileNoCurrent)
window.bind("<Control-o>", LoadFromFile)
window.bind("<Control-n>", NewFileInst)
window.bind("1", SelectStick1)
window.bind("2", SelectStick2)
window.bind("3", SelectStick3)
window.bind("4", SelectStick4)
window.bind("5", SelectStick5)
# ------------[BINDS]------------
# ------------[SIMULATION]------------
def Simulate():
global points, objectPoints, sticks, lastFrameTime, numIterations, windowCollide
for point in points:
point.Simulate()
for point in objectPoints:
point.Simulate()
# Run through iterations to get physics to settle
for i in range(numIterations):
for stick in sticks:
stick.Simulate()
# ------------[SIMULATION]------------
# ------------[INTERACT]------------
def Interact():
global heldPoint, grabPoint, dragDeleting, lastMousePos, camPos
if not heldPoint == 0:
mouseX = int(window.winfo_pointerx()-window.winfo_rootx())
mouseY = int(window.winfo_pointery()-window.winfo_rooty())
heldPoint.position = Point.SnapPosition(Vector2D(mouseX + camPos.x, mouseY + camPos.y))
if not simNow:
for ref in heldPoint.references:
if ref.__class__.__name__ == "SlideStick":
ref.CalcMiddlePoint()
if not grabPoint == 0:
mouseX = int(window.winfo_pointerx()-window.winfo_rootx())
mouseY = int(window.winfo_pointery()-window.winfo_rooty())
grabPoint.position = Vector2D(mouseX + camPos.x, mouseY + camPos.y)
if dragDeleting:
mouseX = int(window.winfo_pointerx()-window.winfo_rootx())
mouseY = int(window.winfo_pointery()-window.winfo_rooty())
for stick in sticks:
if Vector2D.isIntersecting(lastMousePos, Vector2D(mouseX,mouseY), (stick.pointA.position - camPos), (stick.pointB.position - camPos)):
stick.Remove()
lastMousePos = Vector2D(mouseX,mouseY)
if middleMouseDown:
mouseX = int(window.winfo_pointerx()-window.winfo_rootx())
mouseY = int(window.winfo_pointery()-window.winfo_rooty())
camPos.x += lastMousePos.x - mouseX
camPos.y += lastMousePos.y - mouseY
lastMousePos = Vector2D(mouseX,mouseY)
# ------------[INTERACT]------------
# ------------[RENDER]------------
def Render():
global canvas, fpsText, lastFrameTime, currentTempStick, statusBar, statusText, window, currentFile, objectPoints, sticks, camPos
# Update each point and stick's location
for stick in sticks:
if hasattr(stick, 'renderObject'):
stick.Render()
canvas.coords(stick.renderObject, stick.pointA.position.x - camPos.x, stick.pointA.position.y - camPos.y, stick.pointB.position.x - camPos.x, stick.pointB.position.y - camPos.y)
for point in points:
canvas.coords(point.renderObject, point.position.x-circleRadius - camPos.x, point.position.y-circleRadius - camPos.y, point.position.x+circleRadius - camPos.x, point.position.y+circleRadius - camPos.y)
for point in objectPoints:
canvas.coords(point.renderObject, point.position.x-circleRadius - camPos.x, point.position.y-circleRadius - camPos.y, point.position.x+circleRadius - camPos.x, point.position.y+circleRadius - camPos.y)
# Update Statusbar
statusBar['text'] = statusText
mouseX = int(window.winfo_pointerx()-window.winfo_rootx())
mouseY = int(window.winfo_pointery()-window.winfo_rooty())
# Update temp stick if it exists
if not currentTempStick == 0:
canvas.coords(currentTempStick.renderObject, currentTempStick.pointA.position.x - camPos.x, currentTempStick.pointA.position.y - camPos.y, mouseX, mouseY)
# Update FPS Counter
canvas.itemconfigure(fpsText, text="FPS: " + str(math.floor((1/(max((time.time())-lastFrameTime,1/120))))) + " - Camera X: " + str(camPos.x) + ", Y: " + str(-camPos.y) + " - Mouse X: " + str(mouseX) + ", Y: " + str(mouseY))
canvas.itemconfigure(selectedStickText, text="Selected Joint Type (Numbers): " + StickTypeName(selectedStick))
# Update Title Bar
title = "TKinter Physics Sim - V2"
if not currentFile == "":
title += " - " + currentFile
window.title(title)
# Draw
window.update()
# ------------[RENDER]------------
# ------------[GUI FUNCTIONS]------------
def SimParamsEnter():
global grav, iters, gravity, numIterations, simparampopup, canClick, weakstrength, weakStickStrength
canClick = True
try:
simparampopup.destroy()
gravity = float(grav.get())
numIterations = int(iters.get())
weakStickStrength = int(weakstrength.get())
except Exception as e: print(e)
def GridParamsEnter():
global grav, gridx, gridy, gridX, gridY, gridparampopup, canClick
canClick = True
try:
gridparampopup.destroy()
gridX = int(gridx.get())
gridY = int(gridy.get())
except Exception as e: print(e)
def SnapParamsEnter():
global snapresolution, snapResolution, canClick, snapparampopup
canClick = True
try:
snapparampopup.destroy()
snapResolution = int(snapresolution.get())
except Exception as e: print(e)
def SnapParamsResolutionDefault():
global snapresolution, snapResolution
snapresolution.set('1')
snapResolution = 1
def SimParamsGravDefault():
global grav, gravity
grav.set('2000')
gravity = 2000
def SimParamsNumItersDefault():
global iters, numIterations
iters.set('2')
numIterations = 2
def SimParamsWeakStrengthDefault():
global weakstrength, weakStickStrength
weakstrength.set('25')
weakStickStrength = 25
def ControlsLoseFocus(event):
global controlsPopup
controlsPopup.focus_force()
def SavePromptSave():
global savepromptreturn, savepromptpopup
savepromptpopup.destroy()
SaveToFile(None, True, SavePromptSaveFinished)
def SavePromptSaveFinished():
global savepromptreturn
SavePrompt(savepromptreturn, True, True)
def SavePromptNoSave():
global savepromptreturn
SavePrompt(savepromptreturn, True, True)
def SavePromptCancel():
global savepromptreturn
SavePrompt(savepromptreturn, True, False)
# ------------[GUI FUNCTIONS]------------
# ------------[POPUPS]------------
def SavePrompt(returnFunc, returnNow=False, contin=False):
global savepromptreturn, savepromptpopup, canClick
savepromptreturn = returnFunc
if not returnNow:
canClick = False
global window
savepromptpopup = tk.Tk()
savepromptpopup.resizable(False, False)
#savepromptpopup.overrideredirect(True)
width=250
height=100
center = CalculateMainCenter(width, height)
savepromptpopup.geometry('%dx%d+%d+%d' % (width, height, center.x, center.y))
savepromptpopup.wm_title("Alert")
        label = ttk.Label(savepromptpopup, text="You will lose your work if you don't save!")
label.pack(side="top", expand=True, fill="none", pady=15)
save = ttk.Button(savepromptpopup, text="Save", command=SavePromptSave)
save.pack(side="left", expand=True, fill="none", pady=(0, 5))
dontsave = ttk.Button(savepromptpopup, text="Don't Save", command=SavePromptNoSave)
dontsave.pack(side="left", expand=True, fill="none", pady=(0, 5))
cancel = ttk.Button(savepromptpopup, text="Cancel", command=SavePromptCancel)
cancel.pack(side="left", expand=True, fill="none", pady=(0, 5))
savepromptpopup.protocol('WM_DELETE_WINDOW', SavePromptCancel)
else:
canClick = True
returnFunc(contin)
def InfoWindow():
global window
popup = tk.Tk()
popup.resizable(False, False)
center = CalculateMainCenter(260, 100)
popup.geometry('%dx%d+%d+%d' % (260, 100, center.x, center.y))
popup.wm_title("About")
label = ttk.Label(popup, text="TKinter-based Physics Simulator. Written by Oxi.")
label.pack(side="top", fill="x", pady=20)
B1 = ttk.Button(popup, text="Okay", command = popup.destroy)
B1.pack()
def SnapParamsWindow():
global window, snapparampopup, snapresolution, snapResolution
snapparampopup = tk.Tk()
snapparampopup.resizable(False, False)
#popup.overrideredirect(True)
width=215
height=60
center = CalculateMainCenter(width, height)
snapparampopup.geometry('%dx%d+%d+%d' % (width, height, center.x, center.y))
snapparampopup.wm_title("Snap Params")
snapresolution = tk.StringVar(snapparampopup, value=str(snapResolution))
tk.Label(snapparampopup, text="Grid Size:").grid(row=0, column=0)
tk.Entry(snapparampopup, textvariable=snapresolution, width=10).grid(row=0, column=1)
resolutionButton = ttk.Button(snapparampopup, text="<", command=SnapParamsResolutionDefault, width=3)
resolutionButton.grid(row=0, column=2)
button = ttk.Button(snapparampopup, text="Save", command=SnapParamsEnter)
button.grid(row=1, column=2)
snapparampopup.protocol('WM_DELETE_WINDOW', SnapParamsEnter)
def SimParamsWindow():
    global window, gravity, numIterations, grav, iters, simparampopup, weakstrength, weakStickStrength
simparampopup = tk.Tk()
simparampopup.resizable(False, False)
#popup.overrideredirect(True)
width=300
height=100
center = CalculateMainCenter(width, height)
simparampopup.geometry('%dx%d+%d+%d' % (width, height, center.x, center.y))
simparampopup.wm_title("Sim Params")
grav = tk.StringVar(simparampopup, value=str(gravity))
iters = tk.StringVar(simparampopup, value=str(numIterations))
weakstrength = tk.StringVar(simparampopup, value=str(weakStickStrength))
tk.Label(simparampopup, text="Gravity:").grid(row=0, column=0)
tk.Label(simparampopup, text="Iterations:").grid(row=1, column=0)
tk.Label(simparampopup, text="Weak-Stick Max Stretch:").grid(row=2, column=0)
tk.Entry(simparampopup, textvariable=grav).grid(row=0, column=1)
tk.Entry(simparampopup, textvariable=iters).grid(row=1, column=1)
tk.Entry(simparampopup, textvariable=weakstrength).grid(row=2, column=1)
gravButton = ttk.Button(simparampopup, text="<", command=SimParamsGravDefault, width=3)
gravButton.grid(row=0, column=2)
itersButton = ttk.Button(simparampopup, text="<", command=SimParamsNumItersDefault, width=3)
itersButton.grid(row=1, column=2)
strengthButton = ttk.Button(simparampopup, text="<", command=SimParamsWeakStrengthDefault, width=3)
strengthButton.grid(row=2, column=2)
button = ttk.Button(simparampopup, text="Save", command=SimParamsEnter)
button.grid(row=3, column=1)
simparampopup.protocol('WM_DELETE_WINDOW', SimParamsEnter)
def GridParamsWindow():
global window, gridx, gridy, gridX, gridY, gridparampopup
gridparampopup = tk.Tk()
gridparampopup.resizable(False, False)
#popup.overrideredirect(True)
width=215
height=60
center = CalculateMainCenter(width, height)
gridparampopup.geometry('%dx%d+%d+%d' % (width, height, center.x, center.y))
gridparampopup.wm_title("Grid Params")
gridx = tk.StringVar(gridparampopup, value=str(gridX))
gridy = tk.StringVar(gridparampopup, value=str(gridY))
tk.Label(gridparampopup, text="Amount:").grid(row=0, column=0)
tk.Entry(gridparampopup, textvariable=gridx, width=10).grid(row=0, column=1)
tk.Entry(gridparampopup, textvariable=gridy, width=10).grid(row=0, column=2)
button = ttk.Button(gridparampopup, text="Save", command=GridParamsEnter)
button.grid(row=1, column=2)
gridparampopup.protocol('WM_DELETE_WINDOW', GridParamsEnter)
def ControlsWindow():
global window, controlsPopup, canClick
canClick = False
controlsPopup = tk.Tk()
controlsPopup.resizable(False, False)
#controlsPopup.overrideredirect(True)
width=325
height=390
center = CalculateMainCenter(width, height)
controlsPopup.geometry('%dx%d+%d+%d' % (width, height, center.x, center.y))
controlsPopup.wm_title("Welcome")
    label = tk.Label(controlsPopup, text="TKinter Physics Sim v2 - Written by Oxi \n \n Controls: \n Click in empty space - Spawn Point \n Right click and drag from a point to another - Join Points \n \n Enter while hovering over point - Lock Point \n \n 1/2/3/4/5 - Select join type \n\n R - Delete closest point \n Alt + Right Click Drag - Slice joints \n \n G - Spawn Configurable Grid \n \n Space - Start/Stop Simulation \n P - Pause \n \n CTRL+S - Save \n CTRL+O - Open")
label.pack(side="top", fill="x", pady=20)
button = ttk.Button(controlsPopup, text="Continue", command=controlsPopup.destroy)
button.pack()
controlsPopup.bind("<FocusOut>", ControlsLoseFocus)
controlsPopup.attributes("-topmost", True)
controlsPopup.focus_force()
canClick = True
# ------------[POPUPS]------------
# ------------[GUI MENUBAR]------------
menubar = tk.Menu(window)
filemenu = tk.Menu(menubar, tearoff=0)
filemenu.add_command(label="New", command=NewFileInst)
filemenu.add_separator()
filemenu.add_command(label="Open", command=LoadFromFile)
filemenu.add_separator()
filemenu.add_command(label="Save", command=SaveToFile)
filemenu.add_command(label="Save As..", command=SaveToFileNoCurrent)
filemenu.add_separator()
filemenu.add_command(label="Exit", command=CloseSaveInst)
menubar.add_cascade(label="File", menu=filemenu)
editmenu = tk.Menu(menubar, tearoff=0)
editmenu.add_command(label="Clear", command=Clear)
menubar.add_cascade(label="Edit", menu=editmenu)
simmenu = tk.Menu(menubar, tearoff=0)
simmenu.add_command(label="Start/Stop Simulation", command=SpaceHandler)
menubar.add_cascade(label="Simulation", menu=simmenu)
settingsmenu = tk.Menu(menubar, tearoff=0)
settingsmenu.add_command(label="Simulation Parameters", command=SimParamsWindow)
settingsmenu.add_command(label="Grid Parameters", command=GridParamsWindow)
settingsmenu.add_command(label="Snap Parameters", command=SnapParamsWindow)
settingsmenu.add_separator()
settingsmenu.add_command(label="Toggle Window Collision", command=ToggleWindowCollision)
menubar.add_cascade(label="Settings", menu=settingsmenu)
helpmenu = tk.Menu(menubar, tearoff=0)
helpmenu.add_command(label="Controls", command=ControlsWindow)
menubar.add_cascade(label="Help", menu=helpmenu)
window.config(menu=menubar)
# ------------[GUI MENUBAR]------------
statusBar = tk.Label(window, text="Ready", bd=1, relief=tk.SUNKEN, anchor=tk.W)
statusBar.pack(side=tk.BOTTOM, fill=tk.X)
fpsText = canvas.create_text(15, 15, fill="black", text="0", anchor="w")
selectedStickText = canvas.create_text(15, 33, fill="black", text="Current Stick: ", anchor="w")
Render()
ControlsWindow()
window.protocol('WM_DELETE_WINDOW', CloseSaveInst)
lastFrameTime = (time.time())
# MAIN LOOP
while True:
startRenderTime = time.time()
if simNow and not pauseSim:
Simulate()
Interact()
Render()
#temp = RaycastPoints(Vector2D(100, 500), Vector2D(500, 500))
#if temp:
# print(f"Test Ray hit {random.randrange(0,10)}")
# Target 120 fps. If update took longer, remove from delay time, so frames stay consistent
frameTime = (time.time() - startRenderTime)
sleepTime = max(0, (1/120) - frameTime)
lastFrameTime = (time.time())
sleep(sleepTime)
# MAIN LOOP
|
py | 7dfc0888737681d332fcebc91300506205719b9c |
import os
import shutil
import signal
import sys
import tarfile
import tempfile
import urllib.request
from importlib import import_module
_signames = dict((getattr(signal, signame), signame)
for signame in dir(signal)
if signame.startswith('SIG') and '_' not in signame)
def signal_name(signum):
try:
if sys.version_info[:2] >= (3, 5):
return signal.Signals(signum).name
else:
return _signames[signum]
except KeyError:
return 'SIG_UNKNOWN'
except ValueError:
return 'SIG_UNKNOWN'
def load_object(path):
try:
dot = path.rindex('.')
except ValueError:
raise ValueError("Error loading object '%s': not a full path" % path)
module, name = path[:dot], path[dot + 1:]
mod = import_module(module)
try:
obj = getattr(mod, name)
except AttributeError:
raise NameError("Module '%s' doesn't define any object named '%s'" % (module, name))
return obj
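# Illustrative usage with stdlib names (the dotted paths here are examples, not part of this module):
assert load_object('collections.OrderedDict').__name__ == 'OrderedDict'
assert signal_name(signal.SIGINT) == 'SIGINT'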
# Download the GeoIP database used to look up an IP's location (country, region)
def update_geoip_db():
    print('The update is in progress, please wait for a while...')
filename = 'GeoLite2-City.tar.gz'
local_file = os.path.join(DATA_DIR, filename)
city_db = os.path.join(DATA_DIR, 'GeoLite2-City.mmdb')
url = 'http://geolite.maxmind.com/download/geoip/database/%s' % filename
urllib.request.urlretrieve(url, local_file)
tmp_dir = tempfile.gettempdir()
with tarfile.open(name=local_file, mode='r:gz') as tf:
for tar_info in tf.getmembers():
if tar_info.name.endswith('.mmdb'):
tf.extract(tar_info, tmp_dir)
tmp_path = os.path.join(tmp_dir, tar_info.name)
shutil.move(tmp_path, city_db)
os.remove(local_file)
if os.path.exists(city_db):
        print(
            'The GeoLite2-City DB was successfully downloaded, and detailed '
            'geolocation information for each proxy is now available.'
        )
else:
print('Something went wrong, please try again later.')
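# Illustrative invocation (assumes DATA_DIR is defined elsewhere in the package and
# that the MaxMind download URL above is still reachable):
#   update_geoip_db()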
|
py | 7dfc0ac5ed77e05430115a77b23b297452ad6ee4 | from minatar import Environment
from utils.modules import Conv2d_MinAtar, MLP, NetworkGlue
from utils.replay import Replay
from utils.commons import to_numpy, get_state
import os
import random
import time
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as f
import torch.optim as optim
from utils.recorder import Recoder
class BaseDQN:
def __init__(self, opts):
self.opts = opts
self.env_name = opts.env_nm
self.agent_name = opts.agent_nm
self.max_episode_steps = opts.num_frames
self.device = opts.device
self.batch_size = opts.batch_size
self.discount = opts.discount
self.gradient_clip = opts.gradient_clip
# env
self.env = Environment(self.env_name)
self.action_size = self.env.num_actions()
self.state_shape = self.env.state_shape()
self.state_size = self.get_state_size()
self.history_length = self.state_shape[2]
# network
self.input_type = opts.input_type
self.layer_dims = [opts.feature_dim] + opts.hidden_layers + [self.action_size]
self.Q_net = [None]
self.Q_net[0] = self.creatNN(self.input_type).to(self.device)
# optimizer
self.optimizer = [None]
self.optimizer[0] = optim.RMSprop(self.Q_net[0].parameters(), lr=opts.step_size, alpha=opts.squared_grad_momentum, centered=True, eps=opts.min_squared_grad)
# normalizer
self.state_normalizer = lambda x: x
self.reward_normalizer = lambda x: x
# replay buffer
self.replay_buffer = Replay(opts.replay_buffer_size, self.batch_size, self.device)
# update
self.loss = f.smooth_l1_loss
self.update_Q_net_index = 0
self.sgd_update_frequency = opts.training_freq
def get_state_size(self):
return int(np.prod(self.state_shape))
def creatNN(self, input_type):
feature_net = Conv2d_MinAtar(in_channels=self.history_length, feature_dim=self.layer_dims[0])
value_net = MLP(layer_dims=self.layer_dims, hidden_activation=nn.ReLU())
NN = NetworkGlue(feature_net, value_net)
return NN
    def compute_q(self, states, actions):
actions = actions.long()
q = self.Q_net[self.update_Q_net_index](states).gather(1, actions).squeeze()
return q
def compute_q_target(self, next_states, rewards, dones):
q_next = self.Q_net[0](next_states).detach().max(1)[0]
q_target = rewards + self.discount * q_next * (1 - dones)
return q_target
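    # One-step TD target: q_target = r + discount * max_a Q(s', a) * (1 - done); the
    # bootstrap term is detached so gradients only flow through compute_q() above.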
def learn(self):
states, actions, next_states, rewards, dones = self.replay_buffer.sample()
# Compute q target
q_target = self.compute_q_target(next_states, rewards, dones)
# Compute q
        q = self.compute_q(states, actions)
# Take an optimization step
loss = self.loss(q, q_target)
self.optimizer[self.update_Q_net_index].zero_grad()
loss.backward()
if self.gradient_clip > 0:
nn.utils.clip_grad_norm_(self.Q_net[self.update_Q_net_index].parameters(), self.gradient_clip)
self.optimizer[self.update_Q_net_index].step()
def save_experience(self, state, action, next_state, reward, done):
# Saves recent experience to replay buffer
experience = [state, action, next_state, reward, done]
self.replay_buffer.add([experience])
def get_action_selection_q_values(self, state):
q_values = self.Q_net[0](state)
q_values = to_numpy(q_values).flatten()
return q_values
def get_action(self, state, is_test=False):
if not is_test:
if self.step_count < self.opts.replay_start_size:
action = random.randrange(self.action_size)
else:
epsilon = self.opts.end_epsilon if self.step_count - self.opts.replay_start_size >= self.opts.first_n_frames \
else ((self.opts.end_epsilon - self.opts.epsilon) / self.opts.first_n_frames) * (self.step_count - self.opts.replay_start_size) + self.opts.epsilon
if np.random.binomial(1, epsilon) == 1:
action = random.randrange(self.action_size)
else:
with torch.no_grad():
q_values = self.get_action_selection_q_values(state)
action = np.argmax(q_values)
else:
with torch.no_grad():
q_values = self.get_action_selection_q_values(state)
action = np.argmax(q_values)
return action
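    # Epsilon schedule used above (all values come from opts, so the names are the caller's):
    # before replay_start_size steps, actions are uniformly random; afterwards epsilon
    # anneals linearly from opts.epsilon to opts.end_epsilon over the first
    # opts.first_n_frames steps and then stays at opts.end_epsilon.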
def evaluation(self):
def max_q(state):
return self.Q_net[0](state).max(1)[0].item()
scores = []
max_qs, real_qs = [], []
env = Environment(self.opts.env_nm)
for seed in range(self.opts.n_eval_episodes):
# env = Environment(self.opts.env_nm, random_seed=10*seed)
env.reset()
state = get_state(env.state())
score, done = 0., False
max_qs.append(max_q(state))
discount_score, t = 0., 0
while not done:
action = self.get_action(state, is_test=True)
reward, done = env.act(action)
reward = reward.item() if not isinstance(reward, int) else reward
score += reward
discount_score += (self.opts.discount ** t) * reward
t += 1
state = get_state(env.state())
scores.append(score)
real_qs.append(discount_score)
print("timesteps %d, mean score %.4f, mean max_q %.4f, real_q %.4f" % (
self.step_count, np.mean(scores), np.mean(max_qs), np.mean(real_qs)))
return np.asarray(scores), np.asarray(max_qs), np.asarray(real_qs)
def run_steps(self):
# Set initial values
data_return, frame_stamp, avg_return = [], [], 0.
t_start = time.time()
self.step_count, self.episode_count, self.policy_net_update_counter = 0, 0, 0
recoder = Recoder(self.opts.save_dir, seed=0)
while self.step_count < self.opts.num_frames:
print("%d / %d: %.4f %s" % (self.step_count, self.opts.num_frames, self.step_count / self.opts.num_frames, self.opts.tag))
G = 0.0
self.env.reset()
state = self.state_normalizer(self.env.state())
done = False
while (not done) and self.step_count < self.opts.num_frames:
action = self.get_action(get_state(state))
reward, done = self.env.act(action)
next_state = self.state_normalizer(self.env.state())
reward = self.reward_normalizer(reward)
# reward = reward.item() if not isinstance(reward, int) else reward
self.save_experience(state.transpose(2, 0, 1), action, next_state.transpose(2, 0, 1), reward, done)
if self.step_count > self.opts.replay_start_size and self.step_count % self.sgd_update_frequency == 0:
self.policy_net_update_counter += 1
self.learn()
if self.step_count % self.opts.eval_iterval == 0:
eval_scores, max_qs, real_qs = self.evaluation()
recoder.add_result({"eval_scores": eval_scores, "max_qs": max_qs, "real_qs": real_qs}, "test_return")
recoder.save_result()
G += reward
self.step_count += 1
state = next_state
self.episode_count += 1
data_return.append(G)
frame_stamp.append(self.step_count)
avg_return = 0.99 * avg_return + 0.01 * G
if self.episode_count % 50 == 0:
print("Episode " + str(self.episode_count) + " | Return: " + str(G) + " | Avg return: " +
str(np.around(avg_return, 2)) + " | Frame: " + str(self.step_count) + " | Time per frame: " + str(
(time.time() - t_start) / self.step_count))
# Save model data and other intermediate data if the corresponding flag is true
if self.opts.store_intermediate_result and self.episode_count % 50 == 0:
torch.save({
'episode': self.episode_count,
'frame': self.step_count,
'policy_net_update_counter': self.policy_net_update_counter,
'avg_return': avg_return,
'return_per_run': data_return,
'frame_stamp_per_run': frame_stamp,
'replay_buffer': []
}, os.path.join(self.opts.save_dir, "checkpoint.pth")) |
py | 7dfc0b1a0f58c143306b374e189cdae70b56ab0c | import torch
import numpy as np
from typing import Union
from tianshou.data import to_numpy
class MovAvg(object):
"""Class for moving average. It will automatically exclude the infinity and
NaN. Usage:
::
>>> stat = MovAvg(size=66)
>>> stat.add(torch.tensor(5))
5.0
>>> stat.add(float('inf')) # which will not add to stat
5.0
>>> stat.add([6, 7, 8])
6.5
>>> stat.get()
6.5
>>> print(f'{stat.mean():.2f}±{stat.std():.2f}')
6.50±1.12
"""
def __init__(self, size: int = 100) -> None:
super().__init__()
self.size = size
self.cache = []
self.banned = [np.inf, np.nan, -np.inf]
def add(self, x: Union[float, list, np.ndarray, torch.Tensor]) -> float:
"""Add a scalar into :class:`MovAvg`. You can add ``torch.Tensor`` with
only one element, a python scalar, or a list of python scalar.
"""
if isinstance(x, torch.Tensor):
x = to_numpy(x.flatten())
if isinstance(x, list) or isinstance(x, np.ndarray):
for _ in x:
if _ not in self.banned:
self.cache.append(_)
elif x not in self.banned:
self.cache.append(x)
if self.size > 0 and len(self.cache) > self.size:
self.cache = self.cache[-self.size:]
return self.get()
def get(self) -> float:
"""Get the average."""
if len(self.cache) == 0:
return 0
return np.mean(self.cache)
def mean(self) -> float:
"""Get the average. Same as :meth:`get`."""
return self.get()
def std(self) -> float:
"""Get the standard deviation."""
if len(self.cache) == 0:
return 0
return np.std(self.cache)
|
py | 7dfc0b9a123e3ed90517be7bb6619aded4ff56cf | """
mbed SDK
Copyright (c) 2011-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import re
from os.path import join, basename, splitext
from tools.toolchains import mbedToolchain, TOOLCHAIN_PATHS
from tools.hooks import hook_tool
class GCC(mbedToolchain):
LINKER_EXT = '.ld'
LIBRARY_EXT = '.a'
STD_LIB_NAME = "lib%s.a"
    DIAGNOSTIC_PATTERN = re.compile(r'((?P<file>[^:]+):(?P<line>\d+):)(\d+:)? (?P<severity>warning|error): (?P<message>.+)')
    INDEX_PATTERN = re.compile(r'(?P<col>\s*)\^')
    # ANY changes to these default flags are backwards incompatible and require
# an update to the mbed-sdk-tools and website that introduces a profile
# for the previous version of these flags
DEFAULT_FLAGS = {
'common': ["-c", "-Wall", "-Wextra",
"-Wno-unused-parameter", "-Wno-missing-field-initializers",
"-fmessage-length=0", "-fno-exceptions", "-fno-builtin",
"-ffunction-sections", "-fdata-sections", "-funsigned-char",
"-MMD", "-fno-delete-null-pointer-checks", "-fomit-frame-pointer"
],
'asm': ["-x", "assembler-with-cpp"],
'c': ["-std=gnu99"],
'cxx': ["-std=gnu++98", "-fno-rtti", "-Wvla"],
'ld': ["-Wl,--gc-sections", "-Wl,--wrap,main",
"-Wl,--wrap,_malloc_r", "-Wl,--wrap,_free_r", "-Wl,--wrap,_realloc_r", "-Wl,--wrap,_calloc_r"],
}
def __init__(self, target, options=None, notify=None, macros=None, silent=False, tool_path="", extra_verbose=False):
mbedToolchain.__init__(self, target, options, notify, macros, silent, extra_verbose=extra_verbose)
if target.core == "Cortex-M0+":
cpu = "cortex-m0plus"
elif target.core == "Cortex-M4F":
cpu = "cortex-m4"
elif target.core == "Cortex-M7F":
cpu = "cortex-m7"
elif target.core == "Cortex-M7FD":
cpu = "cortex-m7"
else:
cpu = target.core.lower()
self.cpu = ["-mcpu=%s" % cpu]
if target.core.startswith("Cortex"):
self.cpu.append("-mthumb")
# FPU handling; the Cortex-M7 may have a double-precision FPU
if target.core == "Cortex-M4F":
self.cpu.append("-mfpu=fpv4-sp-d16")
self.cpu.append("-mfloat-abi=softfp")
elif target.core == "Cortex-M7F":
self.cpu.append("-mfpu=fpv5-sp-d16")
self.cpu.append("-mfloat-abi=softfp")
elif target.core == "Cortex-M7FD":
self.cpu.append("-mfpu=fpv5-d16")
self.cpu.append("-mfloat-abi=softfp")
if target.core == "Cortex-A9":
self.cpu.append("-mthumb-interwork")
self.cpu.append("-marm")
self.cpu.append("-march=armv7-a")
self.cpu.append("-mfpu=vfpv3")
self.cpu.append("-mfloat-abi=hard")
self.cpu.append("-mno-unaligned-access")
# Note: GCC bug http://gcc.gnu.org/bugzilla/show_bug.cgi?id=46762 has historically
# motivated "-O2" over "-Os"; the release configuration below currently uses "-Os".
self.flags["common"] += self.cpu
if "save-asm" in self.options:
self.flags["common"].append("-save-temps")
if "debug-info" in self.options:
self.flags["common"].append("-g")
self.flags["common"].append("-O0")
else:
self.flags["common"].append("-Os")
main_cc = join(tool_path, "arm-none-eabi-gcc")
main_cppc = join(tool_path, "arm-none-eabi-g++")
self.asm = [main_cc] + self.flags['asm'] + self.flags["common"]
self.cc = [main_cc]
self.cppc = [main_cppc]
self.cc += self.flags['c'] + self.flags['common']
self.cppc += self.flags['cxx'] + self.flags['common']
self.flags['ld'] += self.cpu
self.ld = [join(tool_path, "arm-none-eabi-gcc")] + self.flags['ld']
self.sys_libs = ["stdc++", "supc++", "m", "c", "gcc"]
self.ar = join(tool_path, "arm-none-eabi-ar")
self.elf2bin = join(tool_path, "arm-none-eabi-objcopy")
def parse_dependencies(self, dep_path):
dependencies = []
buff = open(dep_path).readlines()
buff[0] = re.sub(r'^(.*?)\: ', '', buff[0])
for line in buff:
file = line.replace('\\\n', '').strip()
if file:
# GCC might list more than one dependency on a single line; in this case
# the dependencies are separated by a space. However, a space might also
# indicate an actual space character in a dependency path, but in this case
# the space character is prefixed by a backslash.
# Temporarily replace all '\ ' with a special char that is not used (\a in this
# case) to keep them from being interpreted by 'split' (they will be converted
# back later to a space char)
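# Illustrative (hypothetical) dependency line from a .d file:
#   main.o: main.c inc/pins.h inc/board\ config.h
# The 'main.o: ' prefix is stripped above; after the '\ ' -> '\a' substitution
# below, splitting on spaces yields 'main.c', 'inc/pins.h' and
# 'inc/board config.h' with the escaped space restored.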
file = file.replace('\\ ', '\a')
if file.find(" ") == -1:
dependencies.append((self.CHROOT if self.CHROOT else '') + file.replace('\a', ' '))
else:
dependencies = dependencies + [(self.CHROOT if self.CHROOT else '') + f.replace('\a', ' ') for f in file.split(" ")]
return dependencies
def is_not_supported_error(self, output):
return "error: #error [NOT_SUPPORTED]" in output
def parse_output(self, output):
# The warning/error notification is multiline
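# Illustrative (hypothetical) GCC output this parser expects:
#   main.c:12:9: warning: unused variable 'x' [-Wunused-variable]
#     int x = 0;
#         ^
# The first line matches DIAGNOSTIC_PATTERN; the caret line matches
# INDEX_PATTERN and its leading whitespace gives the column.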
msg = None
for line in output.splitlines():
match = GCC.DIAGNOSTIC_PATTERN.search(line)
if match is not None:
if msg is not None:
self.cc_info(msg)
msg = {
'severity': match.group('severity').lower(),
'file': match.group('file'),
'line': match.group('line'),
'col': 0,
'message': match.group('message'),
'text': '',
'target_name': self.target.name,
'toolchain_name': self.name
}
elif msg is not None:
# Determine the warning/error column by calculating the ^ position
match = GCC.INDEX_PATTERN.match(line)
if match is not None:
msg['col'] = len(match.group('col'))
self.cc_info(msg)
msg = None
else:
msg['text'] += line+"\n"
def get_dep_option(self, object):
base, _ = splitext(object)
dep_path = base + '.d'
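# -MD makes GCC emit a Make-style dependency file as a side effect of
# compilation and -MF names it; parse_dependencies() above parses files of
# this format.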
return ["-MD", "-MF", dep_path]
def get_config_option(self, config_header):
return ['-include', config_header]
def get_compile_options(self, defines, includes, for_asm=False):
opts = ['-D%s' % d for d in defines]
if self.RESPONSE_FILES:
opts += ['@%s' % self.get_inc_file(includes)]
else:
opts += ["-I%s" % i for i in includes]
if not for_asm:
config_header = self.get_config_header()
if config_header is not None:
opts = opts + self.get_config_option(config_header)
return opts
@hook_tool
def assemble(self, source, object, includes):
# Build assemble command
cmd = self.asm + self.get_compile_options(self.get_symbols(True), includes) + ["-o", object, source]
# Call cmdline hook
cmd = self.hook.get_cmdline_assembler(cmd)
# Return command array, don't execute
return [cmd]
@hook_tool
def compile(self, cc, source, object, includes):
# Build compile command
cmd = cc + self.get_compile_options(self.get_symbols(), includes)
cmd.extend(self.get_dep_option(object))
cmd.extend(["-o", object, source])
# Call cmdline hook
cmd = self.hook.get_cmdline_compiler(cmd)
return [cmd]
def compile_c(self, source, object, includes):
return self.compile(self.cc, source, object, includes)
def compile_cpp(self, source, object, includes):
return self.compile(self.cppc, source, object, includes)
@hook_tool
def link(self, output, objects, libraries, lib_dirs, mem_map):
libs = []
for l in libraries:
name, _ = splitext(basename(l))
libs.append("-l%s" % name[3:])
libs.extend(["-l%s" % l for l in self.sys_libs])
# Build linker command
map_file = splitext(output)[0] + ".map"
cmd = self.ld + ["-o", output, "-Wl,-Map=%s" % map_file] + objects + ["-Wl,--start-group"] + libs + ["-Wl,--end-group"]
if mem_map:
cmd.extend(['-T', mem_map])
for L in lib_dirs:
cmd.extend(['-L', L])
cmd.extend(libs)
# Call cmdline hook
cmd = self.hook.get_cmdline_linker(cmd)
if self.RESPONSE_FILES:
# Split link command to linker executable + response file
cmd_linker = cmd[0]
link_files = self.get_link_file(cmd[1:])
cmd = [cmd_linker, "@%s" % link_files]
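# Illustrative shape of the resulting call (file name is hypothetical):
#   arm-none-eabi-gcc @BUILD/.link_files.txt
# where the response file holds the remaining linker arguments.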
# Exec command
self.cc_verbose("Link: %s" % ' '.join(cmd))
self.default_cmd(cmd)
@hook_tool
def archive(self, objects, lib_path):
if self.RESPONSE_FILES:
param = ["@%s" % self.get_arch_file(objects)]
else:
param = objects
# Exec command
self.default_cmd([self.ar, 'rcs', lib_path] + param)
@hook_tool
def binary(self, resources, elf, bin):
# Build binary command
cmd = [self.elf2bin, "-O", "binary", elf, bin]
# Call cmdline hook
cmd = self.hook.get_cmdline_binary(cmd)
# Exec command
self.cc_verbose("FromELF: %s" % ' '.join(cmd))
self.default_cmd(cmd)
class GCC_ARM(GCC):
@staticmethod
def check_executable():
"""Returns True if the executable (arm-none-eabi-gcc) location
specified by the user exists OR the executable can be found on the PATH.
Returns False otherwise."""
return mbedToolchain.generic_check_executable("GCC_ARM", 'arm-none-eabi-gcc', 1)
def __init__(self, target, options=None, notify=None, macros=None, silent=False, extra_verbose=False):
GCC.__init__(self, target, options, notify, macros, silent, TOOLCHAIN_PATHS['GCC_ARM'], extra_verbose=extra_verbose)
# Use latest gcc nanolib
if "std-lib" in self.options:
use_nano = False
elif "small-lib" in self.options:
use_nano = True
elif target.default_lib == "std":
use_nano = False
elif target.default_lib == "small":
use_nano = True
else:
use_nano = False
if use_nano:
self.ld.append("--specs=nano.specs")
self.flags['ld'].append("--specs=nano.specs")
self.cc += ["-DMBED_RTOS_SINGLE_THREAD"]
self.cppc += ["-DMBED_RTOS_SINGLE_THREAD"]
self.macros.extend(["MBED_RTOS_SINGLE_THREAD"])
self.sys_libs.append("nosys")
class GCC_CR(GCC):
@staticmethod
def check_executable():
"""Returns True if the executable (arm-none-eabi-gcc) location
specified by the user exists OR the executable can be found on the PATH.
Returns False otherwise."""
return mbedToolchain.generic_check_executable("GCC_CR", 'arm-none-eabi-gcc', 1)
def __init__(self, target, options=None, notify=None, macros=None, silent=False, extra_verbose=False):
GCC.__init__(self, target, options, notify, macros, silent, TOOLCHAIN_PATHS['GCC_CR'], extra_verbose=extra_verbose)
additional_compiler_flags = [
"-D__NEWLIB__", "-D__CODE_RED", "-D__USE_CMSIS", "-DCPP_USE_HEAP",
]
self.cc += additional_compiler_flags
self.cppc += additional_compiler_flags
# Use latest gcc nanolib
self.ld.append("--specs=nano.specs")
if target.name in ["LPC1768", "LPC4088", "LPC4088_DM", "LPC4330", "UBLOX_C027", "LPC2368"]:
self.ld.extend(["-u _printf_float", "-u _scanf_float"])
self.ld += ["-nostdlib"]
|
py | 7dfc0c54dd2966aa1a59ea444eac4343cbb4e3c8 | from django.contrib.auth import authenticate, login, logout
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_protect
from rest_framework import status, views
from rest_framework.response import Response
from .serializers import UserSerializer
class LoginView(views.APIView):
@method_decorator(csrf_protect)
def post(self, request):
user = authenticate(
username=request.data.get("username"),
password=request.data.get("password")
)
if user is None or not user.is_active:
return Response({
'status': 'Unauthorized',
'message': 'Username or password incorrect'
}, status=status.HTTP_401_UNAUTHORIZED)
login(request, user)
return Response(UserSerializer(user).data)
class LogoutView(views.APIView):
@staticmethod
def get(request):
logout(request)
return Response({}, status=status.HTTP_204_NO_CONTENT)
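# Illustrative wiring (hypothetical urls.py, not part of this module; assumes Django >= 2.0):
#   from django.urls import path
#   from .views import LoginView, LogoutView
#   urlpatterns = [
#       path('api/login/', LoginView.as_view()),
#       path('api/logout/', LogoutView.as_view()),
#   ]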
|