ext (string, 9 classes) | sha (string, length 40) | content (string, 3 to 1.04M chars)
---|---|---|
py | 1a366a08502d57b102560ff90f71b4b67f40c354 | '''
Scratchpad for test-based development.
LICENSING
-------------------------------------------------
hypergolix: A python Golix client.
Copyright (C) 2016 Muterra, Inc.
Contributors
------------
Nick Badger
[email protected] | [email protected] | nickbadger.com
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the
Free Software Foundation, Inc.,
51 Franklin Street,
Fifth Floor,
Boston, MA 02110-1301 USA
------------------------------------------------------
'''
import argparse
import unittest
import sys
import time
import statistics
import collections
import threading
import random
import pathlib
import tempfile
import shutil
import logging
from loopa.utils import await_coroutine_threadsafe
from hypergolix.utils import ApiID
from hypergolix.objproxy import Obj
from hypergolix.comms import WSConnection
from hypergolix.comms import WSBeatingConn
from hypergolix.service import RemotePersistenceServer
from hypergolix.app import HypergolixCore
from hypergolix.accounting import Account
from hypergolix.embed import HGXLink
from golix._getlow import GIDC
from hypergolix.persistence import _GidcLite
# ###############################################
# Fixtures
# ###############################################
from trashtest._fixtures.identities import TEST_AGENT1
from trashtest._fixtures.identities import TEST_READER1
from trashtest._fixtures.identities import TEST_AGENT2
from trashtest._fixtures.identities import TEST_READER2
gidc1 = TEST_READER1.packed
gidclite1 = _GidcLite.from_golix(GIDC.unpack(TEST_READER1.packed))
gidc2 = TEST_READER2.packed
gidclite2 = _GidcLite.from_golix(GIDC.unpack(TEST_READER2.packed))
logger = logging.getLogger(__name__)
# ###############################################
# Testing
# ###############################################
class TestAppNoRestore(unittest.TestCase):
''' Test a fake application with no account restoration, just with
a parrot between two identities.
'''
@classmethod
def setUpClass(cls):
''' Make a fake application, yo.
'''
# Set up the SERVER
###########################################
cls.server_cachedir = tempfile.mkdtemp()
cls.server = RemotePersistenceServer(
cache_dir = cls.server_cachedir,
host = '127.0.0.1',
port = 6022,
reusable_loop = False,
threaded = True,
# debug = True,
thread_kwargs = {'name': 'pserver'}
)
# Set up the FIRST CLIENT
###########################################
cls.hgxcore1_cachedir = tempfile.mkdtemp()
cls.hgxcore1 = HypergolixCore(
cache_dir = cls.hgxcore1_cachedir,
ipc_port = 6023,
reusable_loop = False,
threaded = True,
# debug = True,
thread_kwargs = {'name': 'hgxcore1'}
)
cls.hgxcore1.add_remote(
connection_cls = WSBeatingConn,
host = '127.0.0.1',
port = 6022,
tls = False
)
cls.root_secret_1 = TEST_AGENT1.new_secret()
cls.account1 = Account(
user_id = TEST_AGENT1,
root_secret = cls.root_secret_1,
hgxcore = cls.hgxcore1
)
cls.hgxcore1.account = cls.account1
cls.hgxlink1 = HGXLink(
ipc_port = 6023,
autostart = False,
# debug = True,
threaded = True,
thread_kwargs = {'name': 'hgxlink1'}
)
# Set up the SECOND CLIENT
###########################################
cls.hgxcore2_cachedir = tempfile.mkdtemp()
cls.hgxcore2 = HypergolixCore(
cache_dir = cls.hgxcore2_cachedir,
ipc_port = 6024,
reusable_loop = False,
threaded = True,
# debug = True,
thread_kwargs = {'name': 'hgxcore2'}
)
cls.hgxcore2.add_remote(
connection_cls = WSBeatingConn,
host = '127.0.0.1',
port = 6022,
tls = False
)
cls.root_secret_2 = TEST_AGENT2.new_secret()
cls.account2 = Account(
user_id = TEST_AGENT2,
root_secret = cls.root_secret_2,
hgxcore = cls.hgxcore2
)
cls.hgxcore2.account = cls.account2
cls.hgxlink2 = HGXLink(
ipc_port = 6024,
autostart = False,
# debug = True,
threaded = True,
thread_kwargs = {'name': 'hgxlink2'}
)
# START THE WHOLE SHEBANG
###########################################
# Start the server and wait until it's ready to serve connections
cls.server.start()
await_coroutine_threadsafe(
coro = cls.server.await_init(),
loop = cls.server._loop
)
# Start the first core and wait until it's ready to serve connections
cls.hgxcore1.start()
await_coroutine_threadsafe(
coro = cls.hgxcore1.await_init(),
loop = cls.hgxcore1._loop
)
# Start the second core and wait until it's ready to serve connections
cls.hgxcore2.start()
await_coroutine_threadsafe(
coro = cls.hgxcore2.await_init(),
loop = cls.hgxcore2._loop
)
# These don't need to wait though.
cls.hgxlink1.start()
cls.hgxlink2.start()
@classmethod
def tearDownClass(cls):
''' Kill errything and then remove the caches.
'''
try:
cls.hgxlink2.stop_threadsafe(timeout=.5)
cls.hgxlink1.stop_threadsafe(timeout=.5)
cls.hgxcore2.stop_threadsafe(timeout=.5)
cls.hgxcore1.stop_threadsafe(timeout=.5)
cls.server.stop_threadsafe(timeout=.5)
finally:
shutil.rmtree(cls.hgxcore2_cachedir)
shutil.rmtree(cls.hgxcore1_cachedir)
shutil.rmtree(cls.server_cachedir)
def setUp(self):
''' Do some housekeeping.
'''
self.iterations = 10
self.timeout = 10
self.request_api = ApiID(bytes(63) + b'\x01')
self.response_api = ApiID(bytes(63) + b'\x02')
self.incoming1 = collections.deque()
self.incoming2 = collections.deque()
self.cache2 = collections.deque()
self.returnflag1 = threading.Event()
self.updateflags = collections.deque()
# Set up the timing recorder
self.timers = collections.deque()
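# Note on the timing protocol (descriptive comment, inferred from the test
# below): each entry of self.timers is a two-element deque holding
# [end, start] monotonic timestamps for one round trip. The start time is
# appended just before the push and the end time is appended by
# roundtrip_notifier, so (end - start) gives the round-trip latency.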
async def roundtrip_notifier(self, mirror_obj):
''' This gets called when we get an update for a response.
'''
end_time = time.monotonic()
ii = int.from_bytes(mirror_obj.state[:1], 'big')
self.timers[ii].appendleft(end_time)
self.updateflags[ii].set()
def share_handler(self, ghid, origin, api_id):
''' This handles all shares. It's defined to be used STRICTLY in
one direction.
'''
# The request handler. Requests are only received by hgxlink2.
if api_id == self.request_api:
# Get the object itself
obj = self.hgxlink2.get_threadsafe(
cls = Obj,
ghid = ghid
)
# Construct a mirror object
mirror = self.hgxlink2.new_threadsafe(
cls = Obj,
state = obj.state,
api_id = self.response_api,
dynamic = True,
private = False
)
# Create an update callback
async def state_mirror(source_obj, mirror_obj=mirror):
mirror_obj.state = source_obj.state
await mirror_obj.push()
# Set the update callback and then share the mirror
obj.callback = state_mirror
self.incoming2.appendleft(obj)
self.cache2.appendleft(mirror)
mirror.share_threadsafe(origin)
# The response handler. Responses are only received by hgxlink1.
elif api_id == self.response_api:
# Get the object itself
mirror = self.hgxlink1.get_threadsafe(
cls = Obj,
ghid = ghid
)
mirror.callback = self.roundtrip_notifier
self.incoming1.appendleft(mirror)
self.returnflag1.set()
else:
raise ValueError('Bad api.')
def test_whoami(self):
''' Super simple whoami test to make sure it's working.
'''
# First make sure everything is correctly started up.
await_coroutine_threadsafe(
coro = self.hgxcore1.await_startup(),
loop = self.hgxcore1._loop
)
await_coroutine_threadsafe(
coro = self.hgxcore2.await_startup(),
loop = self.hgxcore2._loop
)
whoami = await_coroutine_threadsafe(
coro = self.hgxlink1._ipc_manager.get_whoami(timeout=5),
loop = self.hgxlink1._loop
)
self.assertEqual(whoami, self.hgxlink1.whoami)
self.assertEqual(whoami, TEST_AGENT1.ghid)
whoami2 = await_coroutine_threadsafe(
coro = self.hgxlink2._ipc_manager.get_whoami(timeout=5),
loop = self.hgxlink2._loop
)
self.assertEqual(whoami2, self.hgxlink2.whoami)
self.assertEqual(whoami2, TEST_AGENT2.ghid)
def test_roundtrip(self):
''' Bidirectional communication test.
'''
# First make sure everything is correctly started up.
await_coroutine_threadsafe(
coro = self.hgxcore1.await_startup(),
loop = self.hgxcore1._loop
)
await_coroutine_threadsafe(
coro = self.hgxcore2.await_startup(),
loop = self.hgxcore2._loop
)
# First we need to wrap the share handler appropriately
handler1 = self.hgxlink1.wrap_threadsafe(self.share_handler)
handler2 = self.hgxlink2.wrap_threadsafe(self.share_handler)
# Then we need to actually register it with the respective links
self.hgxlink1.register_share_handler_threadsafe(
self.response_api,
handler1
)
self.hgxlink2.register_share_handler_threadsafe(
self.request_api,
handler2
)
# Now let's make the actual request, then share it
state = bytes([random.randint(0, 255) for i in range(0, 25)])
request = self.hgxlink1.new_threadsafe(
cls = Obj,
state = state,
api_id = self.request_api,
dynamic = True,
private = False
)
request.share_threadsafe(self.hgxlink2.whoami)
# Wait for a response. First make sure one comes, then that it matches
self.assertTrue(self.returnflag1.wait(30))
mirror = self.incoming1.pop()
self.assertEqual(request.state, mirror.state)
# Notify that we're starting the actual tests
logger.info(
'\n\n########################################################\n' +
'######### Handshakes complete! Starting tests. #########\n' +
'########################################################\n'
)
for ii in range(self.iterations):
with self.subTest(i=ii):
logger.info(
'\n' +
'################ Starting mirror cycle. ################'
)
# Prep the object with an update
state = ii.to_bytes(1, 'big') + \
bytes([random.randint(0, 255) for i in range(0, 25)])
request.state = state
# Clear the update flag and zero out the timer
self.updateflags.append(threading.Event())
self.timers.append(collections.deque([0, 0], maxlen=2))
self.timers[ii].appendleft(time.monotonic())
# Call an update, wait for the response, and record the time
request.push_threadsafe()
success = self.updateflags[ii].wait(self.timeout)
# Check for success
self.assertTrue(success)
self.assertEqual(mirror.state, state)
times = [end - start for end, start in self.timers]
# Get a newline for tidiness when being run within the whole test suite
print('')
print('Max time: ', max(times))
print('Min time: ', min(times))
print('Mean time:', statistics.mean(times))
print('Med time: ', statistics.median(times))
# ###############################################
# Operations
# ###############################################
if __name__ == "__main__":
from hypergolix import logutils
logutils.autoconfig(loglevel='debug')
# from hypergolix.utils import TraceLogger
# with TraceLogger(interval=30):
# unittest.main()
unittest.main()
|
py | 1a366a4c5c22d4e02fbb7230a91f1bf985411b5e | #!/usr/bin/env python
import sys
sys.path.append('../cvk2')
import cv2
import httplib
import microsoftCVHelpers as msCV
import microsoftTranslatorHelper as msTranslator
import microsoftCogServicesHelper as msCogServs
import numpy as np
import requests
import time
_CVkey = 'e80f8ece393f4eebb3d98b0bb36f04d0'
_translatorKey = '420c6ab49ed1449db517207d6aef32d9'
def tokenReaderTester():
g = open("token.txt", 'r')
firstLine = g.readline()
print firstLine
timenow = time.time()
print timenow
print firstLine
tokenTime = float(firstLine)
if timenow - tokenTime < 60:
print g.readline()
else:
print "diff: ", timenow - tokenTime
g.close()
g = open("token.txt", 'w')
g.write(str(timenow) + "\n")
g.write("blahblahblah\n")
g.close()
def makeTestTokenTxt():
g = open("token.txt", 'w')
timenow = time.time()
print str(timenow)
g.write(str(timenow) + "\n")
g.write("tokenlalalalala")
g.close()
def imageAPITester():
# Load raw image file into memory
pathToFileInDisk = 'bathroom.jpg'
with open( pathToFileInDisk, 'rb' ) as f:
data = f.read()
# Computer Vision parameters
params = { 'visualFeatures' : 'Categories, Tags, Description, Faces'}
headers = dict()
headers['Ocp-Apim-Subscription-Key'] = _CVkey
headers['Content-Type'] = 'application/octet-stream'
json = None
start = time.time()
result = msCogServs.processCVRequest(json, data, headers, params )
if result is not None:
# Load the original image, fetched from the URL
data8uint = np.fromstring( data, np.uint8 ) # Convert string to an unsigned int array
img = cv2.cvtColor( cv2.imdecode( data8uint, cv2.IMREAD_COLOR ), cv2.COLOR_BGR2RGB )
# in reverse order: lowest confidence -> highest confidence
tags = sorted(result['tags'], key=lambda x: x['confidence'])
description = result['description']
caption = description['captions'][0]['text']
print "here and:"
print tags
print description
end = time.time()
print(end - start)
if __name__ == "__main__":
imageAPITester()
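# Token cache (descriptive note): token.txt stores the time the token was
# requested on its first line and the token itself on the second line; a new
# token is requested once the cached one is more than 8 minutes old
# (Translator access tokens are assumed to expire after roughly 10 minutes).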
token = None
f = open("token.txt", 'r')
firstLine = f.readline()
lastTokenTime = float(firstLine)
currentTime = time.time()
if currentTime - lastTokenTime > 8 * 60: # need new token
f.close()
f = open("token.txt", "w")
textHeaders = dict()
textHeaders['Content-Type'] = 'application/json'
textHeaders['Accept'] = 'application/jwt'
textHeaders['Ocp-Apim-Subscription-Key'] = _translatorKey
token = msCogServs.processTokenRequest(textHeaders)
if token is not None:
print "YAYAYAYA new token worked"
print token
now = time.time()
f.write(str(now) + "\n")
f.write(token)
else:
print "Could not get token for translation. Exiting."
exit(0)
else:
token = f.readline()
print "successfully read in token: ##", token, "##"
f.close()
print "~~~~~~~~~~~~~~~ NOW TRANSLATING ~~~~~~~~~~~~~~~"
textToTranslate = 'Hello, my name is Lucarne. I like cats and ice cream. On the weekends, I go fishing and skiing. Sometimes, I have lunch with my brother. My brother is a turtle.'
langToTranslateTo = 'ja'
translatorHeaders = dict()
translatorHeaders['Accept'] = 'application/xml'
translatorParameters = dict()
translatorParameters['appid'] = 'Bearer' + ' ' + token
translatorParameters['text'] = textToTranslate
translatorParameters['to'] = langToTranslateTo
translatorParameters['contentType'] = "text/plain"
print translatorParameters
translation = msCogServs.processTranslationRequest(translatorHeaders, translatorParameters)
length = len(translation.content)
translationCleaned = translation.content[68:length - 10]
print "here and translation: ", translationCleaned
# print "~~~~~~~~~~~~~~~ NOW TRANSLATING 2 ~~~~~~~~~~~~~~~"
# textToTranslate2 = ['Hello, my name is Amy.', 'I like cats', 'I liek mudkips']
# langToTranslateTo2 = 'es'
# translatorHeaders2 = dict()
# translatorHeaders2['Content-Type'] = 'application/x-www-form-urlencoded'
# translatorHeaders2['Accept'] = 'application/xml'
# translatorParameters2 = dict()
# translatorParameters2['appid'] = 'Bearer' + ' ' + token
# translatorParameters2['texts'] = textToTranslate2
# translatorParameters2['to'] = langToTranslateTo2
# print translatorParameters2
# translation2 = msCogServs.processTranslationArrayRequest(translatorHeaders2, translatorParameters2)
# print "here and translation: ", translation2
# print "here and translation: ", translation2.content
|
py | 1a366a67dd5223db5f52995bfbcdc8c2f8a632db | N = int(input())
ans = ''
for _ in range(N):
p, q, r = input().split()
if p == 'BEGINNING':
ans += r[0]
elif p == 'MIDDLE':
ans += r[len(r)//2]
else:
ans += r[-1]
print(ans)
|
py | 1a366a704b43aa39abd3ed780d21d1aa32d38289 | import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
fig = plt.figure()
axes1 = fig.add_subplot(1, 1, 1)
line, = axes1.plot(np.random.rand(10))
def update(data):
line.set_ydata(data)
return line,
def data_gen():
while True:
yield np.random.rand(10)
ani = animation.FuncAnimation(fig, update, data_gen, interval=1000)
plt.show()
|
py | 1a366b0d2df4b15113d2a704672963beaf0c04a8 | """The tests for the device tracker component."""
from datetime import datetime, timedelta
import json
import logging
import os
import pytest
from homeassistant.components import zone
import homeassistant.components.device_tracker as device_tracker
from homeassistant.components.device_tracker import const, legacy
from homeassistant.const import (
ATTR_ENTITY_PICTURE,
ATTR_FRIENDLY_NAME,
ATTR_GPS_ACCURACY,
ATTR_ICON,
ATTR_LATITUDE,
ATTR_LONGITUDE,
CONF_PLATFORM,
STATE_HOME,
STATE_NOT_HOME,
)
from homeassistant.core import State, callback
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import discovery
from homeassistant.helpers.json import JSONEncoder
from homeassistant.setup import async_setup_component
import homeassistant.util.dt as dt_util
from tests.async_mock import Mock, call, patch
from tests.common import (
assert_setup_component,
async_fire_time_changed,
mock_registry,
mock_restore_cache,
patch_yaml_files,
)
from tests.components.device_tracker import common
TEST_PLATFORM = {device_tracker.DOMAIN: {CONF_PLATFORM: "test"}}
_LOGGER = logging.getLogger(__name__)
@pytest.fixture(name="yaml_devices")
def mock_yaml_devices(hass):
"""Get a path for storing yaml devices."""
yaml_devices = hass.config.path(legacy.YAML_DEVICES)
if os.path.isfile(yaml_devices):
os.remove(yaml_devices)
yield yaml_devices
if os.path.isfile(yaml_devices):
os.remove(yaml_devices)
async def test_is_on(hass):
"""Test is_on method."""
entity_id = f"{const.DOMAIN}.test"
hass.states.async_set(entity_id, STATE_HOME)
assert device_tracker.is_on(hass, entity_id)
hass.states.async_set(entity_id, STATE_NOT_HOME)
assert not device_tracker.is_on(hass, entity_id)
async def test_reading_broken_yaml_config(hass):
"""Test when known devices contains invalid data."""
files = {
"empty.yaml": "",
"nodict.yaml": "100",
"badkey.yaml": "@:\n name: Device",
"noname.yaml": "my_device:\n",
"allok.yaml": "My Device:\n name: Device",
"oneok.yaml": ("My Device!:\n name: Device\nbad_device:\n nme: Device"),
}
args = {"hass": hass, "consider_home": timedelta(seconds=60)}
with patch_yaml_files(files):
assert await legacy.async_load_config("empty.yaml", **args) == []
assert await legacy.async_load_config("nodict.yaml", **args) == []
assert await legacy.async_load_config("noname.yaml", **args) == []
assert await legacy.async_load_config("badkey.yaml", **args) == []
res = await legacy.async_load_config("allok.yaml", **args)
assert len(res) == 1
assert res[0].name == "Device"
assert res[0].dev_id == "my_device"
res = await legacy.async_load_config("oneok.yaml", **args)
assert len(res) == 1
assert res[0].name == "Device"
assert res[0].dev_id == "my_device"
async def test_reading_yaml_config(hass, yaml_devices):
"""Test the rendering of the YAML configuration."""
dev_id = "test"
device = legacy.Device(
hass,
timedelta(seconds=180),
True,
dev_id,
"AB:CD:EF:GH:IJ",
"Test name",
picture="http://test.picture",
icon="mdi:kettle",
)
await hass.async_add_executor_job(
legacy.update_config, yaml_devices, dev_id, device
)
assert await async_setup_component(hass, device_tracker.DOMAIN, TEST_PLATFORM)
config = (await legacy.async_load_config(yaml_devices, hass, device.consider_home))[
0
]
assert device.dev_id == config.dev_id
assert device.track == config.track
assert device.mac == config.mac
assert device.config_picture == config.config_picture
assert device.consider_home == config.consider_home
assert device.icon == config.icon
@patch("homeassistant.components.device_tracker.const.LOGGER.warning")
async def test_duplicate_mac_dev_id(mock_warning, hass):
"""Test adding duplicate MACs or device IDs to DeviceTracker."""
devices = [
legacy.Device(
hass, True, True, "my_device", "AB:01", "My device", None, None, False
),
legacy.Device(
hass, True, True, "your_device", "AB:01", "Your device", None, None, False
),
]
legacy.DeviceTracker(hass, False, True, {}, devices)
_LOGGER.debug(mock_warning.call_args_list)
assert (
mock_warning.call_count == 1
), "The only warning call should be duplicates (check DEBUG)"
args, _ = mock_warning.call_args
assert "Duplicate device MAC" in args[0], "Duplicate MAC warning expected"
mock_warning.reset_mock()
devices = [
legacy.Device(
hass, True, True, "my_device", "AB:01", "My device", None, None, False
),
legacy.Device(
hass, True, True, "my_device", None, "Your device", None, None, False
),
]
legacy.DeviceTracker(hass, False, True, {}, devices)
_LOGGER.debug(mock_warning.call_args_list)
assert (
mock_warning.call_count == 1
), "The only warning call should be duplicates (check DEBUG)"
args, _ = mock_warning.call_args
assert "Duplicate device IDs" in args[0], "Duplicate device IDs warning expected"
async def test_setup_without_yaml_file(hass):
"""Test with no YAML file."""
with assert_setup_component(1, device_tracker.DOMAIN):
assert await async_setup_component(hass, device_tracker.DOMAIN, TEST_PLATFORM)
async def test_gravatar(hass):
"""Test the Gravatar generation."""
dev_id = "test"
device = legacy.Device(
hass,
timedelta(seconds=180),
True,
dev_id,
"AB:CD:EF:GH:IJ",
"Test name",
gravatar="[email protected]",
)
gravatar_url = (
"https://www.gravatar.com/avatar/"
"55502f40dc8b7c769880b10874abc9d0.jpg?s=80&d=wavatar"
)
assert device.config_picture == gravatar_url
async def test_gravatar_and_picture(hass):
"""Test that Gravatar overrides picture."""
dev_id = "test"
device = legacy.Device(
hass,
timedelta(seconds=180),
True,
dev_id,
"AB:CD:EF:GH:IJ",
"Test name",
picture="http://test.picture",
gravatar="[email protected]",
)
gravatar_url = (
"https://www.gravatar.com/avatar/"
"55502f40dc8b7c769880b10874abc9d0.jpg?s=80&d=wavatar"
)
assert device.config_picture == gravatar_url
@patch("homeassistant.components.device_tracker.legacy.DeviceTracker.see")
@patch("homeassistant.components.demo.device_tracker.setup_scanner", autospec=True)
async def test_discover_platform(mock_demo_setup_scanner, mock_see, hass):
"""Test discovery of device_tracker demo platform."""
await discovery.async_load_platform(
hass, device_tracker.DOMAIN, "demo", {"test_key": "test_val"}, {"bla": {}}
)
await hass.async_block_till_done()
assert device_tracker.DOMAIN in hass.config.components
assert mock_demo_setup_scanner.called
assert mock_demo_setup_scanner.call_args[0] == (
hass,
{},
mock_see,
{"test_key": "test_val"},
)
async def test_update_stale(hass, mock_device_tracker_conf):
"""Test stalled update."""
scanner = getattr(hass.components, "test.device_tracker").SCANNER
scanner.reset()
scanner.come_home("DEV1")
register_time = datetime(2015, 9, 15, 23, tzinfo=dt_util.UTC)
scan_time = datetime(2015, 9, 15, 23, 1, tzinfo=dt_util.UTC)
with patch(
"homeassistant.components.device_tracker.legacy.dt_util.utcnow",
return_value=register_time,
):
with assert_setup_component(1, device_tracker.DOMAIN):
assert await async_setup_component(
hass,
device_tracker.DOMAIN,
{
device_tracker.DOMAIN: {
CONF_PLATFORM: "test",
device_tracker.CONF_CONSIDER_HOME: 59,
}
},
)
await hass.async_block_till_done()
assert STATE_HOME == hass.states.get("device_tracker.dev1").state
scanner.leave_home("DEV1")
with patch(
"homeassistant.components.device_tracker.legacy.dt_util.utcnow",
return_value=scan_time,
):
async_fire_time_changed(hass, scan_time)
await hass.async_block_till_done()
assert STATE_NOT_HOME == hass.states.get("device_tracker.dev1").state
async def test_entity_attributes(hass, mock_device_tracker_conf):
"""Test the entity attributes."""
devices = mock_device_tracker_conf
dev_id = "test_entity"
entity_id = f"{const.DOMAIN}.{dev_id}"
friendly_name = "Paulus"
picture = "http://placehold.it/200x200"
icon = "mdi:kettle"
device = legacy.Device(
hass,
timedelta(seconds=180),
True,
dev_id,
None,
friendly_name,
picture,
icon=icon,
)
devices.append(device)
with assert_setup_component(1, device_tracker.DOMAIN):
assert await async_setup_component(hass, device_tracker.DOMAIN, TEST_PLATFORM)
attrs = hass.states.get(entity_id).attributes
assert friendly_name == attrs.get(ATTR_FRIENDLY_NAME)
assert icon == attrs.get(ATTR_ICON)
assert picture == attrs.get(ATTR_ENTITY_PICTURE)
@patch("homeassistant.components.device_tracker.legacy." "DeviceTracker.async_see")
async def test_see_service(mock_see, hass):
"""Test the see service with a unicode dev_id and NO MAC."""
with assert_setup_component(1, device_tracker.DOMAIN):
assert await async_setup_component(hass, device_tracker.DOMAIN, TEST_PLATFORM)
params = {
"dev_id": "some_device",
"host_name": "example.com",
"location_name": "Work",
"gps": [0.3, 0.8],
"attributes": {"test": "test"},
}
common.async_see(hass, **params)
await hass.async_block_till_done()
assert mock_see.call_count == 1
assert mock_see.call_count == 1
assert mock_see.call_args == call(**params)
mock_see.reset_mock()
params["dev_id"] += chr(233) # e' acute accent from icloud
common.async_see(hass, **params)
await hass.async_block_till_done()
assert mock_see.call_count == 1
assert mock_see.call_count == 1
assert mock_see.call_args == call(**params)
async def test_see_service_guard_config_entry(hass, mock_device_tracker_conf):
"""Test the guard if the device is registered in the entity registry."""
mock_entry = Mock()
dev_id = "test"
entity_id = f"{const.DOMAIN}.{dev_id}"
mock_registry(hass, {entity_id: mock_entry})
devices = mock_device_tracker_conf
assert await async_setup_component(hass, device_tracker.DOMAIN, TEST_PLATFORM)
params = {"dev_id": dev_id, "gps": [0.3, 0.8]}
common.async_see(hass, **params)
await hass.async_block_till_done()
assert not devices
async def test_new_device_event_fired(hass, mock_device_tracker_conf):
"""Test that the device tracker will fire an event."""
with assert_setup_component(1, device_tracker.DOMAIN):
assert await async_setup_component(hass, device_tracker.DOMAIN, TEST_PLATFORM)
test_events = []
@callback
def listener(event):
"""Record that our event got called."""
test_events.append(event)
hass.bus.async_listen("device_tracker_new_device", listener)
common.async_see(hass, "mac_1", host_name="hello")
common.async_see(hass, "mac_1", host_name="hello")
await hass.async_block_till_done()
assert len(test_events) == 1
# Assert we can serialize the event
json.dumps(test_events[0].as_dict(), cls=JSONEncoder)
assert test_events[0].data == {
"entity_id": "device_tracker.hello",
"host_name": "hello",
"mac": "MAC_1",
}
async def test_duplicate_yaml_keys(hass, mock_device_tracker_conf):
"""Test that the device tracker will not generate invalid YAML."""
devices = mock_device_tracker_conf
with assert_setup_component(1, device_tracker.DOMAIN):
assert await async_setup_component(hass, device_tracker.DOMAIN, TEST_PLATFORM)
common.async_see(hass, "mac_1", host_name="hello")
common.async_see(hass, "mac_2", host_name="hello")
await hass.async_block_till_done()
assert len(devices) == 2
assert devices[0].dev_id != devices[1].dev_id
async def test_invalid_dev_id(hass, mock_device_tracker_conf):
"""Test that the device tracker will not allow invalid dev ids."""
devices = mock_device_tracker_conf
with assert_setup_component(1, device_tracker.DOMAIN):
assert await async_setup_component(hass, device_tracker.DOMAIN, TEST_PLATFORM)
common.async_see(hass, dev_id="hello-world")
await hass.async_block_till_done()
assert not devices
async def test_see_state(hass, yaml_devices):
"""Test device tracker see records state correctly."""
assert await async_setup_component(hass, device_tracker.DOMAIN, TEST_PLATFORM)
params = {
"mac": "AA:BB:CC:DD:EE:FF",
"dev_id": "some_device",
"host_name": "example.com",
"location_name": "Work",
"gps": [0.3, 0.8],
"gps_accuracy": 1,
"battery": 100,
"attributes": {"test": "test", "number": 1},
}
common.async_see(hass, **params)
await hass.async_block_till_done()
config = await legacy.async_load_config(yaml_devices, hass, timedelta(seconds=0))
assert len(config) == 1
state = hass.states.get("device_tracker.example_com")
attrs = state.attributes
assert state.state == "Work"
assert state.object_id == "example_com"
assert state.name == "example.com"
assert attrs["friendly_name"] == "example.com"
assert attrs["battery"] == 100
assert attrs["latitude"] == 0.3
assert attrs["longitude"] == 0.8
assert attrs["test"] == "test"
assert attrs["gps_accuracy"] == 1
assert attrs["source_type"] == "gps"
assert attrs["number"] == 1
async def test_see_passive_zone_state(hass, mock_device_tracker_conf):
"""Test that the device tracker sets gps for passive trackers."""
register_time = datetime(2015, 9, 15, 23, tzinfo=dt_util.UTC)
scan_time = datetime(2015, 9, 15, 23, 1, tzinfo=dt_util.UTC)
with assert_setup_component(1, zone.DOMAIN):
zone_info = {
"name": "Home",
"latitude": 1,
"longitude": 2,
"radius": 250,
"passive": False,
}
await async_setup_component(hass, zone.DOMAIN, {"zone": zone_info})
scanner = getattr(hass.components, "test.device_tracker").SCANNER
scanner.reset()
scanner.come_home("dev1")
with patch(
"homeassistant.components.device_tracker.legacy.dt_util.utcnow",
return_value=register_time,
):
with assert_setup_component(1, device_tracker.DOMAIN):
assert await async_setup_component(
hass,
device_tracker.DOMAIN,
{
device_tracker.DOMAIN: {
CONF_PLATFORM: "test",
device_tracker.CONF_CONSIDER_HOME: 59,
}
},
)
await hass.async_block_till_done()
state = hass.states.get("device_tracker.dev1")
attrs = state.attributes
assert STATE_HOME == state.state
assert state.object_id == "dev1"
assert state.name == "dev1"
assert attrs.get("friendly_name") == "dev1"
assert attrs.get("latitude") == 1
assert attrs.get("longitude") == 2
assert attrs.get("gps_accuracy") == 0
assert attrs.get("source_type") == device_tracker.SOURCE_TYPE_ROUTER
scanner.leave_home("dev1")
with patch(
"homeassistant.components.device_tracker.legacy.dt_util.utcnow",
return_value=scan_time,
):
async_fire_time_changed(hass, scan_time)
await hass.async_block_till_done()
state = hass.states.get("device_tracker.dev1")
attrs = state.attributes
assert STATE_NOT_HOME == state.state
assert state.object_id == "dev1"
assert state.name == "dev1"
assert attrs.get("friendly_name") == "dev1"
assert attrs.get("latitude") is None
assert attrs.get("longitude") is None
assert attrs.get("gps_accuracy") is None
assert attrs.get("source_type") == device_tracker.SOURCE_TYPE_ROUTER
@patch("homeassistant.components.device_tracker.const.LOGGER.warning")
async def test_see_failures(mock_warning, hass, mock_device_tracker_conf):
"""Test that the device tracker see failures."""
devices = mock_device_tracker_conf
tracker = legacy.DeviceTracker(hass, timedelta(seconds=60), 0, {}, [])
# MAC is not a string (but added)
await tracker.async_see(mac=567, host_name="Number MAC")
# No device id or MAC(not added)
with pytest.raises(HomeAssistantError):
await tracker.async_see()
assert mock_warning.call_count == 0
# Ignore gps on invalid GPS (both added & warnings)
await tracker.async_see(mac="mac_1_bad_gps", gps=1)
await tracker.async_see(mac="mac_2_bad_gps", gps=[1])
await tracker.async_see(mac="mac_3_bad_gps", gps="gps")
await hass.async_block_till_done()
assert mock_warning.call_count == 3
assert len(devices) == 4
async def test_async_added_to_hass(hass):
"""Test restoring state."""
attr = {
ATTR_LONGITUDE: 18,
ATTR_LATITUDE: -33,
const.ATTR_SOURCE_TYPE: "gps",
ATTR_GPS_ACCURACY: 2,
const.ATTR_BATTERY: 100,
}
mock_restore_cache(hass, [State("device_tracker.jk", "home", attr)])
path = hass.config.path(legacy.YAML_DEVICES)
files = {path: "jk:\n name: JK Phone\n track: True"}
with patch_yaml_files(files):
assert await async_setup_component(hass, device_tracker.DOMAIN, {})
state = hass.states.get("device_tracker.jk")
assert state
assert state.state == "home"
for key, val in attr.items():
atr = state.attributes.get(key)
assert atr == val, f"{key}={atr} expected: {val}"
async def test_bad_platform(hass):
"""Test bad platform."""
config = {"device_tracker": [{"platform": "bad_platform"}]}
with assert_setup_component(0, device_tracker.DOMAIN):
assert await async_setup_component(hass, device_tracker.DOMAIN, config)
async def test_adding_unknown_device_to_config(mock_device_tracker_conf, hass):
"""Test the adding of unknown devices to configuration file."""
scanner = getattr(hass.components, "test.device_tracker").SCANNER
scanner.reset()
scanner.come_home("DEV1")
await async_setup_component(
hass, device_tracker.DOMAIN, {device_tracker.DOMAIN: {CONF_PLATFORM: "test"}}
)
await hass.async_block_till_done()
assert len(mock_device_tracker_conf) == 1
device = mock_device_tracker_conf[0]
assert device.dev_id == "dev1"
assert device.track
async def test_picture_and_icon_on_see_discovery(mock_device_tracker_conf, hass):
"""Test that picture and icon are set in initial see."""
tracker = legacy.DeviceTracker(hass, timedelta(seconds=60), False, {}, [])
await tracker.async_see(dev_id=11, picture="pic_url", icon="mdi:icon")
await hass.async_block_till_done()
assert len(mock_device_tracker_conf) == 1
assert mock_device_tracker_conf[0].icon == "mdi:icon"
assert mock_device_tracker_conf[0].entity_picture == "pic_url"
async def test_backward_compatibility_for_track_new(mock_device_tracker_conf, hass):
"""Test backward compatibility for track new."""
tracker = legacy.DeviceTracker(
hass, timedelta(seconds=60), False, {device_tracker.CONF_TRACK_NEW: True}, []
)
await tracker.async_see(dev_id=13)
await hass.async_block_till_done()
assert len(mock_device_tracker_conf) == 1
assert mock_device_tracker_conf[0].track is False
async def test_old_style_track_new_is_skipped(mock_device_tracker_conf, hass):
"""Test old style config is skipped."""
tracker = legacy.DeviceTracker(
hass, timedelta(seconds=60), None, {device_tracker.CONF_TRACK_NEW: False}, []
)
await tracker.async_see(dev_id=14)
await hass.async_block_till_done()
assert len(mock_device_tracker_conf) == 1
assert mock_device_tracker_conf[0].track is False
def test_see_schema_allowing_ios_calls():
"""Test SEE service schema allows extra keys.
Temp work around because the iOS app sends incorrect data.
"""
device_tracker.SERVICE_SEE_PAYLOAD_SCHEMA(
{
"dev_id": "Test",
"battery": 35,
"battery_status": "Not Charging",
"gps": [10.0, 10.0],
"gps_accuracy": 300,
"hostname": "beer",
}
)
|
py | 1a366bc997919632c2fe908056783a75e4298957 | # cta_apply2(test_images{l},model_orient,'padding',0,'precision','single')
import numpy as np
import scipy.ndimage
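# NOTE: Cta_chog and Cta_fspecial (used below) are assumed to be companion
# classes from other modules of this project; their imports are not shown in
# this file as given.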
class Cta_apply2():
def __init__(self, image, model, padding=0, precision='complex64'):
self.precision=precision # 'double'
self.verbosity=0
self.padding=padding
self.image=image
self.model=model
self.shape=image.shape
# if padding>0
# padded_shape=cta_fft_bestshape(shape+padding)
# original_shape=shape
# img=zeros(padded_shape)
# img(1:shape(1),1:shape(2))=image
# image=img
# end
# shape=size(image)
self.complex_derivatives=np.array([[0,0,(-1j)/8,0,0],
[0,0,1j,0,0],
[(-1)/8,1 ,0,-1, (1)/8],
[0,0,-1j,0,0],
[0,0,(1j)/8,0,0]])
self.complex_derivatives=np.conj(self.complex_derivatives)
def cta_apply2(self):
chog=Cta_chog(self.image, self.model['L'], self.precision, self.model['chog_options']).cta_chog()
#model[4] = model.chog_options
# chog=Cta_chog(image, self.L, self.precision, self.chog_options).cta_chog()
num_products=self.model['products'].shape[0] #size(model.products,1)
H=np.zeros(self.shape, dtype=self.precision) # zero array with the same shape as the test image
for vf in range(len(self.model['v_sigma'])): #=1:numel(model.v_sigma),
L= self.model['products'][0, -2] #model.products(1,end-1)
# print('L: ' + str(L))
H_tmp=np.zeros(self.shape,self.precision)
for p in range(num_products):
product=self.model['products'][p,:] #model.products(p,:)
if product[2] != 0:
A=np.conj(np.squeeze(chog[int(product[0]-1)]['data'][int(product[1])]))
else:
A=np.squeeze(chog[int(product[0]-1)]['data'][int(product[1])])
if product[3]==-1: # no second (or later) window function
if self.verbosity>1:
print('(%d) [%d]%d -> %d' % (product[0],(-1)**product[2],product[1],product[1]))
tmp=A
else:
if product[5] != 0:
B=np.conj(np.squeeze(chog[int(product[3]-1)]['data'][int(product[4])]))
else:
B=np.squeeze(chog[int(product[3]-1)]['data'][int(product[4])])
if product[6]==-1: # no third window function
if self.verbosity>1:
print('(%d) [%d]%d x (%d) [%d]%d -> %d' % (product[0],(-1)**product[2],product[1],product[3],(-1)**product[5],product[4],product[9]))
tmp=A*B
else:
if product[8] != 0:
C=np.conj(np.squeeze(chog[int(product[6]-1)]['data'][int(product[7])]))
else:
C=np.squeeze(chog[int(product[6]-1)]['data'][int(product[7])])
if self.verbosity>1:
print('(%d) [%d]%d x (%d) [%d]%d x (%d) [%d]%d -> %d' % (product[0],(-1)**product[2],product[1],product[3],(-1)**product[5],product[4],product[6],(-1)**product[8],product[7],product[9]))
tmp=A*B*C
l=product[-2]
while l<L:
L=L-1
# H_tmp=imfilter(H_tmp,complex_derivatives,model.filter_mode)
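# The complex correlation is computed by linearity: the real and imaginary
# parts of H_tmp are each correlated with the real and imaginary parts of the
# kernel, and the four real-valued results are recombined below (the same
# decomposition is repeated in the analogous block further down).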
H_tmp_x_real=scipy.ndimage.correlate(np.real(H_tmp), np.real(self.complex_derivatives), mode=self.model['filter_mode'])
H_tmp_y_real=scipy.ndimage.correlate(np.real(H_tmp) , np.imag(self.complex_derivatives), mode=self.model['filter_mode'])
H_tmp_x_imag=scipy.ndimage.correlate(np.imag(H_tmp), np.real(self.complex_derivatives), mode=self.model['filter_mode'])
H_tmp_y_imag=scipy.ndimage.correlate(np.imag(H_tmp) , np.imag(self.complex_derivatives), mode=self.model['filter_mode'])
H_tmp= H_tmp_x_real+ 1j*H_tmp_y_real + 1j*H_tmp_x_imag - H_tmp_y_imag
H_tmp = H_tmp + self.model['alpha'][vf][p]*tmp
while L>self.model['output_order']: #(L>model.output_order)
L=L-1
# H_tmp=imfilter(H_tmp,complex_derivatives,model.filter_mode)
H_tmp_x_real=scipy.ndimage.correlate(np.real(H_tmp), np.real(self.complex_derivatives), mode=self.model['filter_mode'])
H_tmp_y_real=scipy.ndimage.correlate(np.real(H_tmp) , np.imag(self.complex_derivatives), mode=self.model['filter_mode'])
H_tmp_x_imag=scipy.ndimage.correlate(np.imag(H_tmp), np.real(self.complex_derivatives), mode=self.model['filter_mode'])
H_tmp_y_imag=scipy.ndimage.correlate(np.imag(H_tmp) , np.imag(self.complex_derivatives), mode=self.model['filter_mode'])
H_tmp= H_tmp_x_real+ 1j*H_tmp_y_real + 1j*H_tmp_x_imag - H_tmp_y_imag
# ft_kernel=fftn(cta_fspecial('gauss',model.v_sigma(vf),shape,false,precision))
ft_kernel=np.fft.fftn(Cta_fspecial(self.shape, 'gauss', self.model['v_sigma'][vf], False, self.precision).cta_fspecial())
if self.model['output_order']==0:
H=H+np.real(np.fft.ifftn(np.fft.fftn(H_tmp)*ft_kernel))
else:
H=H+np.fft.ifftn(np.fft.fftn(H_tmp)*ft_kernel)
if self.model['output_order']>0: #(model.output_order>0)
# H=abs(H).*(H./abs(H)).^(1/model.output_order)
H=np.abs(H)*(H/np.abs(H))**(1/self.model['output_order'])
# if padding>0
# H=H(1:original_shape(1),1:original_shape(2))
Mask=np.zeros(H.shape)
border=int(np.ceil(np.max(self.model['v_sigma']))) # ceil(max(model.v_sigma))
Mask[border-1:Mask.shape[0]-border+1, border-1:Mask.shape[1]-border+1]=1
H[Mask==0]=0
return H
|
py | 1a366c8789231305e0120f90b43bdefd0735d291 | ##script for finding the overlap in the top 100 most significant genes in each cancer and plotting results
##load necessary modules
import pylab as plt
import numpy as np
import math
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
##I did not write this function, from http://depts.washington.edu/clawpack/clawpack-4.6.3/python/pyclaw/plotters/colormaps.py
##-------------------------
def make_colormap(colors):
##-------------------------
"""
Define a new color map based on values specified in the dictionary
colors, where colors[z] is the color that value z should be mapped to,
with linear interpolation between the given values of z.
The z values (dictionary keys) are real numbers and the values
colors[z] can be either an RGB list, e.g. [1,0,0] for red, or an
html hex string, e.g. "#ff0000" for red.
"""
from matplotlib.colors import LinearSegmentedColormap, ColorConverter
from numpy import sort
z = sort(colors.keys())
n = len(z)
z1 = min(z)
zn = max(z)
x0 = (z - z1) / (zn - z1)
CC = ColorConverter()
R = []
G = []
B = []
for i in range(n):
#i'th color at level z[i]:
Ci = colors[z[i]]
if type(Ci) == str:
# a hex string of form '#ff0000' for example (for red)
RGB = CC.to_rgb(Ci)
else:
# assume it's an RGB triple already:
RGB = Ci
R.append(RGB[0])
G.append(RGB[1])
B.append(RGB[2])
cmap_dict = {}
cmap_dict['red'] = [(x0[i],R[i],R[i]) for i in range(len(R))]
cmap_dict['green'] = [(x0[i],G[i],G[i]) for i in range(len(G))]
cmap_dict['blue'] = [(x0[i],B[i],B[i]) for i in range(len(B))]
mymap = LinearSegmentedColormap('mymap',cmap_dict)
return mymap
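# Minimal usage sketch (illustrative, not part of the original script): the
# dictionary keys are data values and the dictionary values are colors, with
# linear interpolation between the given values. Example (hypothetical values):
#   simple_map = make_colormap({0.0: '#ffffff', 0.5: [0, 1, 0], 1.0: '#ff0000'})
#   plt.imshow(np.random.rand(8, 8), cmap=simple_map)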
def compare3(first,second):
if float(first[-1])>float(second[-1]):
return 1
elif float(first[-1])<float(second[-1]):
return -1
else:
return 0
##get the 100 most significant genes for each cancer
f=open(os.path.join(BASE_DIR,'cox_regression','BLCA','coeffs_pvalues.txt'))
BLCA=[i.strip().split() for i in f]
BLCA.sort(cmp=compare3)
BLCA_dict_100={}
for i in BLCA[:100]:
BLCA_dict_100[i[0]]=''
f=open(os.path.join(BASE_DIR,'cox_regression','LGG','coeffs_pvalues.txt'))
LGG=[i.strip().split() for i in f]
LGG.sort(cmp=compare3)
LGG_dict_100={}
for i in LGG[:100]:
LGG_dict_100[i[0]]=''
f=open(os.path.join(BASE_DIR,'cox_regression','BRCA','coeffs_pvalues.txt'))
BRCA=[i.strip().split() for i in f]
BRCA.sort(cmp=compare3)
BRCA_dict_100={}
for i in BRCA[:100]:
BRCA_dict_100[i[0]]=''
f=open(os.path.join(BASE_DIR,'cox_regression','CESC','coeffs_pvalues.txt'))
CESC=[i.strip().split() for i in f]
CESC.sort(cmp=compare3)
CESC_dict_100={}
for i in CESC[:100]:
CESC_dict_100[i[0]]=''
f=open(os.path.join(BASE_DIR,'cox_regression','COAD','coeffs_pvalues.txt'))
COAD=[i.strip().split() for i in f]
COAD.sort(cmp=compare3)
COAD_dict_100={}
for i in COAD[:100]:
COAD_dict_100[i[0]]=''
f=open(os.path.join(BASE_DIR,'cox_regression','GBM','coeffs_pvalues.txt'))
GBM=[i.strip().split() for i in f]
GBM.sort(cmp=compare3)
GBM_dict_100={}
for i in GBM[:100]:
GBM_dict_100[i[0]]=''
f=open(os.path.join(BASE_DIR,'cox_regression','HNSC','coeffs_pvalues.txt'))
HNSC=[i.strip().split() for i in f]
HNSC.sort(cmp=compare3)
HNSC_dict_100={}
for i in HNSC[:100]:
HNSC_dict_100[i[0]]=''
f=open(os.path.join(BASE_DIR,'cox_regression','KIRC','coeffs_pvalues.txt'))
KIRC=[i.strip().split() for i in f]
KIRC.sort(cmp=compare3)
KIRC_dict_100={}
for i in KIRC[:100]:
KIRC_dict_100[i[0]]=''
f=open(os.path.join(BASE_DIR,'cox_regression','KIRP','coeffs_pvalues.txt'))
KIRP=[i.strip().split() for i in f]
KIRP.sort(cmp=compare3)
KIRP_dict_100={}
for i in KIRP[:100]:
KIRP_dict_100[i[0]]=''
f=open(os.path.join(BASE_DIR,'cox_regression','LAML','coeffs_pvalues.txt'))
LAML=[i.strip().split() for i in f]
LAML.sort(cmp=compare3)
LAML_dict_100={}
for i in LAML[:100]:
LAML_dict_100[i[0]]=''
f=open(os.path.join(BASE_DIR,'cox_regression','LIHC','coeffs_pvalues.txt'))
LIHC=[i.strip().split() for i in f]
LIHC.sort(cmp=compare3)
LIHC_dict_100={}
for i in LIHC[:100]:
LIHC_dict_100[i[0]]=''
f=open(os.path.join(BASE_DIR,'cox_regression','LUAD','coeffs_pvalues.txt'))
LUAD=[i.strip().split() for i in f]
LUAD.sort(cmp=compare3)
LUAD_dict_100={}
for i in LUAD[:100]:
LUAD_dict_100[i[0]]=''
f=open(os.path.join(BASE_DIR,'cox_regression','LUSC','coeffs_pvalues.txt'))
LUSC=[i.strip().split() for i in f]
LUSC.sort(cmp=compare3)
LUSC_dict_100={}
for i in LUSC[:100]:
LUSC_dict_100[i[0]]=''
f=open(os.path.join(BASE_DIR,'cox_regression','SKCM','coeffs_pvalues.txt'))
SKCM=[i.strip().split() for i in f]
SKCM.sort(cmp=compare3)
SKCM_dict_100={}
for i in SKCM[:100]:
SKCM_dict_100[i[0]]=''
f=open(os.path.join(BASE_DIR,'cox_regression','OV','coeffs_pvalues.txt'))
OV=[i.strip().split() for i in f]
OV.sort(cmp=compare3)
OV_dict_100={}
for i in OV[:100]:
OV_dict_100[i[0]]=''
f=open(os.path.join(BASE_DIR,'cox_regression','STAD','coeffs_pvalues.txt'))
STAD=[i.strip().split() for i in f]
STAD.sort(cmp=compare3)
STAD_dict_100={}
for i in STAD[:100]:
STAD_dict_100[i[0]]=''
all_cancers=[BLCA_dict_100,BRCA_dict_100,CESC_dict_100,COAD_dict_100,\
GBM_dict_100,HNSC_dict_100,KIRC_dict_100,KIRP_dict_100,LAML_dict_100,\
LGG_dict_100,LIHC_dict_100,LUAD_dict_100,LUSC_dict_100,OV_dict_100,\
SKCM_dict_100,STAD_dict_100]
final_array=[]
for i in all_cancers[::-1]:
temp=[]
for j in all_cancers[::-1]:
##compute overlap
temp.append(len([k for k in j if k in i]))
final_array.append(temp)
##create a custom colormap
blue_yellow_red = make_colormap({0:'w',.05:'#85A3E0',.1:'#3366CC',.2:'#00FF00',.3:'#FFFF66',0.4:'#FF9966', 1:'#CC3300'})
##plot
Z=np.array(final_array)
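##only the upper triangle of the symmetric overlap matrix is drawn: np.tri with
##k=-1 builds a strictly-lower-triangle mask that np.ma.array then hides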
mask=np.tri(Z.shape[0],k=-1)
Z= np.ma.array(Z, mask=mask)
fig = plt.figure()
fig.subplots_adjust(bottom=.15)
fig.subplots_adjust(left=.15)
ax = fig.add_subplot(111)
figure=ax.imshow(Z,cmap=blue_yellow_red,interpolation="nearest")
cbar=fig.colorbar(figure,pad=.02)
cbar.ax.tick_params(labelsize=40)
cbar.set_label('number of genes', rotation=270,fontsize=80,labelpad=25)
ax.set_yticks([i for i in range(0,16)])
ax.set_yticklabels(['BLCA','BRCA','CESC','COAD','GBM','HNSC','KIRC','KIRP','LAML','LGG','LIHC','LUAD','LUSC','OV','SKCM','STAD'][::-1])
ax.tick_params(axis='y',labelsize=40)
ax.set_xticks([i for i in range(0,16)])
ax.set_xticklabels(['BLCA','BRCA','CESC','COAD','GBM','HNSC','KIRC','KIRP','LAML','LGG','LIHC','LUAD','LUSC','OV','SKCM','STAD'][::-1],rotation=90)
ax.tick_params(axis='x',labelsize=40)
ax.tick_params(axis='x',length=0,width=0)
ax.tick_params(axis='y',length=0,width=0)
ax.invert_yaxis()
ax.invert_xaxis()
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
plt.show()
|
py | 1a366d126eb5a40c7f41663de5ed7853ac748721 | # Original Code: https://github.com/nrsyed/computer-vision/blob/master/multithread/CountsPerSec.py
# Modified for use in PyPotter
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from datetime import datetime
class CountsPerSec:
"""
Class that tracks the number of occurrences ("counts") of an
arbitrary event and returns the frequency in occurrences
(counts) per second. The caller must increment the count.
"""
def __init__(self):
self._SmoothingFactor = 90
self._timeList = []
def countsPerSec(self):
self._timeList.append(datetime.now())
if (len(self._timeList) > self._SmoothingFactor):
self._timeList.pop(0)
elapsed_time = (self._timeList[-1] - self._timeList[0]).total_seconds()
if (elapsed_time > 0):
return len(self._timeList) / elapsed_time
return 0
|
py | 1a366d482c579d0ce167da451c6863dcd97e1bbb | """
Shows the 20 most important amino acids; can be used to learn them.
"""
from setuptools import setup, find_packages
dependencies = ["pyqt5", "pandas"]
opt_dependencies = []
setup(
name="amino-acids-tutor",
version="1.0",
author="Luka Jeromel",
author_email="[email protected]",
description="Shows desired Amino Acid",
long_description=__doc__,
packages=find_packages(exclude=["tests"]),
# modules=["amino_acids"],
install_requires=dependencies,
extras_require={"optional": opt_dependencies},
entry_points={
# "console_scripts": ["luka-led-display=led_display.__main__:main"],
"gui_scripts": ["amino-acids=amino_acids.__main__:main"],
},
)
|
py | 1a366d5bec0f9cfce7263998e0296681759b90b4 | from fastapi.testclient import TestClient
from main import app
client = TestClient(app)
def test_vowels_post_expected_code():
response = client.post("/vowels/", json={"line": "HOLA"})
assert response.status_code == 200
def test_vowels_post_result():
response = client.post("/vowels/", json={"line": "HOLA"})
assert response.json() == {
"data": {"vowels_count": 2, "new_line": "HULE"},
"message": "success",
"code": 200,
}
|
py | 1a366fd1caec0443296511a0065ca83a7b5c4eb1 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._enums import *
__all__ = ['Database']
warnings.warn("""The 'latest' version is deprecated. Please migrate to the resource in the top-level module: 'azure-native:sql:Database'.""", DeprecationWarning)
class Database(pulumi.CustomResource):
warnings.warn("""The 'latest' version is deprecated. Please migrate to the resource in the top-level module: 'azure-native:sql:Database'.""", DeprecationWarning)
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
collation: Optional[pulumi.Input[str]] = None,
create_mode: Optional[pulumi.Input[Union[str, 'CreateMode']]] = None,
database_name: Optional[pulumi.Input[str]] = None,
edition: Optional[pulumi.Input[Union[str, 'DatabaseEdition']]] = None,
elastic_pool_name: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
max_size_bytes: Optional[pulumi.Input[str]] = None,
read_scale: Optional[pulumi.Input[Union[str, 'ReadScale']]] = None,
recovery_services_recovery_point_resource_id: Optional[pulumi.Input[str]] = None,
requested_service_objective_id: Optional[pulumi.Input[str]] = None,
requested_service_objective_name: Optional[pulumi.Input[Union[str, 'ServiceObjectiveName']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
restore_point_in_time: Optional[pulumi.Input[str]] = None,
sample_name: Optional[pulumi.Input[Union[str, 'SampleName']]] = None,
server_name: Optional[pulumi.Input[str]] = None,
source_database_deletion_date: Optional[pulumi.Input[str]] = None,
source_database_id: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
zone_redundant: Optional[pulumi.Input[bool]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Represents a database.
Latest API Version: 2014-04-01.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] collation: The collation of the database. If createMode is not Default, this value is ignored.
:param pulumi.Input[Union[str, 'CreateMode']] create_mode: Specifies the mode of database creation.
Default: regular database creation.
Copy: creates a database as a copy of an existing database. sourceDatabaseId must be specified as the resource ID of the source database.
OnlineSecondary/NonReadableSecondary: creates a database as a (readable or nonreadable) secondary replica of an existing database. sourceDatabaseId must be specified as the resource ID of the existing primary database.
PointInTimeRestore: Creates a database by restoring a point in time backup of an existing database. sourceDatabaseId must be specified as the resource ID of the existing database, and restorePointInTime must be specified.
Recovery: Creates a database by restoring a geo-replicated backup. sourceDatabaseId must be specified as the recoverable database resource ID to restore.
Restore: Creates a database by restoring a backup of a deleted database. sourceDatabaseId must be specified. If sourceDatabaseId is the database's original resource ID, then sourceDatabaseDeletionDate must be specified. Otherwise sourceDatabaseId must be the restorable dropped database resource ID and sourceDatabaseDeletionDate is ignored. restorePointInTime may also be specified to restore from an earlier point in time.
RestoreLongTermRetentionBackup: Creates a database by restoring from a long term retention vault. recoveryServicesRecoveryPointResourceId must be specified as the recovery point resource ID.
Copy, NonReadableSecondary, OnlineSecondary and RestoreLongTermRetentionBackup are not supported for DataWarehouse edition.
:param pulumi.Input[str] database_name: The name of the database to be operated on (updated or created).
:param pulumi.Input[Union[str, 'DatabaseEdition']] edition: The edition of the database. The DatabaseEditions enumeration contains all the valid editions. If createMode is NonReadableSecondary or OnlineSecondary, this value is ignored.
The list of SKUs may vary by region and support offer. To determine the SKUs (including the SKU name, tier/edition, family, and capacity) that are available to your subscription in an Azure region, use the `Capabilities_ListByLocation` REST API or one of the following commands:
```azurecli
az sql db list-editions -l <location> -o table
````
```powershell
Get-AzSqlServerServiceObjective -Location <location>
````
:param pulumi.Input[str] elastic_pool_name: The name of the elastic pool the database is in. If elasticPoolName and requestedServiceObjectiveName are both updated, the value of requestedServiceObjectiveName is ignored. Not supported for DataWarehouse edition.
:param pulumi.Input[str] location: Resource location.
:param pulumi.Input[str] max_size_bytes: The max size of the database expressed in bytes. If createMode is not Default, this value is ignored. To see possible values, query the capabilities API (/subscriptions/{subscriptionId}/providers/Microsoft.Sql/locations/{locationID}/capabilities) referred to by operationId: "Capabilities_ListByLocation."
:param pulumi.Input[Union[str, 'ReadScale']] read_scale: Conditional. If the database is a geo-secondary, readScale indicates whether read-only connections are allowed to this database or not. Not supported for DataWarehouse edition.
:param pulumi.Input[str] recovery_services_recovery_point_resource_id: Conditional. If createMode is RestoreLongTermRetentionBackup, then this value is required. Specifies the resource ID of the recovery point to restore from.
:param pulumi.Input[str] requested_service_objective_id: The configured service level objective ID of the database. This is the service level objective that is in the process of being applied to the database. Once successfully updated, it will match the value of currentServiceObjectiveId property. If requestedServiceObjectiveId and requestedServiceObjectiveName are both updated, the value of requestedServiceObjectiveId overrides the value of requestedServiceObjectiveName.
The list of SKUs may vary by region and support offer. To determine the service objective ids that are available to your subscription in an Azure region, use the `Capabilities_ListByLocation` REST API.
:param pulumi.Input[Union[str, 'ServiceObjectiveName']] requested_service_objective_name: The name of the configured service level objective of the database. This is the service level objective that is in the process of being applied to the database. Once successfully updated, it will match the value of serviceLevelObjective property.
The list of SKUs may vary by region and support offer. To determine the SKUs (including the SKU name, tier/edition, family, and capacity) that are available to your subscription in an Azure region, use the `Capabilities_ListByLocation` REST API or one of the following commands:
```azurecli
az sql db list-editions -l <location> -o table
````
```powershell
Get-AzSqlServerServiceObjective -Location <location>
````
:param pulumi.Input[str] resource_group_name: The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
:param pulumi.Input[str] restore_point_in_time: Conditional. If createMode is PointInTimeRestore, this value is required. If createMode is Restore, this value is optional. Specifies the point in time (ISO8601 format) of the source database that will be restored to create the new database. Must be greater than or equal to the source database's earliestRestoreDate value.
:param pulumi.Input[Union[str, 'SampleName']] sample_name: Indicates the name of the sample schema to apply when creating this database. If createMode is not Default, this value is ignored. Not supported for DataWarehouse edition.
:param pulumi.Input[str] server_name: The name of the server.
:param pulumi.Input[str] source_database_deletion_date: Conditional. If createMode is Restore and sourceDatabaseId is the deleted database's original resource id when it existed (as opposed to its current restorable dropped database id), then this value is required. Specifies the time that the database was deleted.
:param pulumi.Input[str] source_database_id: Conditional. If createMode is Copy, NonReadableSecondary, OnlineSecondary, PointInTimeRestore, Recovery, or Restore, then this value is required. Specifies the resource ID of the source database. If createMode is NonReadableSecondary or OnlineSecondary, the name of the source database must be the same as the new database being created.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
:param pulumi.Input[bool] zone_redundant: Whether or not this database is zone redundant, which means the replicas of this database will be spread across multiple availability zones.
"""
pulumi.log.warn("""Database is deprecated: The 'latest' version is deprecated. Please migrate to the resource in the top-level module: 'azure-native:sql:Database'.""")
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['collation'] = collation
__props__['create_mode'] = create_mode
__props__['database_name'] = database_name
__props__['edition'] = edition
__props__['elastic_pool_name'] = elastic_pool_name
__props__['location'] = location
__props__['max_size_bytes'] = max_size_bytes
__props__['read_scale'] = read_scale
__props__['recovery_services_recovery_point_resource_id'] = recovery_services_recovery_point_resource_id
__props__['requested_service_objective_id'] = requested_service_objective_id
__props__['requested_service_objective_name'] = requested_service_objective_name
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['restore_point_in_time'] = restore_point_in_time
__props__['sample_name'] = sample_name
if server_name is None and not opts.urn:
raise TypeError("Missing required property 'server_name'")
__props__['server_name'] = server_name
__props__['source_database_deletion_date'] = source_database_deletion_date
__props__['source_database_id'] = source_database_id
__props__['tags'] = tags
__props__['zone_redundant'] = zone_redundant
__props__['containment_state'] = None
__props__['creation_date'] = None
__props__['current_service_objective_id'] = None
__props__['database_id'] = None
__props__['default_secondary_location'] = None
__props__['earliest_restore_date'] = None
__props__['failover_group_id'] = None
__props__['kind'] = None
__props__['name'] = None
__props__['recommended_index'] = None
__props__['service_level_objective'] = None
__props__['service_tier_advisors'] = None
__props__['status'] = None
__props__['transparent_data_encryption'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:sql/latest:Database"), pulumi.Alias(type_="azure-native:sql:Database"), pulumi.Alias(type_="azure-nextgen:sql:Database"), pulumi.Alias(type_="azure-native:sql/v20140401:Database"), pulumi.Alias(type_="azure-nextgen:sql/v20140401:Database"), pulumi.Alias(type_="azure-native:sql/v20170301preview:Database"), pulumi.Alias(type_="azure-nextgen:sql/v20170301preview:Database"), pulumi.Alias(type_="azure-native:sql/v20171001preview:Database"), pulumi.Alias(type_="azure-nextgen:sql/v20171001preview:Database"), pulumi.Alias(type_="azure-native:sql/v20190601preview:Database"), pulumi.Alias(type_="azure-nextgen:sql/v20190601preview:Database"), pulumi.Alias(type_="azure-native:sql/v20200202preview:Database"), pulumi.Alias(type_="azure-nextgen:sql/v20200202preview:Database"), pulumi.Alias(type_="azure-native:sql/v20200801preview:Database"), pulumi.Alias(type_="azure-nextgen:sql/v20200801preview:Database")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(Database, __self__).__init__(
'azure-native:sql/latest:Database',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'Database':
"""
Get an existing Database resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["collation"] = None
__props__["containment_state"] = None
__props__["create_mode"] = None
__props__["creation_date"] = None
__props__["current_service_objective_id"] = None
__props__["database_id"] = None
__props__["default_secondary_location"] = None
__props__["earliest_restore_date"] = None
__props__["edition"] = None
__props__["elastic_pool_name"] = None
__props__["failover_group_id"] = None
__props__["kind"] = None
__props__["location"] = None
__props__["max_size_bytes"] = None
__props__["name"] = None
__props__["read_scale"] = None
__props__["recommended_index"] = None
__props__["recovery_services_recovery_point_resource_id"] = None
__props__["requested_service_objective_id"] = None
__props__["requested_service_objective_name"] = None
__props__["restore_point_in_time"] = None
__props__["sample_name"] = None
__props__["service_level_objective"] = None
__props__["service_tier_advisors"] = None
__props__["source_database_deletion_date"] = None
__props__["source_database_id"] = None
__props__["status"] = None
__props__["tags"] = None
__props__["transparent_data_encryption"] = None
__props__["type"] = None
__props__["zone_redundant"] = None
return Database(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def collation(self) -> pulumi.Output[Optional[str]]:
"""
The collation of the database. If createMode is not Default, this value is ignored.
"""
return pulumi.get(self, "collation")
@property
@pulumi.getter(name="containmentState")
def containment_state(self) -> pulumi.Output[float]:
"""
The containment state of the database.
"""
return pulumi.get(self, "containment_state")
@property
@pulumi.getter(name="createMode")
def create_mode(self) -> pulumi.Output[Optional[str]]:
"""
Specifies the mode of database creation.
Default: regular database creation.
Copy: creates a database as a copy of an existing database. sourceDatabaseId must be specified as the resource ID of the source database.
OnlineSecondary/NonReadableSecondary: creates a database as a (readable or nonreadable) secondary replica of an existing database. sourceDatabaseId must be specified as the resource ID of the existing primary database.
PointInTimeRestore: Creates a database by restoring a point in time backup of an existing database. sourceDatabaseId must be specified as the resource ID of the existing database, and restorePointInTime must be specified.
Recovery: Creates a database by restoring a geo-replicated backup. sourceDatabaseId must be specified as the recoverable database resource ID to restore.
Restore: Creates a database by restoring a backup of a deleted database. sourceDatabaseId must be specified. If sourceDatabaseId is the database's original resource ID, then sourceDatabaseDeletionDate must be specified. Otherwise sourceDatabaseId must be the restorable dropped database resource ID and sourceDatabaseDeletionDate is ignored. restorePointInTime may also be specified to restore from an earlier point in time.
RestoreLongTermRetentionBackup: Creates a database by restoring from a long term retention vault. recoveryServicesRecoveryPointResourceId must be specified as the recovery point resource ID.
Copy, NonReadableSecondary, OnlineSecondary and RestoreLongTermRetentionBackup are not supported for DataWarehouse edition.
"""
return pulumi.get(self, "create_mode")
@property
@pulumi.getter(name="creationDate")
def creation_date(self) -> pulumi.Output[str]:
"""
The creation date of the database (ISO8601 format).
"""
return pulumi.get(self, "creation_date")
@property
@pulumi.getter(name="currentServiceObjectiveId")
def current_service_objective_id(self) -> pulumi.Output[str]:
"""
The current service level objective ID of the database. This is the ID of the service level objective that is currently active.
"""
return pulumi.get(self, "current_service_objective_id")
@property
@pulumi.getter(name="databaseId")
def database_id(self) -> pulumi.Output[str]:
"""
The ID of the database.
"""
return pulumi.get(self, "database_id")
@property
@pulumi.getter(name="defaultSecondaryLocation")
def default_secondary_location(self) -> pulumi.Output[str]:
"""
The default secondary region for this database.
"""
return pulumi.get(self, "default_secondary_location")
@property
@pulumi.getter(name="earliestRestoreDate")
def earliest_restore_date(self) -> pulumi.Output[str]:
"""
This records the earliest start date and time that restore is available for this database (ISO8601 format).
"""
return pulumi.get(self, "earliest_restore_date")
@property
@pulumi.getter
def edition(self) -> pulumi.Output[Optional[str]]:
"""
The edition of the database. The DatabaseEditions enumeration contains all the valid editions. If createMode is NonReadableSecondary or OnlineSecondary, this value is ignored.
The list of SKUs may vary by region and support offer. To determine the SKUs (including the SKU name, tier/edition, family, and capacity) that are available to your subscription in an Azure region, use the `Capabilities_ListByLocation` REST API or one of the following commands:
```azurecli
az sql db list-editions -l <location> -o table
```
```powershell
Get-AzSqlServerServiceObjective -Location <location>
```
"""
return pulumi.get(self, "edition")
@property
@pulumi.getter(name="elasticPoolName")
def elastic_pool_name(self) -> pulumi.Output[Optional[str]]:
"""
The name of the elastic pool the database is in. If elasticPoolName and requestedServiceObjectiveName are both updated, the value of requestedServiceObjectiveName is ignored. Not supported for DataWarehouse edition.
"""
return pulumi.get(self, "elastic_pool_name")
@property
@pulumi.getter(name="failoverGroupId")
def failover_group_id(self) -> pulumi.Output[str]:
"""
The resource identifier of the failover group containing this database.
"""
return pulumi.get(self, "failover_group_id")
@property
@pulumi.getter
def kind(self) -> pulumi.Output[str]:
"""
Kind of database. This is metadata used for the Azure portal experience.
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter(name="maxSizeBytes")
def max_size_bytes(self) -> pulumi.Output[Optional[str]]:
"""
The max size of the database expressed in bytes. If createMode is not Default, this value is ignored. To see possible values, query the capabilities API (/subscriptions/{subscriptionId}/providers/Microsoft.Sql/locations/{locationID}/capabilities) referred to by operationId: "Capabilities_ListByLocation."
"""
return pulumi.get(self, "max_size_bytes")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="readScale")
def read_scale(self) -> pulumi.Output[Optional[str]]:
"""
Conditional. If the database is a geo-secondary, readScale indicates whether read-only connections are allowed to this database or not. Not supported for DataWarehouse edition.
"""
return pulumi.get(self, "read_scale")
@property
@pulumi.getter(name="recommendedIndex")
def recommended_index(self) -> pulumi.Output[Sequence['outputs.RecommendedIndexResponse']]:
"""
The recommended indices for this database.
"""
return pulumi.get(self, "recommended_index")
@property
@pulumi.getter(name="recoveryServicesRecoveryPointResourceId")
def recovery_services_recovery_point_resource_id(self) -> pulumi.Output[Optional[str]]:
"""
Conditional. If createMode is RestoreLongTermRetentionBackup, then this value is required. Specifies the resource ID of the recovery point to restore from.
"""
return pulumi.get(self, "recovery_services_recovery_point_resource_id")
@property
@pulumi.getter(name="requestedServiceObjectiveId")
def requested_service_objective_id(self) -> pulumi.Output[Optional[str]]:
"""
The configured service level objective ID of the database. This is the service level objective that is in the process of being applied to the database. Once successfully updated, it will match the value of currentServiceObjectiveId property. If requestedServiceObjectiveId and requestedServiceObjectiveName are both updated, the value of requestedServiceObjectiveId overrides the value of requestedServiceObjectiveName.
The list of SKUs may vary by region and support offer. To determine the service objective ids that are available to your subscription in an Azure region, use the `Capabilities_ListByLocation` REST API.
"""
return pulumi.get(self, "requested_service_objective_id")
@property
@pulumi.getter(name="requestedServiceObjectiveName")
def requested_service_objective_name(self) -> pulumi.Output[Optional[str]]:
"""
The name of the configured service level objective of the database. This is the service level objective that is in the process of being applied to the database. Once successfully updated, it will match the value of serviceLevelObjective property.
The list of SKUs may vary by region and support offer. To determine the SKUs (including the SKU name, tier/edition, family, and capacity) that are available to your subscription in an Azure region, use the `Capabilities_ListByLocation` REST API or one of the following commands:
```azurecli
az sql db list-editions -l <location> -o table
```
```powershell
Get-AzSqlServerServiceObjective -Location <location>
```
"""
return pulumi.get(self, "requested_service_objective_name")
@property
@pulumi.getter(name="restorePointInTime")
def restore_point_in_time(self) -> pulumi.Output[Optional[str]]:
"""
Conditional. If createMode is PointInTimeRestore, this value is required. If createMode is Restore, this value is optional. Specifies the point in time (ISO8601 format) of the source database that will be restored to create the new database. Must be greater than or equal to the source database's earliestRestoreDate value.
"""
return pulumi.get(self, "restore_point_in_time")
@property
@pulumi.getter(name="sampleName")
def sample_name(self) -> pulumi.Output[Optional[str]]:
"""
Indicates the name of the sample schema to apply when creating this database. If createMode is not Default, this value is ignored. Not supported for DataWarehouse edition.
"""
return pulumi.get(self, "sample_name")
@property
@pulumi.getter(name="serviceLevelObjective")
def service_level_objective(self) -> pulumi.Output[str]:
"""
The current service level objective of the database.
"""
return pulumi.get(self, "service_level_objective")
@property
@pulumi.getter(name="serviceTierAdvisors")
def service_tier_advisors(self) -> pulumi.Output[Sequence['outputs.ServiceTierAdvisorResponse']]:
"""
The list of service tier advisors for this database. Expanded property
"""
return pulumi.get(self, "service_tier_advisors")
@property
@pulumi.getter(name="sourceDatabaseDeletionDate")
def source_database_deletion_date(self) -> pulumi.Output[Optional[str]]:
"""
Conditional. If createMode is Restore and sourceDatabaseId is the deleted database's original resource id when it existed (as opposed to its current restorable dropped database id), then this value is required. Specifies the time that the database was deleted.
"""
return pulumi.get(self, "source_database_deletion_date")
@property
@pulumi.getter(name="sourceDatabaseId")
def source_database_id(self) -> pulumi.Output[Optional[str]]:
"""
Conditional. If createMode is Copy, NonReadableSecondary, OnlineSecondary, PointInTimeRestore, Recovery, or Restore, then this value is required. Specifies the resource ID of the source database. If createMode is NonReadableSecondary or OnlineSecondary, the name of the source database must be the same as the new database being created.
"""
return pulumi.get(self, "source_database_id")
@property
@pulumi.getter
def status(self) -> pulumi.Output[str]:
"""
The status of the database.
"""
return pulumi.get(self, "status")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter(name="transparentDataEncryption")
def transparent_data_encryption(self) -> pulumi.Output[Sequence['outputs.TransparentDataEncryptionResponse']]:
"""
The transparent data encryption info for this database.
"""
return pulumi.get(self, "transparent_data_encryption")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="zoneRedundant")
def zone_redundant(self) -> pulumi.Output[Optional[bool]]:
"""
Whether or not this database is zone redundant, which means the replicas of this database will be spread across multiple availability zones.
"""
return pulumi.get(self, "zone_redundant")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
|
py | 1a3670b198e4d2cf9f17e17d293a06a11a8dc72d | import datetime
import jwt
from app.core import config
'''
JWT RFC:
https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-25
sub - Subject of the JWT (Volunteer).
exp - Expiration time, after which the server will reject the JWT.
iat - Issue time, identifies the time at which the JWT was issued.
iss - Issuer of the JWT.
'''
def create_access_token(token: str, expires_delta: datetime.timedelta = datetime.timedelta(days=config.ACCESS_TOKEN_EXPIRE_DAYS)):
data = {"sub": token,
"exp": datetime.datetime.utcnow() + expires_delta,
"iat": datetime.datetime.utcnow(),
"iss": config.SERVER_HOST,
}
return jwt.encode(data, key=config.JWT_SECRET_KEY, algorithm=config.JWT_ALGORITHM)
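# Minimal round-trip sketch (illustrative only; assumes the same config module
# imported above). It shows that tokens produced by create_access_token() can
# be verified with the matching key, algorithm and issuer.
if __name__ == "__main__":
    encoded = create_access_token("volunteer-42")
    claims = jwt.decode(
        encoded,
        key=config.JWT_SECRET_KEY,
        algorithms=[config.JWT_ALGORITHM],
        issuer=config.SERVER_HOST,
    )
    print(claims["sub"], claims["iss"])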
|
py | 1a36724a50261a651bb9f6199b49a0ec8b8cdf84 | import os
import uuid
from mlflow.entities import Experiment, Metric, Param, Run, RunData, RunInfo, RunStatus, RunTag, \
ViewType
from mlflow.store.abstract_store import AbstractStore
from mlflow.utils.validation import _validate_metric_name, _validate_param_name, _validate_run_id, \
_validate_tag_name
from mlflow.utils.env import get_env
from mlflow.utils.file_utils import (is_directory, list_subdirs, mkdir, exists, write_yaml,
read_yaml, find, read_file, build_path, write_to, append_to,
make_containing_dirs, mv)
from mlflow.utils.mlflow_tags import MLFLOW_RUN_NAME
from mlflow.utils.search_utils import does_run_match_clause
_TRACKING_DIR_ENV_VAR = "MLFLOW_TRACKING_DIR"
def _default_root_dir():
return get_env(_TRACKING_DIR_ENV_VAR) or os.path.abspath("mlruns")
class FileStore(AbstractStore):
TRASH_FOLDER_NAME = ".trash"
ARTIFACTS_FOLDER_NAME = "artifacts"
METRICS_FOLDER_NAME = "metrics"
PARAMS_FOLDER_NAME = "params"
TAGS_FOLDER_NAME = "tags"
META_DATA_FILE_NAME = "meta.yaml"
def __init__(self, root_directory=None, artifact_root_uri=None):
"""
Create a new FileStore with the given root directory and a given default artifact root URI.
"""
super(FileStore, self).__init__()
self.root_directory = root_directory or _default_root_dir()
self.artifact_root_uri = artifact_root_uri or self.root_directory
self.trash_folder = build_path(self.root_directory, FileStore.TRASH_FOLDER_NAME)
# Create root directory if needed
if not exists(self.root_directory):
mkdir(self.root_directory)
# Create trash folder if needed
if not exists(self.trash_folder):
mkdir(self.trash_folder)
# Create default experiment if needed
if not self._has_experiment(experiment_id=Experiment.DEFAULT_EXPERIMENT_ID):
self._create_experiment_with_id(name="Default",
experiment_id=Experiment.DEFAULT_EXPERIMENT_ID,
artifact_uri=None)
def _check_root_dir(self):
"""
Run checks before running directory operations.
"""
if not exists(self.root_directory):
raise Exception("'%s' does not exist." % self.root_directory)
if not is_directory(self.root_directory):
raise Exception("'%s' is not a directory." % self.root_directory)
def _get_experiment_path(self, experiment_id, view_type=ViewType.ALL):
parents = []
if view_type == ViewType.ACTIVE_ONLY or view_type == ViewType.ALL:
parents.append(self.root_directory)
if view_type == ViewType.DELETED_ONLY or view_type == ViewType.ALL:
parents.append(self.trash_folder)
for parent in parents:
exp_list = find(parent, str(experiment_id), full_path=True)
if len(exp_list) > 0:
return exp_list
return []
def _get_run_dir(self, experiment_id, run_uuid):
_validate_run_id(run_uuid)
return build_path(self._get_experiment_path(experiment_id)[0], run_uuid)
def _get_metric_path(self, experiment_id, run_uuid, metric_key):
_validate_run_id(run_uuid)
_validate_metric_name(metric_key)
return build_path(self._get_run_dir(experiment_id, run_uuid), FileStore.METRICS_FOLDER_NAME,
metric_key)
def _get_param_path(self, experiment_id, run_uuid, param_name):
_validate_run_id(run_uuid)
_validate_param_name(param_name)
return build_path(self._get_run_dir(experiment_id, run_uuid), FileStore.PARAMS_FOLDER_NAME,
param_name)
def _get_tag_path(self, experiment_id, run_uuid, tag_name):
_validate_run_id(run_uuid)
_validate_tag_name(tag_name)
return build_path(self._get_run_dir(experiment_id, run_uuid), FileStore.TAGS_FOLDER_NAME,
tag_name)
def _get_artifact_dir(self, experiment_id, run_uuid):
_validate_run_id(run_uuid)
artifacts_dir = build_path(self.get_experiment(experiment_id).artifact_location,
run_uuid,
FileStore.ARTIFACTS_FOLDER_NAME)
return artifacts_dir
def _get_active_experiments(self, full_path=False):
exp_list = list_subdirs(self.root_directory, full_path)
return [exp for exp in exp_list if not exp.endswith(FileStore.TRASH_FOLDER_NAME)]
def _get_deleted_experiments(self, full_path=False):
return list_subdirs(self.trash_folder, full_path)
def list_experiments(self, view_type=ViewType.ACTIVE_ONLY):
self._check_root_dir()
rsl = []
if view_type == ViewType.ACTIVE_ONLY or view_type == ViewType.ALL:
rsl += self._get_active_experiments(full_path=False)
if view_type == ViewType.DELETED_ONLY or view_type == ViewType.ALL:
rsl += self._get_deleted_experiments(full_path=False)
return [self._get_experiment(exp_id, view_type) for exp_id in rsl]
def _create_experiment_with_id(self, name, experiment_id, artifact_uri):
self._check_root_dir()
meta_dir = mkdir(self.root_directory, str(experiment_id))
artifact_uri = artifact_uri or build_path(self.artifact_root_uri, str(experiment_id))
experiment = Experiment(experiment_id, name, artifact_uri)
write_yaml(meta_dir, FileStore.META_DATA_FILE_NAME, dict(experiment))
return experiment_id
def create_experiment(self, name, artifact_location=None):
self._check_root_dir()
if name is None or name == "":
raise Exception("Invalid experiment name '%s'" % name)
experiment = self.get_experiment_by_name(name)
if experiment is not None:
raise Exception("Experiment '%s' already exists." % experiment.name)
# Get all existing experiments and find the one with largest ID.
# len(list_all(..)) would not work when experiments are deleted.
experiments_ids = [e.experiment_id for e in self.list_experiments(ViewType.ALL)]
experiment_id = max(experiments_ids) + 1
return self._create_experiment_with_id(name, experiment_id, artifact_location)
def _has_experiment(self, experiment_id):
return len(self._get_experiment_path(experiment_id)) > 0
def _get_experiment(self, experiment_id, view_type=ViewType.ALL):
self._check_root_dir()
experiment_dirs = self._get_experiment_path(experiment_id, view_type)
if len(experiment_dirs) == 0:
raise Exception("Could not find experiment with ID %s" % experiment_id)
meta = read_yaml(experiment_dirs[0], FileStore.META_DATA_FILE_NAME)
return Experiment.from_dictionary(meta)
def get_experiment(self, experiment_id):
"""
Fetches the experiment. This will search for active as well as deleted experiments.
:param experiment_id: Integer id for the experiment
:return: A single Experiment object if it exists, otherwise raises an Exception.
"""
return self._get_experiment(experiment_id)
def get_experiment_by_name(self, name):
self._check_root_dir()
for experiment in self.list_experiments(ViewType.ALL):
if experiment.name == name:
return experiment
return None
def delete_experiment(self, experiment_id):
experiment_dirs = self._get_experiment_path(experiment_id, ViewType.ACTIVE_ONLY)
if len(experiment_dirs) == 0:
raise Exception("Could not find experiment with ID %s" % experiment_id)
mv(experiment_dirs[0], self.trash_folder)
def restore_experiment(self, experiment_id):
experiment_dirs = self._get_experiment_path(experiment_id, ViewType.DELETED_ONLY)
if len(experiment_dirs) == 0:
raise Exception("Could not find deleted experiment with ID %d" % experiment_id)
conflict_experiment = self._get_experiment_path(experiment_id, ViewType.ACTIVE_ONLY)
if len(conflict_experiment) > 0:
raise Exception("Cannot restore eperiment with ID %d. "
"An experiment with same ID already exists." % experiment_id)
mv(experiment_dirs[0], self.root_directory)
def _find_run_root(self, run_uuid):
_validate_run_id(run_uuid)
self._check_root_dir()
all_experiments = self._get_active_experiments(True) + self._get_deleted_experiments(True)
for experiment_dir in all_experiments:
runs = find(experiment_dir, run_uuid, full_path=True)
if len(runs) == 0:
continue
return runs[0]
return None
def update_run_info(self, run_uuid, run_status, end_time):
_validate_run_id(run_uuid)
run_info = self.get_run(run_uuid).info
new_info = run_info.copy_with_overrides(run_status, end_time)
run_dir = self._get_run_dir(run_info.experiment_id, run_info.run_uuid)
new_info_dict = self._make_run_info_dict(new_info)
write_yaml(run_dir, FileStore.META_DATA_FILE_NAME, new_info_dict, overwrite=True)
return new_info
def create_run(self, experiment_id, user_id, run_name, source_type,
source_name, entry_point_name, start_time, source_version, tags):
"""
Creates a run with the specified attributes.
"""
if self.get_experiment(experiment_id) is None:
raise Exception("Could not create run under experiment with ID %s - no such experiment "
"exists." % experiment_id)
run_uuid = uuid.uuid4().hex
artifact_uri = self._get_artifact_dir(experiment_id, run_uuid)
run_info = RunInfo(run_uuid=run_uuid, experiment_id=experiment_id,
name="",
artifact_uri=artifact_uri, source_type=source_type,
source_name=source_name,
entry_point_name=entry_point_name, user_id=user_id,
status=RunStatus.RUNNING, start_time=start_time, end_time=None,
source_version=source_version)
# Persist run metadata and create directories for logging metrics, parameters, artifacts
run_dir = self._get_run_dir(run_info.experiment_id, run_info.run_uuid)
mkdir(run_dir)
write_yaml(run_dir, FileStore.META_DATA_FILE_NAME, self._make_run_info_dict(run_info))
mkdir(run_dir, FileStore.METRICS_FOLDER_NAME)
mkdir(run_dir, FileStore.PARAMS_FOLDER_NAME)
mkdir(run_dir, FileStore.ARTIFACTS_FOLDER_NAME)
for tag in tags:
self.set_tag(run_uuid, tag)
if run_name:
self.set_tag(run_uuid, RunTag(key=MLFLOW_RUN_NAME, value=run_name))
return Run(run_info=run_info, run_data=None)
def _make_run_info_dict(self, run_info):
# 'tags' was moved from RunInfo to RunData, so we must keep storing it in the meta.yaml for
# old mlflow versions to read
run_info_dict = dict(run_info)
run_info_dict['tags'] = []
return run_info_dict
def get_run(self, run_uuid):
_validate_run_id(run_uuid)
run_dir = self._find_run_root(run_uuid)
if run_dir is None:
raise Exception("Run '%s' not found" % run_uuid)
run_info = self.get_run_info(run_dir)
metrics = self.get_all_metrics(run_uuid)
params = self.get_all_params(run_uuid)
tags = self.get_all_tags(run_uuid)
return Run(run_info, RunData(metrics, params, tags))
@staticmethod
def get_run_info(run_dir):
meta = read_yaml(run_dir, FileStore.META_DATA_FILE_NAME)
return RunInfo.from_dictionary(meta)
def _get_run_files(self, run_uuid, resource_type):
_validate_run_id(run_uuid)
if resource_type == "metric":
subfolder_name = FileStore.METRICS_FOLDER_NAME
elif resource_type == "param":
subfolder_name = FileStore.PARAMS_FOLDER_NAME
elif resource_type == "tag":
subfolder_name = FileStore.TAGS_FOLDER_NAME
else:
raise Exception("Looking for unknown resource under run.")
run_dir = self._find_run_root(run_uuid)
if run_dir is None:
raise Exception("Run '%s' not found" % run_uuid)
source_dirs = find(run_dir, subfolder_name, full_path=True)
if len(source_dirs) == 0:
return run_dir, []
file_names = []
for root, _, files in os.walk(source_dirs[0]):
for name in files:
abspath = os.path.join(root, name)
file_names.append(os.path.relpath(abspath, source_dirs[0]))
return source_dirs[0], file_names
@staticmethod
def _get_metric_from_file(parent_path, metric_name):
_validate_metric_name(metric_name)
metric_data = read_file(parent_path, metric_name)
if len(metric_data) == 0:
raise Exception("Metric '%s' is malformed. No data found." % metric_name)
last_line = metric_data[-1]
timestamp, val = last_line.strip().split(" ")
return Metric(metric_name, float(val), int(timestamp))
def get_metric(self, run_uuid, metric_key):
_validate_run_id(run_uuid)
_validate_metric_name(metric_key)
parent_path, metric_files = self._get_run_files(run_uuid, "metric")
if metric_key not in metric_files:
raise Exception("Metric '%s' not found under run '%s'" % (metric_key, run_uuid))
return self._get_metric_from_file(parent_path, metric_key)
def get_all_metrics(self, run_uuid):
_validate_run_id(run_uuid)
parent_path, metric_files = self._get_run_files(run_uuid, "metric")
metrics = []
for metric_file in metric_files:
metrics.append(self._get_metric_from_file(parent_path, metric_file))
return metrics
def get_metric_history(self, run_uuid, metric_key):
_validate_run_id(run_uuid)
_validate_metric_name(metric_key)
parent_path, metric_files = self._get_run_files(run_uuid, "metric")
if metric_key not in metric_files:
raise Exception("Metric '%s' not found under run '%s'" % (metric_key, run_uuid))
metric_data = read_file(parent_path, metric_key)
rsl = []
for pair in metric_data:
ts, val = pair.strip().split(" ")
rsl.append(Metric(metric_key, float(val), int(ts)))
return rsl
@staticmethod
def _get_param_from_file(parent_path, param_name):
_validate_param_name(param_name)
param_data = read_file(parent_path, param_name)
if len(param_data) == 0:
raise Exception("Param '%s' is malformed. No data found." % param_name)
if len(param_data) > 1:
raise Exception("Unexpected data for param '%s'. Param recorded more than once"
% param_name)
return Param(param_name, str(param_data[0].strip()))
@staticmethod
def _get_tag_from_file(parent_path, tag_name):
_validate_tag_name(tag_name)
tag_data = read_file(parent_path, tag_name)
if len(tag_data) == 0:
raise Exception("Tag '%s' is malformed. No data found." % tag_name)
if len(tag_data) > 1:
raise Exception("Unexpected data for tag '%s'. Tag recorded more than once"
% tag_name)
return RunTag(tag_name, str(tag_data[0].strip()))
def get_param(self, run_uuid, param_name):
_validate_run_id(run_uuid)
_validate_param_name(param_name)
parent_path, param_files = self._get_run_files(run_uuid, "param")
if param_name not in param_files:
raise Exception("Param '%s' not found under run '%s'" % (param_name, run_uuid))
return self._get_param_from_file(parent_path, param_name)
def get_all_params(self, run_uuid):
parent_path, param_files = self._get_run_files(run_uuid, "param")
params = []
for param_file in param_files:
params.append(self._get_param_from_file(parent_path, param_file))
return params
def get_all_tags(self, run_uuid):
parent_path, tag_files = self._get_run_files(run_uuid, "tag")
tags = []
for tag_file in tag_files:
tags.append(self._get_tag_from_file(parent_path, tag_file))
return tags
def _list_run_uuids(self, experiment_id):
self._check_root_dir()
experiment_dir = self._get_experiment_path(experiment_id)[0]
return list_subdirs(experiment_dir, full_path=False)
def search_runs(self, experiment_ids, search_expressions):
run_uuids = []
if len(search_expressions) == 0:
for experiment_id in experiment_ids:
run_uuids.extend(self._list_run_uuids(experiment_id))
else:
for experiment_id in experiment_ids:
for run_uuid in self._list_run_uuids(experiment_id):
run = self.get_run(run_uuid)
if all([does_run_match_clause(run, s) for s in search_expressions]):
run_uuids.append(run_uuid)
return [self.get_run(run_uuid) for run_uuid in run_uuids]
def list_run_infos(self, experiment_id):
run_infos = []
for run_uuid in self._list_run_uuids(experiment_id):
run_infos.append(self.get_run_info(self._get_run_dir(experiment_id, run_uuid)))
return run_infos
def log_metric(self, run_uuid, metric):
_validate_run_id(run_uuid)
_validate_metric_name(metric.key)
run = self.get_run(run_uuid)
metric_path = self._get_metric_path(run.info.experiment_id, run_uuid, metric.key)
make_containing_dirs(metric_path)
append_to(metric_path, "%s %s\n" % (metric.timestamp, metric.value))
def log_param(self, run_uuid, param):
_validate_run_id(run_uuid)
_validate_param_name(param.key)
run = self.get_run(run_uuid)
param_path = self._get_param_path(run.info.experiment_id, run_uuid, param.key)
make_containing_dirs(param_path)
write_to(param_path, "%s\n" % param.value)
def set_tag(self, run_uuid, tag):
_validate_run_id(run_uuid)
_validate_tag_name(tag.key)
run = self.get_run(run_uuid)
tag_path = self._get_tag_path(run.info.experiment_id, run_uuid, tag.key)
make_containing_dirs(tag_path)
write_to(tag_path, "%s\n" % tag.value)
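# Usage sketch (illustrative, not part of the original module): exercise the
# public API above against a throwaway root directory. Assumes the legacy
# mlflow helpers imported at the top of this file are available.
if __name__ == "__main__":
    import tempfile
    store = FileStore(root_directory=tempfile.mkdtemp())
    exp_id = store.create_experiment("demo-experiment")
    print(store.get_experiment(exp_id))
    print([e.name for e in store.list_experiments(ViewType.ALL)])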
|
py | 1a3672ecab104aa386577326791c165ebbd27426 | import logging
from pprint import pprint # noqa
from followthemoney import model
from followthemoney.types import registry
from followthemoney.compare import compare
from aleph.core import db, es, celery
from aleph.model import Match
from aleph.index.indexes import entities_read_index
from aleph.index.entities import iter_proxies, entities_by_ids
from aleph.logic.entities.match import match_query
from aleph.index.util import unpack_result, none_query
from aleph.index.util import BULK_PAGE
from aleph.index.collections import get_collection
from aleph.logic.util import entity_url
log = logging.getLogger(__name__)
SCORE_CUTOFF = 0.05
def xref_item(proxy, collection_ids=None):
"""Cross-reference an entity or document, given as an indexed document."""
query = match_query(proxy, collection_ids=collection_ids)
if query == none_query():
return
query = {
'query': query,
'size': 100,
'_source': {'includes': ['schema', 'properties', 'collection_id']}
}
matchable = list(proxy.schema.matchable_schemata)
index = entities_read_index(schema=matchable)
result = es.search(index=index, body=query)
results = result.get('hits').get('hits')
for result in results:
result = unpack_result(result)
if result is not None:
other = model.get_proxy(result)
score = compare(model, proxy, other)
if score >= SCORE_CUTOFF:
yield score, result.get('collection_id'), other
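# Consumption sketch for the generator above (illustrative; `entity` stands for
# any indexed entity dict the application already holds):
#
#     proxy = model.get_proxy(entity)
#     for score, match_collection_id, match in xref_item(proxy):
#         log.info("Possible match (%.2f) in collection %s: %s",
#                  score, match_collection_id, match)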
@celery.task()
def xref_collection(collection_id, against_collection_ids=None):
"""Cross-reference all the entities and documents in a collection."""
matchable = [s.name for s in model if s.matchable]
entities = iter_proxies(collection_id=collection_id, schemata=matchable)
for entity in entities:
proxy = model.get_proxy(entity)
dq = db.session.query(Match)
dq = dq.filter(Match.entity_id == proxy.id)
dq.delete()
matches = xref_item(proxy, collection_ids=against_collection_ids)
for (score, other_id, other) in matches:
log.info("Xref [%.3f]: %s <=> %s", score, proxy, other)
obj = Match()
obj.entity_id = proxy.id
obj.collection_id = collection_id
obj.match_id = other.id
obj.match_collection_id = other_id
obj.score = score
db.session.add(obj)
db.session.commit()
def _format_date(proxy):
dates = proxy.get_type_values(registry.date)
if not len(dates):
return ''
return min(dates)
def _format_country(proxy):
countries = [c.upper() for c in proxy.countries]
return ', '.join(countries)
def _iter_match_batch(batch, authz):
matchable = [s.name for s in model if s.matchable]
entities = set()
for match in batch:
entities.add(match.entity_id)
entities.add(match.match_id)
entities = entities_by_ids(list(entities), schemata=matchable)
entities = {e.get('id'): e for e in entities}
for obj in batch:
if not authz.can(obj.match_collection_id, authz.READ):
continue
entity = entities.get(str(obj.entity_id))
match = entities.get(str(obj.match_id))
collection = get_collection(obj.match_collection_id)
if entity is None or match is None or collection is None:
continue
eproxy = model.get_proxy(entity)
mproxy = model.get_proxy(match)
yield (
int(obj.score * 100),
eproxy.caption,
_format_date(eproxy),
_format_country(eproxy),
collection.get('label'),
mproxy.caption,
_format_date(mproxy),
_format_country(mproxy),
entity_url(eproxy.id),
entity_url(mproxy.id),
)
def export_matches_csv(collection_id, authz):
"""Export the top N matches of cross-referencing for the given collection
to an Excel 2010 formatted export."""
dq = db.session.query(Match)
dq = dq.filter(Match.collection_id == collection_id)
dq = dq.order_by(Match.score.desc())
yield [
'Score',
'EntityName',
'EntityDate',
'EntityCountries',
'MatchCollection',
'MatchName',
'MatchDate',
'MatchCountries',
'EntityLink',
'MatchLink',
]
batch = []
for match in dq.yield_per(BULK_PAGE):
batch.append(match)
if len(batch) >= BULK_PAGE:
yield from _iter_match_batch(batch, authz)
batch = []
if len(batch):
yield from _iter_match_batch(batch, authz)
|
py | 1a36734080c69350fe22378559959770d85c798e | from src.spy import Spy
if __name__ == '__main__':
spy = Spy('http://vanban.chinhphu.vn/portal/page/portal/chinhphu/hethongvanban', '../output')
spy.crawl()
spy.__del__() |
py | 1a36744a5075fe86f3e259d680bb9b2b5aba140c | """Contains UI methods for LE user operations."""
import logging
import zope.component
from certbot import errors
from certbot import interfaces
from certbot import util
from certbot.compat import misc
from certbot.compat import os
from certbot.display import util as display_util
logger = logging.getLogger(__name__)
# Define a helper function to avoid verbose code
z_util = zope.component.getUtility
def get_email(invalid=False, optional=True):
"""Prompt for valid email address.
:param bool invalid: True if an invalid address was provided by the user
:param bool optional: True if the user can use
--register-unsafely-without-email to avoid providing an e-mail
:returns: e-mail address
:rtype: str
:raises errors.Error: if the user cancels
"""
invalid_prefix = "There seem to be problems with that address. "
msg = "Enter email address (used for urgent renewal and security notices)"
unsafe_suggestion = ("\n\nIf you really want to skip this, you can run "
"the client with --register-unsafely-without-email "
"but make sure you then backup your account key from "
"{0}\n\n".format(os.path.join(
misc.get_default_folder('config'), 'accounts')))
if optional:
if invalid:
msg += unsafe_suggestion
suggest_unsafe = False
else:
suggest_unsafe = True
else:
suggest_unsafe = False
while True:
try:
code, email = z_util(interfaces.IDisplay).input(
invalid_prefix + msg if invalid else msg,
force_interactive=True)
except errors.MissingCommandlineFlag:
msg = ("You should register before running non-interactively, "
"or provide --agree-tos and --email <email_address> flags.")
raise errors.MissingCommandlineFlag(msg)
if code != display_util.OK:
if optional:
raise errors.Error(
"An e-mail address or "
"--register-unsafely-without-email must be provided.")
else:
raise errors.Error("An e-mail address must be provided.")
elif util.safe_email(email):
return email
elif suggest_unsafe:
msg += unsafe_suggestion
suggest_unsafe = False # add this message at most once
invalid = bool(email)
def choose_account(accounts):
"""Choose an account.
:param list accounts: Containing at least one
:class:`~certbot._internal.account.Account`
"""
# Note this will get more complicated once we start recording authorizations
labels = [acc.slug for acc in accounts]
code, index = z_util(interfaces.IDisplay).menu(
"Please choose an account", labels, force_interactive=True)
if code == display_util.OK:
return accounts[index]
return None
def choose_values(values, question=None):
"""Display screen to let user pick one or multiple values from the provided
list.
:param list values: Values to select from
:returns: List of selected values
:rtype: list
"""
code, items = z_util(interfaces.IDisplay).checklist(
question, tags=values, force_interactive=True)
if code == display_util.OK and items:
return items
return []
def choose_names(installer, question=None):
"""Display screen to select domains to validate.
:param installer: An installer object
:type installer: :class:`certbot.interfaces.IInstaller`
:param `str` question: Overriding dialog question to ask the user if asked
to choose from domain names.
:returns: List of selected names
:rtype: `list` of `str`
"""
if installer is None:
logger.debug("No installer, picking names manually")
return _choose_names_manually()
domains = list(installer.get_all_names())
names = get_valid_domains(domains)
if not names:
return _choose_names_manually(
"No names were found in your configuration files. ")
code, names = _filter_names(names, question)
if code == display_util.OK and names:
return names
return []
def get_valid_domains(domains):
"""Helper method for choose_names that implements basic checks
on domain names
:param list domains: Domain names to validate
:return: List of valid domains
:rtype: list
"""
valid_domains = []
for domain in domains:
try:
valid_domains.append(util.enforce_domain_sanity(domain))
except errors.ConfigurationError:
continue
return valid_domains
def _sort_names(FQDNs):
"""Sort FQDNs by SLD (and if many, by their subdomains)
:param list FQDNs: list of domain names
:returns: Sorted list of domain names
:rtype: list
"""
return sorted(FQDNs, key=lambda fqdn: fqdn.split('.')[::-1][1:])
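# Worked example (added for clarity):
#     _sort_names(["www.example.com", "a.org", "mail.example.com"])
#     -> ["a.org", "mail.example.com", "www.example.com"]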
def _filter_names(names, override_question=None):
"""Determine which names the user would like to select from a list.
:param list names: domain names
:returns: tuple of the form (`code`, `names`) where
`code` - str display exit code
`names` - list of names selected
:rtype: tuple
"""
# Sort by domain first, and then by subdomain
sorted_names = _sort_names(names)
if override_question:
question = override_question
else:
question = "Which names would you like to activate HTTPS for?"
code, names = z_util(interfaces.IDisplay).checklist(
question, tags=sorted_names, cli_flag="--domains", force_interactive=True)
return code, [str(s) for s in names]
def _choose_names_manually(prompt_prefix=""):
"""Manually input names for those without an installer.
:param str prompt_prefix: string to prepend to prompt for domains
:returns: list of provided names
:rtype: `list` of `str`
"""
code, input_ = z_util(interfaces.IDisplay).input(
prompt_prefix +
"Please enter in your domain name(s) (comma and/or space separated) ",
cli_flag="--domains", force_interactive=True)
if code == display_util.OK:
invalid_domains = dict()
retry_message = ""
try:
domain_list = display_util.separate_list_input(input_)
except UnicodeEncodeError:
domain_list = []
retry_message = (
"Internationalized domain names are not presently "
"supported.{0}{0}Would you like to re-enter the "
"names?{0}").format(os.linesep)
for i, domain in enumerate(domain_list):
try:
domain_list[i] = util.enforce_domain_sanity(domain)
except errors.ConfigurationError as e:
invalid_domains[domain] = str(e)
if invalid_domains:
retry_message = (
"One or more of the entered domain names was not valid:"
"{0}{0}").format(os.linesep)
for domain in invalid_domains:
retry_message = retry_message + "{1}: {2}{0}".format(
os.linesep, domain, invalid_domains[domain])
retry_message = retry_message + (
"{0}Would you like to re-enter the names?{0}").format(
os.linesep)
if retry_message:
# We had error in input
retry = z_util(interfaces.IDisplay).yesno(retry_message,
force_interactive=True)
if retry:
return _choose_names_manually()
else:
return domain_list
return []
def success_installation(domains):
"""Display a box confirming the installation of HTTPS.
:param list domains: domain names which were enabled
"""
z_util(interfaces.IDisplay).notification(
"Congratulations! You have successfully enabled {0}{1}{1}"
"You should test your configuration at:{1}{2}".format(
_gen_https_names(domains),
os.linesep,
os.linesep.join(_gen_ssl_lab_urls(domains))),
pause=False)
def success_renewal(domains):
"""Display a box confirming the renewal of an existing certificate.
:param list domains: domain names which were renewed
"""
z_util(interfaces.IDisplay).notification(
"Your existing certificate has been successfully renewed, and the "
"new certificate has been installed.{1}{1}"
"The new certificate covers the following domains: {0}{1}{1}"
"You should test your configuration at:{1}{2}".format(
_gen_https_names(domains),
os.linesep,
os.linesep.join(_gen_ssl_lab_urls(domains))),
pause=False)
def success_revocation(cert_path):
"""Display a box confirming a certificate has been revoked.
:param list cert_path: path to certificate which was revoked.
"""
z_util(interfaces.IDisplay).notification(
"Congratulations! You have successfully revoked the certificate "
"that was located at {0}{1}{1}".format(
cert_path,
os.linesep),
pause=False)
def _gen_ssl_lab_urls(domains):
"""Returns a list of urls.
:param list domains: Each domain is a 'str'
"""
return ["https://www.ssllabs.com/ssltest/analyze.html?d=%s" % dom for dom in domains]
def _gen_https_names(domains):
"""Returns a string of the https domains.
Domains are formatted nicely with https:// prepended to each.
:param list domains: Each domain is a 'str'
"""
if len(domains) == 1:
return "https://{0}".format(domains[0])
elif len(domains) == 2:
return "https://{dom[0]} and https://{dom[1]}".format(dom=domains)
elif len(domains) > 2:
return "{0}{1}{2}".format(
", ".join("https://%s" % dom for dom in domains[:-1]),
", and https://",
domains[-1])
return ""
def _get_validated(method, validator, message, default=None, **kwargs):
if default is not None:
try:
validator(default)
except errors.Error as error:
logger.debug('Encountered invalid default value "%s" when prompting for "%s"',
default,
message,
exc_info=True)
raise AssertionError('Invalid default "{0}"'.format(default))
while True:
code, raw = method(message, default=default, **kwargs)
if code == display_util.OK:
try:
validator(raw)
return code, raw
except errors.Error as error:
logger.debug('Validator rejected "%s" when prompting for "%s"',
raw,
message,
exc_info=True)
zope.component.getUtility(interfaces.IDisplay).notification(str(error), pause=False)
else:
return code, raw
def validated_input(validator, *args, **kwargs):
"""Like `~certbot.interfaces.IDisplay.input`, but with validation.
:param callable validator: A method which will be called on the
supplied input. If the method raises a `errors.Error`, its
text will be displayed and the user will be re-prompted.
:param list `*args`: Arguments to be passed to `~certbot.interfaces.IDisplay.input`.
:param dict `**kwargs`: Arguments to be passed to `~certbot.interfaces.IDisplay.input`.
:return: as `~certbot.interfaces.IDisplay.input`
:rtype: tuple
"""
return _get_validated(zope.component.getUtility(interfaces.IDisplay).input,
validator, *args, **kwargs)
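# Sketch of a caller-supplied validator (hypothetical helper; any callable that
# raises errors.Error on bad input fits the contract described above):
#
#     def _require_nonempty(value):
#         if not value.strip():
#             raise errors.Error("A value is required.")
#
#     code, answer = validated_input(_require_nonempty, "Enter a contact name")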
def validated_directory(validator, *args, **kwargs):
"""Like `~certbot.interfaces.IDisplay.directory_select`, but with validation.
:param callable validator: A method which will be called on the
supplied input. If the method raises a `errors.Error`, its
text will be displayed and the user will be re-prompted.
:param list `*args`: Arguments to be passed to `~certbot.interfaces.IDisplay.directory_select`.
:param dict `**kwargs`: Arguments to be passed to
`~certbot.interfaces.IDisplay.directory_select`.
:return: as `~certbot.interfaces.IDisplay.directory_select`
:rtype: tuple
"""
return _get_validated(zope.component.getUtility(interfaces.IDisplay).directory_select,
validator, *args, **kwargs)
|
py | 1a3674ee26cb8af1e9950212759a29fb9787cc5d | #!/usr/bin/python
import json
from random import randint
#if any changes are made to this plugin, kindly update the plugin version here.
PLUGIN_VERSION = "1"
#Setting this to true will alert you when there is a communication problem while posting plugin data to the server
HEARTBEAT="true"
#Mention the units of your metrics. If any new metrics are added, add an entry here for their units if needed.
METRICS_UNITS={'metric_1':'MB', 'metric_2':'ms'}
def metricCollector():
data = {}
data['plugin_version'] = PLUGIN_VERSION
data['heartbeat_required'] = HEARTBEAT
data['metric_1']=randint(0,1000)
data['metric_2']=randint(0,500)
data['metric_3']=randint(0,100)
data['units']=METRICS_UNITS
return data
if __name__ == "__main__":
result = metricCollector()
print(json.dumps(result, indent=4, sort_keys=True))
|
py | 1a367573e2ac7a41e69b661e32b488f1364db9c6 | # https://www.codewars.com/kata/5b2e5a02a454c82fb9000048
def get_neighbourhood(n_type, arr, coordinates):
x, y = coordinates
r, c = len(arr), len(arr[0])
if 0 > x or x >= r or 0 > y or y >= c: return []
if n_type == "moore":
return [
arr[i][j]
for i in range(x-1 if x > 0 else x, x+2 if x < r-1 else x+1)
for j in range(y-1 if y > 0 else y, y+2 if y < c-1 else y+1)
if (i, j) != (x, y)
]
return [
arr[c1][c2]
for i in (-1, 1)
for c1, c2 in [(x+i, y), (x, y+i)]
if c1 >= 0 and c2 >= 0 and c1 < r and c2 < c
]
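# Quick self-check (illustrative; coordinates are (row, col) as implied above).
# The first assert shows the Moore neighbourhood, the second the von Neumann
# neighbourhood, the third a corner cell where out-of-range cells are dropped.
if __name__ == "__main__":
    grid = [[1, 2, 3],
            [4, 5, 6],
            [7, 8, 9]]
    assert get_neighbourhood("moore", grid, (1, 1)) == [1, 2, 3, 4, 6, 7, 8, 9]
    assert get_neighbourhood("von_neumann", grid, (1, 1)) == [2, 4, 8, 6]
    assert get_neighbourhood("moore", grid, (0, 0)) == [2, 4, 5]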
|
py | 1a3675c51cb79396ab3fcfb3bdd15ce93af7d244 | import os
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
# check if MongoDB is enabled and running
def test_mongo_running_and_enabled(host):
mongo = host.service("mongod")
assert mongo.is_running
assert mongo.is_enabled
# check if configuration file contains the required line
def test_config_file(host):
config_file = host.file('/etc/mongod.conf')
assert config_file.contains('bindIp: 0.0.0.0')
assert config_file.is_file
# mongo port testing on 0.0.0.0:27017
def test_mongo_socket(host):
socket = host.socket("tcp://0.0.0.0:27017")
assert socket.is_listening
|
py | 1a3676f7df315270a3c4d94986b8f1a4cc3a76fe | from typing import List, Dict
from opentrons.calibration_storage import helpers
from opentrons.protocols.geometry.labware_geometry import LabwareGeometry
from opentrons.protocols.geometry.well_geometry import WellGeometry
from opentrons.protocols.implementations.interfaces.labware import \
LabwareInterface
from opentrons.protocols.implementations.tip_tracker import TipTracker
from opentrons.protocols.implementations.well import WellImplementation
from opentrons.protocols.implementations.well_grid import WellGrid
from opentrons.types import Point, Location
from opentrons_shared_data.labware.dev_types import LabwareParameters, \
LabwareDefinition
class LabwareImplementation(LabwareInterface):
def __init__(self,
definition: LabwareDefinition,
parent: Location,
label: str = None):
"""
Construct an implementation of a labware object.
:param definition: A dict representing all required data for a labware,
including metadata such as the display name of the
labware, a definition of the order to iterate over
wells, the shape of wells (shape, physical
dimensions, etc), and so on. The correct shape of
this definition is handled by the "labware-designer"
project in the Opentrons/opentrons repo.
:param parent: A :py:class:`.Location` representing the location where
the front and left most point of the outside of the
labware is (often the front-left corner of a slot on the
deck).
:param str label: An optional label to use instead of the displayName
from the definition's metadata element
"""
if label:
dn = label
self._name = dn
else:
dn = definition['metadata']['displayName']
self._name = definition['parameters']['loadName']
self._display_name = f"{dn} on {str(parent.labware)}"
# Directly from definition
self._well_definition = definition['wells']
self._parameters = definition['parameters']
self._definition = definition
self._geometry = LabwareGeometry(definition, parent)
# flatten list of list of well names.
self._ordering = [
well for col in definition['ordering'] for well in col
]
self._wells: List[WellImplementation] = []
self._well_name_grid = WellGrid(wells=self._wells)
self._tip_tracker = TipTracker(
columns=self._well_name_grid.get_columns()
)
self._calibrated_offset = Point(0, 0, 0)
# Will cause building of the wells
self.set_calibration(self._calibrated_offset)
def get_uri(self) -> str:
return helpers.uri_from_definition(self._definition)
def get_display_name(self) -> str:
return self._display_name
def get_name(self) -> str:
return self._name
def set_name(self, new_name: str) -> None:
self._name = new_name
def get_definition(self) -> LabwareDefinition:
return self._definition
def get_parameters(self) -> LabwareParameters:
return self._parameters
def get_quirks(self) -> List[str]:
return self._parameters.get('quirks', [])
def set_calibration(self, delta: Point) -> None:
self._calibrated_offset = Point(
x=self._geometry.offset.x + delta.x,
y=self._geometry.offset.y + delta.y,
z=self._geometry.offset.z + delta.z
)
# The wells must be rebuilt
self._wells = self._build_wells()
self._well_name_grid = WellGrid(wells=self._wells)
self._tip_tracker = TipTracker(
columns=self._well_name_grid.get_columns()
)
def get_calibrated_offset(self) -> Point:
return self._calibrated_offset
def is_tiprack(self) -> bool:
return self._parameters['isTiprack']
def get_tip_length(self) -> float:
return self._parameters['tipLength']
def set_tip_length(self, length: float):
self._parameters['tipLength'] = length
def reset_tips(self) -> None:
if self.is_tiprack():
for well in self._wells:
well.set_has_tip(True)
def get_tip_tracker(self) -> TipTracker:
return self._tip_tracker
def get_well_grid(self) -> WellGrid:
return self._well_name_grid
def get_wells(self) -> List[WellImplementation]:
return self._wells
def get_wells_by_name(self) -> Dict[str, WellImplementation]:
return {
well.get_name(): well for well in self._wells
}
def get_geometry(self) -> LabwareGeometry:
return self._geometry
@property
def highest_z(self):
return self._geometry.z_dimension + self._calibrated_offset.z
@property
def separate_calibration(self) -> bool:
return False
@property
def load_name(self) -> str:
return self._parameters['loadName']
def _build_wells(self) -> List[WellImplementation]:
return [
WellImplementation(
well_geometry=WellGeometry(
well_props=self._well_definition[well],
parent_point=self._calibrated_offset,
parent_object=self
),
display_name="{} of {}".format(well, self._display_name),
has_tip=self.is_tiprack(),
name=well
)
for well in self._ordering
]
|
py | 1a3677d854d0db56b51432af15eccd1d49d66748 | # -*- coding: utf-8 -*-
from socialregister.users.models import User
def create_user(strategy, details, user=None, *args, **kwargs):
if user:
return {'is_new': False}
if not details['email']:
username = details['username']
else:
username = details['email']
user = User.objects.get_or_create(
username=username,
defaults={
'email': details['email'], 'first_name': details['first_name'],
'last_name': details['last_name'], 'is_active': True})[0]
return {
'is_new': True,
'user': user
}
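# Wiring sketch (illustrative; the dotted path to this module is hypothetical):
# this function is intended to run as a python-social-auth pipeline step, e.g.
#
#     SOCIAL_AUTH_PIPELINE = (
#         'social_core.pipeline.social_auth.social_details',
#         'social_core.pipeline.social_auth.social_uid',
#         'social_core.pipeline.social_auth.auth_allowed',
#         'social_core.pipeline.social_auth.social_user',
#         'socialregister.users.pipeline.create_user',
#         'social_core.pipeline.social_auth.associate_user',
#         'social_core.pipeline.social_auth.load_extra_data',
#         'social_core.pipeline.user.user_details',
#     )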
|
py | 1a3678642153d040ee48575c63d178cc1323b1fb | import numpy as np
from sklearn.decomposition import PCA
from sklearn.base import BaseEstimator, OutlierMixin
from sklearn.utils.validation import check_is_fitted, check_array, FLOAT_DTYPES
class PCAOutlierDetection(BaseEstimator, OutlierMixin):
"""
Does outlier detection based on the reconstruction error from PCA.
"""
def __init__(
self,
n_components=None,
threshold=None,
variant="relative",
whiten=False,
svd_solver="auto",
tol=0.0,
iterated_power="auto",
random_state=None,
):
self.n_components = n_components
self.threshold = threshold
self.whiten = whiten
self.variant = variant
self.svd_solver = svd_solver
self.tol = tol
self.iterated_power = iterated_power
self.random_state = random_state
def fit(self, X, y=None):
"""
Fit the model using X as training data.
:param X: array-like, shape=(n_samples, n_columns) training data.
:param y: ignored but kept in for pipeline support
:return: Returns an instance of self.
"""
X = check_array(X, estimator=self, dtype=FLOAT_DTYPES)
if not self.threshold:
raise ValueError("The `threshold` value cannot be `None`.")
self.pca_ = PCA(
n_components=self.n_components,
whiten=self.whiten,
svd_solver=self.svd_solver,
tol=self.tol,
iterated_power=self.iterated_power,
random_state=self.random_state,
)
self.pca_.fit(X, y)
self.offset_ = -self.threshold
return self
def transform(self, X):
"""
Uses the underlying PCA method to transform the data.
"""
X = check_array(X, estimator=self, dtype=FLOAT_DTYPES)
check_is_fitted(self, ["pca_", "offset_"])
return self.pca_.transform(X)
def difference(self, X):
"""
Shows the calculated difference between original and reconstructed data. Row by row.
:param X: array-like, shape=(n_samples, n_columns) training data.
:return: array, shape=(n_samples,) the difference
"""
check_is_fitted(self, ["pca_", "offset_"])
reduced = self.pca_.transform(X)
diff = np.sum(np.abs(self.pca_.inverse_transform(reduced) - X), axis=1)
if self.variant == "relative":
diff = diff / X.sum(axis=1)
return diff
def decision_function(self, X):
return self.threshold - self.difference(X)
def score_samples(self, X):
return -self.difference(X)
def predict(self, X):
"""
Predict if a point is an outlier.
:param X: array-like, shape=(n_samples, n_columns) training data.
:return: array, shape=(n_samples,) the predicted data. 1 for inliers, -1 for outliers.
"""
X = check_array(X, estimator=self, dtype=FLOAT_DTYPES)
check_is_fitted(self, ["pca_", "offset_"])
result = np.ones(X.shape[0])
result[self.difference(X) > self.threshold] = -1
        # np.int was removed from modern NumPy; the builtin int keeps the same behaviour.
        return result.astype(int)
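# Minimal usage sketch (not part of the original module): fit the detector on
# synthetic data and flag outliers. The threshold, component count and variant
# below are arbitrary illustration values.
if __name__ == "__main__":
    rng = np.random.RandomState(42)
    X_demo = rng.normal(size=(100, 5))
    detector = PCAOutlierDetection(n_components=2, threshold=0.5, variant="absolute")
    detector.fit(X_demo)
    labels = detector.predict(X_demo)  # 1 for inliers, -1 for outliers
    print("outliers flagged:", int((labels == -1).sum()))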
|
py | 1a36794867f8878289d0a92d184c8c30fdda483e | # -*- coding: utf-8 -*-
'''
:codeauthor: Pedro Algarvio ([email protected])
=============
Class Mix-Ins
=============
Some reusable class Mixins
'''
# pylint: disable=repr-flag-used-in-string
# Import python libs
from __future__ import absolute_import, print_function
import os
import sys
import time
import types
import atexit
import pprint
import logging
import tempfile
import functools
import subprocess
import multiprocessing
# Import Salt Testing Libs
from tests.support.mock import NO_MOCK, NO_MOCK_REASON, patch
from tests.support.runtests import RUNTIME_VARS
from tests.support.paths import CODE_DIR
# Import salt libs
import salt.config
import salt.utils.event
import salt.utils.files
import salt.utils.functools
import salt.utils.path
import salt.utils.platform
import salt.utils.stringutils
import salt.utils.yaml
import salt.version
import salt.exceptions
import salt.utils.process
from salt.utils.verify import verify_env
from salt.utils.immutabletypes import freeze
from salt._compat import ElementTree as etree
# Import 3rd-party libs
from salt.ext import six
from salt.ext.six.moves import zip # pylint: disable=import-error,redefined-builtin
log = logging.getLogger(__name__)
class CheckShellBinaryNameAndVersionMixin(object):
'''
Simple class mix-in to subclass in companion to :class:`ShellTestCase<tests.support.case.ShellTestCase>` which
adds a test case to verify proper version report from Salt's CLI tools.
'''
_call_binary_ = None
_call_binary_expected_version_ = None
def test_version_includes_binary_name(self):
if getattr(self, '_call_binary_', None) is None:
self.skipTest('\'_call_binary_\' not defined.')
if self._call_binary_expected_version_ is None:
# Late import
self._call_binary_expected_version_ = salt.version.__version__
out = '\n'.join(self.run_script(self._call_binary_, '--version'))
self.assertIn(self._call_binary_, out)
self.assertIn(self._call_binary_expected_version_, out)
class AdaptedConfigurationTestCaseMixin(object):
__slots__ = ()
@staticmethod
def get_temp_config(config_for, **config_overrides):
rootdir = tempfile.mkdtemp(dir=RUNTIME_VARS.TMP)
conf_dir = os.path.join(rootdir, 'conf')
for key in ('cachedir', 'pki_dir', 'sock_dir'):
if key not in config_overrides:
config_overrides[key] = key
if 'log_file' not in config_overrides:
config_overrides['log_file'] = 'logs/{}.log'.format(config_for)
if 'user' not in config_overrides:
config_overrides['user'] = RUNTIME_VARS.RUNNING_TESTS_USER
config_overrides['root_dir'] = rootdir
cdict = AdaptedConfigurationTestCaseMixin.get_config(config_for, from_scratch=True)
if config_for in ('master', 'client_config'):
rdict = salt.config.apply_master_config(config_overrides, cdict)
if config_for == 'minion':
rdict = salt.config.apply_minion_config(config_overrides, cdict)
verify_env([os.path.join(rdict['pki_dir'], 'minions'),
os.path.join(rdict['pki_dir'], 'minions_pre'),
os.path.join(rdict['pki_dir'], 'minions_rejected'),
os.path.join(rdict['pki_dir'], 'minions_denied'),
os.path.join(rdict['cachedir'], 'jobs'),
os.path.join(rdict['cachedir'], 'raet'),
os.path.join(rdict['cachedir'], 'tokens'),
os.path.join(rdict['root_dir'], 'cache', 'tokens'),
os.path.join(rdict['pki_dir'], 'accepted'),
os.path.join(rdict['pki_dir'], 'rejected'),
os.path.join(rdict['pki_dir'], 'pending'),
os.path.dirname(rdict['log_file']),
rdict['sock_dir'],
conf_dir
],
RUNTIME_VARS.RUNNING_TESTS_USER,
root_dir=rdict['root_dir'],
)
rdict['config_dir'] = conf_dir
rdict['conf_file'] = os.path.join(conf_dir, config_for)
with salt.utils.files.fopen(rdict['conf_file'], 'w') as wfh:
salt.utils.yaml.safe_dump(rdict, wfh, default_flow_style=False)
return rdict
@staticmethod
def get_config(config_for, from_scratch=False):
if from_scratch:
if config_for in ('master', 'syndic_master'):
return salt.config.master_config(
AdaptedConfigurationTestCaseMixin.get_config_file_path(config_for)
)
elif config_for in ('minion', 'sub_minion'):
return salt.config.minion_config(
AdaptedConfigurationTestCaseMixin.get_config_file_path(config_for)
)
elif config_for in ('syndic',):
return salt.config.syndic_config(
AdaptedConfigurationTestCaseMixin.get_config_file_path(config_for),
AdaptedConfigurationTestCaseMixin.get_config_file_path('minion')
)
elif config_for == 'client_config':
return salt.config.client_config(
AdaptedConfigurationTestCaseMixin.get_config_file_path('master')
)
if config_for not in RUNTIME_VARS.RUNTIME_CONFIGS:
if config_for in ('master', 'syndic_master'):
RUNTIME_VARS.RUNTIME_CONFIGS[config_for] = freeze(
salt.config.master_config(
AdaptedConfigurationTestCaseMixin.get_config_file_path(config_for)
)
)
elif config_for in ('minion', 'sub_minion'):
RUNTIME_VARS.RUNTIME_CONFIGS[config_for] = freeze(
salt.config.minion_config(
AdaptedConfigurationTestCaseMixin.get_config_file_path(config_for)
)
)
elif config_for in ('syndic',):
RUNTIME_VARS.RUNTIME_CONFIGS[config_for] = freeze(
salt.config.syndic_config(
AdaptedConfigurationTestCaseMixin.get_config_file_path(config_for),
AdaptedConfigurationTestCaseMixin.get_config_file_path('minion')
)
)
elif config_for == 'client_config':
RUNTIME_VARS.RUNTIME_CONFIGS[config_for] = freeze(
salt.config.client_config(
AdaptedConfigurationTestCaseMixin.get_config_file_path('master')
)
)
return RUNTIME_VARS.RUNTIME_CONFIGS[config_for]
@staticmethod
def get_config_dir():
return RUNTIME_VARS.TMP_CONF_DIR
@staticmethod
def get_config_file_path(filename):
if filename == 'syndic_master':
return os.path.join(RUNTIME_VARS.TMP_SYNDIC_MASTER_CONF_DIR, 'master')
if filename == 'syndic':
return os.path.join(RUNTIME_VARS.TMP_SYNDIC_MINION_CONF_DIR, 'minion')
if filename == 'sub_minion':
return os.path.join(RUNTIME_VARS.TMP_SUB_MINION_CONF_DIR, 'minion')
return os.path.join(RUNTIME_VARS.TMP_CONF_DIR, filename)
@property
def master_opts(self):
'''
Return the options used for the master
'''
return self.get_config('master')
@property
def minion_opts(self):
'''
Return the options used for the minion
'''
return self.get_config('minion')
@property
def sub_minion_opts(self):
'''
Return the options used for the sub_minion
'''
return self.get_config('sub_minion')
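# Illustrative sketch (not part of the original module): a test that mixes in
# AdaptedConfigurationTestCaseMixin can build a throw-away config with
# overridden values. The override key shown here is an arbitrary example.
#
#     class MyConfigTestCase(TestCase, AdaptedConfigurationTestCaseMixin):
#         def test_custom_minion_id(self):
#             opts = self.get_temp_config('minion', id='throwaway-minion')
#             self.assertEqual(opts['id'], 'throwaway-minion')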
class SaltClientTestCaseMixin(AdaptedConfigurationTestCaseMixin):
'''
Mix-in class that provides a ``client`` attribute which returns a Salt
:class:`LocalClient<salt:salt.client.LocalClient>`.
.. code-block:: python
class LocalClientTestCase(TestCase, SaltClientTestCaseMixin):
def test_check_pub_data(self):
just_minions = {'minions': ['m1', 'm2']}
jid_no_minions = {'jid': '1234', 'minions': []}
valid_pub_data = {'minions': ['m1', 'm2'], 'jid': '1234'}
self.assertRaises(EauthAuthenticationError,
self.client._check_pub_data, None)
self.assertDictEqual({},
self.client._check_pub_data(just_minions),
'Did not handle lack of jid correctly')
self.assertDictEqual(
{},
self.client._check_pub_data({'jid': '0'}),
'Passing JID of zero is not handled gracefully')
'''
_salt_client_config_file_name_ = 'master'
@property
def client(self):
# Late import
import salt.client
if 'runtime_client' not in RUNTIME_VARS.RUNTIME_CONFIGS:
mopts = self.get_config(self._salt_client_config_file_name_, from_scratch=True)
RUNTIME_VARS.RUNTIME_CONFIGS['runtime_client'] = salt.client.get_local_client(mopts=mopts)
return RUNTIME_VARS.RUNTIME_CONFIGS['runtime_client']
class ShellCaseCommonTestsMixin(CheckShellBinaryNameAndVersionMixin):
_call_binary_expected_version_ = salt.version.__version__
def test_salt_with_git_version(self):
if getattr(self, '_call_binary_', None) is None:
self.skipTest('\'_call_binary_\' not defined.')
from salt.version import __version_info__, SaltStackVersion
git = salt.utils.path.which('git')
if not git:
self.skipTest('The git binary is not available')
opts = {
'stdout': subprocess.PIPE,
'stderr': subprocess.PIPE,
'cwd': CODE_DIR,
}
if not salt.utils.platform.is_windows():
opts['close_fds'] = True
# Let's get the output of git describe
process = subprocess.Popen(
[git, 'describe', '--tags', '--first-parent', '--match', 'v[0-9]*'],
**opts
)
out, err = process.communicate()
if process.returncode != 0:
process = subprocess.Popen(
[git, 'describe', '--tags', '--match', 'v[0-9]*'],
**opts
)
out, err = process.communicate()
if not out:
self.skipTest(
'Failed to get the output of \'git describe\'. '
'Error: \'{0}\''.format(
salt.utils.stringutils.to_str(err)
)
)
parsed_version = SaltStackVersion.parse(out)
if parsed_version.info < __version_info__:
self.skipTest(
'We\'re likely about to release a new version. This test '
'would fail. Parsed(\'{0}\') < Expected(\'{1}\')'.format(
parsed_version.info, __version_info__
)
)
elif parsed_version.info != __version_info__:
self.skipTest(
'In order to get the proper salt version with the '
'git hash you need to update salt\'s local git '
'tags. Something like: \'git fetch --tags\' or '
'\'git fetch --tags upstream\' if you followed '
'salt\'s contribute documentation. The version '
'string WILL NOT include the git hash.'
)
out = '\n'.join(self.run_script(self._call_binary_, '--version'))
self.assertIn(parsed_version.string, out)
class _FixLoaderModuleMockMixinMroOrder(type):
'''
This metaclass will make sure that LoaderModuleMockMixin will always come as the first
base class in order for LoaderModuleMockMixin.setUp to actually run
'''
def __new__(mcs, cls_name, cls_bases, cls_dict):
if cls_name == 'LoaderModuleMockMixin':
return super(_FixLoaderModuleMockMixinMroOrder, mcs).__new__(mcs, cls_name, cls_bases, cls_dict)
bases = list(cls_bases)
for idx, base in enumerate(bases):
if base.__name__ == 'LoaderModuleMockMixin':
bases.insert(0, bases.pop(idx))
break
# Create the class instance
instance = super(_FixLoaderModuleMockMixinMroOrder, mcs).__new__(mcs, cls_name, tuple(bases), cls_dict)
# Apply our setUp function decorator
instance.setUp = LoaderModuleMockMixin.__setup_loader_modules_mocks__(instance.setUp)
return instance
class LoaderModuleMockMixin(six.with_metaclass(_FixLoaderModuleMockMixinMroOrder, object)):
'''
    This class will set up the salt loader dunders.
    Please check `setup_loader_modules` below.
'''
# Define our setUp function decorator
@staticmethod
def __setup_loader_modules_mocks__(setup_func):
@functools.wraps(setup_func)
def wrapper(self):
if NO_MOCK:
self.skipTest(NO_MOCK_REASON)
loader_modules_configs = self.setup_loader_modules()
if not isinstance(loader_modules_configs, dict):
raise RuntimeError(
'{}.setup_loader_modules() must return a dictionary where the keys are the '
'modules that require loader mocking setup and the values, the global module '
'variables for each of the module being mocked. For example \'__salt__\', '
'\'__opts__\', etc.'.format(self.__class__.__name__)
)
salt_dunders = (
'__opts__', '__salt__', '__runner__', '__context__', '__utils__',
'__ext_pillar__', '__thorium__', '__states__', '__serializers__', '__ret__',
'__grains__', '__pillar__', '__sdb__',
# Proxy is commented out on purpose since some code in salt expects a NameError
# and is most of the time not a required dunder
# '__proxy__'
)
for module, module_globals in six.iteritems(loader_modules_configs):
if not isinstance(module, types.ModuleType):
raise RuntimeError(
'The dictionary keys returned by {}.setup_loader_modules() '
'must be an imported module, not {}'.format(
self.__class__.__name__,
type(module)
)
)
if not isinstance(module_globals, dict):
raise RuntimeError(
'The dictionary values returned by {}.setup_loader_modules() '
'must be a dictionary, not {}'.format(
self.__class__.__name__,
type(module_globals)
)
)
module_blacklisted_dunders = module_globals.pop('blacklisted_dunders', ())
minion_funcs = {}
if '__salt__' in module_globals and module_globals['__salt__'] == 'autoload':
if '__opts__' not in module_globals:
raise RuntimeError(
'You must provide \'__opts__\' on the {} module globals dictionary '
'to auto load the minion functions'.format(module.__name__)
)
import salt.loader
ctx = {}
if '__utils__' not in module_globals:
utils = salt.loader.utils(module_globals['__opts__'],
context=module_globals.get('__context__') or ctx)
module_globals['__utils__'] = utils
minion_funcs = salt.loader.minion_mods(
module_globals['__opts__'],
context=module_globals.get('__context__') or ctx,
utils=module_globals.get('__utils__'),
)
module_globals['__salt__'] = minion_funcs
for dunder_name in salt_dunders:
if dunder_name not in module_globals:
if dunder_name in module_blacklisted_dunders:
continue
module_globals[dunder_name] = {}
sys_modules = module_globals.pop('sys.modules', None)
if sys_modules is not None:
if not isinstance(sys_modules, dict):
raise RuntimeError(
'\'sys.modules\' must be a dictionary not: {}'.format(
type(sys_modules)
)
)
patcher = patch.dict(sys.modules, sys_modules)
patcher.start()
def cleanup_sys_modules(patcher, sys_modules):
patcher.stop()
del patcher
del sys_modules
self.addCleanup(cleanup_sys_modules, patcher, sys_modules)
for key in module_globals:
if not hasattr(module, key):
if key in salt_dunders:
setattr(module, key, {})
else:
setattr(module, key, None)
if module_globals:
patcher = patch.multiple(module, **module_globals)
patcher.start()
def cleanup_module_globals(patcher, module_globals):
patcher.stop()
del patcher
del module_globals
self.addCleanup(cleanup_module_globals, patcher, module_globals)
if minion_funcs:
# Since we autoloaded the minion_funcs, let's namespace the functions with the globals
# used to patch above
import salt.utils
for func in minion_funcs:
minion_funcs[func] = salt.utils.functools.namespaced_function(
minion_funcs[func],
module_globals,
preserve_context=True
)
return setup_func(self)
return wrapper
def setup_loader_modules(self):
raise NotImplementedError(
'\'{}.setup_loader_modules()\' must be implemented'.format(self.__class__.__name__)
)
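# Illustrative sketch (not part of the original module): a minimal test case
# using LoaderModuleMockMixin. The module and dunder globals below are
# hypothetical examples.
#
#     import salt.modules.cmdmod as cmdmod
#
#     class CmdModTestCase(TestCase, LoaderModuleMockMixin):
#         def setup_loader_modules(self):
#             return {cmdmod: {'__opts__': {'test': True}, '__salt__': {}}}
#
#         def test_something(self):
#             ...  # cmdmod.__opts__ and cmdmod.__salt__ are patched here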
class XMLEqualityMixin(object):
def assertEqualXML(self, e1, e2):
if six.PY3 and isinstance(e1, bytes):
e1 = e1.decode('utf-8')
if six.PY3 and isinstance(e2, bytes):
e2 = e2.decode('utf-8')
if isinstance(e1, six.string_types):
e1 = etree.XML(e1)
if isinstance(e2, six.string_types):
e2 = etree.XML(e2)
if e1.tag != e2.tag:
return False
if e1.text != e2.text:
return False
if e1.tail != e2.tail:
return False
if e1.attrib != e2.attrib:
return False
if len(e1) != len(e2):
return False
return all(self.assertEqualXML(c1, c2) for c1, c2 in zip(e1, e2))
class SaltReturnAssertsMixin(object):
def assertReturnSaltType(self, ret):
try:
self.assertTrue(isinstance(ret, dict))
except AssertionError:
raise AssertionError(
'{0} is not dict. Salt returned: {1}'.format(
type(ret).__name__, ret
)
)
def assertReturnNonEmptySaltType(self, ret):
self.assertReturnSaltType(ret)
try:
self.assertNotEqual(ret, {})
except AssertionError:
            raise AssertionError(
                '{} is equal to {}. Salt returned an empty dictionary.'.format(
                    ret, {}
                )
            )
def __return_valid_keys(self, keys):
if isinstance(keys, tuple):
# If it's a tuple, turn it into a list
keys = list(keys)
elif isinstance(keys, six.string_types):
# If it's a string, make it a one item list
keys = [keys]
elif not isinstance(keys, list):
# If we've reached here, it's a bad type passed to keys
raise RuntimeError('The passed keys need to be a list')
return keys
def __getWithinSaltReturn(self, ret, keys):
self.assertReturnNonEmptySaltType(ret)
ret_data = []
for part in six.itervalues(ret):
keys = self.__return_valid_keys(keys)
okeys = keys[:]
try:
ret_item = part[okeys.pop(0)]
except (KeyError, TypeError):
raise AssertionError(
'Could not get ret{0} from salt\'s return: {1}'.format(
''.join(['[\'{0}\']'.format(k) for k in keys]), part
)
)
while okeys:
try:
ret_item = ret_item[okeys.pop(0)]
except (KeyError, TypeError):
raise AssertionError(
'Could not get ret{0} from salt\'s return: {1}'.format(
''.join(['[\'{0}\']'.format(k) for k in keys]), part
)
)
ret_data.append(ret_item)
return ret_data
def assertSaltTrueReturn(self, ret):
try:
for saltret in self.__getWithinSaltReturn(ret, 'result'):
self.assertTrue(saltret)
except AssertionError:
log.info('Salt Full Return:\n{0}'.format(pprint.pformat(ret)))
try:
raise AssertionError(
'{result} is not True. Salt Comment:\n{comment}'.format(
**(next(six.itervalues(ret)))
)
)
except (AttributeError, IndexError):
raise AssertionError(
'Failed to get result. Salt Returned:\n{0}'.format(
pprint.pformat(ret)
)
)
def assertSaltFalseReturn(self, ret):
try:
for saltret in self.__getWithinSaltReturn(ret, 'result'):
self.assertFalse(saltret)
except AssertionError:
log.info('Salt Full Return:\n{0}'.format(pprint.pformat(ret)))
try:
raise AssertionError(
'{result} is not False. Salt Comment:\n{comment}'.format(
**(next(six.itervalues(ret)))
)
)
except (AttributeError, IndexError):
raise AssertionError(
'Failed to get result. Salt Returned: {0}'.format(ret)
)
def assertSaltNoneReturn(self, ret):
try:
for saltret in self.__getWithinSaltReturn(ret, 'result'):
self.assertIsNone(saltret)
except AssertionError:
log.info('Salt Full Return:\n{0}'.format(pprint.pformat(ret)))
try:
raise AssertionError(
'{result} is not None. Salt Comment:\n{comment}'.format(
**(next(six.itervalues(ret)))
)
)
except (AttributeError, IndexError):
raise AssertionError(
'Failed to get result. Salt Returned: {0}'.format(ret)
)
def assertInSaltComment(self, in_comment, ret):
for saltret in self.__getWithinSaltReturn(ret, 'comment'):
self.assertIn(in_comment, saltret)
def assertNotInSaltComment(self, not_in_comment, ret):
for saltret in self.__getWithinSaltReturn(ret, 'comment'):
self.assertNotIn(not_in_comment, saltret)
def assertSaltCommentRegexpMatches(self, ret, pattern):
return self.assertInSaltReturnRegexpMatches(ret, pattern, 'comment')
def assertInSaltStateWarning(self, in_comment, ret):
for saltret in self.__getWithinSaltReturn(ret, 'warnings'):
self.assertIn(in_comment, saltret)
def assertNotInSaltStateWarning(self, not_in_comment, ret):
for saltret in self.__getWithinSaltReturn(ret, 'warnings'):
self.assertNotIn(not_in_comment, saltret)
def assertInSaltReturn(self, item_to_check, ret, keys):
for saltret in self.__getWithinSaltReturn(ret, keys):
self.assertIn(item_to_check, saltret)
def assertNotInSaltReturn(self, item_to_check, ret, keys):
for saltret in self.__getWithinSaltReturn(ret, keys):
self.assertNotIn(item_to_check, saltret)
def assertInSaltReturnRegexpMatches(self, ret, pattern, keys=()):
for saltret in self.__getWithinSaltReturn(ret, keys):
self.assertRegex(saltret, pattern)
def assertSaltStateChangesEqual(self, ret, comparison, keys=()):
keys = ['changes'] + self.__return_valid_keys(keys)
for saltret in self.__getWithinSaltReturn(ret, keys):
self.assertEqual(saltret, comparison)
def assertSaltStateChangesNotEqual(self, ret, comparison, keys=()):
keys = ['changes'] + self.__return_valid_keys(keys)
for saltret in self.__getWithinSaltReturn(ret, keys):
self.assertNotEqual(saltret, comparison)
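# Illustrative sketch (not part of the original module): the asserts above walk
# every value of the usual state-return mapping, e.g.
#
#     ret = {'file_|-manage_foo_|-/tmp/foo_|-managed': {
#         'result': True,
#         'comment': 'File /tmp/foo updated',
#         'changes': {'diff': 'New file'}}}
#     self.assertSaltTrueReturn(ret)
#     self.assertInSaltComment('updated', ret)
#     self.assertSaltStateChangesEqual(ret, 'New file', keys='diff')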
def _fetch_events(q):
'''
Collect events and store them
'''
def _clean_queue():
print('Cleaning queue!')
while not q.empty():
queue_item = q.get()
queue_item.task_done()
atexit.register(_clean_queue)
a_config = AdaptedConfigurationTestCaseMixin()
event = salt.utils.event.get_event('minion', sock_dir=a_config.get_config('minion')['sock_dir'], opts=a_config.get_config('minion'))
while True:
try:
events = event.get_event(full=False)
except Exception:
# This is broad but we'll see all kinds of issues right now
# if we drop the proc out from under the socket while we're reading
pass
q.put(events)
class SaltMinionEventAssertsMixin(object):
'''
Asserts to verify that a given event was seen
'''
def __new__(cls, *args, **kwargs):
# We have to cross-call to re-gen a config
cls.q = multiprocessing.Queue()
cls.fetch_proc = salt.utils.process.SignalHandlingMultiprocessingProcess(
target=_fetch_events, args=(cls.q,)
)
cls.fetch_proc.start()
return object.__new__(cls)
def __exit__(self, *args, **kwargs):
self.fetch_proc.join()
def assertMinionEventFired(self, tag):
#TODO
raise salt.exceptions.NotImplemented('assertMinionEventFired() not implemented')
def assertMinionEventReceived(self, desired_event):
queue_wait = 5 # 2.5s
while self.q.empty():
time.sleep(0.5) # Wait for events to be pushed into the queue
queue_wait -= 1
if queue_wait <= 0:
raise AssertionError('Queue wait timer expired')
while not self.q.empty(): # This is not thread-safe and may be inaccurate
event = self.q.get()
if isinstance(event, dict):
event.pop('_stamp')
if desired_event == event:
self.fetch_proc.terminate()
return True
self.fetch_proc.terminate()
raise AssertionError('Event {0} was not received by minion'.format(desired_event))
|
py | 1a367951526d15236c081694c45d99dfb6f0752b | import random
import mmcv
import numpy as np
import torch
import torch.nn as nn
from mmcv.runner.checkpoint import _load_checkpoint_with_prefix
from mmgen.core.runners.fp16_utils import auto_fp16
from mmgen.models.architectures import PixelNorm
from mmgen.models.architectures.common import get_module_device
from mmgen.models.architectures.stylegan.generator_discriminator_v2 import (
StyleGAN2Discriminator, StyleGANv2Generator)
from mmgen.models.architectures.stylegan.modules.styleganv2_modules import (
ConstantInput, ConvDownLayer, EqualLinearActModule, ModMBStddevLayer,
ModulatedStyleConv)
from mmgen.models.builder import MODULES
from .modules.swagan_modules import (ConvBlock, HaarTransform,
InverseHaarTransform, ModulatedFromRGB,
ModulatedToRGB)
@MODULES.register_module()
class SwaganGenerator(StyleGANv2Generator):
r"""StyleGAN2 Generator.
In StyleGAN2, we use a static architecture composing of a style mapping
module and number of convolutional style blocks. More details can be found
in: Analyzing and Improving the Image Quality of StyleGAN CVPR2020.
You can load pretrained model through passing information into
``pretrained`` argument. We have already offered official weights as
follows:
- stylegan2-ffhq-config-f: https://download.openmmlab.com/mmgen/stylegan2/official_weights/stylegan2-ffhq-config-f-official_20210327_171224-bce9310c.pth # noqa
- stylegan2-horse-config-f: https://download.openmmlab.com/mmgen/stylegan2/official_weights/stylegan2-horse-config-f-official_20210327_173203-ef3e69ca.pth # noqa
- stylegan2-car-config-f: https://download.openmmlab.com/mmgen/stylegan2/official_weights/stylegan2-car-config-f-official_20210327_172340-8cfe053c.pth # noqa
- stylegan2-cat-config-f: https://download.openmmlab.com/mmgen/stylegan2/official_weights/stylegan2-cat-config-f-official_20210327_172444-15bc485b.pth # noqa
- stylegan2-church-config-f: https://download.openmmlab.com/mmgen/stylegan2/official_weights/stylegan2-church-config-f-official_20210327_172657-1d42b7d1.pth # noqa
If you want to load the ema model, you can just use following codes:
.. code-block:: python
# ckpt_http is one of the valid path from http source
generator = StyleGANv2Generator(1024, 512,
pretrained=dict(
ckpt_path=ckpt_http,
prefix='generator_ema'))
Of course, you can also download the checkpoint in advance and set
``ckpt_path`` with local path. If you just want to load the original
generator (not the ema model), please set the prefix with 'generator'.
Note that our implementation allows to generate BGR image, while the
original StyleGAN2 outputs RGB images by default. Thus, we provide
``bgr2rgb`` argument to convert the image space.
Args:
out_size (int): The output size of the StyleGAN2 generator.
style_channels (int): The number of channels for style code.
num_mlps (int, optional): The number of MLP layers. Defaults to 8.
channel_multiplier (int, optional): The multiplier factor for the
channel number. Defaults to 2.
blur_kernel (list, optional): The blurry kernel. Defaults
to [1, 3, 3, 1].
lr_mlp (float, optional): The learning rate for the style mapping
layer. Defaults to 0.01.
default_style_mode (str, optional): The default mode of style mixing.
In training, we defaultly adopt mixing style mode. However, in the
evaluation, we use 'single' style mode. `['mix', 'single']` are
currently supported. Defaults to 'mix'.
eval_style_mode (str, optional): The evaluation mode of style mixing.
Defaults to 'single'.
mix_prob (float, optional): Mixing probability. The value should be
in range of [0, 1]. Defaults to ``0.9``.
num_fp16_scales (int, optional): The number of resolutions to use auto
fp16 training. Different from ``fp16_enabled``, this argument
allows users to adopt FP16 training only in several blocks.
            This behaviour is much more similar to the official implementation
by Tero. Defaults to 0.
fp16_enabled (bool, optional): Whether to use fp16 training in this
module. If this flag is `True`, the whole module will be wrapped
with ``auto_fp16``. Defaults to False.
        pretrained (dict | None, optional): Information for pretrained models.
The necessary key is 'ckpt_path'. Besides, you can also provide
'prefix' to load the generator part from the whole state dict.
Defaults to None.
"""
def __init__(self,
out_size,
style_channels,
num_mlps=8,
channel_multiplier=2,
blur_kernel=[1, 3, 3, 1],
lr_mlp=0.01,
default_style_mode='mix',
eval_style_mode='single',
mix_prob=0.9,
num_fp16_scales=0,
fp16_enabled=False,
pretrained=None):
nn.Module.__init__(self)
self.out_size = out_size
self.style_channels = style_channels
self.num_mlps = num_mlps
self.channel_multiplier = channel_multiplier
self.lr_mlp = lr_mlp
self._default_style_mode = default_style_mode
self.default_style_mode = default_style_mode
self.eval_style_mode = eval_style_mode
self.mix_prob = mix_prob
self.num_fp16_scales = num_fp16_scales
self.fp16_enabled = fp16_enabled
# define style mapping layers
mapping_layers = [PixelNorm()]
for _ in range(num_mlps):
mapping_layers.append(
EqualLinearActModule(
style_channels,
style_channels,
equalized_lr_cfg=dict(lr_mul=lr_mlp, gain=1.),
act_cfg=dict(type='fused_bias')))
self.style_mapping = nn.Sequential(*mapping_layers)
self.channels = {
4: 512,
8: 512,
16: 512,
32: 512,
64: 256 * channel_multiplier,
128: 128 * channel_multiplier,
256: 64 * channel_multiplier,
512: 32 * channel_multiplier,
1024: 16 * channel_multiplier,
}
# constant input layer
self.constant_input = ConstantInput(self.channels[4])
# 4x4 stage
self.conv1 = ModulatedStyleConv(
self.channels[4],
self.channels[4],
kernel_size=3,
style_channels=style_channels,
blur_kernel=blur_kernel)
self.to_rgb1 = ModulatedToRGB(
self.channels[4],
style_channels,
upsample=False,
fp16_enabled=fp16_enabled)
# generator backbone (8x8 --> higher resolutions)
self.log_size = int(np.log2(self.out_size)) - 1
self.convs = nn.ModuleList()
self.upsamples = nn.ModuleList()
self.to_rgbs = nn.ModuleList()
in_channels_ = self.channels[4]
for i in range(3, self.log_size + 1):
out_channels_ = self.channels[2**i]
# If `fp16_enabled` is True, all of layers will be run in auto
# FP16. In the case of `num_fp16_sacles` > 0, only partial
# layers will be run in fp16.
_use_fp16 = (self.log_size - i) < num_fp16_scales or fp16_enabled
self.convs.append(
ModulatedStyleConv(
in_channels_,
out_channels_,
3,
style_channels,
upsample=True,
blur_kernel=blur_kernel,
fp16_enabled=_use_fp16))
self.convs.append(
ModulatedStyleConv(
out_channels_,
out_channels_,
3,
style_channels,
upsample=False,
blur_kernel=blur_kernel,
fp16_enabled=_use_fp16))
self.to_rgbs.append(
ModulatedToRGB(
out_channels_,
style_channels,
upsample=True,
fp16_enabled=_use_fp16)) # set to global fp16
in_channels_ = out_channels_
self.num_latents = self.log_size * 2 - 2
self.num_injected_noises = self.num_latents - 1
self.iwt = InverseHaarTransform()
# register buffer for injected noises
for layer_idx in range(self.num_injected_noises):
res = (layer_idx + 5) // 2
shape = [1, 1, 2**res, 2**res]
self.register_buffer(f'injected_noise_{layer_idx}',
torch.randn(*shape))
if pretrained is not None:
self._load_pretrained_model(**pretrained)
@auto_fp16()
def forward(self,
styles,
num_batches=-1,
return_noise=False,
return_latents=False,
inject_index=None,
truncation=1,
truncation_latent=None,
input_is_latent=False,
injected_noise=None,
randomize_noise=True):
"""Forward function.
This function has been integrated with the truncation trick. Please
refer to the usage of `truncation` and `truncation_latent`.
Args:
styles (torch.Tensor | list[torch.Tensor] | callable | None): In
StyleGAN2, you can provide noise tensor or latent tensor. Given
a list containing more than one noise or latent tensors, style
mixing trick will be used in training. Of course, You can
directly give a batch of noise through a ``torch.Tensor`` or
offer a callable function to sample a batch of noise data.
Otherwise, the ``None`` indicates to use the default noise
sampler.
            num_batches (int, optional): The number of samples to generate in a
                batch when noise is sampled internally. Defaults to -1.
return_noise (bool, optional): If True, ``noise_batch`` will be
returned in a dict with ``fake_img``. Defaults to False.
return_latents (bool, optional): If True, ``latent`` will be
returned in a dict with ``fake_img``. Defaults to False.
inject_index (int | None, optional): The index number for mixing
style codes. Defaults to None.
truncation (float, optional): Truncation factor. Give value less
than 1., the truncation trick will be adopted. Defaults to 1.
truncation_latent (torch.Tensor, optional): Mean truncation latent.
Defaults to None.
input_is_latent (bool, optional): If `True`, the input tensor is
the latent tensor. Defaults to False.
injected_noise (torch.Tensor | None, optional): Given a tensor, the
random noise will be fixed as this input injected noise.
Defaults to None.
randomize_noise (bool, optional): If `False`, images are sampled
with the buffered noise tensor injected to the style conv
block. Defaults to True.
Returns:
torch.Tensor | dict: Generated image tensor or dictionary \
containing more data.
"""
# receive noise and conduct sanity check.
if isinstance(styles, torch.Tensor):
assert styles.shape[1] == self.style_channels
styles = [styles]
elif mmcv.is_seq_of(styles, torch.Tensor):
for t in styles:
assert t.shape[-1] == self.style_channels
# receive a noise generator and sample noise.
elif callable(styles):
device = get_module_device(self)
noise_generator = styles
assert num_batches > 0
if self.default_style_mode == 'mix' and random.random(
) < self.mix_prob:
styles = [
noise_generator((num_batches, self.style_channels))
for _ in range(2)
]
else:
styles = [noise_generator((num_batches, self.style_channels))]
styles = [s.to(device) for s in styles]
# otherwise, we will adopt default noise sampler.
else:
device = get_module_device(self)
assert num_batches > 0 and not input_is_latent
if self.default_style_mode == 'mix' and random.random(
) < self.mix_prob:
styles = [
torch.randn((num_batches, self.style_channels))
for _ in range(2)
]
else:
styles = [torch.randn((num_batches, self.style_channels))]
styles = [s.to(device) for s in styles]
if not input_is_latent:
noise_batch = styles
styles = [self.style_mapping(s) for s in styles]
else:
noise_batch = None
if injected_noise is None:
if randomize_noise:
injected_noise = [None] * self.num_injected_noises
else:
injected_noise = [
getattr(self, f'injected_noise_{i}')
for i in range(self.num_injected_noises)
]
# use truncation trick
if truncation < 1:
style_t = []
# calculate truncation latent on the fly
if truncation_latent is None and not hasattr(
self, 'truncation_latent'):
self.truncation_latent = self.get_mean_latent()
truncation_latent = self.truncation_latent
elif truncation_latent is None and hasattr(self,
'truncation_latent'):
truncation_latent = self.truncation_latent
for style in styles:
style_t.append(truncation_latent + truncation *
(style - truncation_latent))
styles = style_t
# no style mixing
if len(styles) < 2:
inject_index = self.num_latents
if styles[0].ndim < 3:
latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1)
else:
latent = styles[0]
# style mixing
else:
if inject_index is None:
inject_index = random.randint(1, self.num_latents - 1)
latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1)
latent2 = styles[1].unsqueeze(1).repeat(
1, self.num_latents - inject_index, 1)
latent = torch.cat([latent, latent2], 1)
# 4x4 stage
out = self.constant_input(latent)
out = self.conv1(out, latent[:, 0], noise=injected_noise[0])
skip = self.to_rgb1(out, latent[:, 1])
_index = 1
# 8x8 ---> higher resolutions
for up_conv, conv, noise1, noise2, to_rgb in zip(
self.convs[::2], self.convs[1::2], injected_noise[1::2],
injected_noise[2::2], self.to_rgbs):
out = up_conv(out, latent[:, _index], noise=noise1)
out = conv(out, latent[:, _index + 1], noise=noise2)
skip = to_rgb(out, latent[:, _index + 2], skip)
_index += 2
img = self.iwt(skip)
# make sure the output image is torch.float32 to avoid RunTime Error
# in other modules
img = img.to(torch.float32)
if return_latents or return_noise:
output_dict = dict(
fake_img=img,
latent=latent,
inject_index=inject_index,
noise_batch=noise_batch)
return output_dict
return img
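# Illustrative sampling sketch (not part of the original module); the sizes
# below are arbitrary and the default noise sampler is assumed:
#
#     generator = SwaganGenerator(out_size=64, style_channels=512)
#     fake_imgs = generator(None, num_batches=2)
#     # fake_imgs is expected to have shape (2, 3, 64, 64)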
@MODULES.register_module()
class SwaganDiscriminator(StyleGAN2Discriminator):
"""StyleGAN2 Discriminator.
The architecture of this discriminator is proposed in StyleGAN2. More
details can be found in: Analyzing and Improving the Image Quality of
StyleGAN CVPR2020.
You can load pretrained model through passing information into
``pretrained`` argument. We have already offered official weights as
follows:
- stylegan2-ffhq-config-f: https://download.openmmlab.com/mmgen/stylegan2/official_weights/stylegan2-ffhq-config-f-official_20210327_171224-bce9310c.pth # noqa
- stylegan2-horse-config-f: https://download.openmmlab.com/mmgen/stylegan2/official_weights/stylegan2-horse-config-f-official_20210327_173203-ef3e69ca.pth # noqa
- stylegan2-car-config-f: https://download.openmmlab.com/mmgen/stylegan2/official_weights/stylegan2-car-config-f-official_20210327_172340-8cfe053c.pth # noqa
- stylegan2-cat-config-f: https://download.openmmlab.com/mmgen/stylegan2/official_weights/stylegan2-cat-config-f-official_20210327_172444-15bc485b.pth # noqa
- stylegan2-church-config-f: https://download.openmmlab.com/mmgen/stylegan2/official_weights/stylegan2-church-config-f-official_20210327_172657-1d42b7d1.pth # noqa
If you want to load the ema model, you can just use following codes:
.. code-block:: python
# ckpt_http is one of the valid path from http source
discriminator = StyleGAN2Discriminator(1024, 512,
pretrained=dict(
ckpt_path=ckpt_http,
prefix='discriminator'))
Of course, you can also download the checkpoint in advance and set
``ckpt_path`` with local path.
Note that our implementation adopts BGR image as input, while the
original StyleGAN2 provides RGB images to the discriminator. Thus, we
provide ``bgr2rgb`` argument to convert the image space. If your images
follow the RGB order, please set it to ``True`` accordingly.
Args:
in_size (int): The input size of images.
channel_multiplier (int, optional): The multiplier factor for the
channel number. Defaults to 2.
blur_kernel (list, optional): The blurry kernel. Defaults
to [1, 3, 3, 1].
mbstd_cfg (dict, optional): Configs for minibatch-stddev layer.
Defaults to dict(group_size=4, channel_groups=1).
num_fp16_scales (int, optional): The number of resolutions to use auto
fp16 training. Defaults to 0.
fp16_enabled (bool, optional): Whether to use fp16 training in this
module. Defaults to False.
out_fp32 (bool, optional): Whether to convert the output feature map to
`torch.float32`. Defaults to `True`.
convert_input_fp32 (bool, optional): Whether to convert input type to
fp32 if not `fp16_enabled`. This argument is designed to deal with
the cases where some modules are run in FP16 and others in FP32.
Defaults to True.
            pretrained (dict | None, optional): Information for pretrained models.
The necessary key is 'ckpt_path'. Besides, you can also provide
'prefix' to load the generator part from the whole state dict.
Defaults to None.
"""
def __init__(self,
in_size,
channel_multiplier=2,
blur_kernel=[1, 3, 3, 1],
mbstd_cfg=dict(group_size=4, channel_groups=1),
num_fp16_scales=0,
fp16_enabled=False,
out_fp32=True,
convert_input_fp32=True,
pretrained=None):
nn.Module.__init__(self)
self.num_fp16_scale = num_fp16_scales
self.fp16_enabled = fp16_enabled
self.convert_input_fp32 = convert_input_fp32
self.out_fp32 = out_fp32
channels = {
4: 512,
8: 512,
16: 512,
32: 512,
64: 256 * channel_multiplier,
128: 128 * channel_multiplier,
256: 64 * channel_multiplier,
512: 32 * channel_multiplier,
1024: 16 * channel_multiplier,
}
log_size = int(np.log2(in_size)) - 1
in_channels = channels[in_size]
_use_fp16 = num_fp16_scales > 0
from_rgbs = []
convs = []
for i in range(log_size, 2, -1):
out_channel = channels[2**(i - 1)]
# add fp16 training for higher resolutions
_use_fp16 = (log_size - i) < num_fp16_scales or fp16_enabled
from_rgbs.append(
ModulatedFromRGB(
in_channels,
downsample=i != log_size,
fp16_enabled=_use_fp16,
convert_input_fp32=convert_input_fp32))
convs.append(
ConvBlock(
in_channels,
out_channel,
blur_kernel,
fp16_enabled=_use_fp16,
convert_input_fp32=convert_input_fp32))
in_channels = out_channel
from_rgbs.append(
ModulatedFromRGB(
channels[4],
downsample=True,
fp16_enabled=_use_fp16,
convert_input_fp32=convert_input_fp32))
self.from_rgbs = nn.Sequential(*from_rgbs)
self.convs = nn.Sequential(*convs)
self.mbstd_layer = ModMBStddevLayer(**mbstd_cfg)
self.final_conv = ConvDownLayer(in_channels + 1, channels[4], 3)
self.final_linear = nn.Sequential(
EqualLinearActModule(
channels[4] * 4 * 4,
channels[4],
act_cfg=dict(type='fused_bias')),
EqualLinearActModule(channels[4], 1),
)
self.dwt = HaarTransform()
if pretrained is not None:
self._load_pretrained_model(**pretrained)
def _load_pretrained_model(self,
ckpt_path,
prefix='',
map_location='cpu',
strict=True):
state_dict = _load_checkpoint_with_prefix(prefix, ckpt_path,
map_location)
self.load_state_dict(state_dict, strict=strict)
mmcv.print_log(f'Load pretrained model from {ckpt_path}', 'mmgen')
@auto_fp16()
def forward(self, x):
"""Forward function.
Args:
x (torch.Tensor): Input image tensor.
Returns:
torch.Tensor: Predict score for the input image.
"""
x = self.dwt(x)
out = None
for from_rgb, conv in zip(self.from_rgbs, self.convs):
x, out = from_rgb(x, out)
out = conv(out)
_, out = self.from_rgbs[-1](x, out)
x = self.mbstd_layer(out)
if not self.final_conv.fp16_enabled and self.convert_input_fp32:
x = x.to(torch.float32)
x = self.final_conv(x)
x = x.view(x.shape[0], -1)
x = self.final_linear(x)
return x
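# Illustrative scoring sketch (not part of the original module); the input size
# is arbitrary:
#
#     disc = SwaganDiscriminator(in_size=64)
#     scores = disc(torch.randn(2, 3, 64, 64))
#     # scores holds one realness logit per image, shape (2, 1)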
|
py | 1a367ba28d59f685af29e748162aa545783ba490 | import hashlib
import os
from shutil import move
from tempfile import mkstemp
BLOCKSIZE = 65535
def find_hash(hash_file, plan_name):
# Try to find the hash in the hash file
filename = os.path.normpath(hash_file)
if os.path.isfile(filename):
        with open(filename, 'r') as plan_hashes:
            for line in plan_hashes:
                parts = line.strip().split('=')
                if len(parts) == 2 and parts[0] == plan_name:
                    return parts[1]
return None
def update_hash(hash_file, plan_name, hash_value):
# Do the update (create the file if it doesn't exist)
filename = os.path.normpath(hash_file)
# If it doesn't exist, we shortcut this
if not os.path.isfile(hash_file):
with open(hash_file, 'w') as new_file:
new_file.write('%s=%s\n' % (plan_name, hash_value))
return
# Otherwise, we need to rebuild the file
fh, abs_path = mkstemp()
is_written = False
with open(abs_path, 'w') as new_file:
with open(filename, 'r') as old_file:
# Handle existing entries in the file
for line in old_file:
parts = line.strip().split('=')
if parts[0] == plan_name:
is_written = True
new_file.write('%s=%s\n' % (plan_name, hash_value))
else:
new_file.write(line)
# If the hash wasn't already in the file
if not is_written:
new_file.write('%s=%s\n' % (plan_name, hash_value))
os.close(fh)
# Remove original file
os.remove(hash_file)
# Move new file
move(abs_path, hash_file)
def calc_hash(filename):
hasher = hashlib.md5()
with open(filename, 'rb') as afile:
buf = afile.read(BLOCKSIZE)
while len(buf) > 0:
hasher.update(buf)
buf = afile.read(BLOCKSIZE)
return hasher.hexdigest()
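# Minimal usage sketch (not part of the original module): hash a file, store
# the digest under a plan name, and read it back. File names are temporary
# illustration values.
if __name__ == '__main__':
    import tempfile
    with tempfile.NamedTemporaryFile('w', suffix='.sql', delete=False) as plan:
        plan.write('CREATE TABLE demo (id INT);')
    hash_store = plan.name + '.hashes'
    digest = calc_hash(plan.name)
    update_hash(hash_store, 'demo_plan', digest)
    assert find_hash(hash_store, 'demo_plan') == digest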
|
py | 1a367c3c265aca00e8ee4c1d24543ab6183b9b48 | __all__ = ["Captcha", "CaptchaPainter"]
__version__ = "1.0.1"
__description__ = "A graphical captcha generator"  # translated from the original Chinese: 一个图形验证码生成工具
__author__ = "AntonVanke"
__author_email__ = "[email protected]"
__url__ = "https://github.com/antonvanke/kaptcha"
from .captcha import Captcha, CaptchaPainter
|
py | 1a367d2ea30a2bec0167ef50b5a755dcba20cbe0 | # Copyright 2021 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes for input/output types in KFP SDK.
These are only compatible with v2 Pipelines.
"""
import os
from typing import Dict, Generic, List, Optional, Type, TypeVar, Union
_GCS_LOCAL_MOUNT_PREFIX = '/gcs/'
class Artifact(object):
"""Generic Artifact class.
This class is meant to represent the metadata around an input or output
machine-learning Artifact. Artifacts have URIs, which can either be a location
on disk (or Cloud storage) or some other resource identifier such as
an API resource name.
Artifacts carry a `metadata` field, which is a dictionary for storing
metadata related to this artifact.
"""
TYPE_NAME = 'system.Artifact'
def __init__(self,
name: Optional[str] = None,
uri: Optional[str] = None,
metadata: Optional[Dict] = None):
"""Initializes the Artifact with the given name, URI and metadata."""
self.uri = uri or ''
self.name = name or ''
self.metadata = metadata or {}
@property
def path(self):
return self._get_path()
@path.setter
def path(self, path):
self._set_path(path)
def _get_path(self) -> str:
if self.uri.startswith('gs://'):
return _GCS_LOCAL_MOUNT_PREFIX + self.uri[len('gs://'):]
def _set_path(self, path):
if path.startswith(_GCS_LOCAL_MOUNT_PREFIX):
path = 'gs://' + path[len(_GCS_LOCAL_MOUNT_PREFIX):]
self.uri = path
class Model(Artifact):
"""An artifact representing an ML Model."""
TYPE_NAME = 'system.Model'
def __init__(self,
name: Optional[str] = None,
uri: Optional[str] = None,
metadata: Optional[Dict] = None):
super().__init__(uri=uri, name=name, metadata=metadata)
@property
def framework(self) -> str:
return self._get_framework()
def _get_framework(self) -> str:
return self.metadata.get('framework', '')
@framework.setter
def framework(self, framework: str):
self._set_framework(framework)
def _set_framework(self, framework: str):
self.metadata['framework'] = framework
class Dataset(Artifact):
"""An artifact representing an ML Dataset."""
TYPE_NAME = 'system.Dataset'
def __init__(self,
name: Optional[str] = None,
uri: Optional[str] = None,
metadata: Optional[Dict] = None):
super().__init__(uri=uri, name=name, metadata=metadata)
class Metrics(Artifact):
"""Represent a simple base Artifact type to store key-value scalar metrics."""
TYPE_NAME = 'system.Metrics'
def __init__(self,
name: Optional[str] = None,
uri: Optional[str] = None,
metadata: Optional[Dict] = None):
super().__init__(uri=uri, name=name, metadata=metadata)
def log_metric(self, metric: str, value: float):
"""Sets a custom scalar metric.
Args:
metric: Metric key
value: Value of the metric.
"""
self.metadata[metric] = value
class ClassificationMetrics(Artifact):
"""Represents Artifact class to store Classification Metrics."""
TYPE_NAME = 'system.ClassificationMetrics'
def __init__(self,
name: Optional[str] = None,
uri: Optional[str] = None,
metadata: Optional[Dict] = None):
super().__init__(uri=uri, name=name, metadata=metadata)
def log_roc_data_point(self, fpr: float, tpr: float, threshold: float):
"""Logs a single data point in the ROC Curve.
Args:
fpr: False positive rate value of the data point.
tpr: True positive rate value of the data point.
threshold: Threshold value for the data point.
"""
roc_reading = {
'confidenceThreshold': threshold,
'recall': tpr,
'falsePositiveRate': fpr
}
if 'confidenceMetrics' not in self.metadata.keys():
self.metadata['confidenceMetrics'] = []
self.metadata['confidenceMetrics'].append(roc_reading)
def log_roc_curve(self, fpr: List[float], tpr: List[float],
threshold: List[float]):
"""Logs an ROC curve.
The list length of fpr, tpr and threshold must be the same.
Args:
fpr: List of false positive rate values.
tpr: List of true positive rate values.
threshold: List of threshold values.
"""
if len(fpr) != len(tpr) or len(fpr) != len(threshold) or len(tpr) != len(
threshold):
raise ValueError('Length of fpr, tpr and threshold must be the same. '
'Got lengths {}, {} and {} respectively.'.format(
len(fpr), len(tpr), len(threshold)))
for i in range(len(fpr)):
self.log_roc_data_point(fpr=fpr[i], tpr=tpr[i], threshold=threshold[i])
def set_confusion_matrix_categories(self, categories: List[str]):
"""Stores confusion matrix categories.
Args:
categories: List of strings specifying the categories.
"""
self._categories = []
annotation_specs = []
for category in categories:
annotation_spec = {'displayName': category}
self._categories.append(category)
annotation_specs.append(annotation_spec)
self._matrix = []
for row in range(len(self._categories)):
self._matrix.append({'row': [0] * len(self._categories)})
self._confusion_matrix = {}
self._confusion_matrix['annotationSpecs'] = annotation_specs
self._confusion_matrix['rows'] = self._matrix
self.metadata['confusionMatrix'] = self._confusion_matrix
def log_confusion_matrix_row(self, row_category: str, row: List[float]):
"""Logs a confusion matrix row.
Args:
row_category: Category to which the row belongs.
row: List of integers specifying the values for the row.
Raises:
ValueError: If row_category is not in the list of categories
set in set_categories call.
"""
if row_category not in self._categories:
raise ValueError('Invalid category: {} passed. Expected one of: {}'.\
format(row_category, self._categories))
if len(row) != len(self._categories):
raise ValueError('Invalid row. Expected size: {} got: {}'.\
format(len(self._categories), len(row)))
self._matrix[self._categories.index(row_category)] = {'row': row}
self.metadata['confusionMatrix'] = self._confusion_matrix
def log_confusion_matrix_cell(self, row_category: str, col_category: str,
value: int):
"""Logs a cell in the confusion matrix.
Args:
row_category: String representing the name of the row category.
col_category: String representing the name of the column category.
value: Int value of the cell.
Raises:
ValueError: If row_category or col_category is not in the list of
categories set in set_categories.
"""
if row_category not in self._categories:
raise ValueError('Invalid category: {} passed. Expected one of: {}'.\
format(row_category, self._categories))
if col_category not in self._categories:
raise ValueError('Invalid category: {} passed. Expected one of: {}'.\
                       format(col_category, self._categories))
self._matrix[self._categories.index(row_category)]['row'][
self._categories.index(col_category)] = value
self.metadata['confusionMatrix'] = self._confusion_matrix
def log_confusion_matrix(self, categories: List[str],
matrix: List[List[int]]):
"""Logs a confusion matrix.
Args:
categories: List of the category names.
matrix: Complete confusion matrix.
Raises:
ValueError: Length of categories does not match number of rows or columns.
"""
self.set_confusion_matrix_categories(categories)
if len(matrix) != len(categories):
raise ValueError('Invalid matrix: {} passed for categories: {}'.\
format(matrix, categories))
for index in range(len(categories)):
if len(matrix[index]) != len(categories):
raise ValueError('Invalid matrix: {} passed for categories: {}'.\
format(matrix, categories))
self.log_confusion_matrix_row(categories[index], matrix[index])
self.metadata['confusionMatrix'] = self._confusion_matrix
class SlicedClassificationMetrics(Artifact):
"""Metrics class representing Sliced Classification Metrics.
Similar to ClassificationMetrics clients using this class are expected to use
log methods of the class to log metrics with the difference being each log
method takes a slice to associate the ClassificationMetrics.
"""
TYPE_NAME = 'system.SlicedClassificationMetrics'
def __init__(self,
name: Optional[str] = None,
uri: Optional[str] = None,
metadata: Optional[Dict] = None):
    super().__init__(uri=uri, name=name, metadata=metadata)
    # Holds one ClassificationMetrics instance per slice; without this the
    # upsert helper below would fail on first use.
    self._sliced_metrics = {}
def _upsert_classification_metrics_for_slice(self, slice: str):
"""Upserts the classification metrics instance for a slice."""
if slice not in self._sliced_metrics:
self._sliced_metrics[slice] = ClassificationMetrics()
def _update_metadata(self, slice: str):
"""Updates metadata to adhere to the metrics schema."""
self.metadata = {}
self.metadata['evaluationSlices'] = []
for slice in self._sliced_metrics.keys():
slice_metrics = {
'slice': slice,
'sliceClassificationMetrics': self._sliced_metrics[slice].metadata
}
self.metadata['evaluationSlices'].append(slice_metrics)
def log_roc_reading(self, slice: str, threshold: float, tpr: float,
fpr: float):
"""Logs a single data point in the ROC Curve of a slice.
Args:
slice: String representing slice label.
      threshold: Threshold value for the data point.
tpr: True positive rate value of the data point.
fpr: False positive rate value of the data point.
"""
self._upsert_classification_metrics_for_slice(slice)
    self._sliced_metrics[slice].log_roc_data_point(
        fpr=fpr, tpr=tpr, threshold=threshold)
self._update_metadata(slice)
def load_roc_readings(self, slice: str, readings: List[List[float]]):
"""Supports bulk loading ROC Curve readings for a slice.
Args:
slice: String representing slice label.
readings: A 2-D list providing ROC Curve data points.
The expected order of the data points is: threshold,
true_positive_rate, false_positive_rate.
"""
self._upsert_classification_metrics_for_slice(slice)
    # ClassificationMetrics has no bulk ROC loader, so log each reading; the
    # expected order per reading is: threshold, tpr, fpr (see docstring above).
    for threshold, tpr, fpr in readings:
      self._sliced_metrics[slice].log_roc_data_point(
          fpr=fpr, tpr=tpr, threshold=threshold)
self._update_metadata(slice)
def set_confusion_matrix_categories(self, slice: str, categories: List[str]):
"""Stores confusion matrix categories for a slice..
Categories are stored in the internal metrics_utils.ConfusionMatrix
instance of the slice.
Args:
slice: String representing slice label.
categories: List of strings specifying the categories.
"""
self._upsert_classification_metrics_for_slice(slice)
self._sliced_metrics[slice].set_confusion_matrix_categories(categories)
self._update_metadata(slice)
def log_confusion_matrix_row(self, slice: str, row_category: str,
row: List[int]):
"""Logs a confusion matrix row for a slice.
Row is updated on the internal metrics_utils.ConfusionMatrix
instance of the slice.
Args:
slice: String representing slice label.
row_category: Category to which the row belongs.
row: List of integers specifying the values for the row.
"""
self._upsert_classification_metrics_for_slice(slice)
self._sliced_metrics[slice].log_confusion_matrix_row(row_category, row)
self._update_metadata(slice)
def log_confusion_matrix_cell(self, slice: str, row_category: str,
col_category: str, value: int):
"""Logs a confusion matrix cell for a slice..
Cell is updated on the internal metrics_utils.ConfusionMatrix
instance of the slice.
Args:
slice: String representing slice label.
row_category: String representing the name of the row category.
col_category: String representing the name of the column category.
value: Int value of the cell.
"""
self._upsert_classification_metrics_for_slice(slice)
self._sliced_metrics[slice].log_confusion_matrix_cell(
row_category, col_category, value)
self._update_metadata(slice)
def load_confusion_matrix(self, slice: str, categories: List[str],
matrix: List[List[int]]):
"""Supports bulk loading the whole confusion matrix for a slice.
Args:
slice: String representing slice label.
categories: List of the category names.
matrix: Complete confusion matrix.
"""
self._upsert_classification_metrics_for_slice(slice)
    self._sliced_metrics[slice].log_confusion_matrix(categories, matrix)
self._update_metadata(slice)
T = TypeVar('T')
class InputAnnotation():
"""Marker type for input artifacts."""
pass
class OutputAnnotation():
"""Marker type for output artifacts."""
pass
# TODO: Use typing.Annotated instead of this hack.
# With typing.Annotated (Python 3.9+ or typing_extensions package), the
# following would look like:
# Input = typing.Annotated[T, InputAnnotation]
# Output = typing.Annotated[T, OutputAnnotation]
# Input represents an Input artifact of type T.
Input = Union[T, InputAnnotation]
# Output represents an Output artifact of type T.
Output = Union[T, OutputAnnotation]
def is_artifact_annotation(typ) -> bool:
if hasattr(typ, '_subs_tree'): # Python 3.6
subs_tree = typ._subs_tree()
return len(subs_tree) == 3 and subs_tree[0] == Union and subs_tree[2] in [InputAnnotation, OutputAnnotation]
if not hasattr(typ, '__origin__'):
return False
if typ.__origin__ != Union and type(typ.__origin__) != type(Union):
return False
if not hasattr(typ, '__args__') or len(typ.__args__) != 2:
return False
if typ.__args__[1] not in [InputAnnotation, OutputAnnotation]:
return False
return True
def is_input_artifact(typ) -> bool:
"""Returns True if typ is of type Input[T]."""
if not is_artifact_annotation(typ):
return False
if hasattr(typ, '_subs_tree'): # Python 3.6
subs_tree = typ._subs_tree()
return len(subs_tree) == 3 and subs_tree[2] == InputAnnotation
return typ.__args__[1] == InputAnnotation
def is_output_artifact(typ) -> bool:
"""Returns True if typ is of type Output[T]."""
if not is_artifact_annotation(typ):
return False
if hasattr(typ, '_subs_tree'): # Python 3.6
subs_tree = typ._subs_tree()
return len(subs_tree) == 3 and subs_tree[2] == OutputAnnotation
return typ.__args__[1] == OutputAnnotation
def get_io_artifact_class(typ):
if not is_artifact_annotation(typ):
return None
if typ == Input or typ == Output:
return None
if hasattr(typ, '_subs_tree'): # Python 3.6
subs_tree = typ._subs_tree()
if len(subs_tree) != 3:
return None
return subs_tree[1]
return typ.__args__[0]
def get_io_artifact_annotation(typ):
if not is_artifact_annotation(typ):
return None
if hasattr(typ, '_subs_tree'): # Python 3.6
subs_tree = typ._subs_tree()
if len(subs_tree) != 3:
return None
return subs_tree[2]
return typ.__args__[1]
_SCHEMA_TITLE_TO_TYPE: Dict[str, Artifact] = {
x.TYPE_NAME: x
for x in [Artifact, Model, Dataset, Metrics, ClassificationMetrics]
}
def create_runtime_artifact(runtime_artifact: Dict) -> Artifact:
"""Creates an Artifact instance from the specified RuntimeArtifact.
Args:
runtime_artifact: Dictionary representing JSON-encoded RuntimeArtifact.
"""
schema_title = runtime_artifact.get('type', {}).get('schemaTitle', '')
artifact_type = _SCHEMA_TITLE_TO_TYPE.get(schema_title)
if not artifact_type:
artifact_type = Artifact
return artifact_type(
uri=runtime_artifact.get('uri', ''),
name=runtime_artifact.get('name', ''),
metadata=runtime_artifact.get('metadata', {}),
)
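# Minimal usage sketch (not part of the original module): rebuild a typed
# artifact from a RuntimeArtifact-style dict, check the Input/Output annotation
# helpers, and log a small confusion matrix. All literal values are arbitrary.
if __name__ == '__main__':
  runtime = {
      'name': 'my-dataset',
      'uri': 'gs://my-bucket/data.csv',
      'type': {'schemaTitle': 'system.Dataset'},
      'metadata': {'rows': 100},
  }
  artifact = create_runtime_artifact(runtime)
  assert isinstance(artifact, Dataset)
  assert artifact.path == '/gcs/my-bucket/data.csv'
  assert is_input_artifact(Input[Model])
  assert is_output_artifact(Output[Model])
  metrics = ClassificationMetrics()
  metrics.log_confusion_matrix(['cat', 'dog'], [[9, 1], [2, 8]])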
|
py | 1a367d7de3d377f751007c97afbc45b8e14294ae | # Copyright 2020 ByteDance Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from neurst.layers.attentions.multi_head_attention import MultiHeadAttention, MultiHeadSelfAttention
from neurst.layers.common_layers import PrePostProcessingWrapper, TransformerFFN
from neurst.utils import compat
class TransformerEncoderLayer(tf.keras.layers.Layer):
""" Defines one transformer layer. """
def __init__(self,
hidden_size,
num_attention_heads,
filter_size,
ffn_activation="relu",
attention_dropout_rate=0.,
attention_type="dot_product",
ffn_dropout_rate=0.,
layer_postprocess_dropout_rate=0.,
layer_postprocess_epsilon=1e-6,
post_normalize=False,
name=None):
""" Initializes the transformer encoder layer.
Args:
hidden_size: The number of hidden units.
num_attention_heads: The number of self attention heads.
filter_size: The filter size of ffn layer.
ffn_activation: The activation function of ffn layer.
ffn_dropout_rate: The dropout rate for ffn layer.
            attention_dropout_rate: The dropout rate for the attention layer.
attention_type: The self attention type.
layer_postprocess_dropout_rate: The dropout rate for each layer post process.
layer_postprocess_epsilon: The epsilon for layer norm.
post_normalize: Whether to apply layernorm after each block.
name: The name of this encoder.
"""
super(TransformerEncoderLayer, self).__init__(name=name)
self._hidden_size = hidden_size
self._num_attention_heads = num_attention_heads
self._filter_size = filter_size
self._ffn_activation = ffn_activation
self._attention_dropout_rate = attention_dropout_rate
self._attention_type = attention_type
self._ffn_dropout_rate = ffn_dropout_rate
self._layer_postprocess_dropout_rate = layer_postprocess_dropout_rate
self._layer_postprocess_epsilon = layer_postprocess_epsilon
self._post_normalize = post_normalize
def build(self, input_shape):
self._selfatt_layer = PrePostProcessingWrapper(
layer=MultiHeadSelfAttention(
num_heads=self._num_attention_heads,
num_units=self._hidden_size,
attention_dropout_rate=self._attention_dropout_rate,
attention_type=self._attention_type,
name="self_attention"),
dropout_rate=self._layer_postprocess_dropout_rate,
epsilon=self._layer_postprocess_epsilon,
pre_norm=(not self._post_normalize),
res_conn_factor=1.,
name="self_attention_prepost_wrapper")
self._ffn_layer = PrePostProcessingWrapper(
layer=TransformerFFN(
filter_size=self._filter_size,
output_size=self._hidden_size,
dropout_rate=self._ffn_dropout_rate,
activation=self._ffn_activation,
name="ffn"),
dropout_rate=self._layer_postprocess_dropout_rate,
epsilon=self._layer_postprocess_epsilon,
pre_norm=(not self._post_normalize),
res_conn_factor=1.,
name="ffn_prepost_wrapper")
super(TransformerEncoderLayer, self).build(input_shape)
def call(self, x, x_bias, cache=None, is_training=True):
y = self._selfatt_layer(
x, # x as query
cache=None if cache is None else cache["self_attention"],
bias=x_bias,
is_training=is_training)
# ffn
y = self._ffn_layer(y, is_training=is_training)
return y
def create_internal_cache(self):
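# The cache holds per-head key/value tensors with an initially empty
# (length-0) leading dimension; it is meant to be filled incrementally when
# the layer is applied step by step (see the `cache` argument of `call`).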
num_units_per_head = self._hidden_size // self._num_attention_heads
return {
"self_attention": {
"keys": tf.zeros([0, self._num_attention_heads, num_units_per_head],
dtype=compat.CUSTOM_GLOBAL_FLOATX),
"values": tf.zeros([0, self._num_attention_heads, num_units_per_head],
dtype=compat.CUSTOM_GLOBAL_FLOATX)},
}
class TransformerDecoderLayer(tf.keras.layers.Layer):
""" Defines one transformer layer. """
def __init__(self,
hidden_size,
num_attention_heads,
filter_size,
ffn_activation="relu",
attention_dropout_rate=0.,
attention_type="dot_product",
ffn_dropout_rate=0.,
layer_postprocess_dropout_rate=0.,
layer_postprocess_epsilon=1e-6,
post_normalize=False,
with_cross_attention=True,
name=None):
""" Initializes the transformer encoder layer.
Args:
hidden_size: The number of hidden units.
num_attention_heads: The number of self attention heads.
filter_size: The filter size of ffn layer.
ffn_activation: The activation function of ffn layer.
ffn_dropout_rate: The dropout rate for ffn layer.
attention_dropout_rate: The dropout rate for the attention layer.
attention_type: The self attention type.
layer_postprocess_dropout_rate: The dropout rate for each layer post process.
layer_postprocess_epsilon: The epsilon for layer norm.
post_normalize: Whether to apply layernorm after each block.
with_cross_attention: Whether to involve cross attention.
name: The name of this decoder.
"""
super(TransformerDecoderLayer, self).__init__(name=name)
self._hidden_size = hidden_size
self._num_attention_heads = num_attention_heads
self._filter_size = filter_size
self._ffn_activation = ffn_activation
self._attention_dropout_rate = attention_dropout_rate
self._attention_type = attention_type
self._ffn_dropout_rate = ffn_dropout_rate
self._layer_postprocess_dropout_rate = layer_postprocess_dropout_rate
self._layer_postprocess_epsilon = layer_postprocess_epsilon
self._post_normalize = post_normalize
self._with_cross_attention = with_cross_attention
def memorize_memory(self, memory):
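# Pre-compute the cross-attention key/value projections of the encoder output
# once, so they can be reused at every decoding step instead of re-projecting
# `memory` each time.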
if not self._with_cross_attention:
raise ValueError("No need to call memorize memory without cross attention.")
k, v = self._crossatt_layer.layer.compute_kv(memory)
return {"memory": {"keys": k, "values": v}}
def create_decoding_internal_cache(self, decode_padded_length=None):
num_units_per_head = self._hidden_size // self._num_attention_heads
return {
"self_attention": {
"keys": tf.zeros([decode_padded_length or 0, self._num_attention_heads, num_units_per_head],
dtype=compat.CUSTOM_GLOBAL_FLOATX),
"values": tf.zeros([decode_padded_length or 0, self._num_attention_heads, num_units_per_head],
dtype=compat.CUSTOM_GLOBAL_FLOATX)},
}
def build(self, input_shape):
self._selfatt_layer = PrePostProcessingWrapper(
layer=MultiHeadSelfAttention(
num_heads=self._num_attention_heads,
num_units=self._hidden_size,
attention_dropout_rate=self._attention_dropout_rate,
attention_type=self._attention_type,
name="self_attention"),
dropout_rate=self._layer_postprocess_dropout_rate,
epsilon=self._layer_postprocess_epsilon,
pre_norm=(not self._post_normalize),
res_conn_factor=1.,
name="self_attention_prepost_wrapper")
if self._with_cross_attention:
self._crossatt_layer = PrePostProcessingWrapper(
layer=MultiHeadAttention(
num_heads=self._num_attention_heads,
num_units=self._hidden_size,
attention_dropout_rate=self._attention_dropout_rate,
attention_type=self._attention_type,
name="encdec_attention"),
dropout_rate=self._layer_postprocess_dropout_rate,
epsilon=self._layer_postprocess_epsilon,
pre_norm=(not self._post_normalize),
res_conn_factor=1.,
name="encdec_attention_prepost_wrapper")
self._ffn_layer = PrePostProcessingWrapper(
layer=TransformerFFN(
filter_size=self._filter_size,
output_size=self._hidden_size,
dropout_rate=self._ffn_dropout_rate,
activation=self._ffn_activation,
name="ffn"),
dropout_rate=self._layer_postprocess_dropout_rate,
epsilon=self._layer_postprocess_epsilon,
pre_norm=(not self._post_normalize),
res_conn_factor=1.,
name="ffn_prepost_wrapper"
)
super(TransformerDecoderLayer, self).build(input_shape)
def call(self, x, x_bias, cache,
memory=None, memory_bias=None,
is_training=True, decode_loop_step=None):
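# `cache` carries the growing self-attention keys/values, plus the pre-computed
# encoder memory keys/values when cross attention is enabled (see
# memorize_memory); `decode_loop_step` indexes the current position when a
# fixed-length, pre-padded cache is used (cf. `decode_padded_length` above).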
selfatt_cache = None if cache is None else cache["self_attention"]
y = self._selfatt_layer(
x, # x as query
bias=x_bias,
cache=selfatt_cache,
is_training=is_training,
decode_loop_step=decode_loop_step)
# enc-dec attention layer
if self._with_cross_attention:
crossatt_cache = None if cache is None else cache["memory"]
y = self._crossatt_layer(
y, # y (decoder states) as query
memory=memory, # None indicates self-attention
memory_bias=memory_bias,
cache=crossatt_cache,
is_training=is_training)
# ffn
y = self._ffn_layer(y, is_training=is_training)
return y
|
py | 1a367db781a145f8fa342f997721bb842c1ba849 | #!/usr/bin/env python3
#
# This file is part of LiteX-Boards.
#
# Copyright (c) 2021 Hans Baier <[email protected]>
# SPDX-License-Identifier: BSD-2-Clause
# https://www.aliexpress.com/item/1000006630084.html
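# Example invocations (illustrative; see the flags defined in main() below):
#   python3 <this script> --build                               # build the bitstream
#   python3 <this script> --load                                # load the bitstream onto the board
#   python3 <this script> --with-ethernet --eth-ip=192.168.1.50 --build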
import os
import argparse
from migen import *
from litex_boards.platforms import qmtech_xc7a35t
from litex.build.xilinx.vivado import vivado_build_args, vivado_build_argdict
from litex.soc.cores.clock import *
from litex.soc.integration.soc import SoCRegion
from litex.soc.integration.soc_core import *
from litex.soc.integration.builder import *
from litex.soc.cores.video import VideoVGAPHY
from litex.soc.cores.led import LedChaser
from litedram.modules import MT41J128M16
from litedram.phy import s7ddrphy
from liteeth.phy.mii import LiteEthPHYMII
# CRG ----------------------------------------------------------------------------------------------
class _CRG(Module):
def __init__(self, platform, sys_clk_freq, with_ethernet, with_vga):
self.rst = Signal()
self.clock_domains.cd_sys = ClockDomain()
self.clock_domains.cd_sys4x = ClockDomain(reset_less=True)
self.clock_domains.cd_sys4x_dqs = ClockDomain(reset_less=True)
self.clock_domains.cd_idelay = ClockDomain()
self.clock_domains.cd_eth = ClockDomain()
if with_ethernet:
self.clock_domains.cd_eth = ClockDomain()
if with_vga:
self.clock_domains.cd_vga = ClockDomain(reset_less=True)
# # #
self.submodules.pll = pll = S7PLL(speedgrade=-1)
try:
reset_button = platform.request("cpu_reset")
self.comb += pll.reset.eq(~reset_button | self.rst)
except:
self.comb += pll.reset.eq(self.rst)
pll.register_clkin(platform.request("clk50"), 50e6)
pll.create_clkout(self.cd_sys, sys_clk_freq)
pll.create_clkout(self.cd_sys4x, 4*sys_clk_freq)
pll.create_clkout(self.cd_sys4x_dqs, 4*sys_clk_freq, phase=90)
pll.create_clkout(self.cd_idelay, 200e6)
if with_ethernet:
pll.create_clkout(self.cd_eth, 25e6)
if with_vga:
pll.create_clkout(self.cd_vga, 40e6)
platform.add_false_path_constraints(self.cd_sys.clk, pll.clkin) # Ignore sys_clk to pll.clkin path created by SoC's rst.
self.submodules.idelayctrl = S7IDELAYCTRL(self.cd_idelay)
# BaseSoC ------------------------------------------------------------------------------------------
class BaseSoC(SoCCore):
mem_map = {**SoCCore.mem_map, **{"spiflash": 0x80000000}}
def __init__(self, toolchain="vivado", sys_clk_freq=int(100e6), with_daughterboard=False,
with_ethernet=False, with_etherbone=False, eth_ip="192.168.1.50", eth_dynamic_ip=False,
with_led_chaser=True, with_video_terminal=False, with_video_framebuffer=False,
ident_version=True, with_jtagbone=True, with_spi_flash=False, **kwargs):
platform = qmtech_xc7a35t.Platform(toolchain=toolchain, with_daughterboard=with_daughterboard)
# SoCCore ----------------------------------------------------------------------------------
if kwargs["uart_name"] == "serial":
kwargs["uart_name"] = "jtag_uart"
SoCCore.__init__(self, platform, sys_clk_freq,
ident = "LiteX SoC on QMTech XC7A35T" + (" + Daughterboard" if with_daughterboard else ""),
ident_version = ident_version,
**kwargs)
# CRG --------------------------------------------------------------------------------------
self.submodules.crg = _CRG(platform, sys_clk_freq, with_ethernet or with_etherbone, with_video_terminal or with_video_framebuffer)
# DDR3 SDRAM -------------------------------------------------------------------------------
if not self.integrated_main_ram_size:
self.submodules.ddrphy = s7ddrphy.A7DDRPHY(platform.request("ddram"),
memtype = "DDR3",
nphases = 4,
sys_clk_freq = sys_clk_freq)
self.add_sdram("sdram",
phy = self.ddrphy,
module = MT41J128M16(sys_clk_freq, "1:4"),
l2_cache_size = kwargs.get("l2_size", 8192)
)
# Ethernet / Etherbone ---------------------------------------------------------------------
if with_ethernet or with_etherbone:
self.submodules.ethphy = LiteEthPHYMII(
clock_pads = self.platform.request("eth_clocks"),
pads = self.platform.request("eth"))
if with_ethernet:
self.add_ethernet(phy=self.ethphy, dynamic_ip=eth_dynamic_ip)
if with_etherbone:
self.add_etherbone(phy=self.ethphy, ip_address=eth_ip)
# The daughterboard has the tx clock wired to a non-clock pin, so we can't help it
self.platform.add_platform_command("set_property CLOCK_DEDICATED_ROUTE FALSE [get_nets eth_clocks_tx_IBUF]")
# Jtagbone ---------------------------------------------------------------------------------
if with_jtagbone:
self.add_jtagbone()
# SPI Flash --------------------------------------------------------------------------------
if with_spi_flash:
from litespi.modules import MT25QL128
from litespi.opcodes import SpiNorFlashOpCodes as Codes
self.add_spi_flash(mode="4x", module=MT25QL128(Codes.READ_1_1_1), with_master=True)
# Video ------------------------------------------------------------------------------------
if with_video_terminal or with_video_framebuffer:
self.submodules.videophy = VideoVGAPHY(platform.request("vga"), clock_domain="vga")
if with_video_terminal:
self.add_video_terminal(phy=self.videophy, timings="800x600@60Hz", clock_domain="vga")
if with_video_framebuffer:
self.add_video_framebuffer(phy=self.videophy, timings="800x600@60Hz", clock_domain="vga")
# Leds -------------------------------------------------------------------------------------
if with_led_chaser:
self.submodules.leds = LedChaser(
pads = platform.request_all("user_led"),
sys_clk_freq = sys_clk_freq)
if not with_daughterboard and kwargs["uart_name"] == "serial":
kwargs["uart_name"] = "jtag_serial"
# Build --------------------------------------------------------------------------------------------
def main():
parser = argparse.ArgumentParser(description="LiteX SoC on QMTech XC7A35T")
parser.add_argument("--toolchain", default="vivado", help="Toolchain use to build (default: vivado)")
parser.add_argument("--build", action="store_true", help="Build bitstream")
parser.add_argument("--load", action="store_true", help="Load bitstream")
parser.add_argument("--sys-clk-freq", default=100e6, help="System clock frequency (default: 100MHz)")
parser.add_argument("--with-daughterboard", action="store_true", help="Whether the core board is plugged into the QMTech daughterboard")
ethopts = parser.add_mutually_exclusive_group()
ethopts.add_argument("--with-ethernet", action="store_true", help="Enable Ethernet support")
ethopts.add_argument("--with-etherbone", action="store_true", help="Enable Etherbone support")
parser.add_argument("--eth-ip", default="192.168.1.50", type=str, help="Ethernet/Etherbone IP address")
parser.add_argument("--eth-dynamic-ip", action="store_true", help="Enable dynamic Ethernet IP addresses setting")
sdopts = parser.add_mutually_exclusive_group()
sdopts.add_argument("--with-spi-sdcard", action="store_true", help="Enable SPI-mode SDCard support")
sdopts.add_argument("--with-sdcard", action="store_true", help="Enable SDCard support")
parser.add_argument("--no-ident-version", action="store_false", help="Disable build time output")
parser.add_argument("--with-jtagbone", action="store_true", help="Enable Jtagbone support")
parser.add_argument("--with-spi-flash", action="store_true", help="Enable SPI Flash (MMAPed)")
viopts = parser.add_mutually_exclusive_group()
viopts.add_argument("--with-video-terminal", action="store_true", help="Enable Video Terminal (VGA)")
viopts.add_argument("--with-video-framebuffer", action="store_true", help="Enable Video Framebuffer (VGA)")
builder_args(parser)
soc_core_args(parser)
vivado_build_args(parser)
args = parser.parse_args()
soc = BaseSoC(
toolchain = args.toolchain,
sys_clk_freq = int(float(args.sys_clk_freq)),
with_daughterboard = args.with_daughterboard,
with_ethernet = args.with_ethernet,
with_etherbone = args.with_etherbone,
eth_ip = args.eth_ip,
eth_dynamic_ip = args.eth_dynamic_ip,
ident_version = args.no_ident_version,
with_jtagbone = args.with_jtagbone,
with_spi_flash = args.with_spi_flash,
with_video_terminal = args.with_video_terminal,
with_video_framebuffer = args.with_video_framebuffer,
**soc_core_argdict(args)
)
if args.with_spi_sdcard:
soc.add_spi_sdcard()
if args.with_sdcard:
soc.add_sdcard()
builder = Builder(soc, **builder_argdict(args))
builder_kwargs = vivado_build_argdict(args) if args.toolchain == "vivado" else {}
builder.build(**builder_kwargs, run=args.build)
if args.load:
prog = soc.platform.create_programmer()
prog.load_bitstream(os.path.join(builder.gateware_dir, soc.build_name + ".bit"))
if __name__ == "__main__":
main()
|
py | 1a367e18e2c7a915f7fe32bf4a56fa9131888fa2 | import os
import cv2
from PIL import Image
import numpy as np
import pickle
base = os.path.dirname(os.path.abspath(__file__))
image_dir = os.path.join(base, "images")
face_cascade = cv2.CascadeClassifier(
cv2.data.haarcascades + 'haarcascade_frontalface_alt2.xml')
# LBPH (Local Binary Patterns Histograms) is an algorithm used mostly for face recognition;
# other algorithms exist as well, e.g. AdaBoost, which underlies the Haar cascade detector above.
recognizer = cv2.face.LBPHFaceRecognizer_create()
currentid = 0
label_ids = {}
x_train = []
y_labels = []
for root, dirs, files in os.walk(image_dir):
for file in files:
if file.endswith("png") or file.endswith("jpg"):
path = os.path.join(root, file)
label = os.path.basename(root).replace(" ", "-").lower()
print(label, path)
if not label in label_ids:
label_ids[label] = currentid
currentid += 1
id_ = label_ids[label]
print(label_ids)
pil_image = Image.open(path).convert(
"L") # converting into grayscale
image_array = np.array(pil_image, "uint8")
print(image_array)
whatface = face_cascade.detectMultiScale(
image_array, scaleFactor=1.5, minNeighbors=5)
for (x, y, w, h) in whatface:
roi = image_array[y:y+h, x:x+w]
x_train.append(roi)
y_labels.append(id_)
# print(y_labels)
# print(x_train)
# `with open(...) as f` opens the file "labels.pickle" and binds it to `f`; the mode string says what we intend to do with it.
# Here "wb" means "write binary", i.e. we want to write to the file in binary mode.
with open("labels.pickle", "wb") as f:
# pickle.dump serializes the label_ids mapping (label name -> numeric id) built above into the open file object `f`.
pickle.dump(label_ids, f)
recognizer.train(x_train, np.array(y_labels))
recognizer.save("trainer.yml")
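# A separate recognition script would typically reload these artifacts along
# these lines (illustrative sketch; `gray_face_roi` is a hypothetical grayscale
# face crop coming from the detector):
#   with open("labels.pickle", "rb") as f:
#       id_to_label = {v: k for k, v in pickle.load(f).items()}
#   recognizer = cv2.face.LBPHFaceRecognizer_create()
#   recognizer.read("trainer.yml")
#   id_, confidence = recognizer.predict(gray_face_roi)
#   print(id_to_label[id_], confidence)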
|
py | 1a367e9ee3c920df74bc5f6884a51099a9e66744 | # Source : https://leetcode.com/problems/find-all-anagrams-in-a-string/
# Author : YipingPan
# Date : 2020-08-13
#####################################################################################################
#
# Given a string s and a non-empty string p, find all the start indices of p's anagrams in s.
#
# Strings consist of lowercase English letters only, and the length of both strings s and p will not
# be larger than 20,100.
#
# The order of output does not matter.
#
# Example 1:
#
# Input:
# s: "cbaebabacd" p: "abc"
#
# Output:
# [0, 6]
#
# Explanation:
# The substring with start index = 0 is "cba", which is an anagram of "abc".
# The substring with start index = 6 is "bac", which is an anagram of "abc".
#
# Example 2:
#
# Input:
# s: "abab" p: "ab"
#
# Output:
# [0, 1, 2]
#
# Explanation:
# The substring with start index = 0 is "ab", which is an anagram of "ab".
# The substring with start index = 1 is "ba", which is an anagram of "ab".
# The substring with start index = 2 is "ab", which is an anagram of "ab".
#
#####################################################################################################
class Solution:
def findAnagrams(self, s: str, p: str) -> List[int]:
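# Sliding-window letter counts: cp holds p's counts, cs the counts of the
# current window of len(p) characters in s; every position where the two
# arrays match marks the start of an anagram of p.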
if len(s)<len(p): return []
cp = [0]*26
cs = [0]*26
def idx(x):
return ord(x) - ord('a')
for x in p:
cp[idx(x)] += 1
for x in s[:len(p)]:
cs[idx(x)] += 1
res = []
i = len(p)-1
while (1):
if cs == cp:
res.append(i-len(p)+1)
i += 1
if i == len(s):
break
cs[idx(s[i-len(p)])] -= 1
cs[idx(s[i])] += 1
return res
|
py | 1a367eca325b9746211518c5a2c8e0bd7dadcd4b | # Python test set -- part 5, built-in exceptions
import copy
import gc
import os
import sys
import unittest
import pickle
import weakref
import errno
from test.support import (TESTFN, captured_stderr, check_impl_detail,
check_warnings, cpython_only, gc_collect,
no_tracing, unlink, import_module, script_helper,
SuppressCrashReport)
from test import support
class NaiveException(Exception):
def __init__(self, x):
self.x = x
class SlottedNaiveException(Exception):
__slots__ = ('x',)
def __init__(self, x):
self.x = x
class BrokenStrException(Exception):
def __str__(self):
raise Exception("str() is broken")
# XXX This is not really enough, each *operation* should be tested!
class ExceptionTests(unittest.TestCase):
def raise_catch(self, exc, excname):
try:
raise exc("spam")
except exc as err:
buf1 = str(err)
try:
raise exc("spam")
except exc as err:
buf2 = str(err)
self.assertEqual(buf1, buf2)
self.assertEqual(exc.__name__, excname)
def testRaising(self):
self.raise_catch(AttributeError, "AttributeError")
self.assertRaises(AttributeError, getattr, sys, "undefined_attribute")
self.raise_catch(EOFError, "EOFError")
fp = open(TESTFN, 'w')
fp.close()
fp = open(TESTFN, 'r')
savestdin = sys.stdin
try:
try:
import marshal
marshal.loads(b'')
except EOFError:
pass
finally:
sys.stdin = savestdin
fp.close()
unlink(TESTFN)
self.raise_catch(OSError, "OSError")
self.assertRaises(OSError, open, 'this file does not exist', 'r')
self.raise_catch(ImportError, "ImportError")
self.assertRaises(ImportError, __import__, "undefined_module")
self.raise_catch(IndexError, "IndexError")
x = []
self.assertRaises(IndexError, x.__getitem__, 10)
self.raise_catch(KeyError, "KeyError")
x = {}
self.assertRaises(KeyError, x.__getitem__, 'key')
self.raise_catch(KeyboardInterrupt, "KeyboardInterrupt")
self.raise_catch(MemoryError, "MemoryError")
self.raise_catch(NameError, "NameError")
try: x = undefined_variable
except NameError: pass
self.raise_catch(OverflowError, "OverflowError")
x = 1
for dummy in range(128):
x += x # this simply shouldn't blow up
self.raise_catch(RuntimeError, "RuntimeError")
self.raise_catch(RecursionError, "RecursionError")
self.raise_catch(SyntaxError, "SyntaxError")
try: exec('/\n')
except SyntaxError: pass
self.raise_catch(IndentationError, "IndentationError")
self.raise_catch(TabError, "TabError")
try: compile("try:\n\t1/0\n \t1/0\nfinally:\n pass\n",
'<string>', 'exec')
except TabError: pass
else: self.fail("TabError not raised")
self.raise_catch(SystemError, "SystemError")
self.raise_catch(SystemExit, "SystemExit")
self.assertRaises(SystemExit, sys.exit, 0)
self.raise_catch(TypeError, "TypeError")
try: [] + ()
except TypeError: pass
self.raise_catch(ValueError, "ValueError")
self.assertRaises(ValueError, chr, 17<<16)
self.raise_catch(ZeroDivisionError, "ZeroDivisionError")
try: x = 1/0
except ZeroDivisionError: pass
self.raise_catch(Exception, "Exception")
try: x = 1/0
except Exception as e: pass
self.raise_catch(StopAsyncIteration, "StopAsyncIteration")
def testSyntaxErrorMessage(self):
# make sure the right exception message is raised for each of
# these code fragments
def ckmsg(src, msg):
try:
compile(src, '<fragment>', 'exec')
except SyntaxError as e:
if e.msg != msg:
self.fail("expected %s, got %s" % (msg, e.msg))
else:
self.fail("failed to get expected SyntaxError")
s = '''if 1:
try:
continue
except:
pass'''
ckmsg(s, "'continue' not properly in loop")
ckmsg("continue\n", "'continue' not properly in loop")
def testSyntaxErrorMissingParens(self):
def ckmsg(src, msg, exception=SyntaxError):
try:
compile(src, '<fragment>', 'exec')
except exception as e:
if e.msg != msg:
self.fail("expected %s, got %s" % (msg, e.msg))
else:
self.fail("failed to get expected SyntaxError")
s = '''print "old style"'''
ckmsg(s, "Missing parentheses in call to 'print'. "
"Did you mean print(\"old style\")?")
s = '''print "old style",'''
ckmsg(s, "Missing parentheses in call to 'print'. "
"Did you mean print(\"old style\", end=\" \")?")
s = '''exec "old style"'''
ckmsg(s, "Missing parentheses in call to 'exec'")
# should not apply to subclasses, see issue #31161
s = '''if True:\nprint "No indent"'''
ckmsg(s, "expected an indented block", IndentationError)
s = '''if True:\n print()\n\texec "mixed tabs and spaces"'''
ckmsg(s, "inconsistent use of tabs and spaces in indentation", TabError)
def testSyntaxErrorOffset(self):
def check(src, lineno, offset, encoding='utf-8'):
with self.assertRaises(SyntaxError) as cm:
compile(src, '<fragment>', 'exec')
self.assertEqual(cm.exception.lineno, lineno)
self.assertEqual(cm.exception.offset, offset)
if cm.exception.text is not None:
if not isinstance(src, str):
src = src.decode(encoding, 'replace')
line = src.split('\n')[lineno-1]
self.assertEqual(cm.exception.text.rstrip('\n'), line)
check('def fact(x):\n\treturn x!\n', 2, 10)
check('1 +\n', 1, 4)
check('def spam():\n print(1)\n print(2)', 3, 10)
check('Python = "Python" +', 1, 20)
check('Python = "\u1e54\xfd\u0163\u0125\xf2\xf1" +', 1, 20)
check(b'# -*- coding: cp1251 -*-\nPython = "\xcf\xb3\xf2\xee\xed" +',
2, 19, encoding='cp1251')
check(b'Python = "\xcf\xb3\xf2\xee\xed" +', 1, 18)
check('x = "a', 1, 7)
check('lambda x: x = 2', 1, 1)
# Errors thrown by compile.c
check('class foo:return 1', 1, 11)
check('def f():\n continue', 2, 3)
check('def f():\n break', 2, 3)
check('try:\n pass\nexcept:\n pass\nexcept ValueError:\n pass', 2, 3)
# Errors thrown by tokenizer.c
check('(0x+1)', 1, 3)
check('x = 0xI', 1, 6)
check('0010 + 2', 1, 4)
check('x = 32e-+4', 1, 8)
check('x = 0o9', 1, 6)
check('\u03b1 = 0xI', 1, 6)
check(b'\xce\xb1 = 0xI', 1, 6)
check(b'# -*- coding: iso8859-7 -*-\n\xe1 = 0xI', 2, 6,
encoding='iso8859-7')
# Errors thrown by symtable.c
check('x = [(yield i) for i in range(3)]', 1, 5)
check('def f():\n from _ import *', 1, 1)
check('def f(x, x):\n pass', 1, 1)
check('def f(x):\n nonlocal x', 2, 3)
check('def f(x):\n x = 1\n global x', 3, 3)
check('nonlocal x', 1, 1)
check('def f():\n global x\n nonlocal x', 2, 3)
# Errors thrown by ast.c
check('for 1 in []: pass', 1, 5)
check('def f(*):\n pass', 1, 7)
check('[*x for x in xs]', 1, 2)
check('def f():\n x, y: int', 2, 3)
check('(yield i) = 2', 1, 1)
check('foo(x for x in range(10), 100)', 1, 5)
check('foo(1=2)', 1, 5)
# Errors thrown by future.c
check('from __future__ import doesnt_exist', 1, 1)
check('from __future__ import braces', 1, 1)
check('x=1\nfrom __future__ import division', 2, 1)
@cpython_only
def testSettingException(self):
# test that setting an exception at the C level works even if the
# exception object can't be constructed.
class BadException(Exception):
def __init__(self_):
raise RuntimeError("can't instantiate BadException")
class InvalidException:
pass
def test_capi1():
import _testcapi
try:
_testcapi.raise_exception(BadException, 1)
except TypeError as err:
exc, err, tb = sys.exc_info()
co = tb.tb_frame.f_code
self.assertEqual(co.co_name, "test_capi1")
self.assertTrue(co.co_filename.endswith('test_exceptions.py'))
else:
self.fail("Expected exception")
def test_capi2():
import _testcapi
try:
_testcapi.raise_exception(BadException, 0)
except RuntimeError as err:
exc, err, tb = sys.exc_info()
co = tb.tb_frame.f_code
self.assertEqual(co.co_name, "__init__")
self.assertTrue(co.co_filename.endswith('test_exceptions.py'))
co2 = tb.tb_frame.f_back.f_code
self.assertEqual(co2.co_name, "test_capi2")
else:
self.fail("Expected exception")
def test_capi3():
import _testcapi
self.assertRaises(SystemError, _testcapi.raise_exception,
InvalidException, 1)
if not sys.platform.startswith('java'):
test_capi1()
test_capi2()
test_capi3()
def test_WindowsError(self):
try:
WindowsError
except NameError:
pass
else:
self.assertIs(WindowsError, OSError)
self.assertEqual(str(OSError(1001)), "1001")
self.assertEqual(str(OSError(1001, "message")),
"[Errno 1001] message")
# POSIX errno (9 aka EBADF) is untranslated
w = OSError(9, 'foo', 'bar')
self.assertEqual(w.errno, 9)
self.assertEqual(w.winerror, None)
self.assertEqual(str(w), "[Errno 9] foo: 'bar'")
# ERROR_PATH_NOT_FOUND (win error 3) becomes ENOENT (2)
w = OSError(0, 'foo', 'bar', 3)
self.assertEqual(w.errno, 2)
self.assertEqual(w.winerror, 3)
self.assertEqual(w.strerror, 'foo')
self.assertEqual(w.filename, 'bar')
self.assertEqual(w.filename2, None)
self.assertEqual(str(w), "[WinError 3] foo: 'bar'")
# Unknown win error becomes EINVAL (22)
w = OSError(0, 'foo', None, 1001)
self.assertEqual(w.errno, 22)
self.assertEqual(w.winerror, 1001)
self.assertEqual(w.strerror, 'foo')
self.assertEqual(w.filename, None)
self.assertEqual(w.filename2, None)
self.assertEqual(str(w), "[WinError 1001] foo")
# Non-numeric "errno"
w = OSError('bar', 'foo')
self.assertEqual(w.errno, 'bar')
self.assertEqual(w.winerror, None)
self.assertEqual(w.strerror, 'foo')
self.assertEqual(w.filename, None)
self.assertEqual(w.filename2, None)
@unittest.skipUnless(sys.platform == 'win32',
'test specific to Windows')
def test_windows_message(self):
"""Should fill in unknown error code in Windows error message"""
ctypes = import_module('ctypes')
# this error code has no message, Python formats it as hexadecimal
code = 3765269347
with self.assertRaisesRegex(OSError, 'Windows Error 0x%x' % code):
ctypes.pythonapi.PyErr_SetFromWindowsErr(code)
def testAttributes(self):
# test that exception attributes are happy
exceptionList = [
(BaseException, (), {'args' : ()}),
(BaseException, (1, ), {'args' : (1,)}),
(BaseException, ('foo',),
{'args' : ('foo',)}),
(BaseException, ('foo', 1),
{'args' : ('foo', 1)}),
(SystemExit, ('foo',),
{'args' : ('foo',), 'code' : 'foo'}),
(OSError, ('foo',),
{'args' : ('foo',), 'filename' : None, 'filename2' : None,
'errno' : None, 'strerror' : None}),
(OSError, ('foo', 'bar'),
{'args' : ('foo', 'bar'),
'filename' : None, 'filename2' : None,
'errno' : 'foo', 'strerror' : 'bar'}),
(OSError, ('foo', 'bar', 'baz'),
{'args' : ('foo', 'bar'),
'filename' : 'baz', 'filename2' : None,
'errno' : 'foo', 'strerror' : 'bar'}),
(OSError, ('foo', 'bar', 'baz', None, 'quux'),
{'args' : ('foo', 'bar'), 'filename' : 'baz', 'filename2': 'quux'}),
(OSError, ('errnoStr', 'strErrorStr', 'filenameStr'),
{'args' : ('errnoStr', 'strErrorStr'),
'strerror' : 'strErrorStr', 'errno' : 'errnoStr',
'filename' : 'filenameStr'}),
(OSError, (1, 'strErrorStr', 'filenameStr'),
{'args' : (1, 'strErrorStr'), 'errno' : 1,
'strerror' : 'strErrorStr',
'filename' : 'filenameStr', 'filename2' : None}),
(SyntaxError, (), {'msg' : None, 'text' : None,
'filename' : None, 'lineno' : None, 'offset' : None,
'print_file_and_line' : None}),
(SyntaxError, ('msgStr',),
{'args' : ('msgStr',), 'text' : None,
'print_file_and_line' : None, 'msg' : 'msgStr',
'filename' : None, 'lineno' : None, 'offset' : None}),
(SyntaxError, ('msgStr', ('filenameStr', 'linenoStr', 'offsetStr',
'textStr')),
{'offset' : 'offsetStr', 'text' : 'textStr',
'args' : ('msgStr', ('filenameStr', 'linenoStr',
'offsetStr', 'textStr')),
'print_file_and_line' : None, 'msg' : 'msgStr',
'filename' : 'filenameStr', 'lineno' : 'linenoStr'}),
(SyntaxError, ('msgStr', 'filenameStr', 'linenoStr', 'offsetStr',
'textStr', 'print_file_and_lineStr'),
{'text' : None,
'args' : ('msgStr', 'filenameStr', 'linenoStr', 'offsetStr',
'textStr', 'print_file_and_lineStr'),
'print_file_and_line' : None, 'msg' : 'msgStr',
'filename' : None, 'lineno' : None, 'offset' : None}),
(UnicodeError, (), {'args' : (),}),
(UnicodeEncodeError, ('ascii', 'a', 0, 1,
'ordinal not in range'),
{'args' : ('ascii', 'a', 0, 1,
'ordinal not in range'),
'encoding' : 'ascii', 'object' : 'a',
'start' : 0, 'reason' : 'ordinal not in range'}),
(UnicodeDecodeError, ('ascii', bytearray(b'\xff'), 0, 1,
'ordinal not in range'),
{'args' : ('ascii', bytearray(b'\xff'), 0, 1,
'ordinal not in range'),
'encoding' : 'ascii', 'object' : b'\xff',
'start' : 0, 'reason' : 'ordinal not in range'}),
(UnicodeDecodeError, ('ascii', b'\xff', 0, 1,
'ordinal not in range'),
{'args' : ('ascii', b'\xff', 0, 1,
'ordinal not in range'),
'encoding' : 'ascii', 'object' : b'\xff',
'start' : 0, 'reason' : 'ordinal not in range'}),
(UnicodeTranslateError, ("\u3042", 0, 1, "ouch"),
{'args' : ('\u3042', 0, 1, 'ouch'),
'object' : '\u3042', 'reason' : 'ouch',
'start' : 0, 'end' : 1}),
(NaiveException, ('foo',),
{'args': ('foo',), 'x': 'foo'}),
(SlottedNaiveException, ('foo',),
{'args': ('foo',), 'x': 'foo'}),
]
try:
# More tests are in test_WindowsError
exceptionList.append(
(WindowsError, (1, 'strErrorStr', 'filenameStr'),
{'args' : (1, 'strErrorStr'),
'strerror' : 'strErrorStr', 'winerror' : None,
'errno' : 1,
'filename' : 'filenameStr', 'filename2' : None})
)
except NameError:
pass
for exc, args, expected in exceptionList:
try:
e = exc(*args)
except:
print("\nexc=%r, args=%r" % (exc, args), file=sys.stderr)
raise
else:
# Verify module name
if not type(e).__name__.endswith('NaiveException'):
self.assertEqual(type(e).__module__, 'builtins')
# Verify no ref leaks in Exc_str()
s = str(e)
for checkArgName in expected:
value = getattr(e, checkArgName)
self.assertEqual(repr(value),
repr(expected[checkArgName]),
'%r.%s == %r, expected %r' % (
e, checkArgName,
value, expected[checkArgName]))
# test for pickling support
for p in [pickle]:
for protocol in range(p.HIGHEST_PROTOCOL + 1):
s = p.dumps(e, protocol)
new = p.loads(s)
for checkArgName in expected:
got = repr(getattr(new, checkArgName))
want = repr(expected[checkArgName])
self.assertEqual(got, want,
'pickled "%r", attribute "%s' %
(e, checkArgName))
def testWithTraceback(self):
try:
raise IndexError(4)
except:
tb = sys.exc_info()[2]
e = BaseException().with_traceback(tb)
self.assertIsInstance(e, BaseException)
self.assertEqual(e.__traceback__, tb)
e = IndexError(5).with_traceback(tb)
self.assertIsInstance(e, IndexError)
self.assertEqual(e.__traceback__, tb)
class MyException(Exception):
pass
e = MyException().with_traceback(tb)
self.assertIsInstance(e, MyException)
self.assertEqual(e.__traceback__, tb)
def testInvalidTraceback(self):
try:
Exception().__traceback__ = 5
except TypeError as e:
self.assertIn("__traceback__ must be a traceback", str(e))
else:
self.fail("No exception raised")
def testInvalidAttrs(self):
self.assertRaises(TypeError, setattr, Exception(), '__cause__', 1)
self.assertRaises(TypeError, delattr, Exception(), '__cause__')
self.assertRaises(TypeError, setattr, Exception(), '__context__', 1)
self.assertRaises(TypeError, delattr, Exception(), '__context__')
def testNoneClearsTracebackAttr(self):
try:
raise IndexError(4)
except:
tb = sys.exc_info()[2]
e = Exception()
e.__traceback__ = tb
e.__traceback__ = None
self.assertEqual(e.__traceback__, None)
def testChainingAttrs(self):
e = Exception()
self.assertIsNone(e.__context__)
self.assertIsNone(e.__cause__)
e = TypeError()
self.assertIsNone(e.__context__)
self.assertIsNone(e.__cause__)
class MyException(OSError):
pass
e = MyException()
self.assertIsNone(e.__context__)
self.assertIsNone(e.__cause__)
def testChainingDescriptors(self):
try:
raise Exception()
except Exception as exc:
e = exc
self.assertIsNone(e.__context__)
self.assertIsNone(e.__cause__)
self.assertFalse(e.__suppress_context__)
e.__context__ = NameError()
e.__cause__ = None
self.assertIsInstance(e.__context__, NameError)
self.assertIsNone(e.__cause__)
self.assertTrue(e.__suppress_context__)
e.__suppress_context__ = False
self.assertFalse(e.__suppress_context__)
def testKeywordArgs(self):
# test that builtin exceptions don't take keyword args,
# but user-defined subclasses can if they want
self.assertRaises(TypeError, BaseException, a=1)
class DerivedException(BaseException):
def __init__(self, fancy_arg):
BaseException.__init__(self)
self.fancy_arg = fancy_arg
x = DerivedException(fancy_arg=42)
self.assertEqual(x.fancy_arg, 42)
@no_tracing
@unittest.skipIf(hasattr(sys, "pyston_version_info"), "Pyston disables recursion checking")
def testInfiniteRecursion(self):
def f():
return f()
self.assertRaises(RecursionError, f)
def g():
try:
return g()
except ValueError:
return -1
self.assertRaises(RecursionError, g)
def test_str(self):
# Make sure both instances and classes have a str representation.
self.assertTrue(str(Exception))
self.assertTrue(str(Exception('a')))
self.assertTrue(str(Exception('a', 'b')))
def testExceptionCleanupNames(self):
# Make sure the local variable bound to the exception instance by
# an "except" statement is only visible inside the except block.
try:
raise Exception()
except Exception as e:
self.assertTrue(e)
del e
self.assertNotIn('e', locals())
def testExceptionCleanupState(self):
# Make sure exception state is cleaned up as soon as the except
# block is left. See #2507
class MyException(Exception):
def __init__(self, obj):
self.obj = obj
class MyObj:
pass
def inner_raising_func():
# Create some references in exception value and traceback
local_ref = obj
raise MyException(obj)
# Qualified "except" with "as"
obj = MyObj()
wr = weakref.ref(obj)
try:
inner_raising_func()
except MyException as e:
pass
obj = None
obj = wr()
self.assertIsNone(obj)
# Qualified "except" without "as"
obj = MyObj()
wr = weakref.ref(obj)
try:
inner_raising_func()
except MyException:
pass
obj = None
obj = wr()
self.assertIsNone(obj)
# Bare "except"
obj = MyObj()
wr = weakref.ref(obj)
try:
inner_raising_func()
except:
pass
obj = None
obj = wr()
self.assertIsNone(obj)
# "except" with premature block leave
obj = MyObj()
wr = weakref.ref(obj)
for i in [0]:
try:
inner_raising_func()
except:
break
obj = None
obj = wr()
self.assertIsNone(obj)
# "except" block raising another exception
obj = MyObj()
wr = weakref.ref(obj)
try:
try:
inner_raising_func()
except:
raise KeyError
except KeyError as e:
# We want to test that the except block above got rid of
# the exception raised in inner_raising_func(), but it
# also ends up in the __context__ of the KeyError, so we
# must clear the latter manually for our test to succeed.
e.__context__ = None
obj = None
obj = wr()
# guarantee no ref cycles on CPython (don't gc_collect)
if check_impl_detail(cpython=False):
gc_collect()
self.assertIsNone(obj)
# Some complicated construct
obj = MyObj()
wr = weakref.ref(obj)
try:
inner_raising_func()
except MyException:
try:
try:
raise
finally:
raise
except MyException:
pass
obj = None
if check_impl_detail(cpython=False):
gc_collect()
obj = wr()
self.assertIsNone(obj)
# Inside an exception-silencing "with" block
class Context:
def __enter__(self):
return self
def __exit__ (self, exc_type, exc_value, exc_tb):
return True
obj = MyObj()
wr = weakref.ref(obj)
with Context():
inner_raising_func()
obj = None
if check_impl_detail(cpython=False):
gc_collect()
obj = wr()
self.assertIsNone(obj)
def test_exception_target_in_nested_scope(self):
# issue 4617: This used to raise a SyntaxError
# "can not delete variable 'e' referenced in nested scope"
def print_error():
e
try:
something
except Exception as e:
print_error()
# implicit "del e" here
def test_generator_leaking(self):
# Test that generator exception state doesn't leak into the calling
# frame
def yield_raise():
try:
raise KeyError("caught")
except KeyError:
yield sys.exc_info()[0]
yield sys.exc_info()[0]
yield sys.exc_info()[0]
g = yield_raise()
self.assertEqual(next(g), KeyError)
self.assertEqual(sys.exc_info()[0], None)
self.assertEqual(next(g), KeyError)
self.assertEqual(sys.exc_info()[0], None)
self.assertEqual(next(g), None)
# Same test, but inside an exception handler
try:
raise TypeError("foo")
except TypeError:
g = yield_raise()
self.assertEqual(next(g), KeyError)
self.assertEqual(sys.exc_info()[0], TypeError)
self.assertEqual(next(g), KeyError)
self.assertEqual(sys.exc_info()[0], TypeError)
self.assertEqual(next(g), TypeError)
del g
self.assertEqual(sys.exc_info()[0], TypeError)
def test_generator_leaking2(self):
# See issue 12475.
def g():
yield
try:
raise RuntimeError
except RuntimeError:
it = g()
next(it)
try:
next(it)
except StopIteration:
pass
self.assertEqual(sys.exc_info(), (None, None, None))
def test_generator_leaking3(self):
# See issue #23353. When gen.throw() is called, the caller's
# exception state should be saved and restored.
def g():
try:
yield
except ZeroDivisionError:
yield sys.exc_info()[1]
it = g()
next(it)
try:
1/0
except ZeroDivisionError as e:
self.assertIs(sys.exc_info()[1], e)
gen_exc = it.throw(e)
self.assertIs(sys.exc_info()[1], e)
self.assertIs(gen_exc, e)
self.assertEqual(sys.exc_info(), (None, None, None))
def test_generator_leaking4(self):
# See issue #23353. When an exception is raised by a generator,
# the caller's exception state should still be restored.
def g():
try:
1/0
except ZeroDivisionError:
yield sys.exc_info()[0]
raise
it = g()
try:
raise TypeError
except TypeError:
# The caller's exception state (TypeError) is temporarily
# saved in the generator.
tp = next(it)
self.assertIs(tp, ZeroDivisionError)
try:
next(it)
# We can't check it immediately, but while next() returns
# with an exception, it shouldn't have restored the old
# exception state (TypeError).
except ZeroDivisionError as e:
self.assertIs(sys.exc_info()[1], e)
# We used to find TypeError here.
self.assertEqual(sys.exc_info(), (None, None, None))
def test_generator_doesnt_retain_old_exc(self):
def g():
self.assertIsInstance(sys.exc_info()[1], RuntimeError)
yield
self.assertEqual(sys.exc_info(), (None, None, None))
it = g()
try:
raise RuntimeError
except RuntimeError:
next(it)
self.assertRaises(StopIteration, next, it)
def test_generator_finalizing_and_exc_info(self):
# See #7173
def simple_gen():
yield 1
def run_gen():
gen = simple_gen()
try:
raise RuntimeError
except RuntimeError:
return next(gen)
run_gen()
gc_collect()
self.assertEqual(sys.exc_info(), (None, None, None))
def _check_generator_cleanup_exc_state(self, testfunc):
# Issue #12791: exception state is cleaned up as soon as a generator
# is closed (reference cycles are broken).
class MyException(Exception):
def __init__(self, obj):
self.obj = obj
class MyObj:
pass
def raising_gen():
try:
raise MyException(obj)
except MyException:
yield
obj = MyObj()
wr = weakref.ref(obj)
g = raising_gen()
next(g)
testfunc(g)
g = obj = None
obj = wr()
self.assertIsNone(obj)
def test_generator_throw_cleanup_exc_state(self):
def do_throw(g):
try:
g.throw(RuntimeError())
except RuntimeError:
pass
self._check_generator_cleanup_exc_state(do_throw)
def test_generator_close_cleanup_exc_state(self):
def do_close(g):
g.close()
self._check_generator_cleanup_exc_state(do_close)
def test_generator_del_cleanup_exc_state(self):
def do_del(g):
g = None
self._check_generator_cleanup_exc_state(do_del)
def test_generator_next_cleanup_exc_state(self):
def do_next(g):
try:
next(g)
except StopIteration:
pass
else:
self.fail("should have raised StopIteration")
self._check_generator_cleanup_exc_state(do_next)
def test_generator_send_cleanup_exc_state(self):
def do_send(g):
try:
g.send(None)
except StopIteration:
pass
else:
self.fail("should have raised StopIteration")
self._check_generator_cleanup_exc_state(do_send)
def test_3114(self):
# Bug #3114: in its destructor, MyObject retrieves a pointer to
# obsolete and/or deallocated objects.
class MyObject:
def __del__(self):
nonlocal e
e = sys.exc_info()
e = ()
try:
raise Exception(MyObject())
except:
pass
self.assertEqual(e, (None, None, None))
def test_unicode_change_attributes(self):
# See issue 7309. This was a crasher.
u = UnicodeEncodeError('baz', 'xxxxx', 1, 5, 'foo')
self.assertEqual(str(u), "'baz' codec can't encode characters in position 1-4: foo")
u.end = 2
self.assertEqual(str(u), "'baz' codec can't encode character '\\x78' in position 1: foo")
u.end = 5
u.reason = 0x345345345345345345
self.assertEqual(str(u), "'baz' codec can't encode characters in position 1-4: 965230951443685724997")
u.encoding = 4000
self.assertEqual(str(u), "'4000' codec can't encode characters in position 1-4: 965230951443685724997")
u.start = 1000
self.assertEqual(str(u), "'4000' codec can't encode characters in position 1000-4: 965230951443685724997")
u = UnicodeDecodeError('baz', b'xxxxx', 1, 5, 'foo')
self.assertEqual(str(u), "'baz' codec can't decode bytes in position 1-4: foo")
u.end = 2
self.assertEqual(str(u), "'baz' codec can't decode byte 0x78 in position 1: foo")
u.end = 5
u.reason = 0x345345345345345345
self.assertEqual(str(u), "'baz' codec can't decode bytes in position 1-4: 965230951443685724997")
u.encoding = 4000
self.assertEqual(str(u), "'4000' codec can't decode bytes in position 1-4: 965230951443685724997")
u.start = 1000
self.assertEqual(str(u), "'4000' codec can't decode bytes in position 1000-4: 965230951443685724997")
u = UnicodeTranslateError('xxxx', 1, 5, 'foo')
self.assertEqual(str(u), "can't translate characters in position 1-4: foo")
u.end = 2
self.assertEqual(str(u), "can't translate character '\\x78' in position 1: foo")
u.end = 5
u.reason = 0x345345345345345345
self.assertEqual(str(u), "can't translate characters in position 1-4: 965230951443685724997")
u.start = 1000
self.assertEqual(str(u), "can't translate characters in position 1000-4: 965230951443685724997")
def test_unicode_errors_no_object(self):
# See issue #21134.
klasses = UnicodeEncodeError, UnicodeDecodeError, UnicodeTranslateError
for klass in klasses:
self.assertEqual(str(klass.__new__(klass)), "")
@no_tracing
def test_badisinstance(self):
# Bug #2542: if issubclass(e, MyException) raises an exception,
# it should be ignored
class Meta(type):
def __subclasscheck__(cls, subclass):
raise ValueError()
class MyException(Exception, metaclass=Meta):
pass
with captured_stderr() as stderr:
try:
raise KeyError()
except MyException as e:
self.fail("exception should not be a MyException")
except KeyError:
pass
except:
self.fail("Should have raised KeyError")
else:
self.fail("Should have raised KeyError")
if not hasattr(sys, "pyston_version_info"):
def g():
try:
return g()
except RecursionError:
return sys.exc_info()
e, v, tb = g()
self.assertIsInstance(v, RecursionError, type(v))
self.assertIn("maximum recursion depth exceeded", str(v))
@cpython_only
def test_trashcan_recursion(self):
# See bpo-33930
def foo():
o = object()
for x in range(1_000_000):
# Create a big chain of method objects that will trigger
# a deep chain of calls when they need to be destructed.
o = o.__dir__
foo()
support.gc_collect()
@cpython_only
@unittest.skipIf(hasattr(sys, "pyston_version_info"), "Pyston disables recursion checking")
def test_recursion_normalizing_exception(self):
# Issue #22898.
# Test that a RecursionError is raised when tstate->recursion_depth is
# equal to recursion_limit in PyErr_NormalizeException() and check
# that a ResourceWarning is printed.
# Prior to #22898, the recursivity of PyErr_NormalizeException() was
# controlled by tstate->recursion_depth and a PyExc_RecursionErrorInst
# singleton was being used in that case, that held traceback data and
# locals indefinitely and would cause a segfault in _PyExc_Fini() upon
# finalization of these locals.
code = """if 1:
import sys
from _testcapi import get_recursion_depth
class MyException(Exception): pass
def setrecursionlimit(depth):
while 1:
try:
sys.setrecursionlimit(depth)
return depth
except RecursionError:
# sys.setrecursionlimit() raises a RecursionError if
# the new recursion limit is too low (issue #25274).
depth += 1
def recurse(cnt):
cnt -= 1
if cnt:
recurse(cnt)
else:
generator.throw(MyException)
def gen():
f = open(%a, mode='rb', buffering=0)
yield
generator = gen()
next(generator)
recursionlimit = sys.getrecursionlimit()
depth = get_recursion_depth()
try:
# Upon the last recursive invocation of recurse(),
# tstate->recursion_depth is equal to (recursion_limit - 1)
# and is equal to recursion_limit when _gen_throw() calls
# PyErr_NormalizeException().
recurse(setrecursionlimit(depth + 2) - depth - 1)
finally:
sys.setrecursionlimit(recursionlimit)
print('Done.')
""" % __file__
rc, out, err = script_helper.assert_python_failure("-Wd", "-c", code)
# Check that the program does not fail with SIGABRT.
self.assertEqual(rc, 1)
self.assertIn(b'RecursionError', err)
self.assertIn(b'ResourceWarning', err)
self.assertIn(b'Done.', out)
@cpython_only
def test_recursion_normalizing_infinite_exception(self):
# Issue #30697. Test that a RecursionError is raised when
# PyErr_NormalizeException() maximum recursion depth has been
# exceeded.
code = """if 1:
import _testcapi
try:
raise _testcapi.RecursingInfinitelyError
finally:
print('Done.')
"""
rc, out, err = script_helper.assert_python_failure("-c", code)
self.assertEqual(rc, 1)
self.assertIn(b'RecursionError: maximum recursion depth exceeded '
b'while normalizing an exception', err)
self.assertIn(b'Done.', out)
@unittest.skipIf(hasattr(sys, "pyston_version_info"), "Pyston disables memory hooks")
@cpython_only
def test_recursion_normalizing_with_no_memory(self):
# Issue #30697. Test that in the abort that occurs when there is no
# memory left and the size of the Python frames stack is greater than
# the size of the list of preallocated MemoryError instances, the
# Fatal Python error message mentions MemoryError.
code = """if 1:
import _testcapi
class C(): pass
def recurse(cnt):
cnt -= 1
if cnt:
recurse(cnt)
else:
_testcapi.set_nomemory(0)
C()
recurse(16)
"""
with SuppressCrashReport():
rc, out, err = script_helper.assert_python_failure("-c", code)
self.assertIn(b'Fatal Python error: Cannot recover from '
b'MemoryErrors while normalizing exceptions.', err)
@cpython_only
def test_MemoryError(self):
# PyErr_NoMemory always raises the same exception instance.
# Check that the traceback is not doubled.
import traceback
from _testcapi import raise_memoryerror
def raiseMemError():
try:
raise_memoryerror()
except MemoryError as e:
tb = e.__traceback__
else:
self.fail("Should have raises a MemoryError")
return traceback.format_tb(tb)
tb1 = raiseMemError()
tb2 = raiseMemError()
self.assertEqual(tb1, tb2)
@cpython_only
def test_exception_with_doc(self):
import _testcapi
doc2 = "This is a test docstring."
doc4 = "This is another test docstring."
self.assertRaises(SystemError, _testcapi.make_exception_with_doc,
"error1")
# test basic usage of PyErr_NewException
error1 = _testcapi.make_exception_with_doc("_testcapi.error1")
self.assertIs(type(error1), type)
self.assertTrue(issubclass(error1, Exception))
self.assertIsNone(error1.__doc__)
# test with given docstring
error2 = _testcapi.make_exception_with_doc("_testcapi.error2", doc2)
self.assertEqual(error2.__doc__, doc2)
# test with explicit base (without docstring)
error3 = _testcapi.make_exception_with_doc("_testcapi.error3",
base=error2)
self.assertTrue(issubclass(error3, error2))
# test with explicit base tuple
class C(object):
pass
error4 = _testcapi.make_exception_with_doc("_testcapi.error4", doc4,
(error3, C))
self.assertTrue(issubclass(error4, error3))
self.assertTrue(issubclass(error4, C))
self.assertEqual(error4.__doc__, doc4)
# test with explicit dictionary
error5 = _testcapi.make_exception_with_doc("_testcapi.error5", "",
error4, {'a': 1})
self.assertTrue(issubclass(error5, error4))
self.assertEqual(error5.a, 1)
self.assertEqual(error5.__doc__, "")
@cpython_only
def test_memory_error_cleanup(self):
# Issue #5437: preallocated MemoryError instances should not keep
# traceback objects alive.
from _testcapi import raise_memoryerror
class C:
pass
wr = None
def inner():
nonlocal wr
c = C()
wr = weakref.ref(c)
raise_memoryerror()
# We cannot use assertRaises since it manually deletes the traceback
try:
inner()
except MemoryError as e:
self.assertNotEqual(wr(), None)
else:
self.fail("MemoryError not raised")
self.assertEqual(wr(), None)
@no_tracing
@unittest.skipIf(hasattr(sys, "pyston_version_info"), "Pyston disables recursion checking")
def test_recursion_error_cleanup(self):
# Same test as above, but with "recursion exceeded" errors
class C:
pass
wr = None
def inner():
nonlocal wr
c = C()
wr = weakref.ref(c)
inner()
# We cannot use assertRaises since it manually deletes the traceback
try:
inner()
except RecursionError as e:
self.assertNotEqual(wr(), None)
else:
self.fail("RecursionError not raised")
self.assertEqual(wr(), None)
def test_errno_ENOTDIR(self):
# Issue #12802: "not a directory" errors are ENOTDIR even on Windows
with self.assertRaises(OSError) as cm:
os.listdir(__file__)
self.assertEqual(cm.exception.errno, errno.ENOTDIR, cm.exception)
def test_unraisable(self):
# Issue #22836: PyErr_WriteUnraisable() should give sensible reports
class BrokenDel:
def __del__(self):
exc = ValueError("del is broken")
# The following line is included in the traceback report:
raise exc
obj = BrokenDel()
with support.catch_unraisable_exception() as cm:
del obj
self.assertEqual(cm.unraisable.object, BrokenDel.__del__)
self.assertIsNotNone(cm.unraisable.exc_traceback)
def test_unhandled(self):
# Check for sensible reporting of unhandled exceptions
for exc_type in (ValueError, BrokenStrException):
with self.subTest(exc_type):
try:
exc = exc_type("test message")
# The following line is included in the traceback report:
raise exc
except exc_type:
with captured_stderr() as stderr:
sys.__excepthook__(*sys.exc_info())
report = stderr.getvalue()
self.assertIn("test_exceptions.py", report)
self.assertIn("raise exc", report)
self.assertIn(exc_type.__name__, report)
if exc_type is BrokenStrException:
self.assertIn("<exception str() failed>", report)
else:
self.assertIn("test message", report)
self.assertTrue(report.endswith("\n"))
@unittest.skipIf(hasattr(sys, "pyston_version_info"), "Pyston disables memory hooks")
@cpython_only
def test_memory_error_in_PyErr_PrintEx(self):
code = """if 1:
import _testcapi
class C(): pass
_testcapi.set_nomemory(0, %d)
C()
"""
# Issue #30817: Abort in PyErr_PrintEx() when no memory.
# Span a large range of tests as the CPython code always evolves with
# changes that add or remove memory allocations.
for i in range(1, 20):
rc, out, err = script_helper.assert_python_failure("-c", code % i)
self.assertIn(rc, (1, 120))
self.assertIn(b'MemoryError', err)
def test_yield_in_nested_try_excepts(self):
#Issue #25612
class MainError(Exception):
pass
class SubError(Exception):
pass
def main():
try:
raise MainError()
except MainError:
try:
yield
except SubError:
pass
raise
coro = main()
coro.send(None)
with self.assertRaises(MainError):
coro.throw(SubError())
def test_generator_doesnt_retain_old_exc2(self):
#Issue 28884#msg282532
def g():
try:
raise ValueError
except ValueError:
yield 1
self.assertEqual(sys.exc_info(), (None, None, None))
yield 2
gen = g()
try:
raise IndexError
except IndexError:
self.assertEqual(next(gen), 1)
self.assertEqual(next(gen), 2)
def test_raise_in_generator(self):
#Issue 25612#msg304117
def g():
yield 1
raise
yield 2
with self.assertRaises(ZeroDivisionError):
i = g()
try:
1/0
except:
next(i)
next(i)
def test_memory_error_subclasses(self):
# bpo-41654: MemoryError instances use a freelist of objects that are
# linked using the 'dict' attribute when they are inactive/dead.
# Subclasses of MemoryError should not participate in the freelist
# schema. This test creates a MemoryError object and keeps it alive
# (therefore advancing the freelist) and then it creates and destroys a
# subclass object. Finally, it checks that creating a new MemoryError
# succeeds, proving that the freelist is not corrupted.
class TestException(MemoryError):
pass
try:
raise MemoryError
except MemoryError as exc:
inst = exc
try:
raise TestException
except Exception:
pass
for _ in range(10):
try:
raise MemoryError
except MemoryError as exc:
pass
gc_collect()
class ImportErrorTests(unittest.TestCase):
def test_attributes(self):
# Setting 'name' and 'path' should not be a problem.
exc = ImportError('test')
self.assertIsNone(exc.name)
self.assertIsNone(exc.path)
exc = ImportError('test', name='somemodule')
self.assertEqual(exc.name, 'somemodule')
self.assertIsNone(exc.path)
exc = ImportError('test', path='somepath')
self.assertEqual(exc.path, 'somepath')
self.assertIsNone(exc.name)
exc = ImportError('test', path='somepath', name='somename')
self.assertEqual(exc.name, 'somename')
self.assertEqual(exc.path, 'somepath')
msg = "'invalid' is an invalid keyword argument for ImportError"
with self.assertRaisesRegex(TypeError, msg):
ImportError('test', invalid='keyword')
with self.assertRaisesRegex(TypeError, msg):
ImportError('test', name='name', invalid='keyword')
with self.assertRaisesRegex(TypeError, msg):
ImportError('test', path='path', invalid='keyword')
with self.assertRaisesRegex(TypeError, msg):
ImportError(invalid='keyword')
with self.assertRaisesRegex(TypeError, msg):
ImportError('test', invalid='keyword', another=True)
def test_reset_attributes(self):
exc = ImportError('test', name='name', path='path')
self.assertEqual(exc.args, ('test',))
self.assertEqual(exc.msg, 'test')
self.assertEqual(exc.name, 'name')
self.assertEqual(exc.path, 'path')
# Reset not specified attributes
exc.__init__()
self.assertEqual(exc.args, ())
self.assertEqual(exc.msg, None)
self.assertEqual(exc.name, None)
self.assertEqual(exc.path, None)
def test_non_str_argument(self):
# Issue #15778
with check_warnings(('', BytesWarning), quiet=True):
arg = b'abc'
exc = ImportError(arg)
self.assertEqual(str(arg), str(exc))
def test_copy_pickle(self):
for kwargs in (dict(),
dict(name='somename'),
dict(path='somepath'),
dict(name='somename', path='somepath')):
orig = ImportError('test', **kwargs)
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
exc = pickle.loads(pickle.dumps(orig, proto))
self.assertEqual(exc.args, ('test',))
self.assertEqual(exc.msg, 'test')
self.assertEqual(exc.name, orig.name)
self.assertEqual(exc.path, orig.path)
for c in copy.copy, copy.deepcopy:
exc = c(orig)
self.assertEqual(exc.args, ('test',))
self.assertEqual(exc.msg, 'test')
self.assertEqual(exc.name, orig.name)
self.assertEqual(exc.path, orig.path)
if __name__ == '__main__':
unittest.main()
|
bzl | 1a367f75bb4c0de757c64ad70dec1fc0095c8427 | """
buildfarm dependencies that can be imported into other WORKSPACE files
"""
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive", "http_file", "http_jar")
load("@bazel_tools//tools/build_defs/repo:utils.bzl", "maybe")
RULES_JVM_EXTERNAL_TAG = "3.3"
RULES_JVM_EXTERNAL_SHA = "d85951a92c0908c80bd8551002d66cb23c3434409c814179c0ff026b53544dab"
def archive_dependencies(third_party):
return [
{
"name": "rules_jvm_external",
"strip_prefix": "rules_jvm_external-%s" % RULES_JVM_EXTERNAL_TAG,
"sha256": RULES_JVM_EXTERNAL_SHA,
"url": "https://github.com/bazelbuild/rules_jvm_external/archive/%s.zip" % RULES_JVM_EXTERNAL_TAG,
},
# Needed for "well-known protos" and @com_google_protobuf//:protoc.
{
"name": "com_google_protobuf",
"sha256": "dd513a79c7d7e45cbaeaf7655289f78fd6b806e52dbbd7018ef4e3cf5cff697a",
"strip_prefix": "protobuf-3.15.8",
"urls": ["https://github.com/protocolbuffers/protobuf/archive/v3.15.8.zip"],
},
{
"name": "com_github_bazelbuild_buildtools",
"sha256": "a02ba93b96a8151b5d8d3466580f6c1f7e77212c4eb181cba53eb2cae7752a23",
"strip_prefix": "buildtools-3.5.0",
"urls": ["https://github.com/bazelbuild/buildtools/archive/3.5.0.tar.gz"],
},
# Needed for @grpc_java//compiler:grpc_java_plugin.
{
"name": "io_grpc_grpc_java",
"sha256": "101b21af120901e9bf342384988f57af3332b59d997f64d5f41a1e24ffb96f19",
"strip_prefix": "grpc-java-1.42.0",
"urls": ["https://github.com/grpc/grpc-java/archive/v1.42.0.zip"],
},
# The APIs that we implement.
{
"name": "googleapis",
"build_file": "%s:BUILD.googleapis" % third_party,
"patch_cmds": ["find google -name 'BUILD.bazel' -type f -delete"],
"patch_cmds_win": ["Remove-Item google -Recurse -Include *.bazel"],
"sha256": "745cb3c2e538e33a07e2e467a15228ccbecadc1337239f6740d57a74d9cdef81",
"strip_prefix": "googleapis-6598bb829c9e9a534be674649ffd1b4671a821f9",
"url": "https://github.com/googleapis/googleapis/archive/6598bb829c9e9a534be674649ffd1b4671a821f9.zip",
},
{
"name": "remote_apis",
"build_file": "%s:BUILD.remote_apis" % third_party,
"patch_args": ["-p1"],
"patches": ["%s/remote-apis:remote-apis.patch" % third_party],
"sha256": "1d69f5f2f694fe93ee78a630f196047892ae51878297a89601c98964486655c6",
"strip_prefix": "remote-apis-6345202a036a297b22b0a0e7531ef702d05f2130",
"url": "https://github.com/bazelbuild/remote-apis/archive/6345202a036a297b22b0a0e7531ef702d05f2130.zip",
},
{
"name": "rules_cc",
"sha256": "34b2ebd4f4289ebbc27c7a0d854dcd510160109bb0194c0ba331c9656ffcb556",
"strip_prefix": "rules_cc-daf6ace7cfeacd6a83e9ff2ed659f416537b6c74",
"url": "https://github.com/bazelbuild/rules_cc/archive/daf6ace7cfeacd6a83e9ff2ed659f416537b6c74.tar.gz",
},
# Used to format proto files
{
"name": "com_grail_bazel_toolchain",
"sha256": "54b54eedc71b93b278c44b6c056a737dc68545c6da75f63d0810676e1181f559",
"strip_prefix": "bazel-toolchain-76ce37e977a304acf8948eadabb82c516320e286",
"url": "https://github.com/grailbio/bazel-toolchain/archive/76ce37e977a304acf8948eadabb82c516320e286.tar.gz",
},
# Ideally we would use the 0.14.4 release of rules_docker,
        # but that version introduced new pypi and pkg dependencies on tar-related targets, making the upgrade difficult.
        # Those dependencies were then removed afterward. We pick a stable commit after 0.14.4 instead of cherry-picking the individual changes.
# https://github.com/bazelbuild/rules_docker/issues/1622
# When a new version after 0.14.4 is released, we can go back to a pinned version.
{
"name": "io_bazel_rules_docker",
"patch_args": ["-p1"],
"patches": ["%s/io_bazel_rules_docker:entrypoint.patch" % third_party],
"sha256": "d5609b7858246fa11e76237aa9b3e681615bdc8acf2ed29058426cf7c4cea099",
"strip_prefix": "rules_docker-f4822f3921f0c343dd9e5ae65c760d0fb70be1b3",
"urls": ["https://github.com/bazelbuild/rules_docker/archive/f4822f3921f0c343dd9e5ae65c760d0fb70be1b3.tar.gz"],
},
# Bazel is referenced as a dependency so that buildfarm can access the linux-sandbox as a potential execution wrapper.
{
"name": "bazel",
"sha256": "bca2303a43c696053317a8c7ac09a5e6d90a62fec4726e55357108bb60d7a807",
"strip_prefix": "bazel-3.7.2",
"urls": ["https://github.com/bazelbuild/bazel/archive/3.7.2.tar.gz"],
"patch_args": ["-p1"],
"patches": ["%s/bazel:bazel_visibility.patch" % third_party],
},
# Optional execution wrappers
{
"name": "skip_sleep",
"build_file": "%s:BUILD.skip_sleep" % third_party,
"sha256": "03980702e8e9b757df68aa26493ca4e8573770f15dd8a6684de728b9cb8549f1",
"strip_prefix": "TARDIS-f54fa4743e67763bb1ad77039b3d15be64e2e564",
"url": "https://github.com/Unilang/TARDIS/archive/f54fa4743e67763bb1ad77039b3d15be64e2e564.zip",
},
]
def buildfarm_dependencies(repository_name = "build_buildfarm"):
"""
Define all 3rd party archive rules for buildfarm
Args:
repository_name: the name of the repository
"""
third_party = "@%s//third_party" % repository_name
for dependency in archive_dependencies(third_party):
params = {}
params.update(**dependency)
name = params.pop("name")
maybe(http_archive, name, **params)
# Enhanced jedis 3.2.0 containing several convenience, performance, and
# robustness changes.
# Notable features include:
# Cluster request pipelining, used for batching requests for operation
# monitors and CAS index.
# Blocking request (b* prefix) interruptibility, using client
# connection reset.
# Singleton-redis-as-cluster - support treating a non-clustered redis
# endpoint as a cluster of 1 node.
# Other changes are redis version-forward treatment of spop and visibility
# into errors in cluster unreachable and cluster retry exhaustion.
# Details at https://github.com/werkt/jedis/releases/tag/3.2.0-e82e68e2f7
maybe(
http_jar,
"jedis",
sha256 = "294ff5e4e6ae3fda5ff00f0a3c398fa50c1ffa3bc9313800b32e34a75fbb93f3",
urls = [
"https://github.com/werkt/jedis/releases/download/3.2.0-e82e68e2f7/jedis-3.2.0-e82e68e2f7.jar",
],
)
http_file(
name = "tini",
urls = ["https://github.com/krallin/tini/releases/download/v0.18.0/tini"],
)
|
py | 1a367f7af25a1f495b2c40ad07672eb53730bbdd | """
BenchExec is a framework for reliable benchmarking.
This file is part of BenchExec.
Copyright (C) 2007-2015 Dirk Beyer
All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import benchexec.util as util
import benchexec.tools.template
import benchexec.result as result
class Tool(benchexec.tools.template.BaseTool):
"""
This class serves as tool adaptor for ESBMC (http://www.esbmc.org/)
"""
REQUIRED_PATHS = ["cpachecker", "esbmc", "esbmc-wrapper.py", "tokenizer"]
def executable(self):
return util.find_executable("esbmc-wrapper.py")
def working_directory(self, executable):
executableDir = os.path.dirname(executable)
return executableDir
def version(self, executable):
return self._version_from_tool(executable, "-v")
def name(self):
return "ESBMC"
def cmdline(self, executable, options, tasks, propertyfile, rlimits):
assert len(tasks) == 1, "only one inputfile supported"
inputfile = tasks[0]
return [executable] + ["-p", propertyfile] + options + [inputfile]
def determine_result(self, returncode, returnsignal, output, isTimeout):
output = "\n".join(output)
status = result.RESULT_UNKNOWN
if self.allInText(["FALSE_DEREF"], output):
status = result.RESULT_FALSE_DEREF
elif self.allInText(["FALSE_FREE"], output):
status = result.RESULT_FALSE_FREE
elif self.allInText(["FALSE_MEMTRACK"], output):
status = result.RESULT_FALSE_MEMTRACK
elif self.allInText(["FALSE_OVERFLOW"], output):
status = result.RESULT_FALSE_OVERFLOW
elif self.allInText(["FALSE"], output):
status = result.RESULT_FALSE_REACH
elif "TRUE" in output:
status = result.RESULT_TRUE_PROP
elif "DONE" in output:
status = result.RESULT_DONE
if status == result.RESULT_UNKNOWN:
if isTimeout:
status = "TIMEOUT"
elif output.endswith(("error", "error\n")):
status = "ERROR"
return status
""" helper method """
    def allInText(self, words, text):
        """
        This function checks if all the words appear in the given order in the text.
        """
        index = 0
        for word in words:
            pos = text.find(word, index)
            if pos == -1:
                return False
            # continue searching after this match so that the words must appear in order
            index = pos + len(word)
        return True
|
py | 1a367f8da20c8233e7442ccc1bdf8fc36107dcb6 | ############################################################################
# Copyright (C) 2008 by Volker Christian #
# [email protected] #
# #
# This program is free software; you can redistribute it and#or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation; either version 2 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program; if not, write to the #
# Free Software Foundation, Inc., #
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. #
############################################################################
from Plugins.Extensions.VlcPlayer.VlcPlayer import VlcPlayer
from Components.ActionMap import ActionMap
from YouTubeContextMenu import YouTubeEntryContextMenu, YouTubeEntryContextMenuList
class YouTubePlayer(VlcPlayer):
def __init__(self, session, server, currentList, contextMenuEntries, infoCallback, name):
VlcPlayer.__init__(self, session, server, currentList)
self.contextMenuEntries = contextMenuEntries
self.infoCallback = infoCallback
self.name = name
self["menuactions"] = ActionMap(["YouTubePlayerScreenActions"],
{
"menu" : self.openContextMenu,
"info" : self.showVideoInfo,
}, -1)
def showVideoInfo(self):
if self.shown:
self.hideInfobar()
self.infoCallback()
def openContextMenu(self):
if self.shown:
self.hideInfobar()
contextMenuList = YouTubeEntryContextMenuList()
for entry in self.contextMenuEntries:
contextMenuList.appendEntry(entry)
self.session.openWithCallback(self.menuActionCoosen, YouTubeEntryContextMenu, contextMenuList, self.name)
def menuActionCoosen(self, cookie):
if cookie is not None:
if cookie[1]:
self.stop()
cookie[0]()
|
py | 1a367f9b4a3cc910e12571e6940d29da5094666d | import os
def createFolder(directory):
try:
if not os.path.exists(directory):
os.makedirs(directory)
except OSError:
print ('Error: Creating directory. ' + directory)
# Example
createFolder('./data/')
# Creates a folder in the current directory called data
|
py | 1a367f9ebcfc43123c9fd915a70c84f3f5bb8cca | import json
import sys
import argparse
from urllib.parse import urlparse, urlencode, parse_qs
from urllib.request import urlopen
YOUTUBE_COMMENT_URL = 'https://www.googleapis.com/youtube/v3/commentThreads'
YOUTUBE_SEARCH_URL = 'https://www.googleapis.com/youtube/v3/search'
arr = []
def retrive1():
return arr
class YouTubeApi():
def load_comments(self, mat):
for item in mat["items"]:
comment = item["snippet"]["topLevelComment"]
author = comment["snippet"]["authorDisplayName"]
text = comment["snippet"]["textDisplay"]
print("Comment by {}: {}".format(author, text))
if 'replies' in item.keys():
for reply in item['replies']['comments']:
rauthor = reply['snippet']['authorDisplayName']
rtext = reply["snippet"]["textDisplay"]
print("\n\tReply by {}: {}".format(rauthor, rtext), "\n")
def get_video_comment(self):
parser = argparse.ArgumentParser()
mxRes = 8
vid = str()
parser.add_argument("--c", help="calls comment function by keyword function", action='store_true')
parser.add_argument("--max", help="number of comments to return")
parser.add_argument("--videourl", help="Required URL for which comments to return")
parser.add_argument("--key", help="Required API key")
args = parser.parse_args()
if not args.max:
args.max = mxRes
if not args.videourl:
exit("Please specify video URL using the --videourl=parameter.")
if not args.key:
exit("Please specify API key using the --key=parameter.")
try:
video_id = urlparse(str(args.videourl))
q = parse_qs(video_id.query)
vid = q["v"][0]
except:
print("Invalid YouTube URL")
parms = {
'part': 'snippet,replies',
'maxResults': args.max,
'videoId': vid,
'textFormat': 'plainText',
'key': args.key
}
try:
matches = self.openURL(YOUTUBE_COMMENT_URL, parms)
i = 2
mat = json.loads(matches)
nextPageToken = mat.get("nextPageToken")
print("\nPage : 1")
print("------------------------------------------------------------------")
self.load_comments(mat)
while nextPageToken:
parms.update({'pageToken': nextPageToken})
matches = self.openURL(YOUTUBE_COMMENT_URL, parms)
mat = json.loads(matches)
nextPageToken = mat.get("nextPageToken")
print("\nPage : ", i)
print("------------------------------------------------------------------")
self.load_comments(mat)
i += 1
except KeyboardInterrupt:
print("User Aborted the Operation")
except:
print("Cannot Open URL or Fetch comments at a moment")
def load_search_res(self, search_response):
videos, channels, playlists = [], [], []
for search_result in search_response.get("items", []):
if search_result["id"]["kind"] == "youtube#video":
videos.append("{}".format(search_result["id"]["videoId"]))
arr.append("{}".format(search_result["id"]["videoId"]))
# elif search_result["id"]["kind"] == "youtube#channel":
# channels.append("{} ({})".format(search_result["snippet"]["title"],
# search_result["id"]["channelId"]))
# elif search_result["id"]["kind"] == "youtube#playlist":
# playlists.append("{} ({})".format(search_result["snippet"]["title"],
# search_result["id"]["playlistId"]))
print("Videos:\n", "\n".join(videos), "\n")
# print("Channels:\n", "\n".join(channels), "\n")
#print("Playlists:\n", "\n".join(playlists), "\n")
def search_keyword(self,word):
parser = argparse.ArgumentParser()
# word="hello"
mxRes = 2
parser.add_argument("--s", help="calls the search by keyword function", action='store_true')
parser.add_argument("--r", help="define country code for search results for specific country", default="IN")
parser.add_argument("--search", help="Search Term")
parser.add_argument("--max", help="number of results to return")
parser.add_argument("--key", help="Required API key", default="AIzaSyAP3or1BGNTc-H6gr9j26p3oWnwvcUonsc")
args = parser.parse_args()
if not args.max:
args.max = mxRes
if not args.search:
args.search = word
if not args.key:
exit("Please specify API key using the --key= parameter.")
parms = {
'q': args.search,
'part': 'id,snippet',
'maxResults': args.max,
'regionCode': args.r,
'key': args.key
}
try:
matches = self.openURL(YOUTUBE_SEARCH_URL, parms)
search_response = json.loads(matches)
i = 2
nextPageToken = search_response.get("nextPageToken")
print("\nPage : 1 --- Region : {}".format(args.r))
print("------------------------------------------------------------------")
self.load_search_res(search_response)
while nextPageToken:
parms.update({'pageToken': nextPageToken})
matches = self.openURL(YOUTUBE_SEARCH_URL, parms)
search_response = json.loads(matches)
nextPageToken = search_response.get("nextPageToken")
print("Page : {} --- Region : {}".format(i, args.r))
print("------------------------------------------------------------------")
self.load_search_res(search_response)
i += 1
if i==5:
break
except KeyboardInterrupt:
print("User Aborted the Operation")
except:
print("Cannot Open URL or Fetch comments at a moment")
def load_channel_vid(self, search_response):
videos = []
for search_result in search_response.get("items", []):
if search_result["id"]["kind"] == "youtube#video":
videos.append("{} ({})".format(search_result["snippet"]["title"],
search_result["id"]["videoId"]))
print("###Videos:###\n", "\n".join(videos), "\n")
def channel_videos(self):
parser = argparse.ArgumentParser()
mxRes = 8
parser.add_argument("--sc", help="calls the search by channel by keyword function", action='store_true')
parser.add_argument("--channelid", help="Search Term", default="Srce Cde")
parser.add_argument("--max", help="number of results to return")
parser.add_argument("--key", help="Required API key")
args = parser.parse_args()
if not args.max:
args.max = mxRes
if not args.channelid:
exit("Please specify channelid using the --channelid= parameter.")
if not args.key:
exit("Please specify API key using the --key= parameter.")
parms = {
'part': 'id,snippet',
'channelId': args.channelid,
'maxResults': args.max,
'key': args.key
}
try:
matches = self.openURL(YOUTUBE_SEARCH_URL, parms)
search_response = json.loads(matches)
i = 2
nextPageToken = search_response.get("nextPageToken")
print("\nPage : 1")
print("------------------------------------------------------------------")
self.load_channel_vid(search_response)
while nextPageToken:
parms.update({'pageToken': nextPageToken})
matches = self.openURL(YOUTUBE_SEARCH_URL, parms)
search_response = json.loads(matches)
nextPageToken = search_response.get("nextPageToken")
print("Page : ", i)
print("------------------------------------------------------------------")
self.load_channel_vid(search_response)
i += 1
except KeyboardInterrupt:
print("User Aborted the Operation")
except:
print("Cannot Open URL or Fetch comments at a moment")
def openURL(self, url, parms):
f = urlopen(url + '?' + urlencode(parms))
data = f.read()
f.close()
matches = data.decode("utf-8")
return matches
def main():
y = YouTubeApi()
y.search_keyword("hello")
print(arr)
if __name__ == '__main__':
main() |
py | 1a36805ab7f6e8eb259c017e010d78822bb10bb1 | """Data processing routines for MongoDB version
"""
import datetime
# import shutil
import pathlib
from pymongo import MongoClient
from pymongo.collection import Collection
cl = MongoClient()
db = cl.doctree_database
# support for older pymongo versions
try:
test = Collection.update_one
except AttributeError:
## Collection.insert_one = Collection.insert
Collection.update_one = Collection.update
Collection.replace_one = Collection.update
## # Collection.find_one_and_delete = Collection.remove
Collection.delete_many = Collection.remove
def _add_doc(filename, doc):
"""create new document in the dtree document
"""
try:
id_ = db[filename].insert_one(doc).inserted_id
except TypeError:
id_ = db[filename].insert(doc)
return id_
def _update_doc(filename, docid, doc):
"""change a document in the dtree document
"""
db[filename].update({'_id': docid}, doc)
def list_dtrees():
"""list all dtrees (collections) registered in the database
"""
return db.list_collection_names()
def create_new_dtree(filename):
"""set up a new dtree/collection
"""
if db[filename].find_one({'type': 'settings'}):
raise FileExistsError
db[filename].insert_one({'type': 'settings'})
# db[filename].insert_one({'type': 'textpos'})
db[filename].insert_one({'type': 'imagelist'})
def clear_dtree(filename, recreate=False):
"""remove (all data from) a dtree/collection
"""
if not db[filename].find_one({'type': 'settings'}):
raise FileNotFoundError
db[filename].drop()
if recreate:
create_new_dtree(filename)
def read_dtree(filename, readable=False):
"""read and return all data from a dtree/collection
"""
if not readable:
return db[filename].find()
views, itemdict, textpos = [], {}, {}
for item in read_dtree(filename):
if item['type'] == 'settings':
opts = item['data']
elif item['type'] == 'view':
views.append(item['data'])
elif item['type'] == 'textitem':
itemdict[item['textid']] = item['data']
textpos[item['textid']] = item['textpos']
# imagelist = [] # db[filename].find_one({'type': 'imagelist'})['data']
return opts, views, itemdict, textpos # , imagelist
def rename_dtree(filename, newname):
"""change the dtree/collection's name if possible
"""
if db[newname].find_one({'type': 'settings'}) is not None:
raise FileExistsError('new_name_taken')
db[filename].rename(newname)
# ----------- these routines come from main - rework them for mongodb
def read_from_files(this_file, other_file=''):
"(try to) load the data"
filename = other_file or this_file
if not filename:
return ['no file name given']
# read/init/check settings if possible, otherwise cancel
opts = db[filename].find_one({'type': 'settings'})['data']
if opts.get('Application', '') != 'DocTree':
return ["{} is not a valid Doctree data file".format(str(filename))] # is dit een Path?
# read views
views_from_db = db[filename].find({'type': 'view'})
views = [x['data'] for x in sorted(views_from_db, key=lambda x: x['viewno'])]
# read itemdict
# read text positions
data_from_db = list(db[filename].find({'type': 'textitem'}))
itemdict = {x['textid']: x['data'] for x in data_from_db}
text_positions = {x['textid']: x['textpos'] for x in data_from_db}
    # if I don't create a data file, I don't really want a zipfile either
    # but how should the images be stored then?
# imagelist = [] # db[filename].find_one({'type': 'imagelist'})['data']
# if not other_file:
# # if possible, build a list of referred-to image files
# ## path = os.path.dirname((self.project_file))
# path = str(this_file.parent)
# try:
# with zpf.ZipFile(str(this_file.with_suffix('.zip'))) as f_in:
# f_in.extractall(path=path)
# imagelist = f_in.namelist()
# except FileNotFoundError:
# pass
return opts, views, itemdict, text_positions # , imagelist
def write_to_files(filename, opts, views, itemdict, textpositions, toolkit, extra_images=None,
backup=True, save_images=True):
"""settings en tree data in een structuur omzetten en opslaan
images contained are saved in a separate zipfile (not needed for wx)
"""
# nt_data = {0: opts, 1: views, 2: itemdict, 3: textpositions}
# zipfile = filename.with_suffix('.zip')
# if backup:
# try:
# shutil.copyfile(str(filename), str(filename) + ".bak")
# shutil.copyfile(str(zipfile), str(zipfile) + ".bak")
# except FileNotFoundError:
# pass
# with filename.open("wb") as f_out:
# pck.dump(nt_data, f_out, protocol=2)
# nt_data = {'settings': opts, 'views': views, 'docdata': itemdict, 'textpos': textpositions}
db[filename].update_one({'type': 'settings'}, {'$set': {'data': opts}})
for seq, view in enumerate(views):
print(seq, view)
result = db[filename].update_one({'type': 'view', 'viewno': seq},
{'$set': {'data': view}}, upsert=True)
print(result.raw_result)
    # can this be done with update_many? Well, not like this at any rate:
# db[filename].update_many({'type': 'view', 'viewno': seq}, {'$set': {'data': view}},
# upsert=True) for (seq, view) in enumerate(views)
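    # A possible alternative (a sketch only, not enabled here): pymongo's bulk_write
    # can batch these upserts into a single round trip using UpdateOne operations:
    # from pymongo import UpdateOne
    # db[filename].bulk_write([UpdateOne({'type': 'view', 'viewno': seq},
    #                                    {'$set': {'data': view}}, upsert=True)
    #                          for seq, view in enumerate(views)])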
for docid, doc in itemdict.items():
pos = textpositions[docid]
db[filename].update_one({'type': 'textitem', 'textid': docid},
{'$set': {'data': doc, 'textpos': pos}}, upsert=True)
# db[filename].update_many({'type': 'textitem', 'textid': docid},
# {'$set': {'data': doc, 'textpos': textpositions[docid]}},
# upsert = True) for (docid, doc) in itemdict.items()
    # -- leaving the images out for now
# db[filename].update_one({'type': 'imagelist'}, {'$set': {'data': []}})
# if not save_images:
# return
# if extra_images is None:
    # scan the itemdict for image files and put them in a list
# imagelist = []
# for _, data in nt_data[2].values():
# names = [img['src'] for img in bs.BeautifulSoup(data, 'lxml').find_all('img')]
# imagelist.extend(names)
# ## fname = os.path.basename(filename)
# mode = "w"
# else:
# imagelist = extra_images
# mode = "a"
# # rebuild zipfile or add extra images to the zipfile
    # # FIXME: if nothing has changed, the zipfile doesn't need to be rebuilt either?
    # # can imagelist be compared against self.imagelist for that?
    # path = filename.parent  # make it absolute first if necessary
# zipped = []
# with zpf.ZipFile(str(zipfile), mode) as _out:
# for name in imagelist:
# # if name.startswith(str(filename)):
    # imagepath = path / name  # TODO: check whether the prefix now gets prepended twice
# if imagepath.exists():
# ## _out.write(os.path.join(path, name), arcname=os.path.basename(name))
# # _out.write(str(path / name), arcname=pathlib.Path(name).name)
# _out.write(str(imagepath), arcname=name)
# zipped.append(name)
# return zipped
# maybe handling images works if I do it as follows:
# - an image is stored as dtree001.png and is identified the same way in the html
# - turn it into a bytestream by opening it with PIL -> io
# img = PIL.Image.open('dtree001.png')
# img_bytes = io.BytesIO()
# img.save(img_bytes, format='PNG') # this seems a complicated way to get it in here
# couldn't it simply be done with
# img_bytes = open('dtree001.png', 'rb')
# in any case, the way to get this into mongodb seems to be
# db[filename].insert_one({'type': 'image', 'name': 'dtree001.png', 'data': img_bytes})
# or maybe this still needs to be converted with bson.Binary(img_bytes)
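
# A minimal sketch of the idea above (an assumption, not part of the current
# implementation; the helper name is hypothetical): read the image file as raw
# bytes and upsert it into the dtree collection wrapped in bson.Binary.
def _store_image_sketch(filename, imagepath):
    """hypothetical helper: store one image file as binary data in the dtree collection"""
    from bson import Binary  # bson is installed together with pymongo
    with open(imagepath, 'rb') as img_file:
        img_bytes = img_file.read()
    db[filename].update_one({'type': 'image', 'name': pathlib.Path(imagepath).name},
                            {'$set': {'data': Binary(img_bytes)}}, upsert=True)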
|
py | 1a3680a96ebdb867cc08931ac3cf2fd4cf979c40 | # -*- coding: utf-8 -*-
'''
pytestsalt.utils
~~~~~~~~~~~~~~~~
Some pytest fixtures used in pytest-salt
'''
# Import Python libs
from __future__ import absolute_import
import os
import re
import sys
import json
import time
import errno
import atexit
import signal
import socket
import logging
import subprocess
import threading
from operator import itemgetter
from collections import namedtuple
# Import 3rd party libs
import pytest
import psutil
try:
import setproctitle
HAS_SETPROCTITLE = True
except ImportError:
HAS_SETPROCTITLE = False
log = logging.getLogger(__name__)
if sys.platform.startswith('win'):
SIGINT = SIGTERM = signal.CTRL_BREAK_EVENT # pylint: disable=no-member
else:
SIGINT = signal.SIGINT
SIGTERM = signal.SIGTERM
def set_proc_title(title):
if HAS_SETPROCTITLE is False:
return
setproctitle.setproctitle('[{}] - {}'.format(title, setproctitle.getproctitle()))
def get_unused_localhost_port():
'''
Return a random unused port on localhost
'''
usock = socket.socket(family=socket.AF_INET, type=socket.SOCK_STREAM)
usock.bind(('127.0.0.1', 0))
port = usock.getsockname()[1]
usock.close()
return port
def collect_child_processes(pid):
'''
Try to collect any started child processes of the provided pid
'''
# Let's get the child processes of the started subprocess
try:
parent = psutil.Process(pid)
children = parent.children(recursive=True)
except psutil.NoSuchProcess:
children = []
return children
def _terminate_process_list(process_list, kill=False, slow_stop=False):
for process in process_list[:]: # Iterate over copy of the list
if not psutil.pid_exists(process.pid):
process_list.remove(process)
continue
try:
if not kill and process.status() == psutil.STATUS_ZOMBIE:
# Zombie processes will exit once child processes also exit
continue
try:
cmdline = process.cmdline()
except psutil.AccessDenied:
# OSX is more restrictive about the above information
cmdline = None
if not cmdline:
cmdline = process.as_dict()
if kill:
log.info('Killing process(%s): %s', process.pid, cmdline)
process.kill()
else:
log.info('Terminating process(%s): %s', process.pid, cmdline)
try:
if slow_stop:
# Allow coverage data to be written down to disk
process.send_signal(SIGTERM)
try:
process.wait(2)
except psutil.TimeoutExpired:
if psutil.pid_exists(process.pid):
continue
else:
process.terminate()
except OSError as exc:
if exc.errno not in (errno.ESRCH, errno.EACCES):
raise
if not psutil.pid_exists(process.pid):
process_list.remove(process)
except psutil.NoSuchProcess:
process_list.remove(process)
def terminate_process_list(process_list, kill=False, slow_stop=False):
def on_process_terminated(proc):
log.info('Process %s terminated with exit code: %s', getattr(proc, '_cmdline', proc), proc.returncode)
# Try to terminate processes with the provided kill and slow_stop parameters
log.info('Terminating process list. 1st step. kill: %s, slow stop: %s', kill, slow_stop)
# Cache the cmdline since that will be inaccessible once the process is terminated
for proc in process_list:
try:
cmdline = proc.cmdline()
except (psutil.NoSuchProcess, psutil.AccessDenied):
# OSX is more restrictive about the above information
cmdline = None
if not cmdline:
try:
cmdline = proc
except (psutil.NoSuchProcess, psutil.AccessDenied):
                cmdline = '<could not be retrieved; dead process: {}>'.format(proc)
proc._cmdline = cmdline
_terminate_process_list(process_list, kill=kill, slow_stop=slow_stop)
psutil.wait_procs(process_list, timeout=15, callback=on_process_terminated)
if process_list:
# If there's still processes to be terminated, retry and kill them if slow_stop is False
log.info('Terminating process list. 2nd step. kill: %s, slow stop: %s', slow_stop is False, slow_stop)
_terminate_process_list(process_list, kill=slow_stop is False, slow_stop=slow_stop)
psutil.wait_procs(process_list, timeout=10, callback=on_process_terminated)
if process_list:
# If there's still processes to be terminated, just kill them, no slow stopping now
log.info('Terminating process list. 3rd step. kill: True, slow stop: False')
_terminate_process_list(process_list, kill=True, slow_stop=False)
psutil.wait_procs(process_list, timeout=5, callback=on_process_terminated)
if process_list:
        # If there's still processes to be terminated, log a warning about it
log.warning('Some processes failed to properly terminate: %s', process_list)
def terminate_process(pid=None, process=None, children=None, kill_children=False, slow_stop=False):
'''
    Try to terminate/kill the started process
'''
children = children or []
process_list = []
    # Always kill children if killing the parent process.
kill_children = True if slow_stop is False else kill_children
if pid and not process:
try:
process = psutil.Process(pid)
process_list.append(process)
except psutil.NoSuchProcess:
# Process is already gone
process = None
if kill_children:
if process:
if not children:
children = collect_child_processes(process.pid)
else:
# Let's collect children again since there might be new ones
children.extend(collect_child_processes(pid))
if children:
process_list.extend(children)
if process_list:
if process:
log.info('Stopping process %s and respective children: %s', process, children)
else:
log.info('Terminating process list: %s', process_list)
terminate_process_list(process_list, kill=slow_stop is False, slow_stop=slow_stop)
def start_daemon(request,
daemon_name=None,
daemon_id=None,
daemon_log_prefix=None,
daemon_cli_script_name=None,
daemon_config=None,
daemon_config_dir=None,
daemon_class=None,
bin_dir_path=None,
fail_hard=False,
start_timeout=10,
slow_stop=True,
environ=None,
cwd=None,
max_attempts=3,
**kwargs):
'''
Returns a running salt daemon
'''
if fail_hard:
fail_method = pytest.fail
else:
fail_method = pytest.xfail
log.info('[%s] Starting pytest %s(%s)', daemon_name, daemon_log_prefix, daemon_id)
attempts = 0
process = None
while attempts <= max_attempts: # pylint: disable=too-many-nested-blocks
attempts += 1
process = daemon_class(request,
daemon_config,
daemon_config_dir,
bin_dir_path,
daemon_log_prefix,
cli_script_name=daemon_cli_script_name,
slow_stop=slow_stop,
environ=environ,
cwd=cwd,
**kwargs)
process.start()
if process.is_alive():
try:
connectable = process.wait_until_running(timeout=start_timeout)
if connectable is False:
connectable = process.wait_until_running(timeout=start_timeout/2)
if connectable is False:
process.terminate()
if attempts >= max_attempts:
fail_method(
'The pytest {}({}) has failed to confirm running status '
'after {} attempts'.format(daemon_name, daemon_id, attempts))
continue
except Exception as exc: # pylint: disable=broad-except
log.exception('[%s] %s', daemon_log_prefix, exc, exc_info=True)
terminate_process(process.pid, kill_children=True, slow_stop=slow_stop)
if attempts >= max_attempts:
fail_method(str(exc))
continue
log.info(
'[%s] The pytest %s(%s) is running and accepting commands '
'after %d attempts',
daemon_log_prefix,
daemon_name,
daemon_id,
attempts
)
def stop_daemon():
log.info('[%s] Stopping pytest %s(%s)', daemon_log_prefix, daemon_name, daemon_id)
terminate_process(process.pid, kill_children=True, slow_stop=slow_stop)
log.info('[%s] pytest %s(%s) stopped', daemon_log_prefix, daemon_name, daemon_id)
request.addfinalizer(stop_daemon)
return process
else:
terminate_process(process.pid, kill_children=True, slow_stop=slow_stop)
continue
else: # pylint: disable=useless-else-on-loop
        # Wrong, we have a return, it's not useless
if process is not None:
terminate_process(process.pid, kill_children=True, slow_stop=slow_stop)
fail_method(
'The pytest {}({}) has failed to start after {} attempts'.format(
daemon_name,
daemon_id,
attempts-1
)
)
class SaltScriptBase(object):
'''
Base class for Salt CLI scripts
'''
cli_display_name = None
def __init__(self,
request,
config,
config_dir,
bin_dir_path,
log_prefix,
cli_script_name=None,
slow_stop=False,
environ=None,
cwd=None):
self.request = request
self.config = config
if not isinstance(config_dir, str):
config_dir = config_dir.realpath().strpath
self.config_dir = config_dir
self.bin_dir_path = bin_dir_path
self.log_prefix = log_prefix
if cli_script_name is None:
raise RuntimeError('Please provide a value for the cli_script_name keyword argument')
self.cli_script_name = cli_script_name
if self.cli_display_name is None:
self.cli_display_name = '{}({})'.format(self.__class__.__name__,
self.cli_script_name)
self.slow_stop = slow_stop
self.environ = environ or os.environ.copy()
self.cwd = cwd or os.getcwd()
self._terminal = self._children = None
def get_script_path(self, script_name):
'''
Returns the path to the script to run
'''
script_path = os.path.join(self.bin_dir_path, script_name)
if not os.path.exists(script_path):
pytest.fail('The CLI script {!r} does not exist'.format(script_path))
return script_path
def get_base_script_args(self):
'''
Returns any additional arguments to pass to the CLI script
'''
return ['-c', self.config_dir]
def get_script_args(self): # pylint: disable=no-self-use
'''
Returns any additional arguments to pass to the CLI script
'''
return []
def init_terminal(self, cmdline, **kwargs):
'''
Instantiate a terminal with the passed cmdline and kwargs and return it.
        Additionally, it sets a reference to it in self._terminal and also collects
an initial listing of child processes which will be used when terminating the
terminal
'''
# Late import
import salt.utils.nb_popen as nb_popen
self._terminal = nb_popen.NonBlockingPopen(cmdline, **kwargs)
self._children = collect_child_processes(self._terminal.pid)
atexit.register(self.terminate)
return self._terminal
def terminate(self):
'''
Terminate the started daemon
'''
if self._terminal is None:
return
# Lets log and kill any child processes which salt left behind
if self._terminal.stdout:
self._terminal.stdout.close()
if self._terminal.stderr:
self._terminal.stderr.close()
terminate_process(pid=self._terminal.pid,
children=self._children,
kill_children=True,
slow_stop=self.slow_stop)
class SaltDaemonScriptBase(SaltScriptBase):
'''
Base class for Salt Daemon CLI scripts
'''
def __init__(self, *args, **kwargs):
self._process_cli_output_in_thread = kwargs.pop('process_cli_output_in_thread', True)
event_listener_config_dir = kwargs.pop('event_listener_config_dir', None)
if event_listener_config_dir and not isinstance(event_listener_config_dir, str):
event_listener_config_dir = event_listener_config_dir.realpath().strpath
self.event_listener_config_dir = event_listener_config_dir
super(SaltDaemonScriptBase, self).__init__(*args, **kwargs)
self._running = threading.Event()
self._connectable = threading.Event()
def is_alive(self):
'''
Returns true if the process is alive
'''
return self._running.is_set()
def get_check_ports(self): # pylint: disable=no-self-use
'''
Return a list of ports to check against to ensure the daemon is running
'''
return []
def get_check_events(self): # pylint: disable=no-self-use
'''
Return a list of event tags to check against to ensure the daemon is running
'''
return []
def get_salt_run_fixture(self):
if self.request.scope == 'session':
try:
return self.request.getfixturevalue('session_salt_run')
except AttributeError:
return self.request.getfuncargvalue('session_salt_run')
try:
return self.request.getfixturevalue('salt_run')
except AttributeError:
return self.request.getfuncargvalue('salt_run')
def start(self):
'''
Start the daemon subprocess
'''
# Late import
log.info('[%s][%s] Starting DAEMON in CWD: %s', self.log_prefix, self.cli_display_name, self.cwd)
proc_args = [
self.get_script_path(self.cli_script_name)
] + self.get_base_script_args() + self.get_script_args()
if sys.platform.startswith('win'):
# Windows needs the python executable to come first
proc_args.insert(0, sys.executable)
log.info('[%s][%s] Running \'%s\'...', self.log_prefix, self.cli_display_name, ' '.join(proc_args))
self.init_terminal(proc_args, env=self.environ, cwd=self.cwd)
self._running.set()
if self._process_cli_output_in_thread:
process_output_thread = threading.Thread(target=self._process_output_in_thread)
process_output_thread.daemon = True
process_output_thread.start()
return True
def _process_output_in_thread(self):
'''
        Consume the terminal output in a background thread
'''
try:
while self._running.is_set() and self._terminal.poll() is None:
# We're not actually interested in processing the output, just consume it
if self._terminal.stdout is not None:
self._terminal.recv()
if self._terminal.stderr is not None:
self._terminal.recv_err()
time.sleep(0.125)
if self._terminal.poll() is not None:
self._running.clear()
except (SystemExit, KeyboardInterrupt):
self._running.clear()
finally:
if self._terminal.stdout:
self._terminal.stdout.close()
if self._terminal.stderr:
self._terminal.stderr.close()
@property
def pid(self):
terminal = getattr(self, '_terminal', None)
if not terminal:
return
return terminal.pid
def terminate(self):
'''
Terminate the started daemon
'''
# Let's get the child processes of the started subprocess
self._running.clear()
self._connectable.clear()
time.sleep(0.0125)
super(SaltDaemonScriptBase, self).terminate()
def wait_until_running(self, timeout=None):
'''
Blocking call to wait for the daemon to start listening
'''
# Late import
import salt.ext.six as six
if self._connectable.is_set():
return True
expire = time.time() + timeout
check_ports = self.get_check_ports()
if check_ports:
log.debug(
'[%s][%s] Checking the following ports to assure running status: %s',
self.log_prefix,
self.cli_display_name,
check_ports
)
check_events = self.get_check_events()
if check_events:
log.debug(
'[%s][%s] Checking the following event tags to assure running status: %s',
self.log_prefix,
self.cli_display_name,
check_events
)
log.debug('Wait until running expire: %s Timeout: %s Current Time: %s', expire, timeout, time.time())
event_listener = EventListener(
self.event_listener_config_dir or self.config_dir,
self.log_prefix
)
try:
while True:
if self._running.is_set() is False:
# No longer running, break
log.warning('No longer running!')
break
if time.time() > expire:
# Timeout, break
log.debug('Expired at %s(was set to %s)', time.time(), expire)
break
if not check_ports and not check_events:
self._connectable.set()
break
if check_events:
for tag in event_listener.wait_for_events(check_events, timeout=timeout - 0.5):
check_events.remove(tag)
if not check_events:
stop_sending_events_file = self.config.get('pytest_stop_sending_events_file')
if stop_sending_events_file and os.path.exists(stop_sending_events_file):
log.warning('Removing pytest_stop_sending_events_file: %s', stop_sending_events_file)
os.unlink(stop_sending_events_file)
for port in set(check_ports):
if isinstance(port, int):
log.debug('[%s][%s] Checking connectable status on port: %s',
self.log_prefix,
self.cli_display_name,
port)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
conn = sock.connect_ex(('localhost', port))
try:
if conn == 0:
log.debug('[%s][%s] Port %s is connectable!',
self.log_prefix,
self.cli_display_name,
port)
check_ports.remove(port)
sock.shutdown(socket.SHUT_RDWR)
except socket.error:
continue
finally:
sock.close()
del sock
elif isinstance(port, six.string_types):
salt_run = self.get_salt_run_fixture()
minions_joined = salt_run.run('manage.joined')
if minions_joined.exitcode == 0:
if minions_joined.json and port in minions_joined.json:
check_ports.remove(port)
log.warning('Removed ID %r Still left: %r', port, check_ports)
elif minions_joined.json is None:
log.debug('salt-run manage.join did not return any valid JSON: %s', minions_joined)
time.sleep(0.5)
except KeyboardInterrupt:
return self._connectable.is_set()
finally:
event_listener.terminate()
if self._connectable.is_set():
log.debug('[%s][%s] All ports checked. Running!', self.log_prefix, self.cli_display_name)
return self._connectable.is_set()
class ShellResult(namedtuple('Result', ('exitcode', 'stdout', 'stderr', 'json'))):
'''
    This class serves the purpose of having a common result class which will hold the
    data from the command execution, regardless of the backend being used.
'''
__slots__ = ()
def __new__(cls, exitcode, stdout, stderr, json):
return super(ShellResult, cls).__new__(cls, exitcode, stdout, stderr, json)
# These are copied from the namedtuple verbose output in order to quiet down PyLint
exitcode = property(itemgetter(0), doc='Alias for field number 0')
stdout = property(itemgetter(1), doc='Alias for field number 1')
stderr = property(itemgetter(2), doc='Alias for field number 2')
json = property(itemgetter(3), doc='Alias for field number 3')
def __eq__(self, other):
'''
Allow comparison against the parsed JSON or the output
'''
if self.json:
return self.json == other
return self.stdout == other
class SaltCliScriptBase(SaltScriptBase):
'''
Base class which runs Salt's non daemon CLI scripts
'''
DEFAULT_TIMEOUT = 25
def __init__(self, *args, **kwargs):
self.default_timeout = kwargs.pop('default_timeout', self.DEFAULT_TIMEOUT)
super(SaltCliScriptBase, self).__init__(*args, **kwargs)
def get_base_script_args(self):
return SaltScriptBase.get_base_script_args(self) + ['--out=json']
def get_minion_tgt(self, **kwargs):
return kwargs.pop('minion_tgt', None)
def run(self, *args, **kwargs):
'''
Run the given command synchronously
'''
# Late import
import salt.ext.six as six
timeout = kwargs.get('timeout', self.default_timeout)
if 'fail_hard' in kwargs:
# Explicit fail_hard passed
fail_hard = kwargs.pop('fail_hard')
else:
# Get the value of the _salt_fail_hard fixture
try:
fail_hard = self.request.getfixturevalue('_salt_fail_hard')
except AttributeError:
fail_hard = self.request.getfuncargvalue('_salt_fail_hard')
if fail_hard is True:
fail_method = pytest.fail
else:
fail_method = pytest.xfail
log.info('The fail hard setting for %s is: %s', self.cli_script_name, fail_hard)
minion_tgt = self.get_minion_tgt(**kwargs)
timeout_expire = time.time() + kwargs.pop('timeout', self.default_timeout)
environ = self.environ.copy()
environ['PYTEST_LOG_PREFIX'] = '[{}] '.format(self.log_prefix)
environ['PYTHONUNBUFFERED'] = '1'
proc_args = [
self.get_script_path(self.cli_script_name)
] + self.get_base_script_args() + self.get_script_args()
if sys.platform.startswith('win'):
# Windows needs the python executable to come first
proc_args.insert(0, sys.executable)
if minion_tgt is not None:
proc_args.append(minion_tgt)
proc_args.extend(list(args))
for key in kwargs:
proc_args.append('{}={}'.format(key, kwargs[key]))
log.info('[%s][%s] Running \'%s\' in CWD: %s ...',
self.log_prefix, self.cli_display_name, ' '.join(proc_args), self.cwd)
terminal = self.init_terminal(proc_args,
cwd=self.cwd,
env=environ,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# Consume the output
stdout = six.b('')
stderr = six.b('')
try:
while True:
# We're not actually interested in processing the output, just consume it
if terminal.stdout is not None:
try:
out = terminal.recv(4096)
except IOError:
out = six.b('')
if out:
stdout += out
if terminal.stderr is not None:
try:
err = terminal.recv_err(4096)
except IOError:
err = ''
if err:
stderr += err
if out is None and err is None:
break
if timeout_expire < time.time():
self.terminate()
fail_method(
'[{}][{}] Failed to run: args: {!r}; kwargs: {!r}; Error: {}'.format(
self.log_prefix,
self.cli_display_name,
args,
kwargs,
'[{}][{}] Timed out after {} seconds!'.format(self.log_prefix,
self.cli_display_name,
timeout)
)
)
except (SystemExit, KeyboardInterrupt):
pass
finally:
self.terminate()
if six.PY3:
# pylint: disable=undefined-variable
stdout = stdout.decode(__salt_system_encoding__)
stderr = stderr.decode(__salt_system_encoding__)
# pylint: enable=undefined-variable
exitcode = terminal.returncode
stdout, stderr, json_out = self.process_output(minion_tgt, stdout, stderr, cli_cmd=proc_args)
return ShellResult(exitcode, stdout, stderr, json_out)
def process_output(self, tgt, stdout, stderr, cli_cmd=None):
if stdout:
try:
json_out = json.loads(stdout)
except ValueError:
log.debug('[%s][%s] Failed to load JSON from the following output:\n%r',
self.log_prefix,
self.cli_display_name,
stdout)
json_out = None
else:
json_out = None
return stdout, stderr, json_out
class SaltRunEventListener(SaltCliScriptBase):
'''
    Class which runs 'salt-run state.event *' to match against a provided set of event tags
'''
EVENT_MATCH_RE = re.compile(r'^(?P<tag>[\w/-]+)(?:[\s]+)(?P<data>[\S\W]+)$')
def get_base_script_args(self):
return SaltScriptBase.get_base_script_args(self)
def get_script_args(self): # pylint: disable=no-self-use
'''
Returns any additional arguments to pass to the CLI script
'''
return ['state.event']
def run(self, tags=(), timeout=10): # pylint: disable=arguments-differ
'''
Run the given command synchronously
'''
log.info('%s checking for tags: %s', self.__class__.__name__, tags)
# Late import
import salt.ext.six as six
exitcode = 0
timeout_expire = time.time() + timeout
environ = self.environ.copy()
environ['PYTEST_LOG_PREFIX'] = '{}[EventListen]'.format(self.log_prefix)
environ['PYTHONUNBUFFERED'] = '1'
proc_args = [
self.get_script_path(self.cli_script_name)
] + self.get_base_script_args() + self.get_script_args()
if sys.platform.startswith('win'):
# Windows needs the python executable to come first
proc_args.insert(0, sys.executable)
log.info('[%s][%s] Running \'%s\' in CWD: %s...',
self.log_prefix, self.cli_display_name, ' '.join(proc_args), self.cwd)
to_match_events = set(tags)
matched_events = {}
terminal = self.init_terminal(proc_args,
cwd=self.cwd,
env=environ,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# Consume the output
stdout = six.b('')
stderr = six.b('')
process_output = six.b('')
events_processed = 0
try:
while True:
time.sleep(0.5)
if terminal.stdout is not None:
try:
out = terminal.recv(4096)
except IOError:
out = six.b('')
if out:
stdout += out
process_output += out
if terminal.stderr is not None:
try:
err = terminal.recv_err(4096)
except IOError:
err = ''
if err:
stderr += err
if out is None and err is None:
if to_match_events:
exitcode = 1
log.warning('[%s][%s] Premature exit?! Failed to find all of the required event tags. '
'Total events processed: %s',
self.log_prefix,
self.cli_display_name,
events_processed)
break
if process_output:
lines = process_output.split(b'}\n')
if lines[-1] != b'':
process_output = lines.pop()
else:
process_output = six.b('')
lines.pop()
for line in lines:
match = self.EVENT_MATCH_RE.match(line.decode(__salt_system_encoding__)) # pylint: disable=undefined-variable
if match:
events_processed += 1
tag, data = match.groups()
if tag in to_match_events:
matched_events[tag] = json.loads(data + '}')
to_match_events.remove(tag)
log.info('[%s][%s] Events processed so far: %d',
self.log_prefix,
self.cli_display_name,
events_processed)
if not to_match_events:
log.debug('[%s][%s] ALL EVENT TAGS FOUND!!!', self.log_prefix, self.cli_display_name)
break
if timeout_expire < time.time():
log.warning('[%s][%s] Failed to find all of the required event tags. Total events processed: %s',
self.log_prefix,
self.cli_display_name,
events_processed)
exitcode = 1
break
except (SystemExit, KeyboardInterrupt):
pass
finally:
self.terminate()
if six.PY3:
# pylint: disable=undefined-variable
stdout = stdout.decode(__salt_system_encoding__)
stderr = stderr.decode(__salt_system_encoding__)
# pylint: enable=undefined-variable
if to_match_events:
stop_sending_events_file = self.config.get('pytest_stop_sending_events_file')
if stop_sending_events_file and os.path.exists(stop_sending_events_file):
log.warning('Removing pytest_stop_sending_events_file: %s', stop_sending_events_file)
os.unlink(stop_sending_events_file)
json_out = {
'matched': matched_events,
'unmatched': to_match_events
}
return ShellResult(exitcode, stdout, stderr, json_out)
class EventListener:
DEFAULT_TIMEOUT = 60
def __init__(self, config_dir, log_prefix):
# Late import
self.config_dir = config_dir
self.log_prefix = '[{}][PyTestEventListener]'.format(log_prefix)
self._listener = None
def wait_for_events(self, check_events, timeout=None):
if timeout is None:
timeout = self.DEFAULT_TIMEOUT
log.info('%s waiting %s seconds for events: %s',
self.log_prefix,
timeout,
check_events)
matched_events = set()
events_to_match = set(check_events)
events_processed = 0
max_timeout = time.time() + timeout
while True:
if not events_to_match:
log.info('%s ALL EVENT TAGS FOUND!!!', self.log_prefix)
return matched_events
if time.time() > max_timeout:
log.warning(
'%s Failed to find all of the required event tags. '
'Total events processed: %s',
self.log_prefix,
events_processed
)
return matched_events
event = self.listener.get_event(full=True, auto_reconnect=True)
if event is None:
continue
tag = event['tag']
log.warning('Got event: %s', event)
if tag in events_to_match:
matched_events.add(tag)
events_to_match.remove(tag)
events_processed += 1
log.info('%s Events processed so far: %d',
self.log_prefix,
events_processed)
def terminate(self):
listener = self.listener
self._listener = None
listener.destroy()
@property
def listener(self):
if self._listener is None:
# Late import
import salt.config
import salt.utils.event
opts = salt.config.master_config(os.path.join(self.config_dir, 'master'))
self._listener = salt.utils.event.get_event('master', opts=opts, listen=True)
return self._listener
@pytest.mark.trylast
def pytest_configure(config):
pytest.helpers.utils.register(get_unused_localhost_port)
|
py | 1a368198d3dcc230a857bfd19e5f7755f3d5913f | from database import db
from flask_bcrypt import generate_password_hash, check_password_hash
class User(db.Model):
__tablename__ = 'users'
id = db.Column(db.INTEGER, primary_key=True, autoincrement=True)
email = db.Column(db.String(80), unique=True)
username = db.Column(db.String(45))
password = db.Column(db.String(100))
def __init__(self, email, username, password):
self.email = email
self.username = username
self.password = generate_password_hash(password.encode('utf-8')).decode('utf-8')
def check_password(self, password):
return check_password_hash(self.password, password)
class Room(db.Model):
__tablename__ = 'rooms'
id = db.Column(db.INTEGER, primary_key=True, autoincrement=True)
uploader = db.Column(db.String(45))
address = db.Column(db.String(50), unique=True)
contact = db.Column(db.String(50))
introduction = db.Column(db.String(200))
latitude = db.Column(db.FLOAT())
longitude = db.Column(db.FLOAT())
def __init__(self, uploader, address, contact, introduction, latitude, longitude):
self.uploader = uploader
self.address = address
self.contact = contact
self.introduction = introduction
self.latitude = latitude
self.longitude = longitude
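
# A minimal usage sketch (illustrative only, not part of the original module). It
# assumes the SQLAlchemy instance `db` from `database` is bound to a Flask app and
# that this runs inside an application context; the helper name is hypothetical.
def _create_user_sketch(email, username, password):
    """Hypothetical helper showing how the User model is intended to be used."""
    user = User(email, username, password)  # the password is bcrypt-hashed in __init__
    db.session.add(user)
    db.session.commit()
    return user.check_password(password)  # True when the plaintext matches the stored hash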
|
py | 1a3681c56d9aa3fe9b4e74ab27d6d55bdff0b8f5 | import numpy as np
from pyhack.py_runko_aux import *
def analytical_gyro_full(time,conf):
vxref = []
vyref = []
xref = []
yref = []
# Relativistic gyro frequency (cgs): omega_B = qB/(gamma*m*c)
    # Relativistic gyro frequency (runko): omega_B*dt = qh*Bh/(gamma*mh*ch) * m-/m+
# lfreq = (conf.qe*conf.binit)/(conf.gamma*abs(conf.qe)*conf.cfl**2)
lfreq = conf.cfreq
lfreq2 = conf.cfreq2
# Phase-lag, particles start at a=0 radians
a = 0
# Parameter shorthands
vy = conf.vy
vy2 = conf.vy2
cx = conf.NxMesh/2.
cy = conf.NyMesh/2.
for t in time:
vxref.append(np.array([-vy*np.sin(lfreq*t+a),-vy2*np.sin(lfreq2*t+a)]))
vyref.append(np.array([vy*np.cos(lfreq*t+a),vy2*np.cos(lfreq2*t+a)]))
xref.append(np.array([cx+vy/lfreq*np.cos(lfreq*t+a),
cx+vy2/lfreq*np.cos(lfreq2*t+a)]))
yref.append(np.array([cy+vy/lfreq*np.sin(lfreq*t+a),
cy+vy2/lfreq*np.sin(lfreq2*t+a)]))
xref = np.array(xref)
yref = np.array(yref)
vxref = np.array(vxref)
vyref = np.array(vyref)
return xref,yref,vxref,vyref
def analytical_gyro_single(time,conf):
# Relativistic gyro frequency (cgs): omega_B = qB/(gamma*m*c)
    # Relativistic gyro frequency (runko): omega_B*dt = qh*Bh/(gamma*mh*ch) * m-/m+
# lfreq = (conf.qe*conf.binit)/(conf.gamma*abs(conf.qe)*conf.cfl**2)
lfreq = conf.cfreq
lfreq2 = conf.cfreq2
# Phase-lag, particles start at a=0 radians
a = 0
# Parameter shorthands
vy = conf.vy
vy2 = conf.vy2
cx = conf.NxMesh/2.
cy = conf.NyMesh/2.
t = time
vxref = np.array([-vy*np.sin(lfreq*t+a),-vy2*np.sin(lfreq2*t+a)])
vyref = np.array([vy*np.cos(lfreq*t+a),vy2*np.cos(lfreq2*t+a)])
xref = np.array([cx+vy/lfreq*np.cos(lfreq*t+a),
cx+vy2/lfreq*np.cos(lfreq2*t+a)])
yref = np.array([cy+vy/lfreq*np.sin(lfreq*t+a),
cy+vy2/lfreq*np.sin(lfreq2*t+a)])
return xref,yref,vxref,vyref
|
py | 1a3681daf327fda5ece76b7f3ab63aade24ed248 | """Config flow to configure the Synology DSM integration."""
from __future__ import annotations
import logging
from typing import Any
from urllib.parse import urlparse
from synology_dsm import SynologyDSM
from synology_dsm.exceptions import (
SynologyDSMException,
SynologyDSMLogin2SAFailedException,
SynologyDSMLogin2SARequiredException,
SynologyDSMLoginInvalidException,
SynologyDSMRequestException,
)
import voluptuous as vol
from homeassistant import exceptions
from homeassistant.components import ssdp
from homeassistant.config_entries import ConfigEntry, ConfigFlow, OptionsFlow
from homeassistant.const import (
CONF_DISKS,
CONF_HOST,
CONF_MAC,
CONF_NAME,
CONF_PASSWORD,
CONF_PORT,
CONF_SCAN_INTERVAL,
CONF_SSL,
CONF_TIMEOUT,
CONF_USERNAME,
CONF_VERIFY_SSL,
)
from homeassistant.core import callback
from homeassistant.data_entry_flow import FlowResult
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.typing import DiscoveryInfoType
from .const import (
CONF_DEVICE_TOKEN,
CONF_VOLUMES,
DEFAULT_PORT,
DEFAULT_PORT_SSL,
DEFAULT_SCAN_INTERVAL,
DEFAULT_TIMEOUT,
DEFAULT_USE_SSL,
DEFAULT_VERIFY_SSL,
DOMAIN,
)
_LOGGER = logging.getLogger(__name__)
CONF_OTP_CODE = "otp_code"
def _discovery_schema_with_defaults(discovery_info: DiscoveryInfoType) -> vol.Schema:
return vol.Schema(_ordered_shared_schema(discovery_info))
def _user_schema_with_defaults(user_input: dict[str, Any]) -> vol.Schema:
user_schema = {
vol.Required(CONF_HOST, default=user_input.get(CONF_HOST, "")): str,
}
user_schema.update(_ordered_shared_schema(user_input))
return vol.Schema(user_schema)
def _ordered_shared_schema(
schema_input: dict[str, Any]
) -> dict[vol.Required | vol.Optional, Any]:
return {
vol.Required(CONF_USERNAME, default=schema_input.get(CONF_USERNAME, "")): str,
vol.Required(CONF_PASSWORD, default=schema_input.get(CONF_PASSWORD, "")): str,
vol.Optional(CONF_PORT, default=schema_input.get(CONF_PORT, "")): str,
vol.Optional(
CONF_SSL, default=schema_input.get(CONF_SSL, DEFAULT_USE_SSL)
): bool,
vol.Optional(
CONF_VERIFY_SSL,
default=schema_input.get(CONF_VERIFY_SSL, DEFAULT_VERIFY_SSL),
): bool,
}
class SynologyDSMFlowHandler(ConfigFlow, domain=DOMAIN):
"""Handle a config flow."""
VERSION = 1
@staticmethod
@callback
def async_get_options_flow(
config_entry: ConfigEntry,
) -> SynologyDSMOptionsFlowHandler:
"""Get the options flow for this handler."""
return SynologyDSMOptionsFlowHandler(config_entry)
def __init__(self) -> None:
"""Initialize the synology_dsm config flow."""
self.saved_user_input: dict[str, Any] = {}
self.discovered_conf: dict[str, Any] = {}
async def _show_setup_form(
self,
user_input: dict[str, Any] | None = None,
errors: dict[str, str] | None = None,
) -> FlowResult:
"""Show the setup form to the user."""
if not user_input:
user_input = {}
if self.discovered_conf:
user_input.update(self.discovered_conf)
step_id = "link"
data_schema = _discovery_schema_with_defaults(user_input)
else:
step_id = "user"
data_schema = _user_schema_with_defaults(user_input)
return self.async_show_form(
step_id=step_id,
data_schema=data_schema,
errors=errors or {},
description_placeholders=self.discovered_conf or {},
)
async def async_step_user(
self, user_input: dict[str, Any] | None = None
) -> FlowResult:
"""Handle a flow initiated by the user."""
errors = {}
if user_input is None:
return await self._show_setup_form(user_input, None)
if self.discovered_conf:
user_input.update(self.discovered_conf)
host = user_input[CONF_HOST]
port = user_input.get(CONF_PORT)
username = user_input[CONF_USERNAME]
password = user_input[CONF_PASSWORD]
use_ssl = user_input.get(CONF_SSL, DEFAULT_USE_SSL)
verify_ssl = user_input.get(CONF_VERIFY_SSL, DEFAULT_VERIFY_SSL)
otp_code = user_input.get(CONF_OTP_CODE)
if not port:
if use_ssl is True:
port = DEFAULT_PORT_SSL
else:
port = DEFAULT_PORT
api = SynologyDSM(
host, port, username, password, use_ssl, verify_ssl, timeout=30
)
try:
serial = await self.hass.async_add_executor_job(
_login_and_fetch_syno_info, api, otp_code
)
except SynologyDSMLogin2SARequiredException:
return await self.async_step_2sa(user_input)
except SynologyDSMLogin2SAFailedException:
errors[CONF_OTP_CODE] = "otp_failed"
user_input[CONF_OTP_CODE] = None
return await self.async_step_2sa(user_input, errors)
except SynologyDSMLoginInvalidException as ex:
_LOGGER.error(ex)
errors[CONF_USERNAME] = "invalid_auth"
except SynologyDSMRequestException as ex:
_LOGGER.error(ex)
errors[CONF_HOST] = "cannot_connect"
except SynologyDSMException as ex:
_LOGGER.error(ex)
errors["base"] = "unknown"
except InvalidData:
errors["base"] = "missing_data"
if errors:
return await self._show_setup_form(user_input, errors)
# unique_id should be serial for services purpose
await self.async_set_unique_id(serial, raise_on_progress=False)
# Check if already configured
self._abort_if_unique_id_configured()
config_data = {
CONF_HOST: host,
CONF_PORT: port,
CONF_SSL: use_ssl,
CONF_VERIFY_SSL: verify_ssl,
CONF_USERNAME: username,
CONF_PASSWORD: password,
CONF_MAC: api.network.macs,
}
if otp_code:
config_data[CONF_DEVICE_TOKEN] = api.device_token
if user_input.get(CONF_DISKS):
config_data[CONF_DISKS] = user_input[CONF_DISKS]
if user_input.get(CONF_VOLUMES):
config_data[CONF_VOLUMES] = user_input[CONF_VOLUMES]
return self.async_create_entry(title=host, data=config_data)
async def async_step_ssdp(self, discovery_info: DiscoveryInfoType) -> FlowResult:
"""Handle a discovered synology_dsm."""
parsed_url = urlparse(discovery_info[ssdp.ATTR_SSDP_LOCATION])
friendly_name = (
discovery_info[ssdp.ATTR_UPNP_FRIENDLY_NAME].split("(", 1)[0].strip()
)
mac = discovery_info[ssdp.ATTR_UPNP_SERIAL].upper()
        # Synology NAS can broadcast on multiple IP addresses, since they can be connected to multiple Ethernet interfaces.
# The serial of the NAS is actually its MAC address.
if self._mac_already_configured(mac):
return self.async_abort(reason="already_configured")
await self.async_set_unique_id(mac)
self._abort_if_unique_id_configured()
self.discovered_conf = {
CONF_NAME: friendly_name,
CONF_HOST: parsed_url.hostname,
}
self.context["title_placeholders"] = self.discovered_conf
return await self.async_step_user()
async def async_step_import(
self, user_input: dict[str, Any] | None = None
) -> FlowResult:
"""Import a config entry."""
return await self.async_step_user(user_input)
async def async_step_link(self, user_input: dict[str, Any]) -> FlowResult:
"""Link a config entry from discovery."""
return await self.async_step_user(user_input)
async def async_step_2sa(
self, user_input: dict[str, Any], errors: dict[str, str] | None = None
) -> FlowResult:
"""Enter 2SA code to anthenticate."""
if not self.saved_user_input:
self.saved_user_input = user_input
if not user_input.get(CONF_OTP_CODE):
return self.async_show_form(
step_id="2sa",
data_schema=vol.Schema({vol.Required(CONF_OTP_CODE): str}),
errors=errors or {},
)
user_input = {**self.saved_user_input, **user_input}
self.saved_user_input = {}
return await self.async_step_user(user_input)
def _mac_already_configured(self, mac: str) -> bool:
"""See if we already have configured a NAS with this MAC address."""
existing_macs = [
mac.replace("-", "")
for entry in self._async_current_entries()
for mac in entry.data.get(CONF_MAC, [])
]
return mac in existing_macs
class SynologyDSMOptionsFlowHandler(OptionsFlow):
"""Handle a option flow."""
def __init__(self, config_entry: ConfigEntry) -> None:
"""Initialize options flow."""
self.config_entry = config_entry
async def async_step_init(
self, user_input: dict[str, Any] | None = None
) -> FlowResult:
"""Handle options flow."""
if user_input is not None:
return self.async_create_entry(title="", data=user_input)
data_schema = vol.Schema(
{
vol.Optional(
CONF_SCAN_INTERVAL,
default=self.config_entry.options.get(
CONF_SCAN_INTERVAL, DEFAULT_SCAN_INTERVAL
),
): cv.positive_int,
vol.Optional(
CONF_TIMEOUT,
default=self.config_entry.options.get(
CONF_TIMEOUT, DEFAULT_TIMEOUT
),
): cv.positive_int,
}
)
return self.async_show_form(step_id="init", data_schema=data_schema)
def _login_and_fetch_syno_info(api: SynologyDSM, otp_code: str) -> str:
"""Login to the NAS and fetch basic data."""
# These do i/o
api.login(otp_code)
api.utilisation.update()
api.storage.update()
api.network.update()
if (
not api.information.serial
or api.utilisation.cpu_user_load is None
or not api.storage.volumes_ids
or not api.network.macs
):
raise InvalidData
return api.information.serial # type: ignore[no-any-return]
class InvalidData(exceptions.HomeAssistantError):
"""Error to indicate we get invalid data from the nas."""
|
py | 1a36824e6880984bd3f9ee7b9dad507638e3215c | import pandas as pd
import sklearn.model_selection as ms
class CrossValidation:
def __init__(self, df, shuffle,random_state=None):
self.df = df
self.random_state = random_state
self.shuffle = shuffle
if shuffle is True:
self.df = df.sample(frac=1,
random_state=self.random_state).reset_index(drop=True)
def hold_out_split(self,percent,stratify=None):
if stratify is not None:
y = self.df[stratify]
train,val = ms.train_test_split(self.df, test_size=percent/100,
stratify=y, random_state=self.random_state)
return train,val
size = len(self.df) - int(len(self.df)*(percent/100))
train = self.df.iloc[:size,:]
val = self.df.iloc[size:,:]
return train,val
def kfold_split(self, splits, stratify=None):
if stratify is not None:
            kf = ms.StratifiedKFold(n_splits=splits, shuffle=self.shuffle,
                                    random_state=self.random_state)
y = self.df[stratify]
for train, val in kf.split(X=self.df,y=y):
t = self.df.iloc[train,:]
v = self.df.iloc[val, :]
yield t,v
else:
kf = ms.KFold(n_splits=splits, shuffle=self.shuffle,
random_state=self.random_state)
for train, val in kf.split(X=self.df):
t = self.df.iloc[train,:]
v = self.df.iloc[val, :]
yield t,v |
py | 1a36826aa4880d3680d4fbb628af3f21eb4cf870 | # -*- coding: utf-8 -*-
"""
Image Augmentation: Make it rain, make it snow. How to modify photos to train self-driving cars
by Ujjwal Saxena
https://medium.freecodecamp.org/image-augmentation-make-it-rain-make-it-snow-how-to-modify-a-photo-with-machine-learning-163c0cb3843f
"""
import numpy as np
import cv2
#
# Sunny and Shady
#
def add_brightness(img):
"""
The brightness of an image can be changed by changing the pixel values of the
“Lightness” channel [1] of the image in HLS color space. Converting the image
back to RGB gives the same image with enhanced or suppressed lighting.
"""
# Convert image to HLS.
img_HLS = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
img_HLS = np.array(img_HLS, dtype=np.float64)
# Generate a random value in [0.5, 1.5].
random_brightness_coefficient = np.random.uniform() + 0.5
# Scale pixel values up or down for channel 1 (Lightness)
img_HLS[:, :, 1] = img_HLS[:, :, 1] * random_brightness_coefficient
# Make sure the color value does not exceed 255.
img_HLS[:, :, 1][img_HLS[:, :, 1] > 255] = 255
# Convert image back to RGB.
img_HLS = np.array(img_HLS, dtype=np.uint8)
img_RGB = cv2.cvtColor(img_HLS, cv2.COLOR_HLS2RGB)
return img_RGB
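# Illustrative usage sketch (added for clarity; "example.jpg" is a hypothetical path).
# OpenCV loads images as BGR, so convert to RGB before calling the helpers in this module.
def _demo_add_brightness(path="example.jpg"):
    img_bgr = cv2.imread(path)  # returns None if the path does not exist
    img_rgb = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB)
    return add_brightness(img_rgb)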
#
# Shadows
#
def add_shadow(img, nshadows=1):
# Convert image to HLS.
img_HLS = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
# Add shadows to an initially empty mask image.
mask = np.zeros_like(img)
# Generate a list of shadow polygons.
shadow_list = generate_shadow_coordinates(img.shape, nshadows)
# Add all shadow polygons to the empty mask; single 255 denotes only red channel.
for shadow in shadow_list:
cv2.fillPoly(mask, shadow, 255)
# Use the mask to adjust pixels in the original image.
# If red channel is hot, the image "Lightness" channel's brightness is lowered.
img_HLS[:, :, 1][mask[:, :, 0] == 255] = img_HLS[:, :, 1][mask[:, :, 0] == 255] * 0.5
# Convert image back to RGB.
img_HLS = np.array(img_HLS, dtype=np.uint8)
img_RGB = cv2.cvtColor(img_HLS, cv2.COLOR_HLS2RGB)
return img_RGB
def generate_shadow_coordinates(imshape, nshadows=1):
shadow_list = []
for _ in range(nshadows):
shadow = []
# Dimensionality of the shadow polygon.
for _ in range(np.random.randint(3, 15)):
shadow.append((imshape[1] * np.random.uniform(), imshape[0] // 3 + imshape[0] * np.random.uniform()))
# Add vertices for a single shadow polygon.
shadow = np.array([shadow], dtype=np.int32)
shadow_list.append(shadow)
# List of shadow vertices.
return shadow_list
#
# Snow
#
def add_snow(img, snow_brightness=2.5, snow_point=140):
"""
Brighten the darkest parts of the image.
Increase 'snow_point' for more snow.
"""
# Convert image to HLS.
img_HLS = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
img_HLS = np.array(img_HLS, dtype=np.float64)
# Scale pixel values up for channel 1 (Lightness)
img_HLS[:, :, 1][img_HLS[:, :, 1] < snow_point] *= snow_brightness
# Make sure the color value does not exceed 255.
img_HLS[:, :, 1][img_HLS[:, :, 1] > 255] = 255
# Convert image back to RGB.
img_HLS = np.array(img_HLS, dtype=np.uint8)
img_RGB = cv2.cvtColor(img_HLS, cv2.COLOR_HLS2RGB)
return img_RGB
#
# Rain
#
def add_rain(img):
# Generate rain drops as lines.
slant_extreme = 10
slant = np.random.randint(-slant_extreme, slant_extreme)
drop_length = 10
drop_width = 2
drop_color = (200, 200, 200) # a shade of gray
rain_drops = generate_random_lines(img.shape, slant, drop_length)
# Add rain drops to the image.
for drop in rain_drops:
cv2.line(img, (drop[0], drop[1]),
(drop[0] + slant, drop[1] + drop_length),
drop_color, drop_width)
img = cv2.blur(img, (7, 7)) # Rainy views are blurry.
# Darken the image a bit - rainy days are usually shady.
brightness_coefficient = 0.7
# Convert image to HLS.
img_HLS = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
# Scale pixel values down for channel 1 (Lightness)
img_HLS[:, :, 1] = img_HLS[:, :, 1] * brightness_coefficient
# Convert image back to RGB.
img_HLS = np.array(img_HLS, dtype=np.uint8)
img_RGB = cv2.cvtColor(img_HLS, cv2.COLOR_HLS2RGB)
return img_RGB
def generate_random_lines(imshape, slant, drop_length, ndrops=500):
""" For heavy rain, try increasing 'ndrops'. """
drops = []
for _ in range(ndrops):
x = np.random.randint(slant, imshape[1]) if slant < 0 else \
np.random.randint(0, imshape[1] - slant)
y = np.random.randint(0, imshape[0] - drop_length)
drops.append((x, y))
return drops
#
# Fog
#
def add_fog(img, hw=100):
"""
Fog intensity is an important parameter to train a car for how much throttle it should give.
For coding such a function, you can take random patches from all over the image, and increase
the image’s lightness within those patches. With a simple blur, this gives a nice hazy effect.
"""
# Convert image to HLS.
img_HLS = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
img_HLS[:, :, 1] = img_HLS[:, :, 1] * 0.8
haze_list = generate_random_blur_coordinates(img.shape, hw)
for haze_points in haze_list:
# # Make sure the color value does not exceed 255.
# img_HLS[:, :, 1][img_HLS[:, :, 1] > 255] = 255
img_HLS = add_blur(img_HLS, haze_points[0], haze_points[1], hw)
# Convert image back to RGB.
img_HLS = np.array(img_HLS, dtype=np.uint8)
img_RGB = cv2.cvtColor(img_HLS, cv2.COLOR_HLS2RGB)
return img_RGB
def generate_random_blur_coordinates(imshape, hw):
blur_points = []
midx = imshape[1] // 2 - hw - 100
midy = imshape[0] // 2 - hw - 100
# radially generating coordinates
index = 1
while (midx > -100 or midy > -100):
for _ in range(250 * index):
x = np.random.randint(midx, imshape[1] - midx - hw)
y = np.random.randint(midy, imshape[0] - midy - hw)
blur_points.append((x, y))
midx -= 250 * imshape[1] // sum(imshape)
midy -= 250 * imshape[0] // sum(imshape)
index += 1
return blur_points
# def add_blur(img, x, y, hw):
# # Increase 'L' channel by 1.
# img[y:y + hw, x:x + hw, 1] = img[y:y + hw, x:x + hw, 1] + 1
# # Make sure the adjusted value does not exceed 255.
# img[:, :, 1][img[:, :, 1] > 255] = 255
# img = np.array(img, dtype=np.uint8)
# # Blur
# img[y:y + hw, x:x + hw, 1] = cv2.blur(img[y:y + hw, x:x + hw, 1], (5, 5))
# return img
def add_blur(img, x, y, hw):
# Create a grid of wrapped indices since numpy arrays do not handle
# slicing with negative values and wrap-around without some help.
wrappedRowIndices = np.arange(y, y + hw) % img.shape[0]
wrappedColIndices = np.arange(x, x + hw) % img.shape[1]
index_grid = np.ix_(wrappedRowIndices, wrappedColIndices, [1])
# Increase 'L' channel by 1.
img[index_grid] = img[index_grid] + 1
# Make sure the adjusted value does not exceed 255.
img[:, :, 1][img[:, :, 1] > 255] = 255
img = np.array(img, dtype=np.uint8)
# Blur
blur_patch = cv2.blur(img[index_grid], (5, 5)).reshape(hw, hw, 1)
img[index_grid] = blur_patch
return img
|
py | 1a36842b46c710d1cd4ef4a1ba90b5b7f39d46f7 | # This file was automatically generated by SWIG (http://www.swig.org).
# Version 4.0.2
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info as _swig_python_version_info
if _swig_python_version_info < (2, 7, 0):
raise RuntimeError("Python 2.7 or later required")
# Import the low-level C/C++ module
if __package__ or "." in __name__:
from . import _communication
else:
import _communication
try:
import builtins as __builtin__
except ImportError:
import __builtin__
_swig_new_instance_method = _communication.SWIG_PyInstanceMethod_New
_swig_new_static_method = _communication.SWIG_PyStaticMethod_New
def _swig_repr(self):
try:
strthis = "proxy of " + self.this.__repr__()
except __builtin__.Exception:
strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
def _swig_setattr_nondynamic_instance_variable(set):
def set_instance_attr(self, name, value):
if name == "thisown":
self.this.own(value)
elif name == "this":
set(self, name, value)
elif hasattr(self, name) and isinstance(getattr(type(self), name), property):
set(self, name, value)
else:
raise AttributeError("You cannot add instance attributes to %s" % self)
return set_instance_attr
def _swig_setattr_nondynamic_class_variable(set):
def set_class_attr(cls, name, value):
if hasattr(cls, name) and not isinstance(getattr(cls, name), property):
set(cls, name, value)
else:
raise AttributeError("You cannot add class attributes to %s" % cls)
return set_class_attr
def _swig_add_metaclass(metaclass):
"""Class decorator for adding a metaclass to a SWIG wrapped class - a slimmed down version of six.add_metaclass"""
def wrapper(cls):
return metaclass(cls.__name__, cls.__bases__, cls.__dict__.copy())
return wrapper
class _SwigNonDynamicMeta(type):
"""Meta class to enforce nondynamic attributes (no new attributes) for a class"""
__setattr__ = _swig_setattr_nondynamic_class_variable(type.__setattr__)
MFEM_VERSION = _communication.MFEM_VERSION
MFEM_VERSION_STRING = _communication.MFEM_VERSION_STRING
MFEM_VERSION_TYPE = _communication.MFEM_VERSION_TYPE
MFEM_VERSION_TYPE_RELEASE = _communication.MFEM_VERSION_TYPE_RELEASE
MFEM_VERSION_TYPE_DEVELOPMENT = _communication.MFEM_VERSION_TYPE_DEVELOPMENT
MFEM_VERSION_MAJOR = _communication.MFEM_VERSION_MAJOR
MFEM_VERSION_MINOR = _communication.MFEM_VERSION_MINOR
MFEM_VERSION_PATCH = _communication.MFEM_VERSION_PATCH
MFEM_HYPRE_VERSION = _communication.MFEM_HYPRE_VERSION
import mfem._par.array
import mfem._par.mem_manager
import mfem._par.table
import mfem._par.sets
class MPI_Session(object):
r"""Proxy of C++ mfem::MPI_Session class."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def __init__(self, *args):
r"""
__init__(MPI_Session self) -> MPI_Session
__init__(MPI_Session self, int & argc, char **& argv) -> MPI_Session
"""
_communication.MPI_Session_swiginit(self, _communication.new_MPI_Session(*args))
__swig_destroy__ = _communication.delete_MPI_Session
def WorldRank(self):
r"""WorldRank(MPI_Session self) -> int"""
return _communication.MPI_Session_WorldRank(self)
WorldRank = _swig_new_instance_method(_communication.MPI_Session_WorldRank)
def WorldSize(self):
r"""WorldSize(MPI_Session self) -> int"""
return _communication.MPI_Session_WorldSize(self)
WorldSize = _swig_new_instance_method(_communication.MPI_Session_WorldSize)
def Root(self):
r"""Root(MPI_Session self) -> bool"""
return _communication.MPI_Session_Root(self)
Root = _swig_new_instance_method(_communication.MPI_Session_Root)
# Register MPI_Session in _communication:
_communication.MPI_Session_swigregister(MPI_Session)
class GroupTopology(object):
r"""Proxy of C++ mfem::GroupTopology class."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def __init__(self, *args):
r"""
__init__(GroupTopology self) -> GroupTopology
__init__(GroupTopology self, MPI_Comm comm) -> GroupTopology
__init__(GroupTopology self, GroupTopology gt) -> GroupTopology
"""
_communication.GroupTopology_swiginit(self, _communication.new_GroupTopology(*args))
def SetComm(self, comm):
r"""SetComm(GroupTopology self, MPI_Comm comm)"""
return _communication.GroupTopology_SetComm(self, comm)
SetComm = _swig_new_instance_method(_communication.GroupTopology_SetComm)
def GetComm(self):
r"""GetComm(GroupTopology self) -> MPI_Comm"""
return _communication.GroupTopology_GetComm(self)
GetComm = _swig_new_instance_method(_communication.GroupTopology_GetComm)
def MyRank(self):
r"""MyRank(GroupTopology self) -> int"""
return _communication.GroupTopology_MyRank(self)
MyRank = _swig_new_instance_method(_communication.GroupTopology_MyRank)
def NRanks(self):
r"""NRanks(GroupTopology self) -> int"""
return _communication.GroupTopology_NRanks(self)
NRanks = _swig_new_instance_method(_communication.GroupTopology_NRanks)
def Create(self, groups, mpitag):
r"""Create(GroupTopology self, ListOfIntegerSets groups, int mpitag)"""
return _communication.GroupTopology_Create(self, groups, mpitag)
Create = _swig_new_instance_method(_communication.GroupTopology_Create)
def NGroups(self):
r"""NGroups(GroupTopology self) -> int"""
return _communication.GroupTopology_NGroups(self)
NGroups = _swig_new_instance_method(_communication.GroupTopology_NGroups)
def GetNumNeighbors(self):
r"""GetNumNeighbors(GroupTopology self) -> int"""
return _communication.GroupTopology_GetNumNeighbors(self)
GetNumNeighbors = _swig_new_instance_method(_communication.GroupTopology_GetNumNeighbors)
def GetNeighborRank(self, i):
r"""GetNeighborRank(GroupTopology self, int i) -> int"""
return _communication.GroupTopology_GetNeighborRank(self, i)
GetNeighborRank = _swig_new_instance_method(_communication.GroupTopology_GetNeighborRank)
def IAmMaster(self, g):
r"""IAmMaster(GroupTopology self, int g) -> bool"""
return _communication.GroupTopology_IAmMaster(self, g)
IAmMaster = _swig_new_instance_method(_communication.GroupTopology_IAmMaster)
def GetGroupMaster(self, g):
r"""GetGroupMaster(GroupTopology self, int g) -> int"""
return _communication.GroupTopology_GetGroupMaster(self, g)
GetGroupMaster = _swig_new_instance_method(_communication.GroupTopology_GetGroupMaster)
def GetGroupMasterRank(self, g):
r"""GetGroupMasterRank(GroupTopology self, int g) -> int"""
return _communication.GroupTopology_GetGroupMasterRank(self, g)
GetGroupMasterRank = _swig_new_instance_method(_communication.GroupTopology_GetGroupMasterRank)
def GetGroupMasterGroup(self, g):
r"""GetGroupMasterGroup(GroupTopology self, int g) -> int"""
return _communication.GroupTopology_GetGroupMasterGroup(self, g)
GetGroupMasterGroup = _swig_new_instance_method(_communication.GroupTopology_GetGroupMasterGroup)
def GetGroupSize(self, g):
r"""GetGroupSize(GroupTopology self, int g) -> int"""
return _communication.GroupTopology_GetGroupSize(self, g)
GetGroupSize = _swig_new_instance_method(_communication.GroupTopology_GetGroupSize)
def GetGroup(self, g):
r"""GetGroup(GroupTopology self, int g) -> int const *"""
return _communication.GroupTopology_GetGroup(self, g)
GetGroup = _swig_new_instance_method(_communication.GroupTopology_GetGroup)
def Load(self, _in):
r"""Load(GroupTopology self, std::istream & _in)"""
return _communication.GroupTopology_Load(self, _in)
Load = _swig_new_instance_method(_communication.GroupTopology_Load)
def Copy(self, copy):
r"""Copy(GroupTopology self, GroupTopology copy)"""
return _communication.GroupTopology_Copy(self, copy)
Copy = _swig_new_instance_method(_communication.GroupTopology_Copy)
__swig_destroy__ = _communication.delete_GroupTopology
def Save(self, *args):
r"""
Save(GroupTopology self, std::ostream & out)
Save(GroupTopology self, char const * file, int precision=8)
"""
return _communication.GroupTopology_Save(self, *args)
Save = _swig_new_instance_method(_communication.GroupTopology_Save)
def SaveGZ(self, file, precision=8):
r"""SaveGZ(GroupTopology self, char const * file, int precision=8)"""
return _communication.GroupTopology_SaveGZ(self, file, precision)
SaveGZ = _swig_new_instance_method(_communication.GroupTopology_SaveGZ)
# Register GroupTopology in _communication:
_communication.GroupTopology_swigregister(GroupTopology)
class GroupCommunicator(object):
r"""Proxy of C++ mfem::GroupCommunicator class."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
byGroup = _communication.GroupCommunicator_byGroup
byNeighbor = _communication.GroupCommunicator_byNeighbor
def __init__(self, *args, **kwargs):
r"""__init__(GroupCommunicator self, GroupTopology gt, mfem::GroupCommunicator::Mode m=byNeighbor) -> GroupCommunicator"""
_communication.GroupCommunicator_swiginit(self, _communication.new_GroupCommunicator(*args, **kwargs))
def Create(self, ldof_group):
r"""Create(GroupCommunicator self, intArray ldof_group)"""
return _communication.GroupCommunicator_Create(self, ldof_group)
Create = _swig_new_instance_method(_communication.GroupCommunicator_Create)
def GroupLDofTable(self, *args):
r"""
GroupLDofTable(GroupCommunicator self) -> Table
GroupLDofTable(GroupCommunicator self) -> Table
"""
return _communication.GroupCommunicator_GroupLDofTable(self, *args)
GroupLDofTable = _swig_new_instance_method(_communication.GroupCommunicator_GroupLDofTable)
def Finalize(self):
r"""Finalize(GroupCommunicator self)"""
return _communication.GroupCommunicator_Finalize(self)
Finalize = _swig_new_instance_method(_communication.GroupCommunicator_Finalize)
def SetLTDofTable(self, ldof_ltdof):
r"""SetLTDofTable(GroupCommunicator self, intArray ldof_ltdof)"""
return _communication.GroupCommunicator_SetLTDofTable(self, ldof_ltdof)
SetLTDofTable = _swig_new_instance_method(_communication.GroupCommunicator_SetLTDofTable)
def GetGroupTopology(self, *args):
r"""
GetGroupTopology(GroupCommunicator self) -> GroupTopology
GetGroupTopology(GroupCommunicator self) -> GroupTopology
"""
return _communication.GroupCommunicator_GetGroupTopology(self, *args)
GetGroupTopology = _swig_new_instance_method(_communication.GroupCommunicator_GetGroupTopology)
def GetNeighborLTDofTable(self, nbr_ltdof):
r"""GetNeighborLTDofTable(GroupCommunicator self, Table nbr_ltdof)"""
return _communication.GroupCommunicator_GetNeighborLTDofTable(self, nbr_ltdof)
GetNeighborLTDofTable = _swig_new_instance_method(_communication.GroupCommunicator_GetNeighborLTDofTable)
def GetNeighborLDofTable(self, nbr_ldof):
r"""GetNeighborLDofTable(GroupCommunicator self, Table nbr_ldof)"""
return _communication.GroupCommunicator_GetNeighborLDofTable(self, nbr_ldof)
GetNeighborLDofTable = _swig_new_instance_method(_communication.GroupCommunicator_GetNeighborLDofTable)
__swig_destroy__ = _communication.delete_GroupCommunicator
def PrintInfo(self, *args):
r"""
PrintInfo(GroupCommunicator self, std::ostream & out=mfem::out)
PrintInfo(GroupCommunicator self, char const * file, int precision=8)
"""
return _communication.GroupCommunicator_PrintInfo(self, *args)
PrintInfo = _swig_new_instance_method(_communication.GroupCommunicator_PrintInfo)
def PrintInfoGZ(self, file, precision=8):
r"""PrintInfoGZ(GroupCommunicator self, char const * file, int precision=8)"""
return _communication.GroupCommunicator_PrintInfoGZ(self, file, precision)
PrintInfoGZ = _swig_new_instance_method(_communication.GroupCommunicator_PrintInfoGZ)
# Register GroupCommunicator in _communication:
_communication.GroupCommunicator_swigregister(GroupCommunicator)
def ReorderRanksZCurve(comm):
r"""ReorderRanksZCurve(MPI_Comm comm) -> MPI_Comm"""
return _communication.ReorderRanksZCurve(comm)
ReorderRanksZCurve = _communication.ReorderRanksZCurve
|
py | 1a3684ec6ce7a3c6365ff0551e92a6a33344dbd0 | import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import misc
import argparse
import os
import model
def train(args):
if args.dataset.lower() == 'celeba':
train_loader, _, _ = misc.load_celebA(args.batch_s, args.img_s)
img_c = 3
elif args.dataset.lower() == 'lsun':
train_loader, val_loader, _ = misc.load_LSUN(args.batch_s, args.img_s)
img_c = 3
elif args.dataset.lower() == 'imagenet':
train_loader, val_loader, _ = misc.load_imagenet(args.batch_s, args.img_s)
img_c = 3
elif args.dataset.lower() == 'mnist':
train_loader, val_loader, _ = misc.load_mnist(args.batch_s, args.img_s)
img_c = 1
else:
raise NotImplementedError
fm_gen = [args.base_fm_n*pow(2,args.layer_n-1-l) for l in range(args.layer_n)]
fm_disc = [args.base_fm_n*pow(2,l) for l in range(args.layer_n)]
gen = model.Generator(args.z_dim, img_c, fm_gen).cuda()
gen.apply(model.init_weights)
disc = model.Discriminator(img_c, fm_disc).cuda()
disc.apply(model.init_weights)
criterion = nn.BCELoss()
label_real = 1
label_fake = 0
optim_gen = optim.Adam(gen.parameters(), lr=args.learning_rate, betas=(args.beta1, 0.999))
optim_disc = optim.Adam(disc.parameters(), lr=args.learning_rate, betas=(args.beta1, 0.999))
if args.resume:
filename = args.ckpt_dir + args.resume
if os.path.isfile(filename):
print("==> loading checkpoint '{}'".format(filename))
checkpoint = torch.load(filename)
start_epoch = checkpoint['epoch'] + 1
gen.load_state_dict(checkpoint['state_dict_gen'])
disc.load_state_dict(checkpoint['state_dict_disc'])
optim_gen.load_state_dict(checkpoint['optimizer_gen'])
optim_disc.load_state_dict(checkpoint['optimizer_disc'])
print("==> loaded checkpoint '{}' (epoch {})".format(filename, checkpoint['epoch']))
else:
print("==> no checkpoint found at '{}'".format(filename))
else:
start_epoch = 0
if not os.path.isdir(args.img_dir):
os.system('mkdir ' + args.img_dir)
if not os.path.isdir(args.ckpt_dir):
os.system('mkdir ' + args.ckpt_dir)
#########################################
#### Train
## 1. Update Discriminator: maximize log(D(x)) + log(1-D(G(z)))
# 1-1. with real image x
# 1-2. with fake image G(z)
## 2. Update Generator: maximize log(D(G(z)))
for e in range(args.epochs):
epoch = start_epoch + e
loss_meter_gen = AverageMeter()
loss_meter_disc = AverageMeter()
out_meter_disc_f = AverageMeter()
out_meter_disc_r = AverageMeter()
out_meter_disc_g = AverageMeter()
for i, data in enumerate(train_loader):
img_real, _ = data
img_real = img_real.cuda()
batch_s = img_real.size(0)
optim_disc.zero_grad()
# 1-1. with real image x
            label_r = torch.full((batch_s, 1), float(label_real)).cuda()
out_disc_r = disc(img_real).view(batch_s, -1)
error_disc_r = criterion(out_disc_r, label_r)
error_disc_r.backward()
# 1-2. with fake image G(z)
img_fake = gen(torch.randn(batch_s, args.z_dim, 1, 1).cuda())
            label_f = torch.full((batch_s, 1), float(label_fake)).cuda()
out_disc_f = disc(img_fake.detach()).view(batch_s, -1)
error_disc_f = criterion(out_disc_f, label_f)
error_disc = error_disc_r + error_disc_f
error_disc_f.backward()
optim_disc.step()
# 2. Update Generator
for g_iter in range(3):
img_fake = gen(torch.randn(batch_s, args.z_dim, 1, 1).cuda())
out_disc_g = disc(img_fake).view(batch_s, -1)
error_gen = criterion(out_disc_g, label_r)
optim_gen.zero_grad()
error_gen.backward()
optim_gen.step()
loss_meter_gen.update(error_gen.item(), batch_s)
loss_meter_disc.update(error_disc.item(), batch_s)
out_meter_disc_f.update(torch.sum(out_disc_f).item(), batch_s)
out_meter_disc_r.update(torch.sum(out_disc_r).item(), batch_s)
out_meter_disc_g.update(torch.sum(out_disc_g).item(), batch_s)
if i % args.log_term == 0:
print('epoch: %d, batch: %d \t Loss(D/G): %.4f / %.4f \t D(R/F/G): %.4f / %.4f / %.4f'
% (epoch, i, loss_meter_disc.avg, loss_meter_gen.avg,
out_meter_disc_r.avg/batch_s, out_meter_disc_f.avg/batch_s, out_meter_disc_g.avg/batch_s))
fd = open('save_log.txt', 'a')
fd.write('epoch: %d, batch: %d \t Loss(D/G): /%.4f / %.4f/ || D(R/F/G): /%.4f / %.4f / %.4f/ \n'
% (epoch, i, loss_meter_disc.avg, loss_meter_gen.avg,
out_meter_disc_r.avg, out_meter_disc_f.avg, out_meter_disc_g.avg))
fd.close()
misc.plot_samples_from_images(img_fake, batch_s, args.img_dir, 'img_e{}b{}.jpg'.format(epoch, i))
torch.save({
'epoch': epoch,
'state_dict_gen': gen.state_dict(),
'state_dict_disc': disc.state_dict(),
'optimizer_gen': optim_gen.state_dict(),
'optimizer_disc': optim_disc.state_dict()
},
args.ckpt_dir + 'checkpoint_e{}b{}.pt'.format(epoch, i))
loss_meter_gen = AverageMeter()
loss_meter_disc = AverageMeter()
out_meter_disc_f = AverageMeter()
out_meter_disc_r = AverageMeter()
out_meter_disc_g = AverageMeter()
def test(args):
raise NotImplementedError
class AverageMeter(object):
'''
from https://github.com/pytorch/examples/blob/master/imagenet/main.py.
Computes and stores the average and current values
'''
def __init__(self):
self.reset()
def reset(self):
self.val = 0.0
self.avg = 0.0
self.sum = 0.0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
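# Illustrative note (added): after m = AverageMeter(); m.update(2.0, n=4); m.update(4.0),
# the meter holds m.sum == 12.0, m.count == 5 and m.avg == 2.4.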
def main(args):
if args.train:
train(args)
else:
test(args)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Pytorch implementation of DCGAN')
parser.add_argument('--base_fm_n', default=64, type=int, help='The number of base FM')
parser.add_argument('--learning_rate', '-lr', default=0.0002, type=float, help='Learning rate for Adam')
parser.add_argument('--beta1', default=0.5, type=float, help='Beta1 for Adam')
parser.add_argument('--epochs', default=1000, type=int, help='Total epoch')
parser.add_argument('--dataset', default='celeba', type=str, help='Dataset name: MNIST, CelebA, LSUN or imagenet')
parser.add_argument('--z_dim', default=100, type=int, help='Dimension of z')
    parser.add_argument('--resume', default='', type=str, help='Name of previous checkpoint file (default: None)')
parser.add_argument('--img_dir', default='/export/scratch/a/choi574/saved_model/gan_face/plots/', type=str, help='Directory to save test plots')
    parser.add_argument('--ckpt_dir', default='/export/scratch/a/choi574/saved_model/gan_face/', type=str, help='Name of previous checkpoint dir')
parser.add_argument('--batch_s', default=128, type=int, help='Size of batch')
parser.add_argument('--img_s', default=64, type=int, help='Size of Image')
parser.add_argument('--layer_n', default=4, type=int, help='The number of layers')
parser.add_argument('--train', default=True, type=misc.str2bool, help='Train or generate')
parser.add_argument('--log_term', default=10, type=int, help='log recording term (save every N batch)')
args = parser.parse_args()
main(args)
|
py | 1a36852c6b709ba42523d7f15b7cc842b41e8522 | motor_list = {
}
led_list = {
} |
py | 1a36862930e500d775685edd65dad61eae19c9d3 | """Read and write notebooks as regular .py files.
Authors:
* Brian Granger
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from .rwbase import NotebookReader, NotebookWriter
from .nbbase import new_code_cell, new_text_cell, new_worksheet, new_notebook
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
class PyReaderError(Exception):
pass
class PyReader(NotebookReader):
def reads(self, s, **kwargs):
return self.to_notebook(s,**kwargs)
def to_notebook(self, s, **kwargs):
lines = s.splitlines()
cells = []
cell_lines = []
state = u'codecell'
for line in lines:
if line.startswith(u'# <nbformat>'):
pass
elif line.startswith(u'# <codecell>'):
cell = self.new_cell(state, cell_lines)
if cell is not None:
cells.append(cell)
state = u'codecell'
cell_lines = []
elif line.startswith(u'# <htmlcell>'):
cell = self.new_cell(state, cell_lines)
if cell is not None:
cells.append(cell)
state = u'htmlcell'
cell_lines = []
elif line.startswith(u'# <markdowncell>'):
cell = self.new_cell(state, cell_lines)
if cell is not None:
cells.append(cell)
state = u'markdowncell'
cell_lines = []
else:
cell_lines.append(line)
if cell_lines and state == u'codecell':
cell = self.new_cell(state, cell_lines)
if cell is not None:
cells.append(cell)
ws = new_worksheet(cells=cells)
nb = new_notebook(worksheets=[ws])
return nb
def new_cell(self, state, lines):
if state == u'codecell':
input = u'\n'.join(lines)
input = input.strip(u'\n')
if input:
return new_code_cell(input=input)
elif state == u'htmlcell':
text = self._remove_comments(lines)
if text:
return new_text_cell(u'html',source=text)
elif state == u'markdowncell':
text = self._remove_comments(lines)
if text:
return new_text_cell(u'markdown',source=text)
def _remove_comments(self, lines):
new_lines = []
for line in lines:
if line.startswith(u'#'):
new_lines.append(line[2:])
else:
new_lines.append(line)
text = u'\n'.join(new_lines)
text = text.strip(u'\n')
return text
def split_lines_into_blocks(self, lines):
if len(lines) == 1:
yield lines[0]
            return
import ast
source = '\n'.join(lines)
code = ast.parse(source)
starts = [x.lineno-1 for x in code.body]
for i in range(len(starts)-1):
yield '\n'.join(lines[starts[i]:starts[i+1]]).strip('\n')
yield '\n'.join(lines[starts[-1]:]).strip('\n')
class PyWriter(NotebookWriter):
def writes(self, nb, **kwargs):
lines = []
lines.extend([u'# <nbformat>2</nbformat>',''])
for ws in nb.worksheets:
for cell in ws.cells:
if cell.cell_type == u'code':
input = cell.get(u'input')
if input is not None:
lines.extend([u'# <codecell>',u''])
lines.extend(input.splitlines())
lines.append(u'')
elif cell.cell_type == u'html':
input = cell.get(u'source')
if input is not None:
lines.extend([u'# <htmlcell>',u''])
lines.extend([u'# ' + line for line in input.splitlines()])
lines.append(u'')
elif cell.cell_type == u'markdown':
input = cell.get(u'source')
if input is not None:
lines.extend([u'# <markdowncell>',u''])
lines.extend([u'# ' + line for line in input.splitlines()])
lines.append(u'')
lines.append('')
        return u'\n'.join(lines)
_reader = PyReader()
_writer = PyWriter()
reads = _reader.reads
read = _reader.read
to_notebook = _reader.to_notebook
write = _writer.write
writes = _writer.writes
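# Illustrative round-trip sketch (added; not part of the original API). The marker
# lines mirror those emitted by PyWriter, so PyReader can parse them back.
def _demo_roundtrip():
    source = u'\n'.join([
        u'# <nbformat>2</nbformat>',
        u'',
        u'# <codecell>',
        u'',
        u'x = 1',
    ])
    nb = reads(source)
    return writes(nb)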
|
py | 1a36870b9dcf80a6a40cd86a89540d0ea0a58f92 | """This class provides an iterator. Under the covers it does multi-threaded
consumption of events, only providing information to the iterator when it's
been ordered correctly."""
import cloudpassage
class HaloEvents(object):
"""Instantiate with a donlib.ConfigHelper() object as an argument."""
def __init__(self, config):
self.halo_key = config.halo_api_key
self.halo_secret = config.halo_api_secret_key
self.halo_api_host = config.halo_api_host
self.halo_api_port = config.halo_api_port
self.ua = config.ua
self.start_timestamp = self.starting_event_time()
print("Event Collector: Starting timestamp: " + self.start_timestamp)
def __iter__(self):
"""This allows us to iterate through the events stream."""
session = self.build_halo_session()
streamer = cloudpassage.TimeSeries(session, self.start_timestamp,
"/v1/events", "events")
while True:
try:
for event in streamer:
yield event
except IndexError:
pass
def starting_event_time(self):
session = self.build_halo_session()
api = cloudpassage.HttpHelper(session)
url = "/v1/events?sort_by=created_at.desc&per_page=1"
resp = api.get(url)
return resp['events'][0]['created_at']
def build_halo_session(self):
"""This creates the halo session object for API interaction."""
session = cloudpassage.HaloSession(self.halo_key, self.halo_secret,
api_host=self.halo_api_host,
api_port=self.halo_api_port,
integration_string=self.ua)
return session
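# Illustrative usage sketch (added): `config` stands in for the donlib.ConfigHelper()
# object mentioned in the class docstring; only the attributes read in __init__ are needed.
def _demo_collect_events(config, max_events=10):
    collected = []
    for event in HaloEvents(config):
        collected.append(event)
        if len(collected) >= max_events:
            break
    return collected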
|
py | 1a3687d4817c1c12f850d0fb22bc9572ac258851 | import torch
import torchvision
import torchvision.transforms as transforms
import torch.nn as nn
import matplotlib.pyplot as plt
import numpy as np
import time
# Baseline: VGG-11 finetuning
def return_baseline():
net = torchvision.models.vgg11_bn(pretrained=True)
for param in net.parameters():
param.requires_grad = False
# Reshape to 5 classes...
num_in = net.classifier[6].in_features
net.classifier[6] = nn.Linear(num_in, 5)
return net
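# Illustrative usage sketch (added): only the new classifier head has requires_grad=True,
# so an optimizer for finetuning can be built over those parameters alone.
def _demo_build_optimizer(lr=0.001, momentum=0.9):
    net = return_baseline()
    trainable = [p for p in net.parameters() if p.requires_grad]
    return torch.optim.SGD(trainable, lr=lr, momentum=momentum)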
# Draw images
def show(images):
images = images * 255.0 # denormalize
np_images = images.numpy()
print(np_images.shape)
plt.imshow(np.transpose(np_images, (1, 2, 0)))
plt.show()
# Show random images
def visualize(loader, categories):
temp = iter(loader)
    images, labels = next(temp)
show(torchvision.utils.make_grid(images))
print(' '.join('%5s' % categories[labels[j]] for j in range(labels.size(0))))
# Load data
def init(batch_size):
transform_train = transforms.Compose(
[transforms.Resize((256, 256)),
transforms.RandomCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0, 0, 0), (255.0, 255.0, 255.0))]) # !!! Order matters
transform_dev = transforms.Compose(
[transforms.Resize((256, 256)),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize((0, 0, 0), (255.0, 255.0, 255.0))]) # !!! Order matters
train_set = torchvision.datasets.ImageFolder(root="../data/train", transform=transform_train)
train_loader = torch.utils.data.DataLoader(train_set, batch_size=batch_size,
shuffle=True, num_workers=4)
dev_set = torchvision.datasets.ImageFolder(root="../data/dev", transform=transform_dev)
dev_loader = torch.utils.data.DataLoader(dev_set, batch_size=batch_size,
shuffle=False, num_workers=4)
print(train_set.classes)
print(dev_set.classes)
categories = ('cbb', 'cbsd', 'cgm', 'cmd', 'healthy')
return train_loader, dev_loader, categories
# Train data
def train(num_epochs, loader, device, optimizer, criterion, net):
for epoch in range(num_epochs):
running_loss = 0.0
time_now = time.time()
correct = 0
total = 0
for i, data in enumerate(loader, 0):
inputs, labels = data
inputs, labels = inputs.to(device), labels.to(device)
optimizer.zero_grad()
outputs = net(inputs)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
running_loss += loss.item()
if i % 100 == 99:
print('[%d, %d] loss: %.4f' % (epoch + 1, i + 1, running_loss / 100))
running_loss = 0.0
print('Epoch time: %.2fs' % (time.time() - time_now))
print('Train acc: %f' % (100 * correct / total))
# Test
def inference(loader, device, net):
correct = 0
total = 0
with torch.no_grad():
for data in loader:
images, labels = data
images, labels = images.to(device), labels.to(device)
outputs = net(images)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
print('Test acc: %f' % (100 * correct / total))
|
py | 1a3687dcf5362e44ec173c3bdf0043030935de65 | import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import Dropout
from keras.models import model_from_json
import math
from sklearn.metrics import mean_squared_error
import requests
from textblob import TextBlob
#dataset_main = pd.read_csv('Google_Stock_Price_Train.csv')
#dataset = dataset_main.iloc[0:1259, 1:2].values
file = open("Stocks/bmi.us.txt", "r")
dataset = [[]]
count = 0
for line in file:
tokens = line.split(',')
array = [0]
if count > 0 :
array[0] = float(tokens[1])
dataset.insert(count,array)
count = count + 1
#print (count)
dataset.pop(0)
#print (dataset)
sc = MinMaxScaler(feature_range = (0, 1))
dataset_scaled = sc.fit_transform(dataset)
def train():
#training_set = dataset.iloc[0:4001, 2:3].values
#training_set_scaled = sc.fit_transform(training_set)
plt.plot(dataset, color = 'blue', label = 'Price')
plt.title('Price')
plt.xlabel('Time')
plt.ylabel('Price')
plt.legend()
plt.show()
X_train = []
y_train = []
X_train = dataset_scaled[0:2899]
y_train = dataset_scaled[1:2900]
plt.plot(X_train, color = 'red', label = 'Scaled Price')
plt.title('Scaled Price')
plt.xlabel('Time')
plt.ylabel('Price')
plt.legend()
plt.show()
X_train, y_train = np.array(X_train), np.array(y_train)
X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))
regressor = Sequential()
regressor.add(LSTM(units = 4, activation = 'sigmoid', input_shape = (None, 1)))
regressor.add(Dense(units = 1))
regressor.compile(optimizer = 'adam', loss = 'mean_squared_error')
regressor.fit(X_train, y_train, epochs = 200, batch_size = 32)
model_json = regressor.to_json()
with open("modelTempantm.json", "w") as json_file:
json_file.write(model_json)
regressor.save_weights("modelTempantm.h5")
print("Saved model to disk")
def load():
test_set = dataset[2900:3000]
#test_set_scaled = sc.transform(test_set)
test_set_scaled = dataset_scaled[2900:3000]
json_file = open('modelTempantm.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model = model_from_json(loaded_model_json)
# load weights into new model
loaded_model.load_weights("modelTempantm.h5")
print("Loaded model from disk")
test_set_reshaped = np.reshape(test_set_scaled, (test_set_scaled.shape[0], test_set_scaled.shape[1], 1))
predicted_temprature = loaded_model.predict(test_set_reshaped)
predicted_temprature = sc.inverse_transform(predicted_temprature)
fig_size = plt.rcParams["figure.figsize"]
fig_size[0] = 12
fig_size[1] = 5
plt.rcParams["figure.figsize"] = fig_size
plt.plot(predicted_temprature, color = 'blue', label = 'Predicted Price')
plt.plot(test_set, color = 'red', label = 'Real Price')
plt.title('Price Prediction')
plt.xlabel('Time')
plt.ylabel('Price')
plt.legend()
plt.show()
rmse = math.sqrt(mean_squared_error(test_set, predicted_temprature)) / 10
print (rmse)
def prediction():
#test_set = dataset_main.iloc[4001:4101, 2:3].values
#test_set_scaled = sc.transform(test_set)
test_set_scaled = dataset_scaled[3000:3010]
json_file = open('modelTempantm.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model = model_from_json(loaded_model_json)
# load weights into new model
loaded_model.load_weights("modelTempantm.h5")
test_set_reshaped = np.reshape(test_set_scaled, (test_set_scaled.shape[0], test_set_scaled.shape[1], 1))
predicted_temprature = loaded_model.predict(test_set_reshaped)
predicted_temprature = sc.inverse_transform(predicted_temprature)
#print(predicted_temprature)
return predicted_temprature
def senti():
url = ('https://newsapi.org/v2/everything?q=%20bmi%20stock%20market&apiKey=6e593f373865401e803d6874594f9063')
response = requests.get(url)
#print (response.json())
parsed_json = response.json()
#print(parsed_json['status'])
array = parsed_json['articles']
polarity = 0.0;
count = 0;
for i in array:
#print(i['description'])
blob = TextBlob(i['description'])
count = count + 1
polarity = polarity + blob.sentiment.polarity
polarity = polarity / count
#print(polarity)
return polarity
def run():
print('Prediction of bmi Stock Price in Next 10 Days :')
p = prediction()
s = senti()
print("Date Price")
d = 10
m = 1
y = 2019
    for i in range(0, 10):
if (d == 31):
d = 1;
m += 1;
if (m == 13):
m = 1;
print(str(d) + "-" + str(m) + "-"+ str(y)+": "+ str(p[i][0]))
d += 1
print('news polarity : ' + str(s))
if s > 0.5 :
print('User Demand Is Very High')
elif s > 0:
print('User Demand Is High')
elif s < -0.5:
print('User Demand Is Very Low')
elif s < 0:
        print('User Demand Is Low')
|
py | 1a3687de75b7b7338b2d74235c0f75ddfe44a428 | import io
import xlsxwriter
from . import models
from sales import models as s_models, exports as s_exports
from datetime import datetime
import decimal
def date_parse(str, fmt='%Y-%m-%d %H:%M:%S'):
if str == None:
return None
return datetime.strptime(str, fmt)
def cell(i, j):
char = "A"
char = chr(ord(char[0]) + j - 1)
return f'{char}{i}'
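# Note (added): the arithmetic in cell() only covers single-letter columns,
# i.e. j in 1..26 ('A'..'Z'); wider sheets would need base-26 column handling.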
def date_fmt(date):
if date == None:
return None
return date.strftime("%d/%m/%Y")
# def get_refs(sale):
# exit_ref = None
# c2_ref = None
# assessment_ref = None
# exit_doc = s_models.Document.objects.filter(doc_type=s_models.Document.DOC_EXIT, sale=sale).first()
# if exit_doc:
# exit_ref = exit_doc.ref_number
# assessment_doc = s_models.Document.objects.filter(doc_type=s_models.Document.DOC_ASSESSMENT, sale=sale).first()
# if assessment_doc:
# assessment_ref = assessment_doc.ref_number
# c2_doc = s_models.Document.objects.filter(doc_type=s_models.Document.DOC_C2, sale=sale).first()
# if c2_doc:
# c2_ref = c2_doc.ref_number
# return [c2_ref, assessment_ref, exit_ref]
def export_report_inv_details(request, sales):
output = io.BytesIO()
workbook = xlsxwriter.Workbook(output)
main = workbook.add_worksheet("Report")
headers = ['ID', 'TRANS_DATE', 'CUSTOMER', 'DELIVERY NOTE', 'VEH#',
'TAX INVOICE', 'SO#', 'PRODUCT', 'QTY(TONS)', 'DESTINATION', 'VEH# TRAILER', 'AGENT', 'C2', 'ASSESSMENT', 'EXIT', 'RATE/T', 'TOTAL VALUE EX VAT', 'VAT AMOUNT 18%', 'TOTAL VALUE INC VAT', 'INV NUMBER', 'ASSIGN#'
]
rows = []
for prj in sales:
comm_amt = prj.quantity2 * prj.invoice.commission
row = []
row.append(prj.id)
row.append(date_fmt(date_parse(prj.transaction_date)))
row.append(prj.customer_name)
row.append(prj.delivery_note)
row.append(prj.vehicle_number)
row.append(prj.tax_invoice)
row.append(prj.sales_order)
row.append(prj.product_name)
row.append(float(prj.quantity2))
row.append(prj.destination)
row.append(prj.vehicle_number_trailer)
row.append(prj.agent.code if prj.agent else 'None')
row.extend(s_exports.get_refs(prj))
row.append(float(prj.invoice.commission))
row.append(float(comm_amt))
row.append(float(comm_amt * decimal.Decimal(0.18)))
row.append(float(comm_amt*decimal.Decimal(1.18)))
row.append(prj.invoice.number)
row.append(prj.assign_no)
rows.append(row)
for j, col in enumerate(headers, start=1):
main.write(f'{cell(1, j)}', col)
for i, row in enumerate(rows, start=2):
for j, col in enumerate(row, start=1):
main.write(f'{cell(i, j)}', col)
workbook.close()
xlsx_data = output.getvalue()
return xlsx_data
def export_invoices(request, invoices):
output = io.BytesIO()
workbook = xlsxwriter.Workbook(output)
main = workbook.add_worksheet("Report")
headers = ['ID', 'INVOICE NO', 'PHY INVOICE NO', 'RATE', 'AGENT',
'QUANTITY(TONS)', 'VALUE(TZS)', 'VALUE(VAT INCL.)', 'STATUS']
rows = []
def get_phy_num(inv):
d = inv.docs.first()
ref_num = int(d.ref_number) if d else None
return ref_num
for prj in invoices:
row = []
row.append(prj.id)
row.append(prj.number)
row.append(get_phy_num(prj))
row.append(prj.commission)
row.append(prj.agent.code)
row.append(prj.quantity)
row.append(prj.value)
row.append(prj.value * decimal.Decimal(1.18))
row.append('Pending' if prj.status == 0 else 'Completed')
rows.append(row)
for j, col in enumerate(headers, start=1):
main.write(f'{cell(1, j)}', col)
for i, row in enumerate(rows, start=2):
for j, col in enumerate(row, start=1):
main.write(f'{cell(i, j)}', col)
workbook.close()
xlsx_data = output.getvalue()
return xlsx_data
|
py | 1a36882e08ba9af15494b19e8d0d770de6adeb05 |
import sys
import tarfile
from urllib.request import urlretrieve
import logging
import time
from pathlib import Path
from collections import defaultdict
logger = logging.getLogger(__name__)
MODEL_DIRECTORY = Path(__file__).parent / 'models'
MODELS = {
'en': (
'chainer',
'tri_headfirst',
'1mxl1HU99iEQcUYhWhvkowbE4WOH0UKxv',
MODEL_DIRECTORY / 'config_en.json'
),
'en[elmo]': (
'allennlp',
'lstm_parser_elmo',
'1UldQDigVq4VG2pJx9yf3krFjV0IYOwLr',
MODEL_DIRECTORY / 'config_en.json'
),
'en[rebank]': (
'allennlp',
'lstm_parser_char_rebanking',
'1Az840uCW8QuAkNCZq_Y8VOkW5j0Vtcj9',
MODEL_DIRECTORY / 'config_rebank.json'
),
'en[elmo_rebank]': (
'allennlp',
'lstm_parser_elmo_rebanking',
'1deyCjSgCuD16WkEhOL3IXEfQBfARh_ll',
MODEL_DIRECTORY / 'config_rebank.json'
),
'ja': (
'chainer',
'ja_headfinal',
'1bblQ6FYugXtgNNKnbCYgNfnQRkBATSY3',
MODEL_DIRECTORY / 'config_ja.json'
)
}
AVAILABLE_MODEL_VARIANTS = defaultdict(list)
for model in MODELS:
if '[' in model and ']' in model:
assert model[-1] == ']'
lang, variant = model[:-1].split('[')
AVAILABLE_MODEL_VARIANTS[lang].append(variant)
SEMANTIC_TEMPLATES = {
'en': MODEL_DIRECTORY / 'semantic_templates_en_event.yaml',
'ja': MODEL_DIRECTORY / 'semantic_templates_ja_event.yaml'
}
def reporthook(count, block_size, total_size):
global start_time
if count == 0:
start_time = time.time()
return
duration = time.time() - start_time
progress_size = int(count * block_size)
speed = int(progress_size / (1024 * duration))
percent = min(int(count * block_size * 100 / total_size), 100)
sys.stdout.write("\r...%d%%, %d MB, %d KB/s, %d seconds passed" %
(percent, progress_size / (1024 * 1024), speed, duration))
sys.stdout.flush()
def download(lang, variant):
model_name = f'{lang}[{variant}]' if variant else lang
framework, basename, url, _ = MODELS[model_name]
from google_drive_downloader import GoogleDriveDownloader as gdd
logging.info(f'start downloading from {url}')
filename = (MODEL_DIRECTORY / basename).with_suffix('.tar.gz')
gdd.download_file_from_google_drive(file_id=url,
dest_path=filename,
unzip=False,
overwrite=True)
if framework == 'chainer':
logging.info(f'extracting files')
tf = tarfile.open(filename)
tf.extractall(MODEL_DIRECTORY)
logging.info(f'finished')
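# Illustrative usage sketch (added): a falsy variant downloads the base model for a
# language, matching how model_name is assembled above.
def _demo_download_base_english():
    download('en', None)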
def load_model_directory(model_name):
framework, basename, _, config = MODELS[model_name]
model_path = MODEL_DIRECTORY / basename
if framework == 'allennlp':
model_path = model_path.with_suffix('.tar.gz')
if not model_path.exists():
lang, variant = model_name[:-1].split('[')
raise RuntimeError(f'please download the model by doing \'depccg_{lang} download VARIANT\'.')
return model_path, config
def model_is_available(model_name):
return model_name in list(MODELS.keys())
|
py | 1a3688935264358745e7984cec674a5165b74a2d | from pytorch_lightning.callbacks import ModelCheckpoint
import os
from argparse import ArgumentParser
import os
import gc
import datetime
import numpy as np
import pandas as pd
import numpy as np
import torch
import pytorch_lightning as pl
from lightning_module import LightningModel
from pytorch_lightning.loggers.tensorboard import TensorBoardLogger
SEED = 1
torch.manual_seed(SEED)
np.random.seed(SEED)
def main(hparams):
"""
testing routine
Args:
hparams: checkpoint of the model to be tested and gpu, parallel backend etc.,
defined in the argument parser in if __name__ == '__main__':
Returns:
"""
checkpoint_path=hparams.ckp
model = LightningModel.load_from_checkpoint(
checkpoint_path=checkpoint_path,
tags_csv= hparams.hparams,
)
trainer = pl.Trainer(
gpus=[hparams.first_gpu+el for el in range(hparams.gpus)],
distributed_backend=hparams.distributed_backend,
)
trainer.test(model)
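# Example invocation (added; the script name and paths are hypothetical):
#   python test_model.py --gpus 1 --first-gpu 0 \
#       --ckp /path/to/checkpoint.ckpt --hparams /path/to/meta_tags.csv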
if __name__ == '__main__':
root_dir = os.path.dirname(os.path.realpath(__file__))
parent_parser = ArgumentParser(add_help=False)
parent_parser.add_argument(
'--gpus',
type=int,
default=4,
help='how many gpus'
)
parent_parser.add_argument(
'--distributed_backend',
type=str,
default='ddp',
help='supports three options dp, ddp, ddp2'
)
parent_parser.add_argument(
'--amp_optimization',
type=str,
default='00',
help="mixed precision format, default 00 (32), 01 mixed, 02 closer to 16, should not be used during testing"
)
parent_parser.add_argument(
'--first-gpu',
type=int,
default=0,
help='gpu number to use [first_gpu, ..., first_gpu+gpus]'
)
parent_parser.add_argument(
'--ckp',
type=str,
default='',
help='ckp path, if left empty no checkpoint is used'
)
parent_parser.add_argument(
'--hparams',
type=str,
default='',
help='path for hparams of ckp if left empty no checkpoint is used'
)
parent_parser.add_argument("--test",
action="store_true",
help="whether to train or test"
)
# each LightningModule defines arguments relevant to it
parser = LightningModel.add_model_specific_args(parent_parser)
hyperparams = parser.parse_args()
print(hyperparams)
main(hyperparams)
|
py | 1a3688c4550a5f2da6762bec8ffec114bda6c620 | # -*- coding: utf-8 -*-
# file: file_utils.py
# time: 2021/7/13 0020
# author: yangheng <[email protected]>
# github: https://github.com/yangheng95
# Copyright (C) 2021. All Rights Reserved.
import copy
import json
import os
import pickle
import urllib.request
import torch
from findfile import find_files, find_dir, find_cwd_file
from google_drive_downloader import GoogleDriveDownloader as gdd
from pyabsa.core.atepc.dataset_utils.atepc_utils import split_text
from termcolor import colored
from pyabsa import __version__
# convert atepc_datasets in this repo for inferring_tutorials
from pyabsa.functional.dataset import DatasetItem
from pyabsa.utils.pyabsa_utils import save_args
def generate_inference_set_for_apc(dataset_path):
if isinstance(dataset_path, DatasetItem):
dataset_path = dataset_path.dataset_name
elif not os.path.exists(dataset_path):
dataset_path = os.getcwd()
train_datasets = find_files(dataset_path, ['dataset', 'train', 'apc'], exclude_key='.inference')
test_datasets = find_files(dataset_path, ['dataset', 'test', 'apc'], exclude_key='.inference')
for file in train_datasets + test_datasets:
try:
fin = open(file, 'r', newline='\n', encoding='utf-8')
lines = fin.readlines()
fin.close()
path_to_save = file + '.inference'
fout = open(path_to_save, 'w', encoding='utf-8', newline='\n', errors='ignore')
for i in range(0, len(lines), 3):
sample = lines[i].strip().replace('$T$', '[ASP]{}[ASP]'.format(lines[i + 1].strip()))
fout.write(sample + ' !sent! ' + lines[i + 2].strip() + '\n')
fout.close()
except:
print('Unprocessed file:', file)
print('save in: {}'.format(path_to_save))
print('process finished')
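# Added illustration of the conversion above (format inferred from the code):
# an APC sample spans three lines, e.g.
#   the $T$ is great
#   battery life
#   Positive
# and is rewritten as a single inference line
#   the [ASP]battery life[ASP] is great !sent! Positive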
def is_similar(s1, s2):
count = 0.0
for token in s1.split(' '):
if token in s2:
count += 1
if count / len(s1.split(' ')) >= 0.8 and count / len(s2.split(' ')) >= 0.8:
return True
else:
return False
def assemble_aspects(fname):
fin = open(fname, 'r', encoding='utf-8', newline='\n', errors='ignore')
lines = fin.readlines()
fin.close()
for i in range(len(lines)):
if i % 3 == 0 or i % 3 == 1:
lines[i] = ' '.join(split_text(lines[i].strip())).replace('$ t $', '$T$')
else:
lines[i] = lines[i].strip()
def unify_same_samples(same_samples):
text = same_samples[0][0].replace('$T$', same_samples[0][1])
polarities = [-999] * len(text.split())
tags = ['O'] * len(text.split())
samples = []
for sample in same_samples:
# print(sample)
polarities_tmp = copy.deepcopy(polarities)
try:
asp_begin = (sample[0].split().index('$T$'))
asp_end = sample[0].split().index('$T$') + len(sample[1].split())
for i in range(asp_begin, asp_end):
polarities_tmp[i] = sample[2]
if i - sample[0].split().index('$T$') < 1:
tags[i] = 'B-ASP'
else:
tags[i] = 'I-ASP'
samples.append([text, tags, polarities_tmp])
except:
print('Ignore Error:', sample[0])
return samples
samples = []
aspects_in_one_sentence = []
for i in range(0, len(lines), 3):
lines[i] = lines[i].replace('$T$', ' $T$ ').replace(' ', ' ')
if len(aspects_in_one_sentence) == 0:
aspects_in_one_sentence.append([lines[i], lines[i + 1], lines[i + 2]])
continue
if is_similar(aspects_in_one_sentence[-1][0], lines[i]):
aspects_in_one_sentence.append([lines[i], lines[i + 1], lines[i + 2]])
else:
samples.extend(unify_same_samples(aspects_in_one_sentence))
aspects_in_one_sentence = []
aspects_in_one_sentence.append([lines[i], lines[i + 1], lines[i + 2]])
samples.extend(unify_same_samples(aspects_in_one_sentence))
return samples
def split_aspects(sentence):
single_aspect_with_contex = []
aspect_num = len(sentence[1].split("|"))
aspects = sentence[1].split("|")
polarity = sentence[2].split("|")
pre_position = 0
aspect_context = sentence[0]
for i in range(aspect_num):
aspect_context = aspect_context.replace("$A$", aspects[i], 1)
single_aspect_with_contex.append(
(aspect_context[pre_position:aspect_context.find("$A$")], aspects[i], polarity[i]))
pre_position = aspect_context.find(aspects[i]) + len(aspects[i]) + 1
return single_aspect_with_contex
def convert_atepc(fname):
print('converting:', fname)
dist_fname = fname.replace('apc_datasets', 'atepc_datasets') + '.atepc'
lines = []
samples = assemble_aspects(fname)
for sample in samples:
for token_index in range(len(sample[1])):
token, label, polarity = sample[0].split()[token_index], sample[1][token_index], sample[2][token_index]
lines.append(token + " " + label + " " + str(polarity))
lines.append('\n')
    # Before writing, check whether the file already exists; delete it if it does
if os.path.exists(dist_fname):
os.remove(dist_fname)
fout = open(dist_fname, 'w', encoding='utf8')
for line in lines:
fout.writelines((line + '\n').replace('\n\n', '\n'))
fout.close()
# Split the aspects out of the dataset
def convert_apc_set_to_atepc_set(path):
if isinstance(path, DatasetItem):
path = path.dataset_name
if not os.path.exists(path):
files = find_files(os.getcwd(), [path, 'dataset', 'apc'], exclude_key='.inference')
else:
files = find_files(path, '', exclude_key='infer')
print('Find datasets files at {}:'.format(path))
for f in files:
print(f)
for target_file in files:
if not (target_file.endswith('.inference') or target_file.endswith('.atepc')):
try:
convert_atepc(target_file)
except:
                print('failed to process: {}'.format(target_file))
else:
print('Ignore ', target_file)
print('finished')
# Split the aspects out of the dataset
def refactor_chinese_dataset(fname, train_fname, test_fname):
lines = []
samples = assemble_aspects(fname)
positive = 0
negative = 0
sum = 0
# refactor testset
for sample in samples[:int(len(samples) / 5)]:
for token_index in range(len(sample[1])):
            token, label, polarity = sample[0].split()[token_index], sample[1][token_index], sample[2][token_index]
            lines.append(token + " " + label + " " + str(polarity))
lines.append('\n')
if 1 in sample[2]:
positive += 1
else:
negative += 1
sum += 1
    print(test_fname + f" sum={sum} positive={positive} negative={negative}")
if os.path.exists(test_fname):
os.remove(test_fname)
fout = open(test_fname, 'w', encoding='utf8')
for line in lines:
fout.writelines((line + '\n').replace('\n\n', '\n'))
fout.close()
    lines = []
    positive = 0
    negative = 0
    sum = 0
# refactor trainset
for sample in samples[int(len(samples) / 5):]:
for token_index in range(len(sample[1])):
            token, label, polarity = sample[0].split()[token_index], sample[1][token_index], sample[2][token_index]
            lines.append(token + " " + label + " " + str(polarity))
lines.append('\n')
if 1 in sample[2]:
positive += 1
else:
negative += 1
sum += 1
    print(train_fname + f" sum={sum} positive={positive} negative={negative}")
if os.path.exists(train_fname):
os.remove(train_fname)
fout = open(train_fname, 'w', encoding='utf8')
for line in lines:
fout.writelines((line + '\n').replace('\n\n', '\n'))
fout.close()
def detect_error_in_dataset(dataset):
f = open(dataset, 'r', encoding='utf8')
lines = f.readlines()
for i in range(0, len(lines), 3):
# print(lines[i].replace('$T$', lines[i + 1].replace('\n', '')))
if i + 3 < len(lines):
if is_similar(lines[i], lines[i + 3]) and len((lines[i] + " " + lines[i + 1]).split()) != len(
(lines[i + 3] + " " + lines[i + 4]).split()):
print(lines[i].replace('$T$', lines[i + 1].replace('\n', '')))
print(lines[i + 3].replace('$T$', lines[i + 4].replace('\n', '')))
def save_model(opt, model, tokenizer, save_path):
if not opt.save_mode:
return
# Save a trained model, configuration and tokenizer
if hasattr(model, 'module') or hasattr(model, 'core'):
# print("save model from data-parallel!")
model_to_save = model.module
else:
# print("save a single cuda model!")
model_to_save = model
if opt.save_mode == 1 or opt.save_mode == 2:
if not os.path.exists(save_path):
os.makedirs(save_path)
f_config = open(save_path + opt.model_name + '.config', mode='wb')
f_tokenizer = open(save_path + opt.model_name + '.tokenizer', mode='wb')
pickle.dump(opt, f_config)
pickle.dump(tokenizer, f_tokenizer)
f_config.close()
f_tokenizer.close()
save_args(opt, save_path + opt.model_name + '.args.txt')
if opt.save_mode == 1:
torch.save(model_to_save.state_dict(), save_path + opt.model_name + '.state_dict') # save the state dict
elif opt.save_mode == 2:
            torch.save(model.cpu(), save_path + opt.model_name + '.model')  # save the whole model object
elif opt.save_mode == 3:
# save the fine-tuned bert model
model_output_dir = save_path + 'fine-tuned-pretrained-model'
if not os.path.exists(model_output_dir):
os.makedirs(model_output_dir)
output_model_file = os.path.join(model_output_dir, 'pytorch_model.bin')
output_config_file = os.path.join(model_output_dir, 'config.json')
torch.save(model_to_save.state_dict(), output_model_file)
model_to_save.bert.config.to_json_file(output_config_file)
tokenizer.tokenizer.save_vocabulary(model_output_dir)
else:
raise ValueError('Invalid save_mode: {}'.format(opt.save_mode))
model.to(opt.device)
def check_update_log():
print(colored('check update log at https://github.com/yangheng95/PyABSA/blob/release/release-note.json', 'red'))
def query_remote_version():
try:
dataset_url = 'https://raw.githubusercontent.com/yangheng95/ABSADatasets/v1.2/datasets/__init__.py'
content = urllib.request.urlopen(dataset_url, timeout=5)
content = content.read().decode("utf-8").split('\'')
version = content[-2]
except Exception as e:
return 'N.A.'
return version
def query_local_version():
try:
fin = open(find_cwd_file(['__init__.py', 'integrated_datasets']))
local_version = fin.read().split('\'')[-2]
fin.close()
    except Exception:
return 'N.A.'
return local_version
def check_dataset():  # compare the local ABSADatasets version against the remote one
try:
local_version = query_local_version()
remote_version = query_remote_version()
        print('Remote ABSADatasets version: {}, local ABSADatasets version: {}'.format(remote_version, local_version))
if remote_version == 'N.A.':
print('Unknown remote version for ABSADatasets, please check the latest version of ABSADatasets')
elif local_version == 'N.A.':
print('Unknown local version for ABSADatasets, please check the latest version of ABSADatasets')
elif remote_version > local_version:
print(colored('There is a new version of ABSADatasets({}), please remove the downloaded datasets to automatically download the new version.'.format(remote_version), 'green'))
except Exception as e:
print(colored('ABSADatasets version check failed: {}, please check the latest datasets on GitHub manually.'.format(e), 'red'))
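# --- Illustrative usage sketch (not part of the original script) ---------------
# The helpers above only compare version strings parsed from __init__.py files;
# a minimal driver simply chains them (network access is assumed to be available).
def _example_version_check():
    """Print the update-log hint and the remote/local ABSADatasets versions."""
    check_update_log()
    check_dataset()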
|
py | 1a36891dbbad7eff612c8e0d63e269be225f869f | """
Histogram data and expected results
"""
tiny = \
[(10, 30, '[10,30)'),
(20, 60, '[20,60)'),
(40, 50, '[40,50)'),
(10, 40, '[10,40)')]
large_count = \
[(-50.0, 37.8, 10),
(37.8, 125.6, 12),
(125.6, 213.4, 11),
(213.4, 301.2, 13),
(301.2, 389.0, 10),
(389.0, 476.8, 9),
(476.8, 564.6, 10),
(564.6, 652.4, 11),
(652.4, 740.2, 11),
(740.2, 828.0, 9)]
large_utilization = \
[(-50.0, 37.8, 0.79),
(37.8, 125.6, 0.68),
(125.6, 213.4, 0.55),
(213.4, 301.2, 0.76),
(301.2, 389.0, 0.73),
(389.0, 476.8, 0.62),
(476.8, 564.6, 0.86),
(564.6, 652.4, 0.57),
(652.4, 740.2, 0.75),
(740.2, 828.0, 0.51)]
|
py | 1a368bb3550f09bee46c1a49e43ec67992ac0679 | # Generated by Django 2.0.5 on 2018-07-22 15:10
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('clients', '0001_initial'),
('travels', '0001_initial'),
('drivers', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='driver',
name='clients',
field=models.ManyToManyField(through='travels.Travel', to='clients.Client'),
),
migrations.AlterField(
model_name='timeavail',
name='start_time',
field=models.CharField(default=datetime.datetime(2018, 7, 22, 17, 10, 6, 273621), max_length=5),
),
]
|
py | 1a368c2c84b4e72bf5b38c31e803363ed4907499 | # Copyright (c) 2015-2020, Swiss Federal Institute of Technology (ETH Zurich)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
"""Plotting support for experimental runs"""
import typing as t
import matplotlib.pyplot as plt
import numpy as np
import pandas
import seaborn as sns
from exot.experiment._base import Run
from exot.experiment.frequency_sweep import FrequencySweepRun
from exot.experiment.performance import PerformanceRun
from exot.util.attributedict import AttributeDict
from exot.util.plotting import add_spine, remove_spine, rugplot
from exot.util.scinum import is_fitted, unpack_array
from ._base import Plotter
__all__ = ("RunPlotter", "PerformanceRunPlotter", "FrequencySweepRunPlotter")
class RunPlotter(Plotter):
"""Plotting support for Run instances
Attributes:
PLOT_FILENAMES (dict): Default filenames for supported plots
"""
PLOT_FILENAMES = {}
def __init__(self, run, *args, **kwargs):
        if not (run.digested and run.ingested):
raise ValueError("Plotter requires ingested and digested Run's")
self._run = run
if "save_path" not in kwargs:
kwargs["save_path"] = self.run.path
super().__init__(*args, **kwargs)
@property
def run(self) -> Run:
return self._run
def _raw_plot_helper(
self,
source: pandas.DataFrame,
start: t.Optional[float] = None,
end: t.Optional[float] = None,
dim_count: t.Optional[int] = None,
):
SUBPLOT_HEIGHT = 1 # inches
timestamps = source.iloc[:, 0]
data = source.iloc[:, 1:]
dims = data.shape[1]
dims_to_plot = data.shape[1]
if dim_count is not None:
if dim_count > dims:
raise ValueError(f"dim_count ({dim_count}) > dims ({dims})")
dims_to_plot = dim_count
start = start if start else timestamps.iloc[0]
end = end if end else timestamps.iloc[-1]
interval = timestamps.between(start, end)
timestamps = timestamps[interval]
data = data[interval]
# Create subplots: 3 columns, ndim rows
f, axes = plt.subplots(
dims_to_plot,
1,
figsize=(self._width, 1 + dims_to_plot * SUBPLOT_HEIGHT),
dpi=self._screen_dpi,
sharex="col",
sharey="col",
squeeze=False,
)
lower_ylim, upper_ylim = np.quantile(data, [0.01, 0.99])
for i, axis in enumerate(axes[:, 0]):
axis.plot(
timestamps, data.iloc[:, i], marker="+", markersize=2, linewidth=0.5, alpha=0.5
)
if lower_ylim < upper_ylim:
axis.set_ylim(0.975 * lower_ylim, 1.025 * upper_ylim)
axis.get_xaxis().get_major_formatter().set_useOffset(False)
axis.set_xlim(timestamps.iloc[0], timestamps.iloc[-1])
axis.set_ylabel("{}\n{}\n{} ({})".format(*data.columns[i].split(":")), color="gray")
annotations = None
if "io" in self.run.intermediates:
if np.isclose(timestamps.iloc[0], 0.0) and "src_log" in self.run.intermediates.io:
annotations = self.run.intermediates.io.src_log.iloc[[0, -1], 0]
elif (
not np.isclose(timestamps.iloc[0], 0.0)
) and "raw_src_log" in self.run.intermediates.io:
annotations = self.run.intermediates.io.raw_src_log.iloc[[0, -1], 0]
if annotations is not None:
for axis in axes.ravel():
axis.vlines(
annotations,
0,
1,
transform=axis.get_xaxis_transform(),
linewidth=1.0,
linestyle="--",
)
sns.despine()
axes[-1, 0].set_xlabel(source.columns[0], color="gray")
_title = (
"Processed raw data stream"
if np.isclose(timestamps.iloc[0], 0.0)
else "Raw data stream"
)
if dims_to_plot != dims:
_title += " [{} of {} dimensions]".format(dims_to_plot, dims)
plt.suptitle(_title, y=1.01, verticalalignment="bottom")
f.tight_layout()
return f
def plot_rawstream(
self,
start: t.Optional[float] = None,
end: t.Optional[float] = None,
dim_count: t.Optional[int] = None,
**kwargs,
):
f = self._raw_plot_helper(
self.run.i_rawstream, start=start, end=end, dim_count=dim_count
)
self._plot_save_helper(f, **kwargs)
def plot_rdpstream(
self,
start: t.Optional[float] = None,
end: t.Optional[float] = None,
dim_count: t.Optional[int] = None,
**kwargs,
):
f = self._raw_plot_helper(
self.run.i_rdpstream, start=start, end=end, dim_count=dim_count
)
self._plot_save_helper(f, **kwargs)
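# --- Illustrative usage sketch (not part of the original module) ---------------
# RunPlotter and its subclasses are driven by an already ingested and digested
# Run (see __init__ above).  A typical call sequence might look like the helper
# below; the run object itself is assumed to be prepared elsewhere.
def _example_plot_raw_streams(run, **save_kwargs):
    """Plot the raw and preprocessed data streams of a digested run."""
    plotter = RunPlotter(run)
    plotter.plot_rawstream(**save_kwargs)
    plotter.plot_rdpstream(**save_kwargs)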
class FrequencySweepRunPlotter(RunPlotter):
"""Plotting support for FrequencySweepRun instances
Attributes:
PLOT_FILENAMES (dict): Default filenames for supported plots
"""
def __init__(self, run: FrequencySweepRun, *args, **kwargs):
if not isinstance(run, FrequencySweepRun):
raise TypeError("FrequencySweepRunPlotter accepts only FrequencySweepRun instances")
super().__init__(run, *args, **kwargs)
def plot_spectrum(self, window=8192):
pass
class PerformanceRunPlotter(RunPlotter):
"""Plotting support for PerformanceRun instances
Attributes:
PLOT_FILENAMES (dict): Default filenames for supported plots
"""
def __init__(self, run: PerformanceRun, *args, **kwargs):
if not isinstance(run, PerformanceRun):
raise TypeError("PerformanceRunPlotter accepts only PerformanceRun instances")
super().__init__(run, *args, **kwargs)
def plot_slicing(
self, start: int = 0, count: int = 10, dim_count: t.Optional[int] = None, **kwargs
):
SUBPLOT_HEIGHT = 1 # inches
samples_per_symbol = (
self.run.i_lnestream.shape[1]
if self.run.i_lnestream.ndim == 2
else self.run.i_lnestream.shape[2]
)
subsymbol_count = getattr(self.run.parent.layers.lne, "subsymbol_count", 1)
count = min([count, self.run.i_symstream.size])
start_idx = start
start_sample = start * samples_per_symbol
end_idx = start_idx + count
end_sample = start_sample + samples_per_symbol * (count)
selection_idx = slice(start_idx, start_idx + count)
selection_sample = slice(start_sample, end_sample)
selection_gt = slice(
start_idx * subsymbol_count, (start_idx + count + 1) * subsymbol_count
)
selection_slicing = self.run.intermediates.rdp.slicing[
slice(start_idx, start_idx + count + 1)
]
selection_raw = self.run.i_rdpstream.iloc[:, 0].between(
selection_slicing[0], selection_slicing[-1]
)
annotations = None
if "src_log" in self.run.intermediates.io:
annotations = self.run.intermediates.io.src_log.iloc[:, 0]
annotations = annotations[
annotations.between(0.99 * selection_slicing[0], 1.01 * selection_slicing[-1])
]
# Create plotting data, figures, and plot data
raw: pandas.DataFrame = self.run.i_rdpstream[selection_raw]
data: np.ndarray
gt: np.ndarray = self.run.o_lnestream[selection_gt]
# Handle 2-d and 3-d data
if self.run.i_lnestream.ndim == 2:
dims = 1
data = np.vstack(self.run.i_lnestream).reshape(
self.run.i_lnestream.shape[0] * samples_per_symbol, 1
)[selection_sample]
elif self.run.i_lnestream.ndim == 3:
dims = self.run.i_lnestream.shape[1]
data = (
self.run.i_lnestream.transpose(1, 0, 2)
.reshape(dims, self.run.i_lnestream.size // dims)
.T[selection_sample, :]
)
dims_to_plot = dims
if dim_count is not None:
if dim_count > dims:
raise ValueError(f"dim_count ({dim_count}) > dims ({dims})")
dims_to_plot = dim_count
# Create subplots: 3 columns, ndim rows
f, axes = plt.subplots(
dims_to_plot,
3,
figsize=(self._width, 1 + dims_to_plot * SUBPLOT_HEIGHT),
dpi=self._screen_dpi,
sharex="col",
sharey="col",
squeeze=False,
)
if dims_to_plot == 1:
gt = gt.reshape(-1, 1)
else:
# if there are more than one data dimensions, that likely means that we're dealing with
# MIMO symbols, which need to be "unpacked".
gt = np.flip(unpack_array(gt, n=dims), axis=1)
# Handle printing symbolstreams with -1 saturating values
if -1 in gt:
gt = np.vectorize(lambda v: 1 if v == -1 else v)(gt)
# Plot raw, ground truth, and samples
for i, axis_group in enumerate(axes):
axis_group[0].plot(raw.iloc[:, 0], raw.iloc[:, i + 1], alpha=1.0, linestyle="-")
# gt_t is ground truth timing, same as raw timing
# gt_d is ground truth "data"
if subsymbol_count != 1:
gt_t, gt_d = (
np.linspace(selection_slicing[0], selection_slicing[-1], gt.shape[0] - 1),
gt[:-1, i],
)
else:
gt_t, gt_d = (selection_slicing, np.array(gt[:, i]))
gt_t, gt_d = gt_t[: gt_d.size], gt_d[: gt_t.size]
axis_group[1].plot(gt_t, gt_d, marker=".", drawstyle="steps-post")
axis_group[2].plot(
data[:, i],
alpha=min(1.0, 100 / samples_per_symbol),
linestyle=":",
marker="+",
markersize=2,
linewidth=0.5,
)
sns.despine()
# Column 1, raw data
axes[-1, 0].set_ylim(
0.975 * np.quantile(raw.iloc[:, 1:], 0.01),
1.025 * np.quantile(raw.iloc[:, 1:], 0.99),
)
axes[-1, 0].set_xlabel(raw.columns[0], color="gray")
for i, axis in enumerate(axes[:, 0].ravel()):
_ = raw.columns[i + 1].split(":")
axis.set_ylabel("{}\n{}\n{} ({})".format(*_), color="gray")
# Column 2, ground truth
axes[-1, 1].set_ylim(np.nanmin(gt_d) - 0.5, np.nanmax(gt_d) + 0.5)
axes[-1, 1].set_yticks(np.unique(gt_d))
axes[-1, 1].set_xlabel(raw.columns[0], color="gray")
for i, axis in enumerate(axes[:, 1].ravel()):
axis.set_ylabel("Data[{}]".format(i), color="gray")
# Column 3, sampled data
xticks = np.arange(0, samples_per_symbol * (count + 1), samples_per_symbol)
xticklabels = np.arange(
start_sample, end_sample + samples_per_symbol, samples_per_symbol
)
xlabel = "Sample #"
axes[-1, 2].set_ylim(0.975 * np.quantile(data, 0.1), 1.025 * np.quantile(data, 0.9))
axes[-1, 2].set_xticks(xticks)
axes[-1, 2].set_xticklabels(xticklabels, rotation=45)
axes[-1, 2].set_xlabel(xlabel, color="gray")
# Plot symbol boundaries in real time space
for axis in axes[:, [0, 1]].ravel():
axis.vlines(
selection_slicing,
0,
1,
transform=axis.get_xaxis_transform(),
linewidth=1.0,
alpha=0.5,
linestyle="--",
color="k",
)
if annotations is not None:
for axis in axes[:, [0, 1]].ravel():
axis.vlines(
annotations,
0.0,
0.75,
transform=axis.get_xaxis_transform(),
linewidth=1.0,
alpha=0.5,
linestyle="-.",
color="r",
)
# Plot symbol boundaries in sample space
for axis in axes[:, 2].ravel():
axis.vlines(
xticks,
0,
1,
transform=axis.get_xaxis_transform(),
linewidth=1.0,
alpha=0.5,
linestyle="--",
color="k",
)
# Align labels
f.align_ylabels(axes[:, :])
f.align_xlabels(axes[-1, :])
# Set titles
axes[0, 0].set_title("Raw data", fontstyle="italic", color="gray")
axes[0, 1].set_title("Ground truth", fontstyle="italic", color="gray")
axes[0, 2].set_title("Sample stream", fontstyle="italic", color="gray")
_title = "Symbol stream for symbols {} to {}".format(start_idx, start_idx + count)
if dims_to_plot != dims:
_title += " [{} of {} dimensions]".format(dims_to_plot, dims)
plt.suptitle(_title, y=1.01, verticalalignment="bottom")
f.tight_layout()
self._plot_save_helper(f, **kwargs)
def plot_symstream(
self, start: t.Optional[int] = None, count: t.Optional[int] = 10, **kwargs
):
SUBPLOT_HEIGHT = 1.5 # inches
start = start if start else 0
end = start + count if count else len(self.run.o_symstream) - start
dims = self.run.i_rdpstream.shape[1] - 1
_out = self.run.o_symstream[slice(start, end)]
_in = self.run.i_symstream[slice(start, end)]
_x = np.arange(start, end)
# if dims != 1, then we're dealing with MIMO
if dims != 1:
_out = np.flip(unpack_array(_out, n=dims), axis=1)
_in = np.flip(unpack_array(_in, n=dims), axis=1)
f, axes = plt.subplots(
dims,
1,
figsize=(self._width, 1 + dims * SUBPLOT_HEIGHT),
dpi=self._screen_dpi,
sharey=True,
sharex=True,
)
if dims == 1:
axes = np.array([axes])
_out = _out.reshape(-1, 1)
_in = _in.reshape(-1, 1)
def minmax(x):
return (x.min(), x.max())
for i, axis in enumerate(axes):
_xlim, _ylim = minmax(np.hstack([_out, _in]))
_lower_margin, _h = 0.15, 0.35
axis.plot(_x, _out[:, i], marker="+", color="C0", drawstyle="steps-post")
axis.set_xlim(_x.min() - 0.5, _x.max() + 0.5)
axis.set_ylim(_out.min() - _lower_margin, _out.max() + _h)
axis.set_ylabel("Output[{}]".format(i))
axis.yaxis.label.set_color("C0")
axis.tick_params(axis="y", color="C0")
twin = axis.twinx()
twin.plot(_x, _in[:, i], marker="x", color="C1", drawstyle="steps-post")
twin.set_ylim(_in.min() - _h, _in.max() + _lower_margin)
twin.set_ylabel("Input[{}]".format(i))
twin.yaxis.label.set_color("C1")
twin.tick_params(axis="y", color="C1")
twin.spines["left"].set_color("C0")
twin.spines["right"].set_color("C1")
axis.grid(axis="y", color="C0", dashes=(5, 5), alpha=0.5)
twin.grid(axis="y", color="C1", dashes=(5, 5), alpha=0.5)
axes[-1].xaxis.set_major_locator(plt.MultipleLocator(base=1.0))
axes[-1].set_xlabel("Symbol #", color="gray")
if _x.size >= 50:
plt.setp(axes[-1].xaxis.get_majorticklabels(), rotation=90)
if _x.size >= 60:
pass
for axis in axes:
sns.despine(ax=axis, top=True, bottom=False, right=False, left=False)
plt.suptitle(
"Symbol stream for symbols {} to {}".format(*minmax(_x)),
y=1.01,
verticalalignment="bottom",
)
f.tight_layout()
self._plot_save_helper(f, **kwargs)
def plot_eye_diagram(self, **kwargs):
SUBPLOT_HEIGHT = self._width // 3 # inches
f, axis = plt.subplots(
1, 1, figsize=(self._width, SUBPLOT_HEIGHT), dpi=self._screen_dpi
)
data: np.ndarray
# Handle 2-d and 3-d data
if self.run.i_lnestream.ndim == 2:
data = self.run.i_lnestream.T[:, : self.run.i_symstream.size]
elif self.run.i_lnestream.ndim == 3:
data = np.vstack(self.run.i_lnestream).T[:, : self.run.i_symstream.size]
axis.plot(data, color="C0", linestyle="-", marker=".", alpha=(10 / data.shape[1]))
sns.despine()
lower_ylim, upper_ylim = np.quantile(data, [0.05, 0.95])
axis.set_ylim(0.975 * lower_ylim, 1.025 * upper_ylim)
axis.set_xlabel("Sample #", color="gray")
ylabel = self.run.i_rdpstream.columns[1].split(":")
ylabel = map(ylabel.__getitem__, [0, 1, -1])
ylabel = "\n".join(ylabel)
axis.set_ylabel(ylabel, color="gray")
plt.suptitle(
"Eye diagram for a total of {} symbols".format(data.shape[1]),
verticalalignment="bottom",
)
f.tight_layout()
self._plot_save_helper(f, **kwargs)
def plot_symbol_space(self, **kwargs):
symbol_space = self.run.intermediates.lne.symbol_space
decision_device = self.run.intermediates.lne.decision_device["decision_device"][0]
if not is_fitted(decision_device):
raise RuntimeError("decision device must be fitted for symbol space plotting")
sca = decision_device.named_steps.get("standardscaler", None)
pca = decision_device.named_steps.get("pca", None)
cla = decision_device.steps[-1][1]
X = sca.transform(symbol_space) if sca else symbol_space
        X = pca.transform(X) if pca else X
if X.shape[1] == 1:
# plotting_data
f, axes = plt.subplots(
4,
1,
figsize=(self._width, 8),
dpi=self._screen_dpi,
sharex=True,
gridspec_kw={"height_ratios": [2, 3, 2, 1]},
)
X = X.ravel()
pred = self.run.i_bitstream.ravel()
gt = self.run.o_bitstream.ravel()
if X.size == pred.size:
# Jitter the symbol space slightly to avoid covariance calculation errors
# when all data points are the same. Also, jitter more heavily for the swarm/scatter
# plot representation to improve readability.
plotting_data = pandas.DataFrame(
{
"X": X + 1e-6 * np.random.randn(*X.shape),
"Jittered": pred + 0.1 * np.random.randn(*pred.shape),
"Prediction": pred,
"Error": pred[: gt.size] != gt[: pred.size],
}
)
sns.scatterplot(
x="X",
y="Jittered",
hue="Prediction",
style="Error",
legend="brief",
alpha=0.3,
style_order=[False, True],
palette=sns.color_palette("pastel", n_colors=2),
data=plotting_data.query("Error == False"),
ax=axes[1],
)
if plotting_data.query("Error == True").size > 0:
n_colors = plotting_data.query("Error == True")["Prediction"].unique().size
sns.scatterplot(
x="X",
y="Jittered",
hue="Prediction",
style="Error",
legend=None,
palette=sns.color_palette(palette=None, n_colors=n_colors),
style_order=[False, True],
data=plotting_data.query("Error == True"),
ax=axes[1],
)
sns.distplot(plotting_data.query("Prediction == 0").X, ax=axes[0], color="C0")
sns.rugplot(
plotting_data.query("Prediction == 0").X, alpha=0.5, ax=axes[0], color="C0"
)
sns.distplot(plotting_data.query("Prediction == 1").X, ax=axes[0], color="C1")
sns.rugplot(
plotting_data.query("Prediction == 1").X, alpha=0.5, ax=axes[0], color="C1"
)
if plotting_data.query("Error == True").size > 0:
# ValueError's can be thrown when only a single error exists
try:
sns.distplot(
plotting_data.query("Prediction == 0").query("Error == True").X,
ax=axes[2],
color="C0",
)
rugplot(
plotting_data.query("Prediction == 0").query("Error == True").X,
alpha=0.5,
ax=axes[2],
color="C0",
top=True,
)
except ValueError:
pass
try:
sns.distplot(
plotting_data.query("Prediction == 1").query("Error == True").X,
ax=axes[2],
color="C1",
)
rugplot(
plotting_data.query("Prediction == 1").query("Error == True").X,
alpha=0.5,
ax=axes[2],
color="C1",
top=True,
)
except ValueError:
pass
axes[2].set_ylim(*reversed(axes[2].get_ylim()))
for axis in axes:
remove_spine(axis, "right")
for axis in axes[[1, 2]]:
remove_spine(axis, "bottom")
for axis in axes[[0, 1, -1]]:
remove_spine(axis, "top")
add_spine(axes[3], "bottom", ticks_only=True)
axes[0].grid(dashes=(5, 5), alpha=0.5, axis="x")
axes[1].grid(dashes=(5, 5), alpha=0.5, axis="x")
axes[2].grid(dashes=(5, 5), alpha=0.5, axis="x")
for axis in axes[:-1]:
axis.set_xlabel(None)
axis.set_ylabel(None)
axes[1].yaxis.set_ticks(np.unique(plotting_data.Prediction))
axes[0].set_ylabel("Measurement\ndistribution", color="gray")
axes[1].set_ylabel("Predicted\nsymbol", color="gray")
axes[2].set_ylabel("Error\ndistribution", color="gray")
f.align_ylabels(axes[:])
else:
# No known layer uses different encoding at the moment
pass
_x = np.linspace(X.min(), X.max(), 100).reshape(-1, 1)
_d = (
decision_device.decision_function(_x)
if hasattr(decision_device, "decision_function")
else decision_device.predict_proba(_x)
)
axes[-1].plot(_x, _d)
axes[-1].grid(dashes=(5, 5), alpha=0.5)
axes[-1].set_xlim(0.975 * X.min(), 1.025 * X.max())
# Labels
xlabel = self.run.i_rdpstream.columns[1].split(":")
xlabel = map(xlabel.__getitem__, [0, 1, -1])
xlabel = "{}, {} ({})".format(*xlabel)
axes[-1].set_xlabel(xlabel, color="gray")
ylabel = (
"Decision\nfunction"
if hasattr(decision_device, "decision_function")
else "Prediction\nprobability"
)
axes[-1].set_ylabel(ylabel, color="gray")
else:
pred = self.run.i_symstream
gt = self.run.o_symstream
x_min, x_max = X[:, 0].min(), X[:, 0].max()
y_min, y_max = X[:, 1].min(), X[:, 1].max()
resolution = 0.1
XX, YY = np.meshgrid(
np.arange(x_min, x_max, resolution), np.arange(y_min, y_max, resolution)
)
plotting_data = pandas.DataFrame(
{
"Re": X[:, 0],
"Im": X[:, 1],
"Symbol": [f"Symbol {x:2b}" for x in pred],
"Error": pred[: gt.size] != gt[: pred.size],
}
)
f, axis = plt.subplots(
1, 1, figsize=(self._width, self._width), dpi=self._screen_dpi
)
axis = sns.scatterplot(
x="Re", y="Im", hue="Symbol", style="Error", data=plotting_data, ax=axis
)
axis.set_ylabel("Quadrature")
axis.set_xlabel("In-phase")
axis.set_aspect("equal", "box")
axis.grid(dashes=(5, 5), alpha=0.5)
try:
params = AttributeDict()
if hasattr(cla, "decision_function"):
Z = cla.decision_function(np.c_[XX.ravel(), YY.ravel()])
step = 1
params.levels = np.arange(-1, 1 + step, step)
params.linewidths = 1.5 - np.abs(params.levels)
else:
Z = cla.predict_proba(np.c_[XX.ravel(), YY.ravel()])
step = 0.5
params.levels = np.arange(-1, 1 + step, step)
params.linewidths = 1.5 - np.abs(params.levels)
for dim in range(Z.shape[1]):
ZZ = Z[:, dim].reshape(XX.shape)
contours = plt.contour(
XX,
YY,
ZZ,
colors=[sns.color_palette()[dim]],
linestyles=["--", ":"][dim % 2],
**params,
)
plt.gca().clabel(contours, inline=1, fontsize=10)
except Exception:
pass
plt.suptitle("Symbol space", y=0.95, verticalalignment="bottom")
self._plot_save_helper(f, **kwargs)
def plot_error(self, roll: t.Optional[int] = None, **kwargs):
SUBPLOT_HEIGHT = 2
f, axes = plt.subplots(
2,
1,
figsize=(self._width, 1 + 2 * SUBPLOT_HEIGHT),
dpi=self._screen_dpi,
sharey=False,
sharex=False,
)
bit_mismatch_length = self.run.o_bitstream.size - self.run.i_bitstream.size
sym_mismatch_length = self.run.o_symstream.size - self.run.i_symstream.size
bit_errors = (
self.run.i_bitstream[: self.run.o_bitstream.size]
!= self.run.o_bitstream[: self.run.i_bitstream.size]
)
sym_errors = (
self.run.i_symstream[: self.run.o_symstream.size]
!= self.run.o_symstream[: self.run.i_symstream.size]
)
bit_x = np.arange(0, bit_errors.size)
sym_x = np.arange(0, sym_errors.size)
bit_mismatch = (
np.arange(bit_errors.size, bit_errors.size + bit_mismatch_length)
if bit_mismatch_length != 0
else None
)
sym_mismatch = (
np.arange(sym_errors.size, sym_errors.size + sym_mismatch_length)
if sym_mismatch_length != 0
else None
)
bit_roll = roll if roll else bit_errors.size // 10
sym_roll = roll if roll else sym_errors.size // 10
bit_errors_series = (
pandas.Series(bit_errors)
.rolling(window=bit_roll, min_periods=1, center=True)
.mean()
)
sym_errors_series = (
pandas.Series(sym_errors)
.rolling(window=sym_roll, min_periods=1, center=True)
.mean()
)
axes[1].plot(bit_x, bit_errors_series)
axes[0].plot(sym_x, sym_errors_series)
        if bit_mismatch is not None:
            axes[1].plot(bit_mismatch, np.ones_like(bit_mismatch, dtype=float), linestyle="--")
        if sym_mismatch is not None:
            axes[0].plot(sym_mismatch, np.ones_like(sym_mismatch, dtype=float), linestyle="--")
axes[1].set_ylim(0, 0.5)
axes[0].set_ylim(0, 1)
axes[1].set_xlim(0, bit_errors.size - 1 + bit_mismatch_length)
axes[0].set_xlim(0, sym_errors.size - 1 + sym_mismatch_length)
axes[1].set_title(
"Windowed bit error rate (window={})".format(bit_roll),
fontstyle="italic",
color="gray",
)
axes[1].set_xlabel("Bit #", color="gray")
axes[1].set_ylabel("Bit error rate", color="gray")
axes[0].set_title(
"Windowed symbol error rate (window={})".format(sym_roll),
fontstyle="italic",
color="gray",
)
axes[0].set_xlabel("Symbol #", color="gray")
axes[0].set_ylabel("Symbol error rate", color="gray")
plt.suptitle("Error rates", verticalalignment="bottom")
f.tight_layout()
self._plot_save_helper(f, **kwargs)
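    # Sketch (not part of the original class): the windowed error rates above are
    # just centred rolling means over boolean error vectors; the helper below shows
    # that computation in isolation.  The default window size is an illustrative
    # assumption.
    @staticmethod
    def _windowed_error_rate(errors, window=100):
        """Return the centred rolling mean of a boolean error array."""
        return (
            pandas.Series(np.asarray(errors, dtype=float))
            .rolling(window=window, min_periods=1, center=True)
            .mean()
        )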
def plot_timing(self, **kwargs):
SUBPLOT_HEIGHT = 2
f, axes = plt.subplots(
2,
2,
figsize=(self._width, 1 * 2 * SUBPLOT_HEIGHT),
dpi=self._screen_dpi,
sharey="row",
sharex=False,
gridspec_kw={"height_ratios": [1, 2]},
)
raw_timing = self.run.i_rdpstream.iloc[:, 0]
raw_timing_delta = raw_timing.diff()
if self.run.intermediates.rdp.timestamps.ndim < 3:
rdp_timing = pandas.Series(np.hstack(self.run.intermediates.rdp.timestamps))
else:
rdp_timing = pandas.Series(
np.hstack(self.run.intermediates.rdp.timestamps[:, 0, :])
)
rdp_timing_delta = rdp_timing.diff()
axes[0, 0].plot(np.linspace(0, 1, raw_timing.size), raw_timing)
axes[0, 1].plot(np.linspace(0, 1, rdp_timing.size), rdp_timing)
axes[1, 0].plot(np.linspace(0, 1, raw_timing_delta.size), raw_timing_delta)
axes[1, 1].plot(np.linspace(0, 1, rdp_timing_delta.size), rdp_timing_delta)
for axis in axes.ravel():
axis.set_xticks([])
axes[0, 0].set_ylabel("timestamp (s)", color="gray")
axes[1, 0].set_ylabel("timestamp diff (s)", color="gray")
axes[0, 0].set_title(
"Sample-wise timestamp differences\nRaw data", fontstyle="italic", color="gray"
)
axes[0, 1].set_title(
"Sample-wise timestamp differences\nInterpolated data",
fontstyle="italic",
color="gray",
)
sns.despine()
f.tight_layout()
self._plot_save_helper(f, **kwargs)
def plot_synchronisation(self, **kwargs):
SUBPLOT_HEIGHT = 2 # inches
timestamps = self.run.i_rdpstream.iloc[:, 0]
data = self.run.i_rdpstream.iloc[:, 1:]
dims = data.shape[1]
start = timestamps.iloc[0]
end = self.run.intermediates.rdp.slicing[10]
interval = timestamps.between(start, end)
ZOOM_BEFORE = 3
ZOOM_AFTER = 5
origin, *edges = self.run.intermediates.rdp.edge_detection
slicing = self.run.intermediates.rdp.slicing
zoom_start = slicing[0] - ZOOM_BEFORE * (slicing[1] - slicing[0])
zoom_end = slicing[0] + ZOOM_AFTER * (slicing[1] - slicing[0])
zoom_interval = timestamps.between(zoom_start, zoom_end)
# Create subplots: 3 columns, ndim rows
f, axes = plt.subplots(
dims,
2,
figsize=(self._width, 1 + dims * SUBPLOT_HEIGHT),
dpi=self._screen_dpi,
sharex="col",
sharey="row",
squeeze=False,
gridspec_kw={"width_ratios": [3, 2]},
)
lower_ylim, upper_ylim = np.quantile(data, [0.01, 0.99])
for i, axis in enumerate(axes[:, 0]):
axis.plot(
timestamps[interval],
data[interval].iloc[:, i],
marker="+",
markersize=2,
linewidth=0.5,
alpha=0.5,
)
if lower_ylim < upper_ylim:
axis.set_ylim(0.975 * lower_ylim, 1.025 * upper_ylim)
axis.set_ylabel(
"{}\n{}\n{} ({})".format(*data[interval].columns[i].split(":")), color="gray"
)
axis.vlines(
origin,
0,
1,
transform=axis.get_xaxis_transform(),
linewidth=1.5,
alpha=0.7,
linestyle="--",
color="k",
)
axes[0, 0].vlines(
edges,
0,
1,
transform=axes[0, 0].get_xaxis_transform(),
linewidth=1.5,
alpha=0.7,
linestyle="--",
color="C1",
)
for i, axis in enumerate(axes[:, 1]):
axis.plot(
timestamps[zoom_interval],
data[zoom_interval].iloc[:, i],
marker="+",
markersize=2,
linewidth=0.5,
alpha=0.5,
)
axis.tick_params(axis="x", rotation=45)
axis.vlines(
slicing[0 : ZOOM_AFTER + 1],
0,
1,
transform=axis.get_xaxis_transform(),
linewidth=1.0,
alpha=0.7,
linestyle=":",
color="C0",
)
axis.vlines(
origin,
0,
1,
transform=axis.get_xaxis_transform(),
linewidth=1.0,
alpha=0.7,
linestyle="--",
color="k",
)
sns.despine()
axes[-1, 0].set_xlabel(self.run.i_rdpstream.columns[0], color="gray")
axes[-1, 1].set_xlabel(self.run.i_rdpstream.columns[0], color="gray")
f.align_xlabels(axes[-1, :])
fmt = lambda x: np.format_float_scientific(x, precision=3)
axes[0, 0].set_title(
"Preprocessed data\nin interval {} to {}".format(*map(fmt, [start, end])),
fontstyle="italic",
color="gray",
)
axes[0, 1].set_title(
"Preprocessed data\nin interval {} to {}".format(*map(fmt, [zoom_start, zoom_end])),
fontstyle="italic",
color="gray",
)
plt.suptitle("Synchronisation", y=1.01, verticalalignment="bottom")
f.tight_layout()
plt.subplots_adjust(hspace=0.5)
self._plot_save_helper(f, **kwargs)
|
py | 1a368c8e94c2fbdb4caea9de61385db5f945ab49 | """
Unit tests for Python ICE-CASCADE hillslope erosion-deposition forward-time
centered-space model component
References:
(1) Holman, J. P. (2002). Heat transfer (pp. 75)
"""
import unittest
import numpy as np
from py_ice_cascade import hillslope
class ftcs_TestCase(unittest.TestCase):
"""Tests for hillslope ftcs model component"""
# arbitrary valid values for input arguments
hh = np.random.rand(10,10)
dd = 1.0
mm = np.ones((10,10))
kon = 1.0
koff = 0.0
bb = ['constant']*4
def test_input_valid_bc(self):
"""Allow all supported BC names, and fail for others"""
hillslope.ftcs_model(self.hh, self.mm, self.dd, self.kon, self.koff,
['constant', 'closed', 'open', 'mirror'])
hillslope.ftcs_model(self.hh, self.mm, self.dd, self.kon, self.koff,
['cyclic', 'cyclic', 'constant', 'constant'])
self.assertRaises(ValueError, hillslope.ftcs_model, self.hh, self.mm, self.dd, self.kon, self.koff,
['ooga_booga', 'cyclic', 'constant', 'constant'])
def test_input_cyclic_bc(self):
"""Unmatched cyclic BCs should throw an error"""
self.assertRaises(ValueError, hillslope.ftcs_model, self.hh, self.mm, self.dd, self.kon, self.koff,
['cyclic', 'constant', 'constant', 'constant'])
self.assertRaises(ValueError, hillslope.ftcs_model, self.hh, self.mm, self.dd, self.kon, self.koff,
['constant', 'cyclic', 'constant', 'constant'])
self.assertRaises(ValueError, hillslope.ftcs_model, self.hh, self.mm, self.dd, self.kon, self.koff,
['constant', 'constant', 'cyclic', 'constant'])
self.assertRaises(ValueError, hillslope.ftcs_model, self.hh, self.mm, self.dd, self.kon, self.koff,
['constant', 'constant', 'constant', 'cyclic'])
def test_consistent_dims(self):
"""Unequal array dims for height and mask throws error"""
self.assertRaises(ValueError, hillslope.ftcs_model, np.random.rand(11,11), self.mm, self.dd, self.kon, self.koff, self.bb)
self.assertRaises(ValueError, hillslope.ftcs_model, self.hh, np.random.rand(11,11), self.dd, self.kon, self.koff, self.bb)
def test_protect_model_dims(self):
"""Attempt to set model grid with incorrect size array throw error"""
model = hillslope.ftcs_model(self.hh, self.mm, self.dd, self.kon, self.koff, self.bb)
self.assertRaises(ValueError, model.set_height, np.random.rand(11,11))
self.assertRaises(ValueError, model.set_mask, np.random.rand(11,11))
def test_steady_bc_constant(self):
"""Compare against exact solution for sinusoid y=max and zero at other bnd"""
# parameters
h0 = 1.0
nx = 100
ny = 50
lx = 1.0
delta = lx/(nx-1)
ly = delta*(ny-1)
t_end = 0.25
epsilon = 0.001
# Case 1:
# # exact solution
xx = np.linspace(0, lx, nx, dtype=np.double).reshape(( 1,nx))
yy = np.linspace(0, ly, ny, dtype=np.double).reshape((ny, 1))
h_exact = h0/np.sinh(np.pi*ly/lx)*np.sin(np.pi*xx/lx)*np.sinh(np.pi*yy/lx)
# # numerical solution
h_init = np.zeros((ny, nx))
h_init[-1,:] = h0*np.sin(np.pi*xx/lx)
kappa = 1.0
mask = np.ones((ny,nx))
bcs = ['constant']*4
model = hillslope.ftcs_model(h_init, mask, delta, kappa, kappa, bcs)
model.run(t_end)
# # check errors
h_error = np.abs(model.get_height()-h_exact)
self.assertTrue(np.max(h_error) < epsilon)
# Case 2: rotate 90 degrees
# # exact solution
h_exact = np.rot90(h_exact)
# # numerical solution
h_init = np.rot90(h_init)
mask = np.rot90(mask)
bcs = ['constant']*4
model = hillslope.ftcs_model(h_init, mask, delta, kappa, kappa, bcs)
model.run(t_end)
# # check errors
h_error = np.abs(model.get_height()-h_exact)
self.assertTrue(np.max(h_error) < epsilon)
def test_steady_layered_kappa(self):
"""Compare against exact solution for diffusion in 2 layered material"""
# parameters
nx = 100
ny = 5
lx = 1.0
delta = lx/(nx-1)
xx = np.linspace(0, lx, nx, dtype=np.double).reshape((1,nx))*np.ones((ny,1))
l0 = 0.5*(xx[0,50]+xx[0,51]) # transition at midpoint
l1 = lx-l0
h0 = 1.0
h1 = 0.0
k0 = 1.0
k1 = 0.5
t_end = 1.5
epsilon = 0.001
# Case 1:
# # exact solution (resistance = l/k in series)
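        # Added explanatory comment: at steady state the flux through the two
        # layers is uniform, q = (h0 - h1) / (l0/k0 + l1/k1), i.e. the layer
        # "resistances" l/k add in series; the interface height hb then follows
        # by dropping q*l0/k0 across the first layer, as computed below.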
qq = (h0-h1)/(l0/k0+l1/k1)
hb = h0-qq*l0/k0 # or: hb = qq*l1/k1-h1
xx = np.linspace(0, lx, nx, dtype=np.double).reshape((1,nx))*np.ones((ny,1))
h_exact = np.where(xx <= l0, h0+(hb-h0)/l0*xx, hb+(h1-hb)/l1*(xx-l0))
# # numerical solution
h_init = np.zeros((ny, nx))
h_init[:,0] = h0
h_init[:,-1] = h1
mask = np.where(xx <= l0, True, False)
bcs = ['closed', 'closed', 'constant', 'constant']
model = hillslope.ftcs_model(h_init, mask, delta, k0, k1, bcs)
model.run(t_end)
# # check errors
h_error = np.abs(model.get_height()-h_exact)
self.assertTrue(np.max(h_error) < epsilon)
# Case 2: rotate 90 degrees
# # exact solution
h_exact = np.rot90(h_exact)
# # numerical solution
h_init = np.rot90(h_init)
mask = np.rot90(mask)
bcs = ['constant', 'constant', 'closed', 'closed']
model = hillslope.ftcs_model(h_init, mask, delta, k0, k1, bcs)
model.run(t_end)
# # check errors
h_error = np.abs(model.get_height()-h_exact)
self.assertTrue(np.max(h_error) < epsilon)
def test_mass_conservation(self):
"""Confirm mass conservation with closed and cyclic BCs"""
# parameters
nx = ny = 100
delta = 1.0/(nx-1)
h_init = np.linspace(0.0, 1.0, nx).reshape(1,nx)*np.linspace(0.0, 1.0, ny).reshape(ny,1)
h_init += 0.1*(np.random.rand(ny, nx)-0.5)
mask = np.where(np.random.rand(ny, nx)>0.5, True, False)
kappa1 = 1.0
kappa0 = 0.5
t_end = 0.25
epsilon = 0.0001
# Case 1
# # exact solution
h_total = np.sum(h_init)
# # numerical solution
bcs = ['cyclic', 'cyclic', 'closed', 'closed']
model = hillslope.ftcs_model(h_init, mask, delta, kappa1, kappa0, bcs)
model.run(t_end)
# # check error
h_error = np.abs(h_total-np.sum(model.get_height()))
self.assertTrue(h_error < epsilon)
# Case 2: rotate 90 deg
# # exact solution
# # numerical solution
h_init = np.rot90(h_init)
mask = np.rot90(mask)
bcs = ['closed', 'closed', 'cyclic', 'cyclic']
model = hillslope.ftcs_model(h_init, mask, delta, kappa1, kappa0, bcs)
model.run(t_end)
# # check error
h_error = np.abs(h_total-np.sum(model.get_height()))
self.assertTrue(h_error < epsilon)
if __name__ == '__main__':
unittest.main()
|
py | 1a368d7a20daadd3a5b30b156ed45ebd8761b280 | # -*- coding: utf-8 -*-
import copy
from pathlib import Path
from collections import OrderedDict, namedtuple
import numpy as np
from parfive import Downloader
import astropy.table
import astropy.units as u
import parfive
import sunpy
from sunpy import config
from sunpy.net.base_client import BaseClient
from sunpy.net.vso.attrs import Time, Wavelength, _Range
from sunpy.time import TimeRange
TIME_FORMAT = config.get("general", "time_format")
__all__ = ['QueryResponse', 'GenericClient']
class QueryResponseBlock:
"""
    Represents a single query result: its URL and source, along with other metadata.
"""
def __init__(self, map0, url, time=None):
"""
Parameters
----------
map0 : Dict with relevant information
url : Uniform Resource Locator
"""
self._map = map0
self.source = map0.get('source', "Data not Available")
self.provider = map0.get('provider', "Data not Available")
self.physobs = map0.get('physobs', "Data not Available")
self.instrument = map0.get('instrument', "Data not Available")
self.url = url
self.time = TimeRange(map0.get('Time_start'),
map0.get('Time_end')) if time is None else time
self.wave = map0.get('wavelength', np.NaN)
def iter_urls(amap, url_list, time):
"""Helper Function"""
for aurl, t in zip(url_list, time):
tmp = QueryResponseBlock(amap, aurl, t)
yield tmp
class QueryResponse(list):
"""
Container of QueryResponseBlocks
"""
def __init__(self, lst):
super().__init__(lst)
@classmethod
def create(cls, amap, lst, time=None):
if time is None:
time = [None] * len(lst)
return cls(iter_urls(amap, lst, time))
def time_range(self):
"""
Returns the time-span for which records are available
"""
return TimeRange(min(qrblock.time.start for qrblock in self),
max(qrblock.time.end for qrblock in self))
def response_block_properties(self):
"""
Returns a set of class attributes on all the response blocks.
"""
s = {a if not a.startswith('_') else None for a in dir(self[0])}
for resp in self[1:]:
s = s.intersection({a if not a.startswith('_') else None for a in dir(resp)})
s.remove(None)
return s
def __repr__(self):
return repr(type(self)) + repr(self._build_table())
def __str__(self):
return str(self._build_table())
def _repr_html_(self):
return self._build_table()._repr_html_()
def _build_table(self):
columns = OrderedDict((('Start Time', []), ('End Time', []),
('Source', []), ('Instrument', []),
('Wavelength', [])))
for i, qrblock in enumerate(self):
columns['Start Time'].append(
(qrblock.time.start).strftime(TIME_FORMAT))
columns['End Time'].append(
(qrblock.time.end).strftime(TIME_FORMAT))
columns['Source'].append(qrblock.source)
columns['Instrument'].append(qrblock.instrument)
columns['Wavelength'].append(str(u.Quantity(qrblock.wave)))
return astropy.table.Table(columns)
class GenericClient(BaseClient):
"""
Base class for simple web clients for the data retriever module. This class
is mainly designed for downloading data from FTP and HTTP type data
    sources, although it should in theory be general enough to get data from any
web service.
This class has two user facing methods
`~sunpy.net.dataretriever.client.GenericClient.search` and
`~sunpy.net.dataretriever.client.GenericClient.fetch` the former generates a
set of results for files available through the service the client is
querying and the latter downloads that data.
The `~sunpy.net.dataretriever.client.GenericClient.search` method takes a
set of `sunpy.net.attrs` objects and then converts these into a call to
`~sunpy.net.dataretriever.client.GenericClient._get_url_for_timerange`. It
does this through the `map\\_` dictionary which represents the
`~sunpy.net.attrs` objects as a dictionary.
"""
def __init__(self):
self.map_ = {}
def _makeargs(self, *args):
"""
Construct the `map\\_` internal representation of the query.
This `map\\_` dictionary is passed through to the
`_get_url_for_timerange` method to get the URL results.
Parameters
----------
\\*args: `tuple`
The query attributes.
"""
for elem in args:
if isinstance(elem, Time):
self.map_['TimeRange'] = TimeRange(elem.start, elem.end)
self.map_['Time_start'] = elem.start
self.map_['Time_end'] = elem.end
elif isinstance(elem, _Range):
a_min = elem.min
a_max = elem.max
if a_min == a_max:
self.map_[elem.__class__.__name__.lower()] = a_min
else:
if isinstance(elem, Wavelength):
prefix = 'wave'
else:
prefix = ''
minmax = namedtuple("minmax", "{0}min {0}max".format(prefix))
self.map_[elem.__class__.__name__.lower()] = minmax(a_min, a_max)
else:
if hasattr(elem, 'value'):
self.map_[elem.__class__.__name__.lower()] = elem.value
else:
# This will only get hit if the attr is something like
# Extent, which is a unique subclass of Attr. Currently no
# unidown Clients support this, so we skip this line.
# Anything that hits this will require special code to
# convert it into the map_ dict.
raise ValueError(
"GenericClient can not add {} to the map_ dictionary to pass "
"to the Client.".format(elem.__class__.__name__)) # pragma: no cover
self._makeimap()
@classmethod
def _get_url_for_timerange(cls, timerange, **kwargs):
"""
Method which generates URL results from a timerange and the `map\\_`
dictionary.
Parameters
----------
timerange: `sunpy.time.TimeRange`
The timerange to extract the URLs for.
\\*\\*kwargs: `dict`
Any extra keywords to refine the search. Generated from the
attributes passed to
`~sunpy.net.dataretriever.client.GenericClient.search`.
"""
raise NotImplementedError
def _makeimap(self):
"""
Add client specific information to the _map dict.
Normally this is extra metadata which is not downloaded, but known
a priori.
"""
raise NotImplementedError
@classmethod
def _can_handle_query(cls, *query):
"""
Method the
`sunpy.net.fido_factory.UnifiedDownloaderFactory`
class uses to dispatch queries to this Client.
"""
raise NotImplementedError
def _get_full_filenames(self, qres, filenames, path):
"""
Download a set of results.
Parameters
----------
qres : `~sunpy.net.dataretriever.QueryResponse`
Results to download.
filenames : list
List of base filenames (ex - "xyz.txt")
path : str
Path to download files to
Returns
-------
List of full pathnames for each file (download_directory + filename)
"""
        # Compute the download filepath for each result if not explicitly set
default_dir = Path(sunpy.config.get("downloads", "download_dir"))
paths = []
for i, filename in enumerate(filenames):
fname = Path(filename)
if path is None:
fname = default_dir / '{file}'
elif '{file}' not in str(path):
fname = path / '{file}'
temp_dict = qres[i]._map.copy()
temp_dict['file'] = str(filename)
fname = fname.expanduser()
fname = Path(str(fname).format(**temp_dict))
paths.append(fname)
return paths
def _get_time_for_url(self, urls):
"""
This method allows clients to customise the timerange displayed for
each URL.
It should return a sunpy.time.TimeRange object per URL.
"""
return NotImplemented
def search(self, *args, **kwargs):
"""
Query this client for a list of results.
Parameters
----------
\\*args: `tuple`
`sunpy.net.attrs` objects representing the query.
"""
GenericClient._makeargs(self, *args, **kwargs)
kwergs = copy.copy(self.map_)
kwergs.update(kwargs)
urls = self._get_url_for_timerange(
self.map_.get('TimeRange'), **kwergs)
if urls:
times = self._get_time_for_url(urls)
if times and times is not NotImplemented:
return QueryResponse.create(self.map_, urls, times)
return QueryResponse.create(self.map_, urls)
def fetch(self, qres, path=None, overwrite=False,
progress=True, downloader=None, wait=True):
"""
Download a set of results.
Parameters
----------
qres : `~sunpy.net.dataretriever.QueryResponse`
Results to download.
path : `str` or `pathlib.Path`, optional
Path to the download directory, or file template including the
``{file}`` string which will be replaced with the filename.
overwrite : `bool` or `str`, optional
Determine how to handle downloading if a file already exists with the
same name. If `False` the file download will be skipped and the path
returned to the existing file, if `True` the file will be downloaded
and the existing file will be overwritten, if `'unique'` the filename
will be modified to be unique.
progress : `bool`, optional
If `True` show a progress bar showing how many of the total files
have been downloaded. If `False`, no progress bar will be shown.
downloader : `parfive.Downloader`, optional
The download manager to use.
wait : `bool`, optional
If `False` ``downloader.download()`` will not be called. Only has
any effect if `downloader` is not `None`.
Returns
-------
results: `parfive.Results`
"""
if path is not None:
path = Path(path)
urls = [qrblock.url for qrblock in qres]
filenames = [url.split('/')[-1] for url in urls]
paths = self._get_full_filenames(qres, filenames, path)
dl_set = True
if not downloader:
dl_set = False
downloader = Downloader(progress=progress, overwrite=overwrite)
for url, filename in zip(urls, paths):
downloader.enqueue_file(url, filename=filename)
if dl_set and not wait:
return
return downloader.download()
def _link(self, map_):
"""Helper Function"""
paths = []
for k, v in map_.items():
paths.append(map_[k]['path'])
return paths
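# --- Illustrative sketch (not part of sunpy) ------------------------------------
# A minimal shape of a GenericClient subclass, following the docstrings above.
# The instrument name and URL pattern below are hypothetical placeholders, not a
# real data source.
class _ExampleClient(GenericClient):
    """Toy client: one file per query, keyed only on the start of the timerange."""

    @classmethod
    def _get_url_for_timerange(cls, timerange, **kwargs):
        # Hypothetical URL scheme: one daily file named after the start date.
        day = timerange.start.strftime('%Y%m%d')
        return ['http://data.example.invalid/daily/{}.fits'.format(day)]

    def _makeimap(self):
        # Static metadata merged into every QueryResponseBlock.
        self.map_['source'] = 'EXAMPLE'
        self.map_['instrument'] = 'example'
        self.map_['physobs'] = 'intensity'
        self.map_['provider'] = 'EXAMPLE'

    @classmethod
    def _can_handle_query(cls, *query):
        # Answer only queries that combine a Time attr with Instrument('example').
        chkattr = ['Time', 'Instrument']
        chklist = [x.__class__.__name__ in chkattr for x in query]
        for x in query:
            if x.__class__.__name__ == 'Instrument' and str(x.value).lower() == 'example':
                return all(chklist)
        return False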
|
py | 1a368da458f2274868cb71e4af967a56f0c9751c | """Produce custom labelling for a colorbar.
Contributed by Scott Sinclair
"""
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import cm
from numpy.random import randn
# Make plot with vertical (default) colorbar
fig, ax = plt.subplots()
data = np.clip(randn(250, 250), -1, 1)
cax = ax.imshow(data, interpolation='nearest', cmap=cm.coolwarm)
ax.set_title('Gaussian noise with vertical colorbar')
# Add colorbar, make sure to specify tick locations to match desired ticklabels
cbar = fig.colorbar(cax, ticks=[-1, 0, 1])
cbar.ax.set_yticklabels(['< -1', '0', '> 1'])# vertically oriented colorbar
# Make plot with horizontal colorbar
fig, ax = plt.subplots()
data = np.clip(randn(250, 250), -1, 1)
cax = ax.imshow(data, interpolation='nearest', cmap=cm.afmhot)
ax.set_title('Gaussian noise with horizontal colorbar')
cbar = fig.colorbar(cax, ticks=[-1, 0, 1], orientation='horizontal')
cbar.ax.set_xticklabels(['Low', 'Medium', 'High'])# horizontal colorbar
plt.show()
|
py | 1a368e6c0f1924dbcad83d916ec6ae10b2c55f16 | # -*- coding: utf-8 -*-
'''
Manage Elasticsearch Domains
============================
.. versionadded:: 2016.11.0
Create and destroy Elasticsearch domains. Be aware that this interacts with Amazon's services,
and so may incur charges.
This module uses ``boto3``, which can be installed via package, or pip.
This module accepts explicit vpc credentials but can also utilize
IAM roles assigned to the instance through Instance Profiles. Dynamic
credentials are then automatically obtained from AWS API and no further
configuration is necessary. More information available `here
<http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html>`_.
If IAM roles are not used you need to specify them either in a pillar file or
in the minion's config file:
.. code-block:: yaml
vpc.keyid: GKTADJGHEIQSXMKKRBJ08H
vpc.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
It's also possible to specify ``key``, ``keyid`` and ``region`` via a profile,
either passed in as a dict, or as a string to pull from pillars or minion
config:
.. code-block:: yaml
myprofile:
keyid: GKTADJGHEIQSXMKKRBJ08H
key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
region: us-east-1
.. code-block:: yaml
Ensure domain exists:
boto_elasticsearch_domain.present:
- DomainName: mydomain
- profile='user-credentials'
- ElasticsearchVersion: "2.3"
- ElasticsearchClusterConfig:
            InstanceType: "t2.micro.elasticsearch"
InstanceCount: 1
DedicatedMasterEnabled: False
ZoneAwarenessEnabled: False
- EBSOptions:
EBSEnabled: True
VolumeType: "gp2"
VolumeSize: 10
Iops: 0
- AccessPolicies:
Version: "2012-10-17"
Statement:
- Effect: "Allow"
- Principal:
AWS: "*"
- Action:
- "es:*"
              - Resource: "arn:aws:es:*:111111111111:domain/mydomain/*"
- Condition:
IpAddress:
"aws:SourceIp":
- "127.0.0.1",
- "127.0.0.2",
- SnapshotOptions:
AutomatedSnapshotStartHour: 0
- AdvancedOptions:
            rest.action.multi.allow_explicit_index: "true"
- Tags:
a: "b"
- region: us-east-1
- keyid: GKTADJGHEIQSXMKKRBJ08H
- key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
'''
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
import os
# Import Salt libs
import salt.utils.json
# Import 3rd-party libs
from salt.ext import six
log = logging.getLogger(__name__)
def __virtual__():
'''
Only load if boto is available.
'''
return 'boto_elasticsearch_domain' if 'boto_elasticsearch_domain.exists' in __salt__ else False
def _compare_json(current, desired):
return __utils__['boto3.json_objs_equal'](current, desired)
def present(name, DomainName,
ElasticsearchClusterConfig=None,
EBSOptions=None,
AccessPolicies=None,
SnapshotOptions=None,
AdvancedOptions=None,
Tags=None,
region=None, key=None, keyid=None, profile=None,
ElasticsearchVersion="1.5"):
'''
Ensure domain exists.
name
The name of the state definition
DomainName
Name of the domain.
ElasticsearchClusterConfig
Configuration options for an Elasticsearch domain. Specifies the
instance type and number of instances in the domain cluster.
InstanceType (string) --
The instance type for an Elasticsearch cluster.
InstanceCount (integer) --
The number of instances in the specified domain cluster.
DedicatedMasterEnabled (boolean) --
A boolean value to indicate whether a dedicated master node is enabled.
See About Dedicated Master Nodes for more information.
ZoneAwarenessEnabled (boolean) --
A boolean value to indicate whether zone awareness is enabled. See About
Zone Awareness for more information.
DedicatedMasterType (string) --
The instance type for a dedicated master node.
DedicatedMasterCount (integer) --
Total number of dedicated master nodes, active and on standby, for the
cluster.
EBSOptions
Options to enable, disable and specify the type and size of EBS storage
volumes.
EBSEnabled (boolean) --
Specifies whether EBS-based storage is enabled.
VolumeType (string) --
Specifies the volume type for EBS-based storage.
VolumeSize (integer) --
Integer to specify the size of an EBS volume.
Iops (integer) --
Specifies the IOPD for a Provisioned IOPS EBS volume (SSD).
AccessPolicies
IAM access policy
SnapshotOptions
Option to set time, in UTC format, of the daily automated snapshot.
Default value is 0 hours.
AutomatedSnapshotStartHour (integer) --
Specifies the time, in UTC format, when the service takes a daily
automated snapshot of the specified Elasticsearch domain. Default value
is 0 hours.
AdvancedOptions
Option to allow references to indices in an HTTP request body. Must be
false when configuring access to individual sub-resources. By default,
the value is true .
region
Region to connect to.
key
Secret key to be used.
keyid
Access key to be used.
profile
A dict with region, key and keyid, or a pillar key (string) that
contains a dict with region, key and keyid.
ElasticsearchVersion
String of format X.Y to specify version for the Elasticsearch domain eg.
"1.5" or "2.3".
'''
ret = {'name': DomainName,
'result': True,
'comment': '',
'changes': {}
}
if ElasticsearchClusterConfig is None:
ElasticsearchClusterConfig = {
'DedicatedMasterEnabled': False,
'InstanceCount': 1,
'InstanceType': 'm3.medium.elasticsearch',
'ZoneAwarenessEnabled': False
}
if EBSOptions is None:
EBSOptions = {
'EBSEnabled': False,
}
if SnapshotOptions is None:
SnapshotOptions = {
'AutomatedSnapshotStartHour': 0
}
if AdvancedOptions is None:
AdvancedOptions = {
'rest.action.multi.allow_explicit_index': 'true'
}
if Tags is None:
Tags = {}
if AccessPolicies is not None and isinstance(AccessPolicies, six.string_types):
try:
AccessPolicies = salt.utils.json.loads(AccessPolicies)
except ValueError as e:
ret['result'] = False
ret['comment'] = 'Failed to create domain: {0}.'.format(e.message)
return ret
r = __salt__['boto_elasticsearch_domain.exists'](DomainName=DomainName,
region=region, key=key, keyid=keyid, profile=profile)
if 'error' in r:
ret['result'] = False
ret['comment'] = 'Failed to create domain: {0}.'.format(r['error']['message'])
return ret
if not r.get('exists'):
if __opts__['test']:
ret['comment'] = 'Domain {0} is set to be created.'.format(DomainName)
ret['result'] = None
return ret
r = __salt__['boto_elasticsearch_domain.create'](DomainName=DomainName,
ElasticsearchClusterConfig=ElasticsearchClusterConfig,
EBSOptions=EBSOptions,
AccessPolicies=AccessPolicies,
SnapshotOptions=SnapshotOptions,
AdvancedOptions=AdvancedOptions,
ElasticsearchVersion=str(ElasticsearchVersion), # future lint: disable=blacklisted-function
region=region, key=key,
keyid=keyid, profile=profile)
if not r.get('created'):
ret['result'] = False
ret['comment'] = 'Failed to create domain: {0}.'.format(r['error']['message'])
return ret
_describe = __salt__['boto_elasticsearch_domain.describe'](DomainName,
region=region, key=key, keyid=keyid, profile=profile)
ret['changes']['old'] = {'domain': None}
ret['changes']['new'] = _describe
ret['comment'] = 'Domain {0} created.'.format(DomainName)
return ret
ret['comment'] = os.linesep.join([ret['comment'], 'Domain {0} is present.'.format(DomainName)])
ret['changes'] = {}
# domain exists, ensure config matches
_status = __salt__['boto_elasticsearch_domain.status'](DomainName=DomainName,
region=region, key=key, keyid=keyid,
profile=profile)['domain']
if _status.get('ElasticsearchVersion') != str(ElasticsearchVersion): # future lint: disable=blacklisted-function
ret['result'] = False
ret['comment'] = (
'Failed to update domain: version cannot be modified '
'from {0} to {1}.'.format(
_status.get('ElasticsearchVersion'),
str(ElasticsearchVersion) # future lint: disable=blacklisted-function
)
)
return ret
_describe = __salt__['boto_elasticsearch_domain.describe'](DomainName=DomainName,
region=region, key=key, keyid=keyid,
profile=profile)['domain']
_describe['AccessPolicies'] = salt.utils.json.loads(_describe['AccessPolicies'])
# When EBSEnabled is false, describe returns extra values that can't be set
if not _describe.get('EBSOptions', {}).get('EBSEnabled'):
opts = _describe.get('EBSOptions', {})
opts.pop('VolumeSize', None)
opts.pop('VolumeType', None)
comm_args = {}
need_update = False
es_opts = {'ElasticsearchClusterConfig': ElasticsearchClusterConfig,
'EBSOptions': EBSOptions,
'AccessPolicies': AccessPolicies,
'SnapshotOptions': SnapshotOptions,
'AdvancedOptions': AdvancedOptions}
for k, v in six.iteritems(es_opts):
if not _compare_json(v, _describe[k]):
need_update = True
comm_args[k] = v
ret['changes'].setdefault('new', {})[k] = v
ret['changes'].setdefault('old', {})[k] = _describe[k]
if need_update:
if __opts__['test']:
msg = 'Domain {0} set to be modified.'.format(DomainName)
ret['comment'] = msg
ret['result'] = None
return ret
ret['comment'] = os.linesep.join([ret['comment'], 'Domain to be modified'])
r = __salt__['boto_elasticsearch_domain.update'](DomainName=DomainName,
region=region, key=key,
keyid=keyid, profile=profile,
**comm_args)
if not r.get('updated'):
ret['result'] = False
ret['comment'] = 'Failed to update domain: {0}.'.format(r['error'])
ret['changes'] = {}
return ret
return ret
def absent(name, DomainName,
region=None, key=None, keyid=None, profile=None):
'''
Ensure domain with passed properties is absent.
name
The name of the state definition.
DomainName
Name of the domain.
region
Region to connect to.
key
Secret key to be used.
keyid
Access key to be used.
profile
A dict with region, key and keyid, or a pillar key (string) that
contains a dict with region, key and keyid.
'''
ret = {'name': DomainName,
'result': True,
'comment': '',
'changes': {}
}
r = __salt__['boto_elasticsearch_domain.exists'](DomainName,
region=region, key=key, keyid=keyid, profile=profile)
if 'error' in r:
ret['result'] = False
ret['comment'] = 'Failed to delete domain: {0}.'.format(r['error']['message'])
return ret
if r and not r['exists']:
ret['comment'] = 'Domain {0} does not exist.'.format(DomainName)
return ret
if __opts__['test']:
ret['comment'] = 'Domain {0} is set to be removed.'.format(DomainName)
ret['result'] = None
return ret
r = __salt__['boto_elasticsearch_domain.delete'](DomainName,
region=region, key=key,
keyid=keyid, profile=profile)
if not r['deleted']:
ret['result'] = False
ret['comment'] = 'Failed to delete domain: {0}.'.format(r['error']['message'])
return ret
ret['changes']['old'] = {'domain': DomainName}
ret['changes']['new'] = {'domain': None}
ret['comment'] = 'Domain {0} deleted.'.format(DomainName)
return ret
|
py | 1a368fe21ab2a137b4e42a3730a08f1264b9df8a | # -*- coding: utf-8 -*-
"""Mathematical sequences.
We provide a compact syntax to create lazy constant, arithmetic, geometric and
power sequences. Numeric (int, float, mpmath) and symbolic (SymPy) formats are supported.
We avoid accumulating roundoff error when used with floating-point.
We also provide arithmetic operation support for iterables (termwise).
The function versions of the arithmetic operations have an **s** prefix (short
for mathematical **sequence**), because in Python the **i** prefix (which could
stand for *iterable*) is already used to denote the in-place operators.
We provide the Cauchy product, and its generalization, the diagonal
combination-reduction, for two (possibly infinite) iterables.
Finally, we provide ready-made generators that yield some common sequences
(currently, the Fibonacci numbers and the prime numbers).
"""
__all__ = ["s", "m", "mg", "almosteq",
"sadd", "ssub", "sabs", "spos", "sneg", "sinvert", "smul", "spow",
"struediv", "sfloordiv", "smod", "sdivmod",
"sround", "strunc", "sfloor", "sceil",
"slshift", "srshift", "sand", "sxor", "sor",
"cauchyprod", "diagonal_reduce",
"fibonacci", "primes"]
from itertools import repeat, takewhile, count
from functools import wraps
from operator import add as primitive_add, mul as primitive_mul, \
pow as primitive_pow, mod as primitive_mod, \
floordiv as primitive_floordiv, truediv as primitive_truediv, \
sub as primitive_sub, \
neg as primitive_neg, pos as primitive_pos, \
and_ as primitive_and, xor as primitive_xor, or_ as primitive_or, \
lshift as primitive_lshift, rshift as primitive_rshift, \
invert as primitive_invert
from .it import take, rev
from .gmemo import imemoize, gmemoize
class _NoSuchType:
pass
# stuff to support float, mpf and SymPy expressions transparently
#
from sys import float_info
from math import log as math_log, copysign, trunc, floor, ceil
try:
from mpmath import mpf, almosteq as mpf_almosteq
except ImportError:
mpf = type(_NoSuchType())
mpf_almosteq = None
def _numsign(x):
if x == 0:
return 0
return int(copysign(1.0, x))
try:
from sympy import log as _symlog, Expr as _symExpr, sign as _symsign
def log(x, b):
if isinstance(x, _symExpr):
# https://stackoverflow.com/questions/46129259/how-to-simplify-logarithm-of-exponent-in-sympy
return _symlog(x, b).expand(force=True)
return math_log(x, b)
def sign(x):
if isinstance(x, _symExpr):
return _symsign(x)
return _numsign(x)
except ImportError:
log = math_log
sign = _numsign
_symExpr = type(_NoSuchType())
def almosteq(a, b, tol=1e-8):
"""Almost-equality that supports several formats.
The tolerance ``tol`` is used for the builtin ``float`` and ``mpmath.mpf``.
For ``mpmath.mpf``, we just delegate to ``mpmath.almosteq``, with the given
``tol``. For ``float``, we use the strategy suggested in:
https://floating-point-gui.de/errors/comparison/
Anything else, for example SymPy expressions, strings, and containers
(regardless of content), is tested for exact equality.
**CAUTION**: Although placed in ``unpythonic.mathseq``, this function
**does not** support iterables; rather, it is a low-level tool that is
exposed in the public API in the hope it may be useful elsewhere.
"""
if a == b: # infinities and such, plus any non-float type
return True
if isinstance(a, mpf) and isinstance(b, mpf):
return mpf_almosteq(a, b, tol)
# compare as native float if only one is an mpf
elif isinstance(a, mpf) and isinstance(b, float):
a = float(a)
elif isinstance(a, float) and isinstance(b, mpf):
b = float(b)
if not all(isinstance(x, float) for x in (a, b)):
return False # non-float type, already determined that a != b
min_normal = float_info.min
max_float = float_info.max
d = abs(a - b)
if a == 0 or b == 0 or d < min_normal:
return d < tol * min_normal
return d / min(abs(a) + abs(b), max_float) < tol
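# Illustrative usage (comment added; not part of the original source):
#   almosteq(0.1 + 0.2, 0.3)   -> True   (floats compared with the relative-error strategy above)
#   almosteq("cat", "dog")     -> False  (non-float types fall back to exact equality)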
def s(*spec):
"""Create a lazy mathematical sequence.
The sequence is returned as a generator object that supports infix math
(see ``m``).
**Formats**
Below, any ellipsis ``...`` inside an ``s()`` is meant literally.
The sequence specification may have an optional final element, which must
belong to the sequence being described. If a final element is specified,
a finite sequence is returned, terminating after the given final element.
*Convenience fallback*:
As a fallback, we accept an explicit enumeration of all elements of the
desired sequence. This returns a genexpr that reads from a tuple. Syntax::
s(1, 2, 3, 4, 5)
This mainly exists so that the ``...``, if any, can be quickly dropped
when testing/debugging the user program.
*Constant sequence*: ``[a0, identity] -> a0, a0, a0, ...``
Syntax::
s(1, ...)
Constant sequences **do not** support the optional-final-element termination
syntax, because the number of terms cannot be computed from the value of the
final element.
*Arithmetic sequence*: ``[a0, +d] -> a0, a0 + d, a0 + 2 d, ...``
Two terms required, more allowed if consistent. Syntax::
s(1, 2, ...)
s(1, 2, 3, ...)
s(1, 2, 3, ..., 10)
*Geometric sequence*: ``[a0, *r] -> a0, a0*r, a0*r**2, ...``
Three terms required, more allowed if consistent. Syntax::
s(1, 2, 4, ...)
s(1, -2, 4, ...) # alternating geometric sequence
s(1, 2, 4, ..., 512)
s(1, -2, 4, ..., -512)
s(1, 1/2, 1/4, ...)
s(1, -1/2, 1/4, ...)
s(1, 1/2, 1/4, ..., 1/512)
s(1, -1/2, 1/4, ..., -1/512)
Specified as ``s(a0, a1, a2, ...)``, it must hold that ``a0, a1, a2 != 0``.
The sequence ``a0, a0**2, a0**3, ...`` is just a special case of a geometric
sequence, with ``r = a0``, so e.g. ``s(3, 9, 27, ...)`` works as expected.
*Power sequence*: ``[a0, **p] -> a0, a0**p, a0**(p**2), ...``
Three terms required, more allowed if consistent. Syntax::
s(2, 4, 16, ...)
s(2, 2**2, 2**4, ...) # equivalent
s(2, 2**(1/2), 2**(1/4), ...)
Specified as ``s(a0, a1, a2, ...)``, it must hold that ``|a0| != 1`` and
``a1, a2 != 0``.
If ``spec`` matches none of the above, ``SyntaxError`` is raised at runtime.
**Symbolic input**
We support symbolic (SymPy) input for any of the formats::
from sympy import symbols
x0 = symbols("x0", real=True)
k = symbols("x0", positive=True)
s(x0, ...)
s(x0, x0 + k, ...)
s(x0, x0 + k, ..., x0 + 5*k)
s(x0, x0*k, x0*k**2, ...)
s(x0, -x0*k, x0*k**2, ...)
s(x0, x0*k, x0*k**2, ..., x0*k**5)
s(x0, -x0*k, x0*k**2, ..., -x0*k**5)
x0, k = symbols("x0, k", positive=True)
s(x0, x0**k, x0**(k**2), ...)
s(x0, x0**k, x0**(k**2), ..., x0**(k**5))
For a symbolic geometric sequence with a final term, it is important that
SymPy can determine the correct sign; hence in this example we have declared
``k`` as positive.
**Composition**
We support only these four basic kinds of sequences, because many more
can be built using them as building blocks. For example::
1, 4, 9, 16, ...: s(1, 2, ...)**2
1, 1/2, 1/3, 1/4, ...: 1 / s(1, 2, ...)
Sequences returned by ``s()`` support infix math syntax, so the above
expressions with ``s()`` are valid Python code.
A symbolic example::
x = symbols("x", real=True) # SymPy
px = lambda stream: stream * s(1, x, x**2, ...) # powers of x
s1 = px(s(1, 3, 5, ...)) # 1, 3*x, 5*x**2, ...
s2 = px(s(2, 4, 6, ...)) # 2, 4*x, 6*x**2, ...
**Notes**
Symbolic input will create a generator that yields SymPy expressions.
For floating-point input, the created generators avoid accumulating roundoff
error (unlike e.g. ``itertools.count``). Even for a long but finite arithmetic
sequence where the start value and the diff are not exactly representable
by base-2 floats, the final value should be within 1 ULP of the true value.
This is because once the input has been analyzed, the terms are generated
from the closed-form formula for the nth term of the sequence that was
described by the input; nothing is actually accumulated.
Note this reverse-engineers the given numbers to figure out which case the
input corresponds to. Although we take some care to avoid roundoff errors
in this analysis when used with floating-point input, it may sometimes occur
that roundoff prevents correct detection of the sequence (especially for
power sequences, since their detection requires taking logarithms).
Inspired by Haskell's sequence notation.
"""
origspec = spec # for error messages
def is_almost_int(x):
try:
return almosteq(float(round(x)), x)
except TypeError: # likely a SymPy expression that didn't simplify to a number
return False
def analyze(*spec): # raw spec (part before '...' if any) --> description
l = len(spec)
if l == 1:
a0 = spec[0]
return ("const", a0, None)
elif l == 2:
a0, a1 = spec
d1 = a1 - a0
if d1 == 0:
return ("const", a0, None)
return ("arith", a0, d1)
elif l == 3:
a0, a1, a2 = spec
d1 = a1 - a0
d2 = a2 - a1
if d2 == d1 == 0: # a0, a0, a0, ... [a0, identity]
return ("const", a0, None)
if almosteq(d2, d1): # a0, a0 + d, a0 + 2 d, ... [a0, +d]
d = (d1 + d2)/2 # average to give roundoff errors a chance to cancel
return ("arith", a0, d)
if a0 != 0 and a1 != 0 and a2 != 0:
r1 = a1/a0
r2 = a2/a1
if almosteq(r2, r1): # a0, a0*r, a0*r**2, ... [a0, *r]
r = (r1 + r2)/2
return ("geom", a0, r)
if abs(a0) != 1 and a1 != 0 and a2 != 0:
p1 = log(abs(a1), abs(a0))
p2 = log(abs(a2), abs(a1))
if almosteq(p1, p2): # a0, a0**p, (a0**p)**p, ... [a0, **p]
p = (p1 + p2)/2
return ("power", a0, p)
raise SyntaxError("Specification did not match any supported formula: '{}'".format(origspec))
else: # more elements are optional but must be consistent
data = [analyze(*triplet) for triplet in zip(spec, spec[1:], spec[2:])]
seqtypes, x0s, ks = zip(*data)
def isconst(*xs):
first, *rest = xs
return all(almosteq(x, first) for x in rest)
            if not isconst(*seqtypes) or not isconst(*ks):
raise SyntaxError("Inconsistent specification '{}'".format(origspec))
return data[0]
# final term handler for finite sequences - compute how many terms we should generate in total
infty = float("inf")
def nofterms(desc, elt): # return total number of terms in sequence or False
seqtype, x0, k = desc
if seqtype == "const":
if elt == x0:
return infty # cannot determine how many items in a '...''d constant sequence
elif seqtype == "arith":
# elt = x0 + a*k --> a = (elt - x0) / k
a = (elt - x0) / k
if is_almost_int(a) and a > 0:
return 1 + round(a) # fencepost
elif seqtype == "geom":
# elt = x0*(k**a) --> k**a = (elt/x0) --> a = logk(elt/x0)
a = log(abs(elt/x0), abs(k))
if is_almost_int(a) and a > 0:
if not almosteq(x0*(k**a), elt): # check parity of final term, could be an alternating sequence
return False
return 1 + round(a)
else: # seqtype == "power":
# elt = x0**(k**a) --> k**a = logx0 elt --> a = logk (logx0 elt)
a = log(log(abs(elt), abs(x0)), abs(k))
if is_almost_int(a) and a > 0:
if not almosteq(x0**(k**a), elt): # parity
return False
return 1 + round(a)
return False
# analyze the specification
if Ellipsis not in spec: # convenience fallback
return (x for x in spec)
else:
*spec, last = spec
if last is Ellipsis:
seqtype, x0, k = analyze(*spec)
n = infty
else:
*spec, dots = spec
if dots is not Ellipsis:
raise SyntaxError("Expected s(a0, a1, ...) or s(a0, a1, ..., an); got '{}'".format(origspec))
desc = analyze(*spec)
n = nofterms(desc, last)
if n is False:
raise SyntaxError("The final element, if present, must belong to the specified sequence; got '{}'".format(origspec))
elif n is infty:
raise SyntaxError("The length of a constant sequence cannot be determined from a final element; got '{}'".format(origspec))
seqtype, x0, k = desc
if not spec:
raise SyntaxError("Expected at least one term before the '...'; got '{}'".format(origspec))
# generate the sequence
if seqtype == "const":
return m(repeat(x0) if n is infty else repeat(x0, n))
elif seqtype == "arith":
# itertools.count doesn't avoid accumulating roundoff error for floats, so we implement our own.
# This should be, for any j, within 1 ULP of the true result.
def arith():
j = 0
while True:
yield x0 + j*k
j += 1
return m(arith() if n is infty else take(n, arith()))
elif seqtype == "geom":
if isinstance(k, _symExpr) or abs(k) >= 1:
def geom():
j = 0
while True:
yield x0*(k**j)
j += 1
else:
# e.g. "3" can be represented exactly as a base-2 float, but "1/3" can't,
# so it's better to do the arithmetic with the inverse and then use division.
#
# Note that 1/(1/3) --> 3.0 even for floats, so we don't actually
# need to modify the detection algorithm to account for this.
kinv = 1/k
def geom():
j = 0
while True:
yield x0/(kinv**j)
j += 1
return m(geom() if n is infty else take(n, geom()))
else: # seqtype == "power":
if isinstance(k, _symExpr) or abs(k) >= 1:
def power():
j = 0
while True:
yield x0**(k**j)
j += 1
else:
kinv = 1/k
def power():
j = 0
while True:
yield x0**(1/(kinv**j))
j += 1
return m(power() if n is infty else take(n, power()))
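# Quick sanity checks (added as illustration; not from the original source):
#   tuple(s(1, 2, ..., 5))     == (1, 2, 3, 4, 5)    # finite arithmetic sequence
#   tuple(s(1, 2, 4, ..., 16)) == (1, 2, 4, 8, 16)   # finite geometric sequence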
# -----------------------------------------------------------------------------
# https://docs.python.org/3/reference/datamodel.html#emulating-numeric-types
class m:
"""Endow any iterable with infix math support (termwise).
The original iterable is saved to an attribute, and ``m.__iter__`` redirects
to it. No caching is performed, so performing a math operation on the m'd
iterable will still consume the iterable (if it is consumable, for example
a generator).
This adds infix math only; to apply a function (e.g. ``sin``) termwise to
an iterable, use the comprehension syntax or ``map``, as usual.
The mathematical sequences (Python-technically, iterables) returned by
``s()`` are automatically m'd, as is the result of any infix arithmetic
operation performed on an already m'd iterable.
**CAUTION**: When an operation meant for general iterables is applied to an
m'd iterable, the math support vanishes (because the operation returns a
general iterable, not an m'd one), but can be restored by m'ing again.
**NOTE**: The function versions of the operations (``sadd`` etc.) work on
general iterables (so you don't need to ``m`` their inputs), and return
an m'd iterable. The ``m`` operation is only needed for infix math, to make
arithmetic-heavy code more readable.
Examples::
a = s(1, 3, ...)
b = s(2, 4, ...)
c = a + b
assert isinstance(c, m) # the result still has math support
assert tuple(take(5, c)) == (3, 7, 11, 15, 19) # + was applied termwise
d = 1 / (a**2 + b**2)
assert isinstance(d, m)
e = take(5, c) # general iterable operation drops math support...
assert not isinstance(e, m)
f = m(take(5, c)) # ...and it can be restored by m'ing again.
assert isinstance(f, m)
"""
def __init__(self, iterable):
self._g = iterable
def __iter__(self):
return self._g
def __add__(self, other):
return sadd(self, other)
def __radd__(self, other):
return sadd(other, self)
def __sub__(self, other):
return ssub(self, other)
def __rsub__(self, other):
return ssub(other, self)
def __abs__(self):
return sabs(self)
def __pos__(self):
return self
def __neg__(self):
return sneg(self)
def __invert__(self):
return sinvert(self)
def __mul__(self, other):
return smul(self, other)
def __rmul__(self, other):
return smul(other, self)
def __truediv__(self, other):
return struediv(self, other)
def __rtruediv__(self, other):
return struediv(other, self)
def __floordiv__(self, other):
return sfloordiv(self, other)
def __rfloordiv__(self, other):
return sfloordiv(other, self)
def __divmod__(self, other):
return sdivmod(self, other)
def __rdivmod__(self, other):
return sdivmod(other, self)
def __mod__(self, other):
return smod(self, other)
def __rmod__(self, other):
return smod(other, self)
def __pow__(self, other, *mod):
return spow(self, other, *mod)
def __rpow__(self, other):
return spow(other, self)
def __round__(self, *ndigits):
return sround(self, *ndigits)
def __trunc__(self):
return strunc(self)
def __floor__(self):
return sfloor(self)
def __ceil__(self):
return sceil(self)
def __lshift__(self, other):
return slshift(self, other)
def __rlshift__(self, other):
return slshift(other, self)
def __rshift__(self, other):
return srshift(self, other)
def __rrshift__(self, other):
return srshift(other, self)
def __and__(self, other):
return sand(self, other)
def __rand__(self, other):
return sand(other, self)
def __xor__(self, other):
return sxor(self, other)
def __rxor__(self, other):
return sxor(other, self)
def __or__(self, other):
return sor(self, other)
def __ror__(self, other):
return sor(other, self)
# TODO: conversion (bool, complex, int, float) and comparison operators? Do we want those?
def mg(gfunc):
"""Decorator: make gfunc m() the returned generator instances.
Return a new gfunc, which passes all its arguments to the original ``gfunc``.
Example::
a = mg(imemoize(s(1, 2, ...)))
assert last(take(5, a())) == 5
assert last(take(5, a())) == 5
assert last(take(5, a() + a())) == 10
"""
@wraps(gfunc)
def mathify(*args, **kwargs):
return m(gfunc(*args, **kwargs))
return mathify
# The *settings mechanism is used by round and pow.
# These are recursive to support iterables containing iterables (e.g. an iterable of math sequences).
def _make_termwise_stream_unop(op, *settings):
def sop(a):
if hasattr(a, "__iter__"):
            return m(sop(x) for x in a)  # settings are applied at the leaf op() call below
return op(a, *settings)
return sop
def _make_termwise_stream_binop(op, *settings):
def sop(a, b):
ig = [hasattr(x, "__iter__") for x in (a, b)]
if all(ig):
# it's very convenient here that zip() terminates when the shorter input runs out.
            return m(sop(x, y) for x, y in zip(a, b))
        elif ig[0]:
            c = b
            return m(sop(x, c) for x in a)
        elif ig[1]:
            c = a
            return m(sop(c, y) for y in b)  # careful; op might not be commutative
else: # not any(ig):
return op(a, b, *settings)
return sop
# We expose the full set of "m" operators also as functions à la the ``operator`` module.
_add = _make_termwise_stream_binop(primitive_add)
_sub = _make_termwise_stream_binop(primitive_sub)
_abs = _make_termwise_stream_unop(abs)
_pos = _make_termwise_stream_unop(primitive_pos)
_neg = _make_termwise_stream_unop(primitive_neg)
_mul = _make_termwise_stream_binop(primitive_mul)
_pow = _make_termwise_stream_binop(primitive_pow) # 2-arg form
_truediv = _make_termwise_stream_binop(primitive_truediv)
_floordiv = _make_termwise_stream_binop(primitive_floordiv)
_mod = _make_termwise_stream_binop(primitive_mod)
_divmod = _make_termwise_stream_binop(divmod)
_round = _make_termwise_stream_unop(round) # 1-arg form
_trunc = _make_termwise_stream_unop(trunc)
_floor = _make_termwise_stream_unop(floor)
_ceil = _make_termwise_stream_unop(ceil)
_lshift = _make_termwise_stream_binop(primitive_lshift)
_rshift = _make_termwise_stream_binop(primitive_rshift)
_and = _make_termwise_stream_binop(primitive_and)
_xor = _make_termwise_stream_binop(primitive_xor)
_or = _make_termwise_stream_binop(primitive_or)
_invert = _make_termwise_stream_unop(primitive_invert)
def sadd(a, b):
"""Termwise a + b when one or both are iterables."""
return _add(a, b)
def ssub(a, b):
"""Termwise a - b when one or both are iterables."""
return _sub(a, b)
def sabs(a):
"""Termwise abs(a) for an iterable."""
return _abs(a)
def spos(a):
"""Termwise +a for an iterable."""
return _pos(a)
def sneg(a):
"""Termwise -a for an iterable."""
return _neg(a)
def sinvert(a):
"""Termwise ~a for an iterable."""
return _invert(a)
def smul(a, b):
"""Termwise a * b when one or both are iterables."""
return _mul(a, b)
def spow(a, b, *mod):
"""Termwise a ** b when one or both are iterables.
An optional third argument is supported, and passed through to the
built-in ``pow`` function.
"""
op = _make_termwise_stream_binop(pow, mod[0]) if mod else _pow
return op(a, b)
def struediv(a, b):
"""Termwise a / b when one or both are iterables."""
return _truediv(a, b)
def sfloordiv(a, b):
"""Termwise a // b when one or both are iterables."""
return _floordiv(a, b)
def smod(a, b):
"""Termwise a % b when one or both are iterables."""
return _mod(a, b)
def sdivmod(a, b):
"""Termwise (a // b, a % b) when one or both are iterables."""
return _divmod(a, b)
def sround(a, *ndigits):
"""Termwise round(a) for an iterable.
An optional second argument is supported, and passed through to the
built-in ``round`` function.
"""
op = _make_termwise_stream_unop(round, ndigits[0]) if ndigits else _round
return op(a)
def strunc(a):
"""Termwise math.trunc(a) for an iterable."""
return _trunc(a)
def sfloor(a):
"""Termwise math.floor(a) for an iterable."""
return _floor(a)
def sceil(a):
"""Termwise math.ceil(a) for an iterable."""
return _ceil(a)
def slshift(a, b):
"""Termwise a << b when one or both are iterables."""
return _lshift(a, b)
def srshift(a, b):
"""Termwise a >> b when one or both are iterables."""
return _rshift(a, b)
def sand(a, b):
"""Termwise a & b when one or both are iterables."""
return _and(a, b)
def sxor(a, b):
"""Termwise a ^ b when one or both are iterables."""
return _xor(a, b)
def sor(a, b):
"""Termwise a | b when one or both are iterables."""
return _or(a, b)
# -----------------------------------------------------------------------------
def cauchyprod(a, b, *, require="any"):
"""Cauchy product of two (possibly infinite) iterables.
Defined by::
c[k] = sum(a[j] * b[k-j], j = 0, 1, ..., k), k = 0, 1, ...
As a table::
j
0 1 2 3 ...
+-----------
i 0 | 0 1 2 3
1 | 1 2 3
2 | 2 3 .
3 | 3 .
... | .
The element ``c[k]`` of the product is formed by summing all such
``a[i]*b[j]`` for which the table entry at ``(i, j)`` is ``k``.
For more details (esp. the option ``require``, used for finite inputs),
see the docstring of ``diagonal_reduce``, which is the general case of
this diagonal construction, when we allow custom operations to take the
roles of ``*`` and ``sum``.
"""
return diagonal_reduce(a, b, require=require, combine=smul, reduce=sum)
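# Example (added for illustration; not from the original source): the Cauchy
# product of 1, 2, 3, ... with the constant sequence 1, 1, 1, ... yields the
# partial sums of the first input:
#   tuple(take(4, cauchyprod(s(1, 2, ...), s(1, ...)))) == (1, 3, 6, 10)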
def diagonal_reduce(a, b, *, combine, reduce, require="any"):
"""Diagonal combination-reduction for two (possibly infinite) iterables.
Defined by::
c[k] = reduce(combine(a[j], b[k-j]), j = 0, 1, ..., k), k = 0, 1, ...
As a table::
j
0 1 2 3 ...
+-----------
i 0 | 0 1 2 3
1 | 1 2 3
2 | 2 3 .
3 | 3 .
... | .
The element ``c[k]`` is formed by reducing over all combinations of
``a[i], b[j]`` for which the table entry at ``(i, j)`` is ``k``.
The Cauchy product is the special case with ``combine=smul, reduce=sum``.
The output is automatically m'd so that it supports infix arithmetic.
The operations:
- ``combine = combine(a, b)`` is a binary operation that accepts
two iterables, and combines them termwise into a new iterable.
Roughly speaking, it gets the slices ``a[:(k+1)]`` and ``b[k::-1]``
as its input iterables. (Roughly speaking, because of caching and
finite input handling.) The inputs are guaranteed to have the same
length.
- ``reduce = reduce(a)`` is a unary operation that accepts one iterable,
and produces a scalar. The reduction is only invoked if there is
at least one term to process.
The computations for ``a[i]`` and ``b[j]`` are triggered only once (ever)
for each value of ``i`` or ``j``. The values then enter a local cache.
The computational cost for the term ``c[k]`` is ``O(k)``, because although
``a[i]`` and ``b[j]`` are cached, the reduction itself consists of ``k + 1``
terms that are all formed with new combinations of ``i`` and ``j``. This means
the total cost of computing the ``n`` first terms of ``c`` is ``O(n**2)``.
**CAUTION**: The caching works by applying ``imemoize`` to both inputs;
the usual caveats apply.
**Finite inputs**
    **When** ``require="any"``, we run with increasing ``k`` as long as **any**
combination appearing inside the above reduction can be formed. When ``k``
has reached a value for which no combinations can be formed, the generator
raises ``StopIteration``.
In terms of the above table, the table is cut by vertical and horizontal lines
just after the maximum possible ``i`` and ``j``, and only the terms in the
upper left quadrant contribute to the reduction (since these are the only
terms that can be formed).
For example, if both ``a`` and ``b`` have length 2, and we are computing the
Cauchy product, then the iterable ``c`` will consist of *three* terms:
``c[0] = a[0]*b[0]``, ``c[1] = a[0]*b[1] + a[1]*b[0]``, and
``c[2] = a[1]*b[1]``.
**When** ``require="all"``, we run with increasing ``k`` until either end
    of the diagonal falls off the end of the shorter input. (In the case of inputs
of equal length, both ends fall off simultaneously.) In other words, ``c[k]``
is formed only if **all** combinations that would contribute to it (in the
infinite case) can be formed.
In terms of the above table, the diagonal marked with the value ``k`` is
considered, and ``c[k]`` is formed only if all its combinations of
``a[i], b[j]`` can be formed from the given finite inputs.
For example, if both ``a`` and ``b`` have length 2, and we are computing the
Cauchy product, then the iterable ``c`` will consist of *two* terms:
``c[0] = a[0]*b[0]``, and ``c[1] = a[0]*b[1] + a[1]*b[0]``. The term ``c[2]``
is not formed, because the terms ``a[0]*b[2]`` and ``a[2]*b[0]`` (that would
contribute to it in the infinite case) cannot be formed from length-2 inputs.
"""
if not all(hasattr(x, "__iter__") for x in (a, b)):
raise TypeError("Expected two iterables, got '{}', '{}'".format(type(a), type(b)))
if require not in ("all", "any"):
raise ValueError("require must be 'all' or 'any'; got '{}'".format(require))
ga = imemoize(a)
gb = imemoize(b)
def diagonal():
n = 1 # how many terms to take from a and b; output index k = n - 1
while True:
xs, ys = (tuple(take(n, g())) for g in (ga, gb))
lx, ly = len(xs), len(ys)
if require == "all" and (lx < n or ly < n):
break
if (lx == ly and lx < n) or lx < ly or ly < lx:
xs = xs[(n - ly):]
ys = ys[(n - lx):]
assert len(xs) == len(ys) # TODO: maybe take this out later?
if not xs:
break
yield reduce(combine(xs, rev(ys)))
n += 1
return m(diagonal())
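# Example of the two ``require`` modes (added for illustration; not from the
# original source), using length-2 inputs and the Cauchy-product operations:
#   tuple(diagonal_reduce((1, 2), (1, 2), combine=smul, reduce=sum, require="any")) == (1, 4, 4)
#   tuple(diagonal_reduce((1, 2), (1, 2), combine=smul, reduce=sum, require="all")) == (1, 4)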
# -----------------------------------------------------------------------------
def fibonacci():
"""Return the Fibonacci numbers 1, 1, 2, 3, 5, 8, ... as a lazy sequence."""
def fibos():
a, b = 1, 1
while True:
yield a
a, b = b, a + b
return m(fibos())
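# Because the returned sequence is m'd, infix arithmetic applies termwise
# (illustration added; not from the original source):
#   tuple(take(5, fibonacci() + fibonacci())) == (2, 2, 4, 6, 10)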
# See test_gmemo.py for history. This is an FP-ized sieve of Eratosthenes.
#
# This version wins in speed for moderate n (1e5) on typical architectures where
# the memory bus is a bottleneck, since the rule for generating new candidates is
# simple arithmetic. Contrast memo_primes3, which needs to keep a table that gets
# larger as n grows (so memory transfers dominate for large n). That strategy
# seems faster for n ~ 1e3, though.
@gmemoize
def _primes():
yield 2
for n in count(start=3, step=2):
if not any(n % p == 0 for p in takewhile(lambda x: x*x <= n, _primes())):
yield n
@gmemoize
def _fastprimes():
memo = []
def primes():
memo.append(2)
yield 2
for n in count(start=3, step=2):
if not any(n % p == 0 for p in takewhile(lambda x: x*x <= n, memo)):
memo.append(n)
yield n
return primes()
def primes(optimize="speed"):
"""Return the prime numbers 2, 3, 5, 7, 11, 13, ... as a lazy sequence.
FP sieve of Eratosthenes with memoization.
``optimize`` is one of ``"memory"`` or ``"speed"``. The memory-optimized
version shares one global memo, which is re-used also in the tight inner loop,
whereas the speed-optimized one keeps exactly one more copy of the results
as an internal memo (double memory usage, but faster, as it skips the very
general ``gmemoize`` machinery in the inner loop).
"""
if optimize not in ("memory", "speed"):
raise ValueError("optimize must be 'memory' or 'speed'; got '{}'".format(optimize))
if optimize == "speed":
return m(_fastprimes())
else: # optimize == "memory":
return m(_primes())
|
py | 1a369160fe79d59e2db755d0c79add2f20181d47 | # -*- coding: utf-8 -*-
# Password generator functions
# Import stuff
import os
from random import choice
from base64 import b64encode
# Complex Charset including special char
charset1 = [
'abcdefghijklmnopqrstuvwxyz',
'ABCDEFGHIJKLMNOPQRSTUVWXYZ',
'0123456789',
'^!\$%&/()=?{[]}+~#-_.:,;<>|\\',
]
# Simple Charset omitting potentially misleading chars
charset2 = [
'abcdefghijkmnpqrstuvwxyz',
'ABCDEFGHJKLMNPQRSTUVWXYZ',
'123456789',
'!$%&()=?[]#<>+-_',
]
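# Note (comment added for clarity): mkPassword below draws each character from a
# different character class than the previous one -- the working charset starts as
# a random charset1 group and is re-picked from the *other* charset1 groups after
# every character. charset2 is defined above but is not used by these functions.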
def mkPassword(lenPassword=22):
pwd = []
charset = choice(charset1)
while len(pwd) < lenPassword:
pwd.append(choice(charset))
charset = choice(list(set(charset1) - set([charset])))
return ''.join(pwd)
def mkKey(lenKey=64):
key = os.urandom(lenKey)
token = b64encode(key).decode('utf-8')
return token
# Uncomment to test the functions in this file
# if __name__ == '__main__':
# print(mkPassword()) |
py | 1a36927018b55b2c26e8884cd614b3c08848a3c4 | import os
from datetime import datetime
import pandas as pd
import src.config.constants as constants
import src.munging as process_data
import src.common as common
if __name__ == "__main__":
RUN_ID = datetime.now().strftime("%m%d_%H%M")
MODEL_NAME = os.path.basename(__file__).split(".")[0]
logger = common.get_logger("blend")
train_df, test_df, sample_submission_df = process_data.read_processed_data(
logger,
constants.PROCESSED_DATA_DIR,
train=True,
test=True,
sample_submission=True,
)
# File with public score 7.84988
# https://www.kaggle.com/pavfedotov/blending-tool-tps-aug-2021
df_sub_ext = pd.read_csv(f"{constants.PUB_SUBMISSION_DIR}/0.part")
# https://www.kaggle.com/vaby667/84996to-improve-your-ranking
# PL : 0.84996
df_2 = pd.read_csv(
f"{constants.PUB_SUBMISSION_DIR}/file1_7.84996_file2_7.84996_blend.csv"
)
    # LGB Benchmark with StratifiedKFold (10) with frequency encoding params from Kaggle, seed 20 (0.9, 0.1)
df_lgb_log_loss_top_10 = pd.read_csv(
f"{constants.SUBMISSION_DIR}/sub_lgb_SKF_freq_params_f_kaggle_0817_1247_7.84284.csv"
)
    # Giving more importance to the external submission
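    # (Note added for clarity: the three weights form a convex combination, 0.5 + 0.49 + 0.01 = 1.0.)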
sample_submission_df.loss = (
0.5 * df_sub_ext.loss + 0.49 * df_2.loss + 0.01 * df_lgb_log_loss_top_10.loss
).values
file_name = f"sub_{MODEL_NAME}_{RUN_ID}.csv"
logger.info(f"Saving to submission file {constants.SUBMISSION_DIR}/{file_name}")
sample_submission_df.to_csv(f"{constants.SUBMISSION_DIR}/{file_name}")
logger.info(pd.read_csv(f"{constants.SUBMISSION_DIR}/{file_name}"))
|
py | 1a36927b7b62bf745d8d13b55bb31a1471aff514 | from invoke import task
from subprocess import run, PIPE
import os
from tasks.shared import is_local
@task
def add(ctx):
"""deploy locally kubernetes dashboard"""
if is_local():
ctx.run("kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/master/aio/deploy/recommended.yaml")
ctx.run("kubectl apply -f dashboard/dashboard-admin-user.yaml")
p = run("kubectl -n kubernetes-dashboard describe secret admin-user | awk '{for(i=1;i<=NF;i++) {if($i~/token:/) print $(i+1)}}'", shell=True, stdout=PIPE, encoding='ascii')
cmd = "echo \"{}\" | pbcopy".format(p.stdout)
ctx.run(cmd)
print('dashboard token copied to clipboard')
dashboard = 'kubectl proxy &'
os.system(dashboard)
ctx.run("open http://localhost:8001/api/v1/namespaces/kubernetes-dashboard/services/https:kubernetes-dashboard:/proxy/")
@task
def token(ctx):
"""copy dashboard token to clipboard"""
if is_local():
p = run("kubectl -n kubernetes-dashboard describe secret admin-user | awk '{for(i=1;i<=NF;i++) {if($i~/token:/) print $(i+1)}}'", shell=True, stdout=PIPE, encoding='ascii')
print(p.stdout)
cmd = "echo \"{}\" | pbcopy".format(p.stdout)
ctx.run(cmd)
print('dashboard token copied to clipboard')
@task
def reset(ctx):
"""reset dashboard proxy"""
if is_local():
ctx.run('pkill kubectl')
p = run("kubectl -n kubernetes-dashboard describe secret admin-user | awk '{for(i=1;i<=NF;i++) {if($i~/token:/) print $(i+1)}}'", shell=True, stdout=PIPE, encoding='ascii')
cmd = "echo \"{}\" | pbcopy".format(p.stdout)
ctx.run(cmd)
print('dashboard token copied to clipboard')
dashboard = 'kubectl proxy &'
os.system(dashboard)
ctx.run("open http://localhost:8001/api/v1/namespaces/kubernetes-dashboard/services/https:kubernetes-dashboard:/proxy/")
@task
def rm(ctx):
"""delete dashboard"""
if is_local():
ctx.run('pkill kubectl')
ctx.run("kubectl delete ns kubernetes-dashboard --grace-period=0 --force")
|
py | 1a3692c96bafaffe9d22914b6edd4f607100b969 | import ast
from typing import Any, List
from vyper.parser.context import Context
from vyper.parser.expr import Expr
from vyper.parser.function_definitions.utils import (
get_default_names_to_set,
get_nonreentrant_lock,
get_sig_statements,
make_unpacker,
)
from vyper.parser.lll_node import LLLnode
from vyper.parser.parser_utils import getpos, make_setter
from vyper.parser.stmt import parse_body
from vyper.signatures import FunctionSignature, sig_utils
from vyper.signatures.function_signature import VariableRecord
from vyper.types.types import BaseType, ByteArrayLike, get_size_of_type
from vyper.utils import MemoryPositions
def get_internal_arg_copier(total_size: int, memory_dest: int) -> List[Any]:
"""
Copy arguments.
For internal functions, MSTORE arguments and callback pointer from the stack.
:param total_size: total size to copy
:param memory_dest: base memory position to copy to
:return: LLL list that copies total_size of memory
"""
copier: List[Any] = ["seq"]
for pos in range(0, total_size, 32):
copier.append(["mstore", memory_dest + pos, "pass"])
return copier
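# Illustrative example (added; the values are arbitrary, not from the original source):
#   get_internal_arg_copier(64, 320)
#   -> ["seq", ["mstore", 320, "pass"], ["mstore", 352, "pass"]]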
def parse_internal_function(
code: ast.FunctionDef, sig: FunctionSignature, context: Context
) -> LLLnode:
"""
    Parse an internal function (FuncDef), and produce full function body.
    :param sig: the FunctionSignature
:param code: ast of function
:return: full sig compare & function body
"""
# Get nonreentrant lock
nonreentrant_pre, nonreentrant_post = get_nonreentrant_lock(sig, context.global_ctx)
    # Create callback_ptr, this stores a destination in the bytecode for an internal
# function to jump to after a function has executed.
clampers: List[LLLnode] = []
# Allocate variable space.
context.memory_allocator.expand_memory(sig.max_copy_size)
_post_callback_ptr = f"{sig.name}_{sig.method_id}_post_callback_ptr"
context.callback_ptr = context.new_internal_variable(typ=BaseType("uint256"))
clampers.append(
LLLnode.from_list(
["mstore", context.callback_ptr, "pass"], annotation="pop callback pointer",
)
)
if sig.total_default_args > 0:
clampers.append(LLLnode.from_list(["label", _post_callback_ptr]))
# internal functions without return types need to jump back to
# the calling function, as there is no return statement to handle the
# jump.
if sig.output_type is None:
stop_func = [["jump", ["mload", context.callback_ptr]]]
else:
stop_func = [["stop"]]
# Generate copiers
if len(sig.base_args) == 0:
copier = ["pass"]
clampers.append(LLLnode.from_list(copier))
elif sig.total_default_args == 0:
copier = get_internal_arg_copier(
total_size=sig.base_copy_size, memory_dest=MemoryPositions.RESERVED_MEMORY
)
clampers.append(LLLnode.from_list(copier))
# Fill variable positions
for arg in sig.args:
if isinstance(arg.typ, ByteArrayLike):
mem_pos = context.memory_allocator.expand_memory(32 * get_size_of_type(arg.typ))
context.vars[arg.name] = VariableRecord(arg.name, mem_pos, arg.typ, False)
else:
context.vars[arg.name] = VariableRecord(
arg.name, MemoryPositions.RESERVED_MEMORY + arg.pos, arg.typ, False,
)
# internal function copiers. No clamping for internal functions.
dyn_variable_names = [a.name for a in sig.base_args if isinstance(a.typ, ByteArrayLike)]
if dyn_variable_names:
i_placeholder = context.new_internal_variable(typ=BaseType("uint256"))
unpackers: List[Any] = []
for idx, var_name in enumerate(dyn_variable_names):
var = context.vars[var_name]
ident = f"_load_args_{sig.method_id}_dynarg{idx}"
o = make_unpacker(ident=ident, i_placeholder=i_placeholder, begin_pos=var.pos)
unpackers.append(o)
if not unpackers:
unpackers = ["pass"]
# 0 added to complete full overarching 'seq' statement, see internal_label.
unpackers.append(0)
clampers.append(
LLLnode.from_list(
["seq_unchecked"] + unpackers,
typ=None,
annotation="dynamic unpacker",
pos=getpos(code),
)
)
# Function has default arguments.
if sig.total_default_args > 0: # Function with default parameters.
default_sigs = sig_utils.generate_default_arg_sigs(code, context.sigs, context.global_ctx)
sig_chain: List[Any] = ["seq"]
for default_sig in default_sigs:
sig_compare, internal_label = get_sig_statements(default_sig, getpos(code))
# Populate unset default variables
set_defaults = []
for arg_name in get_default_names_to_set(sig, default_sig):
value = Expr(sig.default_values[arg_name], context).lll_node
var = context.vars[arg_name]
left = LLLnode.from_list(
var.pos, typ=var.typ, location="memory", pos=getpos(code), mutable=var.mutable
)
set_defaults.append(make_setter(left, value, "memory", pos=getpos(code)))
current_sig_arg_names = [x.name for x in default_sig.args]
# Load all variables in default section, if internal,
# because the stack is a linear pipe.
copier_arg_count = len(default_sig.args)
copier_arg_names = current_sig_arg_names
# Order copier_arg_names, this is very important.
copier_arg_names = [x.name for x in default_sig.args if x.name in copier_arg_names]
# Variables to be populated from calldata/stack.
default_copiers: List[Any] = []
if copier_arg_count > 0:
                # Get map of variables in calldata, with their offsets
offset = 4
calldata_offset_map = {}
for arg in default_sig.args:
calldata_offset_map[arg.name] = offset
offset += (
32 if isinstance(arg.typ, ByteArrayLike) else get_size_of_type(arg.typ) * 32
)
# Copy set default parameters from calldata
dynamics = []
for arg_name in copier_arg_names:
var = context.vars[arg_name]
if isinstance(var.typ, ByteArrayLike):
_size = 32
dynamics.append(var.pos)
else:
_size = var.size * 32
default_copiers.append(
get_internal_arg_copier(memory_dest=var.pos, total_size=_size,)
)
# Unpack byte array if necessary.
if dynamics:
i_placeholder = context.new_internal_variable(typ=BaseType("uint256"))
for idx, var_pos in enumerate(dynamics):
ident = f"unpack_default_sig_dyn_{default_sig.method_id}_arg{idx}"
default_copiers.append(
make_unpacker(
ident=ident, i_placeholder=i_placeholder, begin_pos=var_pos,
)
)
default_copiers.append(0) # for over arching seq, POP
sig_chain.append(
[
"if",
sig_compare,
[
"seq",
internal_label,
LLLnode.from_list(
["mstore", context.callback_ptr, "pass"],
annotation="pop callback pointer",
pos=getpos(code),
),
["seq"] + set_defaults if set_defaults else ["pass"],
["seq_unchecked"] + default_copiers if default_copiers else ["pass"],
["goto", _post_callback_ptr],
],
]
)
# With internal functions all variable loading occurs in the default
# function sub routine.
_clampers = [["label", _post_callback_ptr]]
# Function with default parameters.
o = LLLnode.from_list(
[
"seq",
sig_chain,
[
"if",
0, # can only be jumped into
[
"seq",
["seq"]
+ nonreentrant_pre
+ _clampers
+ [parse_body(c, context) for c in code.body]
+ nonreentrant_post
+ stop_func,
],
],
],
typ=None,
pos=getpos(code),
)
else:
# Function without default parameters.
sig_compare, internal_label = get_sig_statements(sig, getpos(code))
o = LLLnode.from_list(
[
"if",
sig_compare,
["seq"]
+ [internal_label]
+ nonreentrant_pre
+ clampers
+ [parse_body(c, context) for c in code.body]
+ nonreentrant_post
+ stop_func,
],
typ=None,
pos=getpos(code),
)
    return o
|
py | 1a3692d0c1734f2c3e6c5ce8f5ed65ad2eeda951 | #!/usr/bin/env python
import os
import numpy as np
from cereal import car
from common.numpy_fast import clip, interp
from common.realtime import sec_since_boot
from selfdrive.config import Conversions as CV
from selfdrive.controls.lib.drive_helpers import create_event, EventTypes as ET, get_events
from selfdrive.controls.lib.vehicle_model import VehicleModel
from selfdrive.car.honda.carstate import CarState, get_can_parser
from selfdrive.car.honda.values import CruiseButtons, CM, BP, AH
from selfdrive.controls.lib.planner import A_ACC_MAX
from common.fingerprints import HONDA as CAR
try:
from .carcontroller import CarController
except ImportError:
CarController = None
# msgs sent for steering controller by camera module on can 0.
# those messages are mutually exclusive on CRV and non-CRV cars
CAMERA_MSGS = [0xe4, 0x194]
def compute_gb_honda(accel, speed):
creep_brake = 0.0
creep_speed = 2.3
creep_brake_value = 0.15
if speed < creep_speed:
creep_brake = (creep_speed - speed) / creep_speed * creep_brake_value
return float(accel) / 4.8 - creep_brake
def get_compute_gb_acura():
# generate a function that takes in [desired_accel, current_speed] -> [-1.0, 1.0]
# where -1.0 is max brake and 1.0 is max gas
# see debug/dump_accel_from_fiber.py to see how those parameters were generated
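  # (Comment added for clarity: the weights below define a small 2-3-3-1
  # fully-connected network with leaky-ReLU (alpha=0.1) hidden activations;
  # input is [desired_accel, speed], output is the normalized gas/brake command.)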
w0 = np.array([[ 1.22056961, -0.39625418, 0.67952657],
[ 1.03691769, 0.78210306, -0.41343188]])
b0 = np.array([ 0.01536703, -0.14335321, -0.26932889])
w2 = np.array([[-0.59124422, 0.42899439, 0.38660881],
[ 0.79973811, 0.13178682, 0.08550351],
[-0.15651935, -0.44360259, 0.76910877]])
b2 = np.array([ 0.15624429, 0.02294923, -0.0341086 ])
w4 = np.array([[-0.31521443],
[-0.38626176],
[ 0.52667892]])
b4 = np.array([-0.02922216])
def compute_output(dat, w0, b0, w2, b2, w4, b4):
m0 = np.dot(dat, w0) + b0
m0 = leakyrelu(m0, 0.1)
m2 = np.dot(m0, w2) + b2
m2 = leakyrelu(m2, 0.1)
m4 = np.dot(m2, w4) + b4
return m4
def leakyrelu(x, alpha):
return np.maximum(x, alpha * x)
def _compute_gb_acura(accel, speed):
# linearly extrap below v1 using v1 and v2 data
v1 = 5.
v2 = 10.
dat = np.array([accel, speed])
if speed > 5.:
m4 = compute_output(dat, w0, b0, w2, b2, w4, b4)
else:
dat[1] = v1
m4v1 = compute_output(dat, w0, b0, w2, b2, w4, b4)
dat[1] = v2
m4v2 = compute_output(dat, w0, b0, w2, b2, w4, b4)
m4 = (speed - v1) * (m4v2 - m4v1) / (v2 - v1) + m4v1
return float(m4)
return _compute_gb_acura
class CarInterface(object):
def __init__(self, CP, sendcan=None):
self.CP = CP
self.frame = 0
self.last_enable_pressed = 0
self.last_enable_sent = 0
self.gas_pressed_prev = False
self.brake_pressed_prev = False
self.can_invalid_count = 0
self.cp = get_can_parser(CP)
# *** init the major players ***
self.CS = CarState(CP)
self.VM = VehicleModel(CP)
# sending if read only is False
if sendcan is not None:
self.sendcan = sendcan
self.CC = CarController(self.cp.dbc_name, CP.enableCamera)
if self.CS.CP.carFingerprint == CAR.ACURA_ILX:
self.compute_gb = get_compute_gb_acura()
else:
self.compute_gb = compute_gb_honda
@staticmethod
def calc_accel_override(a_ego, a_target, v_ego, v_target):
# limit the pcm accel cmd if:
# - v_ego exceeds v_target, or
# - a_ego exceeds a_target and v_ego is close to v_target
eA = a_ego - a_target
valuesA = [1.0, 0.1]
bpA = [0.3, 1.1]
eV = v_ego - v_target
valuesV = [1.0, 0.1]
bpV = [0.0, 0.5]
valuesRangeV = [1., 0.]
bpRangeV = [-1., 0.]
# only limit if v_ego is close to v_target
speedLimiter = interp(eV, bpV, valuesV)
accelLimiter = max(interp(eA, bpA, valuesA), interp(eV, bpRangeV, valuesRangeV))
# accelOverride is more or less the max throttle allowed to pcm: usually set to a constant
    # unless aTargetMax is very high and then we scale with it; this helps in a quicker restart
return float(max(0.714, a_target / A_ACC_MAX)) * min(speedLimiter, accelLimiter)
@staticmethod
def get_params(candidate, fingerprint):
# kg of standard extra cargo to count for drive, gas, etc...
std_cargo = 136
    # Ridgeline requires scaled tire stiffness
ts_factor = 1
ret = car.CarParams.new_message()
ret.carName = "honda"
ret.carFingerprint = candidate
ret.safetyModel = car.CarParams.SafetyModels.honda
ret.enableSteer = True
ret.enableBrake = True
ret.enableCamera = not any(x for x in CAMERA_MSGS if x in fingerprint)
ret.enableGas = 0x201 in fingerprint
print "ECU Camera Simulated: ", ret.enableCamera
print "ECU Gas Interceptor: ", ret.enableGas
ret.enableCruise = not ret.enableGas
# FIXME: hardcoding honda civic 2016 touring params so they can be used to
# scale unknown params for other cars
mass_civic = 2923./2.205 + std_cargo
wheelbase_civic = 2.70
centerToFront_civic = wheelbase_civic * 0.4
centerToRear_civic = wheelbase_civic - centerToFront_civic
rotationalInertia_civic = 2500
tireStiffnessFront_civic = 85400
tireStiffnessRear_civic = 90000
ret.steerKiBP, ret.steerKpBP = [[0.], [0.]]
if candidate == CAR.CIVIC:
stop_and_go = True
ret.mass = mass_civic
ret.wheelbase = wheelbase_civic
ret.centerToFront = centerToFront_civic
ret.steerRatio = 13.0
# Civic at comma has modified steering FW, so different tuning for the Neo in that car
is_fw_modified = os.getenv("DONGLE_ID") in ['99c94dc769b5d96e']
ret.steerKpV, ret.steerKiV = [[0.4], [0.12]] if is_fw_modified else [[0.8], [0.24]]
ret.longitudinalKpBP = [0., 5., 35.]
ret.longitudinalKpV = [3.6, 2.4, 1.5]
ret.longitudinalKiBP = [0., 35.]
ret.longitudinalKiV = [0.54, 0.36]
elif candidate == CAR.ACURA_ILX:
stop_and_go = False
ret.mass = 3095./2.205 + std_cargo
ret.wheelbase = 2.67
ret.centerToFront = ret.wheelbase * 0.37
ret.steerRatio = 15.3
# Acura at comma has modified steering FW, so different tuning for the Neo in that car
is_fw_modified = os.getenv("DONGLE_ID") in ['85a6c74d4ad9c310']
ret.steerKpV, ret.steerKiV = [[0.4], [0.12]] if is_fw_modified else [[0.8], [0.24]]
ret.longitudinalKpBP = [0., 5., 35.]
ret.longitudinalKpV = [1.2, 0.8, 0.5]
ret.longitudinalKiBP = [0., 35.]
ret.longitudinalKiV = [0.18, 0.12]
elif candidate == CAR.CRV:
stop_and_go = False
ret.mass = 3572./2.205 + std_cargo
ret.wheelbase = 2.62
ret.centerToFront = ret.wheelbase * 0.41
ret.steerRatio = 15.3
ret.steerKpV, ret.steerKiV = [[0.8], [0.24]]
ret.longitudinalKpBP = [0., 5., 35.]
ret.longitudinalKpV = [1.2, 0.8, 0.5]
ret.longitudinalKiBP = [0., 35.]
ret.longitudinalKiV = [0.18, 0.12]
elif candidate == CAR.ACURA_RDX:
stop_and_go = False
ret.mass = 3935./2.205 + std_cargo
ret.wheelbase = 2.68
ret.centerToFront = ret.wheelbase * 0.38
ret.steerRatio = 15.0
ret.steerKpV, ret.steerKiV = [[0.8], [0.24]]
ret.longitudinalKpBP = [0., 5., 35.]
ret.longitudinalKpV = [1.2, 0.8, 0.5]
ret.longitudinalKiBP = [0., 35.]
ret.longitudinalKiV = [0.18, 0.12]
elif candidate == CAR.ODYSSEY:
stop_and_go = False
ret.mass = 4354./2.205 + std_cargo
ret.wheelbase = 3.00
ret.centerToFront = ret.wheelbase * 0.41
ret.steerRatio = 14.35
ret.steerKpV, ret.steerKiV = [[0.6], [0.18]]
ret.longitudinalKpBP = [0., 5., 35.]
ret.longitudinalKpV = [1.2, 0.8, 0.5]
ret.longitudinalKiBP = [0., 35.]
ret.longitudinalKiV = [0.18, 0.12]
elif candidate == CAR.PILOT:
stop_and_go = False
ret.mass = 4303./2.205 + std_cargo
ret.wheelbase = 2.81
ret.centerToFront = ret.wheelbase * 0.41
ret.steerRatio = 16.0
ret.steerKpV, ret.steerKiV = [[0.38], [0.11]]
ret.longitudinalKpBP = [0., 5., 35.]
ret.longitudinalKpV = [1.2, 0.8, 0.5]
ret.longitudinalKiBP = [0., 35.]
ret.longitudinalKiV = [0.18, 0.12]
elif candidate == CAR.RIDGELINE:
stop_and_go = False
ts_factor = 1.4
ret.mass = 4515./2.205 + std_cargo
ret.wheelbase = 3.18
ret.centerToFront = ret.wheelbase * 0.41
ret.steerRatio = 15.59
ret.steerKpV, ret.steerKiV = [[0.38], [0.11]]
ret.longitudinalKpBP = [0., 5., 35.]
ret.longitudinalKpV = [1.2, 0.8, 0.5]
ret.longitudinalKiBP = [0., 35.]
ret.longitudinalKiV = [0.18, 0.12]
else:
raise ValueError("unsupported car %s" % candidate)
ret.steerKf = 0. # TODO: investigate FF steer control for Honda
# min speed to enable ACC. if car can do stop and go, then set enabling speed
# to a negative value, so it won't matter. Otherwise, add 0.5 mph margin to not
# conflict with PCM acc
ret.minEnableSpeed = -1. if (stop_and_go or ret.enableGas) else 25.5 * CV.MPH_TO_MS
centerToRear = ret.wheelbase - ret.centerToFront
# TODO: get actual value, for now starting with reasonable value for
# civic and scaling by mass and wheelbase
ret.rotationalInertia = rotationalInertia_civic * \
ret.mass * ret.wheelbase**2 / (mass_civic * wheelbase_civic**2)
# TODO: start from empirically derived lateral slip stiffness for the civic and scale by
# mass and CG position, so all cars will have approximately similar dyn behaviors
ret.tireStiffnessFront = (tireStiffnessFront_civic * ts_factor) * \
ret.mass / mass_civic * \
(centerToRear / ret.wheelbase) / (centerToRear_civic / wheelbase_civic)
ret.tireStiffnessRear = (tireStiffnessRear_civic * ts_factor) * \
ret.mass / mass_civic * \
(ret.centerToFront / ret.wheelbase) / (centerToFront_civic / wheelbase_civic)
# no rear steering, at least on the listed cars above
ret.steerRatioRear = 0.
# no max steer limit VS speed
ret.steerMaxBP = [0.] # m/s
ret.steerMaxV = [1.] # max steer allowed
ret.gasMaxBP = [0.] # m/s
ret.gasMaxV = [0.6] if ret.enableGas else [0.] # max gas allowed
ret.brakeMaxBP = [5., 20.] # m/s
ret.brakeMaxV = [1., 0.8] # max brake allowed
ret.longPidDeadzoneBP = [0.]
ret.longPidDeadzoneV = [0.]
ret.stoppingControl = True
ret.steerLimitAlert = True
ret.startAccel = 0.5
ret.steerRateCost = 0.5
return ret
# returns a car.CarState
def update(self, c):
# ******************* do can recv *******************
canMonoTimes = []
self.cp.update(int(sec_since_boot() * 1e9), False)
self.CS.update(self.cp)
# create message
ret = car.CarState.new_message()
# speeds
ret.vEgo = self.CS.v_ego
ret.aEgo = self.CS.a_ego
ret.vEgoRaw = self.CS.v_ego_raw
ret.yawRate = self.VM.yaw_rate(self.CS.angle_steers * CV.DEG_TO_RAD, self.CS.v_ego)
ret.standstill = self.CS.standstill
ret.wheelSpeeds.fl = self.CS.v_wheel_fl
ret.wheelSpeeds.fr = self.CS.v_wheel_fr
ret.wheelSpeeds.rl = self.CS.v_wheel_rl
ret.wheelSpeeds.rr = self.CS.v_wheel_rr
# gas pedal
ret.gas = self.CS.car_gas / 256.0
if not self.CP.enableGas:
ret.gasPressed = self.CS.pedal_gas > 0
else:
ret.gasPressed = self.CS.user_gas_pressed
# brake pedal
ret.brake = self.CS.user_brake
ret.brakePressed = self.CS.brake_pressed != 0
# FIXME: read sendcan for brakelights
brakelights_threshold = 0.02 if self.CS.CP.carFingerprint == CAR.CIVIC else 0.1
ret.brakeLights = bool(self.CS.brake_switch or
c.actuators.brake > brakelights_threshold)
# steering wheel
ret.steeringAngle = self.CS.angle_steers
ret.steeringRate = self.CS.angle_steers_rate
# gear shifter lever
ret.gearShifter = self.CS.gear_shifter
ret.steeringTorque = self.CS.steer_torque_driver
ret.steeringPressed = self.CS.steer_override
# cruise state
ret.cruiseState.enabled = self.CS.pcm_acc_status != 0
ret.cruiseState.speed = self.CS.v_cruise_pcm * CV.KPH_TO_MS
ret.cruiseState.available = bool(self.CS.main_on)
ret.cruiseState.speedOffset = self.CS.cruise_speed_offset
ret.cruiseState.standstill = False
# TODO: button presses
buttonEvents = []
ret.leftBlinker = bool(self.CS.left_blinker_on)
ret.rightBlinker = bool(self.CS.right_blinker_on)
ret.doorOpen = not self.CS.door_all_closed
ret.seatbeltUnlatched = not self.CS.seatbelt
if self.CS.left_blinker_on != self.CS.prev_left_blinker_on:
be = car.CarState.ButtonEvent.new_message()
be.type = 'leftBlinker'
be.pressed = self.CS.left_blinker_on != 0
buttonEvents.append(be)
if self.CS.right_blinker_on != self.CS.prev_right_blinker_on:
be = car.CarState.ButtonEvent.new_message()
be.type = 'rightBlinker'
be.pressed = self.CS.right_blinker_on != 0
buttonEvents.append(be)
if self.CS.cruise_buttons != self.CS.prev_cruise_buttons:
be = car.CarState.ButtonEvent.new_message()
be.type = 'unknown'
if self.CS.cruise_buttons != 0:
be.pressed = True
but = self.CS.cruise_buttons
else:
be.pressed = False
but = self.CS.prev_cruise_buttons
if but == CruiseButtons.RES_ACCEL:
be.type = 'accelCruise'
elif but == CruiseButtons.DECEL_SET:
be.type = 'decelCruise'
elif but == CruiseButtons.CANCEL:
be.type = 'cancel'
elif but == CruiseButtons.MAIN:
be.type = 'altButton3'
buttonEvents.append(be)
if self.CS.cruise_setting != self.CS.prev_cruise_setting:
be = car.CarState.ButtonEvent.new_message()
be.type = 'unknown'
if self.CS.cruise_setting != 0:
be.pressed = True
but = self.CS.cruise_setting
else:
be.pressed = False
but = self.CS.prev_cruise_setting
if but == 1:
be.type = 'altButton1'
# TODO: more buttons?
buttonEvents.append(be)
ret.buttonEvents = buttonEvents
# events
# TODO: I don't like the way capnp does enums
# These strings aren't checked at compile time
events = []
if not self.CS.can_valid:
self.can_invalid_count += 1
if self.can_invalid_count >= 5:
events.append(create_event('commIssue', [ET.NO_ENTRY, ET.IMMEDIATE_DISABLE]))
else:
self.can_invalid_count = 0
if self.CS.steer_error:
events.append(create_event('steerUnavailable', [ET.NO_ENTRY, ET.IMMEDIATE_DISABLE, ET.PERMANENT]))
elif self.CS.steer_not_allowed:
events.append(create_event('steerTempUnavailable', [ET.NO_ENTRY, ET.WARNING]))
if self.CS.brake_error:
events.append(create_event('brakeUnavailable', [ET.NO_ENTRY, ET.IMMEDIATE_DISABLE, ET.PERMANENT]))
if not ret.gearShifter == 'drive':
events.append(create_event('wrongGear', [ET.NO_ENTRY, ET.SOFT_DISABLE]))
if ret.doorOpen:
events.append(create_event('doorOpen', [ET.NO_ENTRY, ET.SOFT_DISABLE]))
if ret.seatbeltUnlatched:
events.append(create_event('seatbeltNotLatched', [ET.NO_ENTRY, ET.SOFT_DISABLE]))
if self.CS.esp_disabled:
events.append(create_event('espDisabled', [ET.NO_ENTRY, ET.SOFT_DISABLE]))
if not self.CS.main_on:
events.append(create_event('wrongCarMode', [ET.NO_ENTRY, ET.USER_DISABLE]))
if ret.gearShifter == 'reverse':
events.append(create_event('reverseGear', [ET.NO_ENTRY, ET.IMMEDIATE_DISABLE]))
if self.CS.brake_hold:
events.append(create_event('brakeHold', [ET.NO_ENTRY, ET.USER_DISABLE]))
if self.CS.park_brake:
events.append(create_event('parkBrake', [ET.NO_ENTRY, ET.USER_DISABLE]))
if self.CP.enableCruise and ret.vEgo < self.CP.minEnableSpeed:
events.append(create_event('speedTooLow', [ET.NO_ENTRY]))
# disable on pedals rising edge or when brake is pressed and speed isn't zero
if (ret.gasPressed and not self.gas_pressed_prev) or \
(ret.brakePressed and (not self.brake_pressed_prev or ret.vEgo > 0.001)):
events.append(create_event('pedalPressed', [ET.NO_ENTRY, ET.USER_DISABLE]))
if ret.gasPressed:
events.append(create_event('pedalPressed', [ET.PRE_ENABLE]))
# it can happen that car cruise disables while comma system is enabled: need to
# keep braking if needed or if the speed is very low
if self.CP.enableCruise and not ret.cruiseState.enabled and c.actuators.brake <= 0.:
      # non-loud alert if cruise disables below 25mph as expected (+ a little margin)
if ret.vEgo < self.CP.minEnableSpeed + 2.:
events.append(create_event('speedTooLow', [ET.IMMEDIATE_DISABLE]))
else:
events.append(create_event("cruiseDisabled", [ET.IMMEDIATE_DISABLE]))
if self.CS.CP.minEnableSpeed > 0 and ret.vEgo < 0.001:
events.append(create_event('manualRestart', [ET.WARNING]))
cur_time = sec_since_boot()
enable_pressed = False
# handle button presses
for b in ret.buttonEvents:
# do enable on both accel and decel buttons
if b.type in ["accelCruise", "decelCruise"] and not b.pressed:
print "enabled pressed at", cur_time
self.last_enable_pressed = cur_time
enable_pressed = True
# do disable on button down
if b.type == "cancel" and b.pressed:
events.append(create_event('buttonCancel', [ET.USER_DISABLE]))
if self.CP.enableCruise:
# KEEP THIS EVENT LAST! send enable event if button is pressed and there are
# NO_ENTRY events, so controlsd will display alerts. Also not send enable events
# too close in time, so a no_entry will not be followed by another one.
      # TODO: button press should be the only thing that triggers enable
if ((cur_time - self.last_enable_pressed) < 0.2 and
(cur_time - self.last_enable_sent) > 0.2 and
ret.cruiseState.enabled) or \
(enable_pressed and get_events(events, [ET.NO_ENTRY])):
events.append(create_event('buttonEnable', [ET.ENABLE]))
self.last_enable_sent = cur_time
elif enable_pressed:
events.append(create_event('buttonEnable', [ET.ENABLE]))
ret.events = events
ret.canMonoTimes = canMonoTimes
# update previous brake/gas pressed
self.gas_pressed_prev = ret.gasPressed
self.brake_pressed_prev = ret.brakePressed
# cast to reader so it can't be modified
return ret.as_reader()
# pass in a car.CarControl
# to be called @ 100hz
def apply(self, c):
if c.hudControl.speedVisible:
hud_v_cruise = c.hudControl.setSpeed * CV.MS_TO_KPH
else:
hud_v_cruise = 255
hud_alert = {
"none": AH.NONE,
"fcw": AH.FCW,
"steerRequired": AH.STEER,
"brakePressed": AH.BRAKE_PRESSED,
"wrongGear": AH.GEAR_NOT_D,
"seatbeltUnbuckled": AH.SEATBELT,
"speedTooHigh": AH.SPEED_TOO_HIGH}[str(c.hudControl.visualAlert)]
snd_beep, snd_chime = {
"none": (BP.MUTE, CM.MUTE),
"beepSingle": (BP.SINGLE, CM.MUTE),
"beepTriple": (BP.TRIPLE, CM.MUTE),
"beepRepeated": (BP.REPEATED, CM.MUTE),
"chimeSingle": (BP.MUTE, CM.SINGLE),
"chimeDouble": (BP.MUTE, CM.DOUBLE),
"chimeRepeated": (BP.MUTE, CM.REPEATED),
"chimeContinuous": (BP.MUTE, CM.CONTINUOUS)}[str(c.hudControl.audibleAlert)]
pcm_accel = int(clip(c.cruiseControl.accelOverride,0,1)*0xc6)
self.CC.update(self.sendcan, c.enabled, self.CS, self.frame, \
c.actuators, \
c.cruiseControl.speedOverride, \
c.cruiseControl.override, \
c.cruiseControl.cancel, \
pcm_accel, \
hud_v_cruise, c.hudControl.lanesVisible, \
hud_show_car = c.hudControl.leadVisible, \
hud_alert = hud_alert, \
snd_beep = snd_beep, \
snd_chime = snd_chime)
self.frame += 1
|
py | 1a3692f6e50978d66eb00297b881ce78ebab6eb4 | #!/usr/bin/env python3
import argparse
import socketserver
import signal
import sys
import handlers
from util import eprint
def get_arguments():
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--port", "-p", help="Local port to bind to", default=2525, type=int)
parser.add_argument("--host", help="Hostname to bind to", default="localhost")
return parser.parse_args()
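# Illustrative invocation (values are examples; the script filename is assumed):
#   python3 server.py --port 2525 --host 0.0.0.0
# get_arguments() then exposes args.port == 2525 and args.host == "0.0.0.0".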
def main():
args = get_arguments()
# Enable socket reuse for quicker testing
socketserver.TCPServer.allow_reuse_address = True
with socketserver.ThreadingTCPServer((args.host, args.port), handlers.SMTPHandler) as server:
def close_handler(signal, frame):
eprint("Shutdown requested")
server.server_close()
eprint("Shutting down")
sys.exit(0)
signal.signal(signal.SIGINT, close_handler)
server.serve_forever()
if __name__ == '__main__':
main()
|
py | 1a3693fe3a4237035fe987c5b6c9c9bbc0f15c80 | from sqlalchemy import Column, Integer, SmallInteger, String, Text, DateTime, Boolean
from sqlalchemy import TypeDecorator, ForeignKey, inspect
from proj.config import CONF
from proj.extensions import sql_db
from datetime import datetime
# "now" is used by TimestampMixin below as the default/onupdate timestamp factory,
# but was never defined in this module; define it here using UTC time.
def now():
    return datetime.utcnow()
class ModelMixin(object):
def save(self):
sql_db.session.add(self)
sql_db.session.commit()
class TimestampMixin(object):
created_at = Column(DateTime, default=now, nullable=False)
updated_at = Column(DateTime, default=now, onupdate=now, nullable=False)
class MySqlModel(sql_db.Model, ModelMixin, TimestampMixin):
__tablename__ = 'my_model'
id = Column(Integer, primary_key=True)
def to_dict(self):
return {
'id': self.id
}
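# Illustrative usage (assumes a configured sql_db session; values are invented):
#   row = MySqlModel()
#   row.save()       # add + commit via ModelMixin
#   row.to_dict()    # -> {'id': 1}; created_at/updated_at are filled in by TimestampMixin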
|
py | 1a3694441ed1afbe6861421d43e1cbd78e8785ea | import os
from sent2vec.vectorizer import Vectorizer
from scipy import spatial
def compare_two_sentences(sentence_1, sentence_2):
sentences = [sentence_1, sentence_2]
vectorizer = Vectorizer()
vectorizer.bert(sentences)
vec_1, vec_2 = vectorizer.vectors
dist = spatial.distance.cosine(vec_1, vec_2)
return dist
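# Illustrative usage (sentences invented): a smaller cosine distance means more similar, e.g.
#   compare_two_sentences("the cat sat on the mat", "a cat was sitting on a mat")
# should return a value much closer to 0 than a comparison of two unrelated sentences.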
def dir_creator(dir_name):
# print('checking_dir:', dir_name)
try:
tmp_dir = os.getcwd()
os.chdir(dir_name)
for _ in dir_name.split('/')[:-1]:
os.chdir('..')
os.chdir(tmp_dir)
except FileNotFoundError:
if len(dir_name.split('/')) > 1:
tot_dir = ''
for dir in dir_name.split('/'):
tot_dir += dir
try:os.mkdir(tot_dir)
except FileExistsError:pass
tot_dir += '/'
else:
os.mkdir(dir_name)
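# Note: on Python >= 3.2 the nested-directory handling above can be collapsed into a single
# standard-library call with the same end state (sketch, no cwd juggling required):
#   os.makedirs(dir_name, exist_ok=True)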
|
py | 1a3694d7a6a38d8a53c4f2fa7a832d41febba42a | # CDR3translator
# James M. Heather, February 2016, UCL
# https://innate2adaptive.github.io/Decombinator/
##################
### BACKGROUND ###
##################
# Take any decombined data and output the functional CDR3s only
# CDR3s must be: in-frame, lacking stop codons, and running from a conserved cysteine to an FGXG motif (or appropriate alternatives)
# Originally built on PlotDCR.py (itself modified from dcr v1.4), then algorithm swapped
# Now uses CDR3 detection based on Katharine's functions.py script
# Uses defined conserved cysteine residues, and allows for atypical TRAJ CDR3-ending motifs
# Makes use of functions originally developed in Katharine Best's functions.py script and Niclas Thomas' Decombinator (v1.2)
# Note that this version provides the capabilities to generate CDR3s from all genes covered in the extended Decombinator tags
# Many of these genes will never generate CDR3s from this code regardless of whether they're included in CDR3s
# This is typically due to V genes that lack the conserved C residue, or contain stop codons upstream of it.
# These have been left in, both to provide a single location that describes the whole heterogeneity of the prototypical alpha/beta genes
# Built on CDR3ulator.py v2
# New in v3:
# Have an option to output a file of non-functional rearrangements
# Provides an option to turn off statistics standard out results
# This now includes the percentages of the different reasons for being assigned non-functional
##################
###### INPUT #####
##################
# Takes any text file in comma-space (", ") delimited decombinator format
# 5-part TCR identifier must come first, followed by any additional fields, such as frequency
# Note that in addition to the same FASTA files that Decombinator makes use of, this script requires additional '.translate' files
# These contain four comma-delimited fields, which allow for the correct translation of TCR sequences from DCR indexes
# Those fields are: Gene name, conserved position (of cysteine or FGXG motif), conserved residue (when the C/FGXG differs), and IMGT-designated functionality
##################
##### OUTPUT #####
##################
# Users have options of two output, comma-delimited formats:
# '.cdr3' files, which consist of the unique productively-rearranged CDR3s from the original file, with frequencies
# '.dcrcdr3' files, which contains the five-part Decombinator index before the CDR3 it encodes and its frequency
# The choice of which file format is used is decided by altering the 'inputargs['dcroutput']' variable
# Note that output files will likely be shorter than the input file, due to multiple TCR sequences encoding the same CDR3
# Users also have the option to include the final three residues of the CDR3 in the output, or to stop at the phenylalanine
# The script also outputs some simple statistics about the number of productive rearrangements, and the frequency of different functionality genes
# NB: See IMGT for further information on definitions of functionality and productivity:
# http://www.imgt.org/IMGTScientificChart/SequenceDescription/IMGTfunctionality.html
# Essentially, 'functionality' refers to the predicted ability of certain germline genes to contribute to working receptors
# 'Productive' refers to the predicted ability of a given rearrangement to encode a working receptor chain
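# For orientation, a purely hypothetical example (gene indexes and sequences are invented):
# a .freq input line "42, 12, 3, 0, GGGACAGGGGGC, 57" that translates productively would add its
# frequency (57) to a line like "CASSLGQGNTEAFF, 57" in the .cdr3 output, or to
# "42, 12, 3, 0, GGGACAGGGGGC:CASSLGQGNTEAFF, 57" in the .dcrcdr3 output.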
##################
#### PACKAGES ####
##################
from __future__ import division
from Bio.Seq import Seq
from Bio.Alphabet import generic_dna
from Bio import SeqIO
from time import strftime
import argparse
import string
import re
import sys
import collections as coll
import os
import urllib2
import warnings
import gzip
__version__ = '3.1'
# Supress Biopython translation warning when translating sequences where length % 3 != 0
warnings.filterwarnings("ignore")
###################
#### FUNCTIONS ####
###################
def args():
"""args(): Obtains command line arguments which dictate the script's behaviour"""
# Help flag
parser = argparse.ArgumentParser(
description='Translate and extract CDR3 sequences from Decombinator classifier files. Please see https://innate2adaptive.github.io/Decombinator/ for details.')
# Add arguments
parser.add_argument(
'-in', '--infile', type=str, help='File containing 5 part Decombinator indexes, (with/without frequencies)', required=True)
parser.add_argument(
'-c', '--chain', type=str, help='TCR chain (a/b/g/d)', required=False)
parser.add_argument(
'-sp', '--species', type=str, help='Specify which species TCR repertoire the data consists of (human or mouse). Default = human', required=False, default="human")
parser.add_argument(
'-tg', '--tags', type=str, help='Specify which Decombinator tag set to use (extended or original). Default = extended', required=False, default="extended")
parser.add_argument(
'-s', '--suppresssummary', action='store_true', help='Suppress the production of summary data log', required=False)
parser.add_argument(
'-dz', '--dontgzip', action='store_true', help='Stop the output FASTQ files automatically being compressed with gzip', required=False)
parser.add_argument(
'-dc', '--dontcount', action='store_true', help='Stop printing the running count', required=False)
parser.add_argument(
'-ex', '--extension', type=str, help='Specify the file extension of the output translation file. Default = \"cdr3\"', required=False, default="cdr3")
parser.add_argument(
'-npx', '--npextension', type=str, help='Specify the file extension of the output nonproductive file. Default = \"np\"', required=False, default="np")
parser.add_argument(
'-do', '--dcroutput', action='store_true', help='Optionally include Decombinator TCR index along with the CDR3 sequence and frequency', \
required=False)
parser.add_argument(
'-tfdir', '--tagfastadir', type=str, help='Path to folder containing TCR FASTA and Decombinator tag files, for offline analysis. \
Default = \"Decombinator-Tags-FASTAs\".', required=False, default="Decombinator-Tags-FASTAs")
parser.add_argument(
'-gxg', '--includeGXG', action='store_true', help='Optionally include the \"GXG\" motif following the conserved phenylalanine residue that terminates the CDR3 region', required=False)
parser.add_argument(
'-np', '--nonproductive', action='store_true', help='Optionally output an additional file containing the non-productive TCR rearrangements', required=False)
return parser.parse_args()
def findfile(testfile):
""" Check whether file is present at given path """
try:
testopen = open(str(testfile), "rU")  # use the argument rather than relying on the global 'filename'
testopen.close()
except:
print 'Cannot find the specified input file. Please try again'
sys.exit()
def read_tcr_file(species, tagset, gene, filetype, expected_dir_name):
""" Reads in the FASTA and tag data for the appropriate TCR locus """
# Define expected file name
expected_file = species + "_" + tagset + "_" + "TR" + chain.upper() + gene.upper() + "." + filetype
# First check whether the files are available locally (in pwd or in bundled directory)
if os.path.isfile(expected_file):
fl = expected_file
fl_opener = open
elif os.path.isfile(expected_dir_name + os.sep + expected_file):
fl = expected_dir_name + os.sep + expected_file
fl_opener = open
else:
try:
fl = "https://raw.githubusercontent.com/JamieHeather/Decombinator-Tags-FASTAs/master/" + expected_file
urllib2.urlopen(urllib2.Request(fl)) # Request URL, see whether is found
fl_opener = urllib2.urlopen
except:
print "Cannot find following file locally or online:", expected_file
print "Please either run Decombinator with internet access, or point Decombinator to local copies of the tag and FASTA files with the \'-tf\' flag."
sys.exit()
# Return opened file, for either FASTA or tag file parsing
return fl_opener(fl)
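# For example, with species "human", tagset "extended", chain "b" and gene "v", the file
# requested above is "human_extended_TRBV.fasta" (or ".tags"/".translate" for the other file types).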
def sort_permissions(fl):
# Need to ensure proper file permissions on output data
# If users are running pipeline through Docker might otherwise require root access
if oct(os.stat(fl).st_mode)[4:] != '666':
os.chmod(fl, 0o666)
def import_gene_information(inputargs):
""" Obtains gene-specific information for translation """
# Runs first: reads in V and J gene sequence and name data (from fasta files)
# and positions of conserved cysteine residues in V genes (from separate files)
# If files cannot be found in local directory, script looks for them online at GitHub
# NB that a number of pseudogenes have no officially designated conserved C (or indeed a 3' C at all)
# Where possible, the nearest suitable C residue is used; where not, an arbitrary position of 0 is given
# Somewhat moot, as most pseudogenes contain a number of stop codons and thus cannot produce productive rearrangements
# First check that valid tag/species combinations have been used
global chainnams, chain
chain = inputargs['chain']
if inputargs['tags'] == "extended" and inputargs['species'] == "mouse":
print "Please note that there is currently no extended tag set for mouse TCR genes.\n \
Decombinator will now switch the tag set in use from \'extended\' to \'original\'.\n \
In future, consider editing the script to change the default, or use the appropriate flags (-sp mouse -tg original)."
inputargs['tags'] = "original"
if inputargs['tags'] == "extended" and ( chain == 'g' or chain == 'd' ):
print "Please note that there is currently no extended tag set for gamma/delta TCR genes.\n \
Decombinator will now switch the tag set in use from \'extended\' to \'original\'.\n \
In future, consider editing the script to change the default, or use the appropriate flags."
inputargs['tags'] = "original"
# Check species information
if inputargs['species'] not in ["human", "mouse"]:
print "Species not recognised. Please select either \'human\' (default) or \'mouse\'.\n \
If mouse is required by default, consider changing the default value in the script."
sys.exit()
# Look for tag and V/J fasta and cysteine position files: if these cannot be found in the working directory, source them from GitHub repositories
# Note that fasta/tag files fit the pattern "species_tagset_gene.[fasta/tags]"
# I.e. "[human/mouse]_[extended/original]_TR[A/B/G/D][V/J].[fasta/tags]"
for gene in ['v', 'j']:
# Get FASTA data
fasta_file = read_tcr_file(inputargs['species'], inputargs['tags'], gene, "fasta", inputargs['tagfastadir'])
globals()[gene+"_genes"] = list(SeqIO.parse(fasta_file, "fasta"))
fasta_file.close()
globals()[gene+"_regions"] = [str(string.upper(item.seq)) for item in globals()[gene+"_genes"]]
globals()[gene+"_names"] = [str(string.upper(item.id).split("|")[1]) for item in globals()[gene+"_genes"]]
# Get conserved translation residue sites and functionality data
translation_file = read_tcr_file(inputargs['species'], inputargs['tags'], gene, "translate", inputargs['tagfastadir'])
translate_data = [x.rstrip() for x in list(translation_file)]
translation_file.close()
globals()[gene+"_translate_position"] = [int(x.split(",")[1]) for x in translate_data]
globals()[gene+"_translate_residue"] = [x.split(",")[2] for x in translate_data]
globals()[gene+"_functionality"] = [x.split(",")[3] for x in translate_data]
return v_regions, j_regions, v_names, j_names, v_translate_position, v_translate_residue, j_translate_position, j_translate_residue,\
v_functionality, j_functionality
def get_cdr3(dcr, chain, vregions, jregions, vtranslate_pos, vtranslate_res, jtranslate_pos, jtranslate_res, includefgxg):
""" Checks the productivity of a given DCR-assigned rearrangement
Returns a 1 if productive, 0 if not """
# NB: A productively rearranged receptor does not necessarily mean that it is the working receptor used in a cell
# It could be a silenced chain that isn't used, or could have inactivating mutations upstream of the sequenced region
# 0.5 Set up check variables
# Boolean productivity checks that CDR3s must pass
in_frame = 0
no_stop = 0
found_c = 0
found_fgxg = 0
# CDR3-defining positions
start_cdr3 = 0
end_cdr3 = 0
# 1. Rebuild whole nucleotide sequence from Decombinator assignment
classifier_elements = dcr.split(', ')
v = int(classifier_elements[0])
j = int(classifier_elements[1])
vdel = int(classifier_elements[2])
jdel = int(classifier_elements[3])
ins_nt = classifier_elements[4]
if vdel == 0:
v_used = vregions[v]
else:
v_used = vregions[v][:-vdel]
j_used = jregions[j][jdel:]
nt = ''.join([v_used, ins_nt, j_used])
# 2. Translate
aa = str(Seq(nt, generic_dna).translate())
# 3. Check whether whole rearrangement is in frame
if (len(nt)-1) % 3 == 0:
in_frame = 1
else:
if '*' in aa:
return "OOF_with_stop"
else:
return "OOF_without_stop"
# 4. Check for stop codons in the in-frame rearrangements
if '*' not in aa:
no_stop = 1
else:
return "IF_with_stop"
# 5. Check for conserved cysteine in the V gene
cdr3_start = vtranslate_pos
cdr3_c = vtranslate_res
if aa[vtranslate_pos[v]-1] == vtranslate_res[v]:
found_c = 1
start_cdr3 = vtranslate_pos[v]-1
else:
return "No_conserved_cysteine"
# 5.5 Having found conserved cysteine, only need look downstream to find other end of CDR3
downstream_c = aa[start_cdr3:]
# 6. Check for presence of FGXG motif (or equivalent)
site = downstream_c[jtranslate_pos[j]:jtranslate_pos[j]+4]
if re.findall(jtranslate_res[j], site):
if includefgxg == True:
end_cdr3 = len(downstream_c) + jtranslate_pos[j] + start_cdr3 + 4
else:
end_cdr3 = len(downstream_c) + jtranslate_pos[j] + start_cdr3 + 1
else:
return "No_conserved_FGXG"
return aa[start_cdr3:end_cdr3]
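# Illustrative call (indexes and insert sequence invented): get_cdr3("42, 12, 3, 0, GGGACAGGGGGC",
# chain, v_regions, j_regions, v_translate_position, v_translate_residue, j_translate_position,
# j_translate_residue, False) returns either the CDR3 amino-acid string or one of the failure
# labels: "OOF_with_stop", "OOF_without_stop", "IF_with_stop", "No_conserved_cysteine", "No_conserved_FGXG".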
###################################################
######## CHECK INPUT FILES AND PARAMETERS #########
###################################################
if __name__ == '__main__':
# Get parameters
inputargs = vars(args())
counts = coll.Counter()
if inputargs['infile'].endswith('.gz'):
opener = gzip.open
else:
opener = open
# Get chain information
if not inputargs['chain']:
# If chain not given, try and infer from input file name
chaincheck = [x for x in ["alpha", "beta", "gamma", "delta"] if x in inputargs['infile'].lower()]
if len(chaincheck) == 1:
chain = chaincheck[0][0]
else:
print "TCR chain not recognised. Please choose from a/b/g/d (case-insensitive)."
sys.exit()
else:
if inputargs['chain'].upper() in ['A', 'ALPHA', 'TRA', 'TCRA']:
chain = "a"
elif inputargs['chain'].upper() in ['B', 'BETA', 'TRB', 'TCRB']:
chain = "b"
elif inputargs['chain'].upper() in ['G', 'GAMMA', 'TRG', 'TCRG']:
chain = "g"
elif inputargs['chain'].upper() in ['D', 'DELTA', 'TRD', 'TCRD']:
chain = "d"
else:
print "TCR chain not recognised. Please choose from a/b/g/d (case-insensitive)."
sys.exit()
inputargs['chain'] = chain # Correct inputarg chain value so that import gene function gets correct input
suffix = "." + inputargs['extension']
filename = inputargs['infile']
findfile(filename)
if inputargs['nonproductive'] == False and inputargs['npextension'] != 'np':
print "Warning: a non-default extension was provided for the non-productive output data (-nxt), yet that data was not output (-np False)."
print "Script will assume you meant to output NP data and do so."
inputargs['nonproductive'] = True
if inputargs['extension'] == inputargs['npextension']:
print "Error: Extensions for output CDR3s (-ex) and output non-productive sequences (-npx) cannot be the same. Please edit and re-run."
sys.exit()
####################################
########## EXTRACT CDR3s ###########
####################################
v_regions, j_regions, v_names, j_names, v_translate_position, v_translate_residue, j_translate_position, j_translate_residue, \
v_functionality, j_functionality = import_gene_information(inputargs)
infile = opener(filename, "rU")
counts['line_count'] = 0
# Count non-productive rearrangments
fail_count = coll.Counter()
fails = ["OOF_with_stop", "OOF_without_stop", "IF_with_stop", "No_conserved_cysteine", "No_conserved_FGXG"]
# Store and count CDR3s
if inputargs['dcroutput'] == False:
cdr3_count = coll.Counter()
elif inputargs['dcroutput'] == True:
dcr_cdr3_count = coll.Counter()
np_cdr3_count = coll.Counter()
chainnams = {"a": "alpha", "b": "beta", "g": "gamma", "d": "delta"}
print "Translating", chainnams[chain], "chain CDR3s from", inputargs['infile']
for line in infile:
counts['line_count'] += 1
comma = [m.start() for m in re.finditer(',', line)]
if len(comma) == 4: # pure DCR file, just the five fields (no frequency)
in_dcr = line.rstrip()
use_freq = False
frequency = 1
elif len(comma) == 5: # assume that we're working from a .freq file (or equivalent)
in_dcr = str(line[:comma[4]])
frequency = int(line[comma[4]+2:].rstrip())
use_freq = True
elif len(comma) > 5: # assume that it's an n12 file (no frequency)
in_dcr = str(line[:comma[4]])
use_freq = False
frequency = 1
else:
print "Based on the number of commas per line, script is unable to determine file type. Please ensure you're inputting a valid file (e.g. .n12 or .freq)."
sys.exit()
cdr3 = get_cdr3(in_dcr, chain, v_regions, j_regions, v_translate_position, v_translate_residue, j_translate_position, j_translate_residue, inputargs['includeGXG'])
dcr_cdr3 = in_dcr + ":" + cdr3
v = int(line[:comma[0]])
j = int(line[comma[0]+2:comma[1]])
if cdr3 not in fails:
counts['prod_recomb'] += 1
productivity = "P"
if inputargs['dcroutput'] == False:
cdr3_count[cdr3] += frequency
elif inputargs['dcroutput'] == True:
dcr_cdr3_count[dcr_cdr3] += frequency
else:
np_cdr3_count[dcr_cdr3] += frequency
fail_count[cdr3] += 1
productivity = "NP"
# Count the number of each type of gene functionality (by IMGT definitions, based on prototypic gene)
if inputargs['tags'] == 'extended' and inputargs['species'] == 'human':
counts[productivity + "_" + "V-" + v_functionality[v]] += 1
counts[productivity + "_" + "J-" + j_functionality[j]] += 1
##########################
##### OUTPUT RESULTS #####
##########################
infile1 = inputargs['infile']
file_id1 = infile1.split('/')[-1].split('.')[0]
if inputargs['dcroutput'] == True:
if inputargs['extension'] == 'cdr3': # Keep default suffixes unless specified otherwise
suffix = '.dcrcdr3'
outpath = ''
outfilename = outpath + file_id1 + suffix
#outfilename = filename.split(".")[0] + suffix
outfile = open(outfilename, "w")
for x in dcr_cdr3_count:
if use_freq == True:
outtext = x + ", " + str(dcr_cdr3_count[x])
else:
outtext = x
print >> outfile, outtext
infile.close()
outfile.close()
elif inputargs['dcroutput'] == False:
suffix = "." + inputargs['extension']
outpath = ''
outfilename = outpath + file_id1 + suffix
#outfilename = filename.split(".")[0] + suffix
outfile = open(outfilename, "w")
for x in cdr3_count:
if use_freq == True:
outtext = x + ", " + str(cdr3_count[x])
else:
outtext = x
print >> outfile, outtext
infile.close()
outfile.close()
print "CDR3 data written to", outfilename
# Compress output
if inputargs['dontgzip'] == False:
print "Compressing CDR3 output file to", outfilename + ".gz"
with open(outfilename) as infile, gzip.open(outfilename + '.gz', 'wb') as outfile:
outfile.writelines(infile)
os.unlink(outfilename)
outfilenam = outfilename + ".gz"
else:
outfilenam = outfilename
sort_permissions(outfilenam)
# Output non-productive rearrangements
counts['NP_count'] = sum(fail_count.values())
if inputargs['nonproductive'] == True:
npsuffix = inputargs['npextension']
#npfilename = filename.split(".")[0]+"." + npsuffix
npfilename = outpath + file_id1 + "." + npsuffix
npfile = open(npfilename, "w")
for x in np_cdr3_count:
if use_freq == True:
outtext = x + ", " + str(np_cdr3_count[x])
else:
outtext = x
print >> npfile, outtext
npfile.close()
if inputargs['dontgzip'] == False:
print "Compressing non-productive rearrangement output file,", npfilename, "..."
with open(npfilename) as infile, gzip.open(npfilename + '.gz', 'wb') as outfile:
outfile.writelines(infile)
os.unlink(npfilename)
npfilename = npfilename + ".gz"
print "Non-productive rearrangement data written to", npfilename
sort_permissions(npfilename)
# Write data to summary file
if inputargs['suppresssummary'] == False:
# Check for directory and make summary file
if not os.path.exists('Logs'):
os.makedirs('Logs')
date = strftime("%Y_%m_%d")
# Check for existing date-stamped file
summaryname = "Logs/" + date + "_" + file_id1 + "_CDR3_Translation_Summary.csv"
if not os.path.exists(summaryname):
summaryfile = open(summaryname, "w")
else:
# If one exists, append an incrementing counter to the day-stamped filename
for i in range(2,10000):
summaryname = "Logs/" + date + "_" + file_id1 + "_CDR3_Translation_Summary" + str(i) + ".csv"
if not os.path.exists(summaryname):
summaryfile = open(summaryname, "w")
break
# Generate string to write to summary file
summstr = "Property,Value\nDirectory," + os.getcwd() + "\nInputFile," + inputargs['infile'] + "\nOutputFile," + outfilenam \
+ "\nDateFinished," + date + "\nTimeFinished," + strftime("%H:%M:%S") + "\n\nInputArguments:,\n"
for s in ['species', 'chain','extension', 'tags', 'dontgzip', 'includeGXG', 'dcroutput', 'nonproductive']:
summstr = summstr + s + "," + str(inputargs[s]) + "\n"
if inputargs['nonproductive'] == True:
summstr = summstr + 'npextension,' + inputargs['npextension'] + '\nNPdataOutputFile,' + npfilename + "\n"
summstr = summstr + "\nNumberUniqueDCRsInput," + str(counts['line_count']) \
+ "\nNumberUniqueDCRsProductive," + str(counts['prod_recomb']) \
+ "\nNumberUniqueDCRsNonProductive," + str(counts['NP_count'])
if inputargs['tags'] == 'extended' and inputargs['species'] == 'human':
summstr = summstr + "\n\nFunctionalityOfGermlineGenesUsed,"
for p in ['P', 'NP']:
for g in ['V', 'J']:
for f in ['F', 'ORF', 'P']:
target = p + '_' + g + '-' + f
summstr = summstr + '\n' + target + ',' + str(counts[target])
print >> summaryfile, summstr
summaryfile.close()
sort_permissions(summaryname)
sys.exit()
|
py | 1a3695c664cdb9be51c63776a3e50a7cf41d8dd8 | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test merkleblock fetch/validation
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
class MerkleBlockTest(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 4
def setup_network(self):
self.nodes = []
# Nodes 0/1 are "wallet" nodes
self.nodes.append(start_node(0, self.options.tmpdir, ["-debug"]))
self.nodes.append(start_node(1, self.options.tmpdir, ["-debug"]))
# Nodes 2/3 are used for testing
self.nodes.append(start_node(2, self.options.tmpdir, ["-debug"]))
self.nodes.append(start_node(3, self.options.tmpdir, ["-debug", "-txindex"]))
connect_nodes(self.nodes[0], 1)
connect_nodes(self.nodes[0], 2)
connect_nodes(self.nodes[0], 3)
self.is_network_split = False
self.sync_all()
def run_test(self):
print("Mining blocks...")
self.nodes[0].generate(105)
self.sync_all()
chain_height = self.nodes[1].getblockcount()
assert_equal(chain_height, 105)
assert_equal(self.nodes[1].getbalance(), 0)
assert_equal(self.nodes[2].getbalance(), 0)
node0utxos = self.nodes[0].listunspent(1)
tx1 = self.nodes[0].createrawtransaction([node0utxos.pop()], {self.nodes[1].getnewaddress(): 500})
txid1 = self.nodes[0].sendrawtransaction(self.nodes[0].signrawtransaction(tx1)["hex"], False, False, True)
tx2 = self.nodes[0].createrawtransaction([node0utxos.pop()], {self.nodes[1].getnewaddress(): 500})
txid2 = self.nodes[0].sendrawtransaction(self.nodes[0].signrawtransaction(tx2)["hex"], False, False, True)
assert_raises(JSONRPCException, self.nodes[0].gettxoutproof, [txid1])
self.nodes[0].generate(1)
blockhash = self.nodes[0].getblockhash(chain_height + 1)
self.sync_all()
txlist = []
blocktxn = self.nodes[0].getblock(blockhash, True)["tx"]
txlist.append(blocktxn[1])
txlist.append(blocktxn[2])
assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid1])), [txid1])
assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid1, txid2])), txlist)
assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid1, txid2], blockhash)), txlist)
txin_spent = self.nodes[1].listunspent(1).pop()
tx3 = self.nodes[1].createrawtransaction([txin_spent], {self.nodes[0].getnewaddress(): 500})
self.nodes[0].sendrawtransaction(self.nodes[1].signrawtransaction(tx3)["hex"], False, False, True)
self.nodes[0].generate(1)
self.sync_all()
txid_spent = txin_spent["txid"]
txid_unspent = txid1 if txin_spent["txid"] != txid1 else txid2
# We can't find the block from a fully-spent tx
# Doesn't apply to Pion Core - we have txindex always on
# assert_raises(JSONRPCException, self.nodes[2].gettxoutproof, [txid_spent])
# ...but we can if we specify the block
assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid_spent], blockhash)), [txid_spent])
# ...or if the first tx is not fully-spent
assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid_unspent])), [txid_unspent])
try:
assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid1, txid2])), txlist)
except JSONRPCException:
assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid2, txid1])), txlist)
# ...or if we have a -txindex
assert_equal(self.nodes[2].verifytxoutproof(self.nodes[3].gettxoutproof([txid_spent])), [txid_spent])
if __name__ == '__main__':
MerkleBlockTest().main()
|
py | 1a36963a027e957d345e9812c3285b1014a40aea | import itertools
import uuid
from dataclasses import dataclass, field
from typing import (
Generator,
Iterator,
Dict,
Sequence,
Optional,
TYPE_CHECKING,
Union,
Tuple,
List,
)
import numpy as np
import weaviate
from ..base.backend import BaseBackendMixin
from .... import Document
from ....helper import dataclass_from_dict
from ..registry import _REGISTRY
if TYPE_CHECKING:
from ....types import (
DocumentArraySourceType,
)
@dataclass
class WeaviateConfig:
"""This class stores the config variables to initialize
connection to the Weaviate server"""
n_dim: int
client: Union[str, weaviate.Client] = 'http://localhost:8080'
name: Optional[str] = None
serialize_config: Dict = field(default_factory=dict)
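# A minimal configuration sketch (hostname and class name are examples; the capitalized-name
# requirement is enforced in _init_storage below):
#   cfg = WeaviateConfig(n_dim=128, client='http://localhost:8080', name='Mydocs')
# This config object is what the weaviate storage backend below expects to receive.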
class BackendMixin(BaseBackendMixin):
"""Provide necessary functions to enable this storage backend."""
def _init_storage(
self,
_docs: Optional['DocumentArraySourceType'] = None,
config: Optional[Union[WeaviateConfig, Dict]] = None,
**kwargs,
):
"""Initialize weaviate storage.
:param _docs: the list of documents to initialize to
:param config: the config object used to initialize the connection to the weaviate server
:param kwargs: extra keyword arguments
:raises ValueError: only one of name or docs can be used for initialization,
raise an error if both are provided
"""
if not config:
raise ValueError('Config object must be specified')
elif isinstance(config, dict):
config = dataclass_from_dict(WeaviateConfig, config)
from ... import DocumentArray
self._n_dim = config.n_dim
self._serialize_config = config.serialize_config
if config.name and config.name != config.name.capitalize():
raise ValueError(
'Weaviate class name has to be capitalized. '
'Please capitalize when declaring the name field in config.'
)
self._persist = bool(config.name)
if isinstance(config.client, str):
self._client = weaviate.Client(config.client)
else:
self._client = config.client
self._config = config
self._schemas = self._load_or_create_weaviate_schema()
self._offset2ids, self._offset2ids_wid = self._get_offset2ids_meta()
_REGISTRY[self.__class__.__name__][self._class_name].append(self)
# To align with Sqlite behavior; if `docs` is not `None` and table name
# is provided, :class:`DocumentArraySqlite` will clear the existing
# table and load the given `docs`
if _docs is None:
return
elif isinstance(
_docs, (DocumentArray, Sequence, Generator, Iterator, itertools.chain)
):
self.clear()
self.extend(_docs)
else:
self.clear()
if isinstance(_docs, Document):
self.append(_docs)
def _get_weaviate_class_name(self) -> str:
"""Generate the class/schema name using the ``uuid1`` module with some
formatting to tailor to weaviate class name convention
:return: string representing the name of weaviate class/schema name of
this :class:`DocumentArrayWeaviate` object
"""
return ''.join([i for i in uuid.uuid1().hex if not i.isdigit()]).capitalize()
def _get_schema_by_name(self, cls_name: str) -> Dict:
"""Return the schema dictionary object with the class name
Content of the all dictionaries by this method are the same except the name
of the weaviate's ``class``
:param cls_name: the name of the schema/class in weaviate
:return: the schema dictionary
"""
# TODO: ideally we should only use one schema. this will allow us to deal with
# consistency better
return {
'classes': [
{
'class': cls_name,
"vectorizer": "none",
'vectorIndexConfig': {'skip': False},
'properties': [
{
'dataType': ['blob'],
'name': '_serialized',
'indexInverted': False,
},
],
},
{
'class': cls_name + 'Meta',
"vectorizer": "none",
'vectorIndexConfig': {'skip': True},
'properties': [
{
'dataType': ['string[]'],
'name': '_offset2ids',
'indexInverted': False,
},
],
},
]
}
def _load_or_create_weaviate_schema(self):
"""Create a new weaviate schema for this :class:`DocumentArrayWeaviate` object
if not present in weaviate or if ``self._config.name`` is None. If ``self._config.name``
is provided and not None and schema with the specified name exists in weaviate,
then load the object with the given ``self._config.name``
:return: the schemas of this :class`DocumentArrayWeaviate` object and its meta
"""
if not self._config.name:
name_candidate = self._get_weaviate_class_name()
doc_schemas = self._get_schema_by_name(name_candidate)
while self._client.schema.contains(doc_schemas):
name_candidate = self._get_weaviate_class_name()
doc_schemas = self._get_schema_by_name(name_candidate)
self._client.schema.create(doc_schemas)
self._config.name = name_candidate
return doc_schemas
doc_schemas = self._get_schema_by_name(self._config.name)
if self._client.schema.contains(doc_schemas):
return doc_schemas
self._client.schema.create(doc_schemas)
return doc_schemas
def _update_offset2ids_meta(self):
"""Update the offset2ids in weaviate the the current local version"""
if self._offset2ids_wid is not None and self._client.data_object.exists(
self._offset2ids_wid
):
self._client.data_object.update(
data_object={'_offset2ids': self._offset2ids},
class_name=self._meta_name,
uuid=self._offset2ids_wid,
)
else:
self._offset2ids_wid = str(uuid.uuid1())
self._client.data_object.create(
data_object={'_offset2ids': self._offset2ids},
class_name=self._meta_name,
uuid=self._offset2ids_wid,
)
def _get_offset2ids_meta(self) -> Tuple[List, str]:
"""Return the offset2ids stored in weaviate along with the name of the schema/class
in weaviate that stores meta information of this object
:return: a tuple with first element as a list of offset2ids and second element
being name of weaviate class/schema of the meta object
:raises ValueError: error is raised if meta class name is not defined
"""
if not self._meta_name:
raise ValueError('meta object is not defined')
resp = (
self._client.query.get(self._meta_name, ['_offset2ids', '_additional {id}'])
.do()
.get('data', {})
.get('Get', {})
.get(self._meta_name, [])
)
if not resp:
return [], None
elif len(resp) == 1:
return resp[0]['_offset2ids'], resp[0]['_additional']['id']
else:
raise ValueError('received multiple meta copies which is invalid')
@property
def name(self):
"""An alias to _class_name that returns the id/name of the class
in the weaviate of this :class:`DocumentArrayWeaviate`
:return: name of weaviate class/schema of this :class:`DocumentArrayWeaviate`
"""
return self._class_name
@property
def _class_name(self):
"""Return the name of the class in weaviate of this :class:`DocumentArrayWeaviate
:return: name of weaviate class/schema of this :class:`DocumentArrayWeaviate`
"""
if not self._schemas:
return None
return self._schemas['classes'][0]['class']
@property
def _meta_name(self):
"""Return the name of the class in weaviate that stores the meta information of
this :class:`DocumentArrayWeaviate`
:return: name of weaviate class/schema of class that stores the meta information
"""
# TODO: remove this after we combine the meta info to the DocumentArray class
if not self._schemas:
return None
return self._schemas['classes'][1]['class']
@property
def _class_schema(self) -> Optional[Dict]:
"""Return the schema dictionary of this :class:`DocumentArrayWeaviate`'s weaviate schema
:return: the dictionary representing this weaviate schema
"""
if not self._schemas:
return None
return self._schemas['classes'][0]
@property
def _meta_schema(self):
"""Return the schema dictionary of this weaviate schema that stores this object's meta
:return: the dictionary representing a meta object's weaviate schema
"""
if not self._schemas or len(self._schemas['classes']) < 2:
return None
return self._schemas['classes'][1]
def _doc2weaviate_create_payload(self, value: 'Document'):
"""Return the payload to store :class:`Document` into weaviate
:param value: document to create a payload for
:return: the payload dictionary
"""
if value.embedding is None:
embedding = np.zeros(self._n_dim)
else:
from ....math.ndarray import to_numpy_array
embedding = to_numpy_array(value.embedding)
if embedding.ndim > 1:
embedding = np.asarray(embedding).squeeze()
if embedding.shape != (self._n_dim,):
raise ValueError(
f'All documents must have embedding of shape n_dim: {self._n_dim}, receiving shape: {embedding.shape}'
)
# Weaviate expects vector to have dim 2 at least
# or get weaviate.exceptions.UnexpectedStatusCodeException: models.C11yVector
# hence we cast it to list of a single element
if len(embedding) == 1:
embedding = [embedding[0]]
return dict(
data_object={'_serialized': value.to_base64(**self._serialize_config)},
class_name=self._class_name,
uuid=self._wmap(value.id),
vector=embedding,
)
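# The dict above is shaped to be splatted straight into the weaviate client's create call,
# mirroring how _update_offset2ids_meta uses data_object.create below, i.e. (illustratively):
#   self._client.data_object.create(**self._doc2weaviate_create_payload(doc))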
def _wmap(self, doc_id: str):
"""the function maps doc id to weaviate id
:param doc_id: id of the document
:return: weaviate object id
"""
# appending class name to doc id to handle the case:
# daw1 = DocumentArrayWeaviate([Document(id=str(i), text='hi') for i in range(3)])
# daw2 = DocumentArrayWeaviate([Document(id=str(i), text='bye') for i in range(3)])
# daw2[0, 'text'] == 'hi' # this will be False if we don't append class name
return str(uuid.uuid5(uuid.NAMESPACE_URL, doc_id + self._class_name))
def _get_storage_infos(self) -> Dict:
storage_infos = super()._get_storage_infos()
return {
'Backend': 'Weaviate (www.semi.technology/developers/weaviate)',
'Hostname': self._config.client,
'Schema Name': self._config.name,
'Serialization Protocol': self._config.serialize_config.get('protocol'),
**storage_infos,
}
|
py | 1a36968c36896ac9dd7b7cd849ce5073fdde6fcf | # coding: utf-8
import pprint
import re
import six
class UpdateDomainLoginPolicyRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'domain_id': 'str',
'body': 'UpdateDomainLoginPolicyRequestBody'
}
attribute_map = {
'domain_id': 'domain_id',
'body': 'body'
}
def __init__(self, domain_id=None, body=None):
"""UpdateDomainLoginPolicyRequest - a model defined in huaweicloud sdk"""
self._domain_id = None
self._body = None
self.discriminator = None
self.domain_id = domain_id
if body is not None:
self.body = body
@property
def domain_id(self):
"""Gets the domain_id of this UpdateDomainLoginPolicyRequest.
:return: The domain_id of this UpdateDomainLoginPolicyRequest.
:rtype: str
"""
return self._domain_id
@domain_id.setter
def domain_id(self, domain_id):
"""Sets the domain_id of this UpdateDomainLoginPolicyRequest.
:param domain_id: The domain_id of this UpdateDomainLoginPolicyRequest.
:type: str
"""
self._domain_id = domain_id
@property
def body(self):
"""Gets the body of this UpdateDomainLoginPolicyRequest.
:return: The body of this UpdateDomainLoginPolicyRequest.
:rtype: UpdateDomainLoginPolicyRequestBody
"""
return self._body
@body.setter
def body(self, body):
"""Sets the body of this UpdateDomainLoginPolicyRequest.
:param body: The body of this UpdateDomainLoginPolicyRequest.
:type: UpdateDomainLoginPolicyRequestBody
"""
self._body = body
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, UpdateDomainLoginPolicyRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
py | 1a3697957012962cd84ca5d3b14182d4609c0a9a | """Main FastAPI application."""
import asyncio
import logging
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from starlette.responses import Response
from starlette.requests import Request
from starlette.middleware.base import RequestResponseEndpoint
from opentrons import __version__
from .router import router
from .errors import exception_handlers
from .hardware import initialize_hardware, cleanup_hardware
from .service import initialize_logging
from .service.dependencies import get_protocol_manager
from .service.legacy.rpc import cleanup_rpc_server
from . import constants
log = logging.getLogger(__name__)
app = FastAPI(
title="Opentrons OT-2 HTTP API Spec",
description=(
"This OpenAPI spec describes the HTTP API of the Opentrons "
"OT-2. It may be retrieved from a robot on port 31950 at "
"/openapi. Some schemas used in requests and responses use "
"the `x-patternProperties` key to mean the JSON Schema "
"`patternProperties` behavior."
),
version=__version__,
)
# exception handlers
# TODO(mc, 2021-05-10): after upgrade to FastAPI > 0.61.2, we can pass these
# to FastAPI's `exception_handlers` arg instead. Current version has bug, see:
# https://github.com/tiangolo/fastapi/pull/1924
for exc_cls, handler in exception_handlers.items():
app.add_exception_handler(exc_cls, handler)
# cors
app.add_middleware(
CORSMiddleware,
allow_origins=("*"),
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
# main router
app.include_router(router=router)
@app.on_event("startup")
async def on_startup() -> None:
"""Handle app startup."""
initialize_logging()
initialize_hardware(app.state)
@app.on_event("shutdown")
async def on_shutdown() -> None:
"""Handle app shutdown."""
protocol_manager = await get_protocol_manager()
protocol_manager.remove_all()
shutdown_results = await asyncio.gather(
cleanup_rpc_server(app.state),
cleanup_hardware(app.state),
return_exceptions=True,
)
shutdown_errors = [r for r in shutdown_results if isinstance(r, BaseException)]
for e in shutdown_errors:
log.warning("Error during shutdown", exc_info=e)
@app.middleware("http")
async def api_version_response_header(
request: Request,
call_next: RequestResponseEndpoint,
) -> Response:
"""Attach Opentrons-Version headers to responses."""
# Attach the version to the request state. Optional dependency
# check_version_header will override this value if check passes.
request.state.api_version = constants.API_VERSION
response: Response = await call_next(request)
# Put the api version in the response header
response.headers[constants.API_VERSION_HEADER] = str(request.state.api_version)
response.headers[constants.MIN_API_VERSION_HEADER] = str(constants.MIN_API_VERSION)
return response
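# Illustrative effect (header names are defined in the constants module; exact names and values
# depend on that module): every response gains an "Opentrons-Version"-style header carrying
# request.state.api_version, plus a companion header advertising the minimum supported version.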
|
py | 1a3698436a5d1374fecf2ebafe965c537803de10 | import os
h, d, aim = 0, 0, 0
def forward(x):
global h, d, aim
h += x
d += aim * x
def down(x):
global aim
aim += x
def up(x):
global aim
aim -= x
if __name__ == '__main__':
with open(os.path.join('inputs', 'day2.txt')) as f:
moves = list(map(lambda x: x.split(' '), f.readlines()))
moves = list(map(lambda x: (x[0], int(x[1])), moves))
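# Each parsed entry has the form ("forward", 5), ("down", 3) or ("up", 2) (example values),
# so locals()[q[0]] below resolves to the matching handler function defined above.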
for q in moves:
locals()[q[0]](q[1])
print(h, d, h * d)
|
py | 1a36985a85898a3a3d5557092eb582e76c514063 | import FWCore.ParameterSet.Config as cms
from DQMServices.Core.DQMEDHarvester import DQMEDHarvester
bphEfficiency = DQMEDHarvester("DQMGenericClient",
subDirs = cms.untracked.vstring("HLT/BPH/*"),
verbose = cms.untracked.uint32(0), # Set to 2 for all messages
resolution = cms.vstring(),
efficiency = cms.vstring(
"effic_muPhi 'mu efficiency vs phi; mu phi [rad]; efficiency' muPhi_numerator muPhi_denominator",
"effic_muEta 'mu efficiency vs eta; mu eta [rad]; efficiency' muEta_numerator muEta_denominator",
"effic_muPt 'mu efficiency vs pt; mu pt [GeV]; efficiency' muPt_numerator muPt_denominator",
"effic_mu_d0 'mu efficiency vs d0; mu d0 [cm]; efficiency' mu_d0_numerator mu_d0_denominator",
"effic_mu_z0 'mu efficiency vs z0; mu z0 [cm]; efficiency' mu_z0_numerator mu_z0_denominator",
),
# efficiencyProfile = cms.untracked.vstring(
# "effic_met_vs_LS 'MET efficiency vs LS; LS; PF MET efficiency' metVsLS_numerator metVsLS_denominator"
# ),
)
bphClient = cms.Sequence(
bphEfficiency
)
|
py | 1a3698a30d5e7066e19d9226c3532dfa2bbce532 | from data.loveda import LoveDALoader
from utils.tools import *
from skimage.io import imsave
import os
def predict_test(model, cfg, ckpt_path=None, save_dir='./submit_test'):
os.makedirs(save_dir, exist_ok=True)
seed_torch(2333)
model_state_dict = torch.load(ckpt_path)
model.load_state_dict(model_state_dict, strict=True)
count_model_parameters(model)
model.eval()
print(cfg.EVAL_DATA_CONFIG)
eval_dataloader = LoveDALoader(cfg.EVAL_DATA_CONFIG)
with torch.no_grad():
for ret, ret_gt in tqdm(eval_dataloader):
ret = ret.to(torch.device('cuda'))
cls = model(ret)
cls = cls.argmax(dim=1).cpu().numpy()
for fname, pred in zip(ret_gt['fname'], cls):
imsave(os.path.join(save_dir, fname), pred.astype(np.uint8))
torch.cuda.empty_cache()
if __name__ == '__main__':
ckpt_path = './log/CBST_2Urban.pth'
from module.Encoder import Deeplabv2
cfg = import_config('st.cbst.2urban')
model = Deeplabv2(dict(
backbone=dict(
resnet_type='resnet50',
output_stride=16,
pretrained=True,
),
multi_layer=False,
cascade=False,
use_ppm=True,
ppm=dict(
num_classes=cfg.NUM_CLASSES,
use_aux=False,
),
inchannels=2048,
num_classes=cfg.NUM_CLASSES
)).cuda()
predict_test(model, cfg, ckpt_path) |
py | 1a36993aa2cd90862f55d11428448a26feff406d | #!/usr/bin/python
#
# Copyright 2002-2021 Barcelona Supercomputing Center (www.bsc.es)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -*- coding: utf-8 -*-
"""
PyCOMPSs Dummy API - Constraint
===============================
This file contains the dummy class constraint used as decorator.
"""
from pycompss.util.typing_helper import typing
class Constraint(object):
"""
Dummy constraint class (decorator style)
"""
def __init__(self, *args: typing.Any, **kwargs: typing.Any) -> None:
self.args = args
self.kwargs = kwargs
def __call__(self, f: typing.Any) -> typing.Any:
def wrapped_f(*args: typing.Any, **kwargs: typing.Any) -> typing.Any:
return f(*args, **kwargs)
return wrapped_f
constraint = Constraint
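# Illustrative usage sketch (the keyword name is an example; this dummy decorator accepts any
# kwargs and simply forwards the call unchanged):
@constraint(computing_units="1")
def _example_constrained_task(x: int) -> int:
    return x + 1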
|
py | 1a369943319b922d88592102fcfadd3d2d42d06f | # coding: utf-8
"""
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
import re
import json
from ..utils import sanitize_for_serialization
class IntegrationEvent(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
IntegrationEvent - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'id': 'str',
'self_uri': 'str',
'correlation_id': 'str',
'timestamp': 'datetime',
'level': 'str',
'event_code': 'str',
'message': 'MessageInfo',
'entities': 'list[EventEntity]',
'context_attributes': 'dict(str, str)',
'detail_message': 'MessageInfo',
'user': 'User'
}
self.attribute_map = {
'id': 'id',
'self_uri': 'selfUri',
'correlation_id': 'correlationId',
'timestamp': 'timestamp',
'level': 'level',
'event_code': 'eventCode',
'message': 'message',
'entities': 'entities',
'context_attributes': 'contextAttributes',
'detail_message': 'detailMessage',
'user': 'user'
}
self._id = None
self._self_uri = None
self._correlation_id = None
self._timestamp = None
self._level = None
self._event_code = None
self._message = None
self._entities = None
self._context_attributes = None
self._detail_message = None
self._user = None
@property
def id(self):
"""
Gets the id of this IntegrationEvent.
Unique ID for this event
:return: The id of this IntegrationEvent.
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""
Sets the id of this IntegrationEvent.
Unique ID for this event
:param id: The id of this IntegrationEvent.
:type: str
"""
self._id = id
@property
def self_uri(self):
"""
Gets the self_uri of this IntegrationEvent.
The URI for this object
:return: The self_uri of this IntegrationEvent.
:rtype: str
"""
return self._self_uri
@self_uri.setter
def self_uri(self, self_uri):
"""
Sets the self_uri of this IntegrationEvent.
The URI for this object
:param self_uri: The self_uri of this IntegrationEvent.
:type: str
"""
self._self_uri = self_uri
@property
def correlation_id(self):
"""
Gets the correlation_id of this IntegrationEvent.
Correlation ID for the event
:return: The correlation_id of this IntegrationEvent.
:rtype: str
"""
return self._correlation_id
@correlation_id.setter
def correlation_id(self, correlation_id):
"""
Sets the correlation_id of this IntegrationEvent.
Correlation ID for the event
:param correlation_id: The correlation_id of this IntegrationEvent.
:type: str
"""
self._correlation_id = correlation_id
@property
def timestamp(self):
"""
Gets the timestamp of this IntegrationEvent.
Time the event occurred. Date time is represented as an ISO-8601 string. For example: yyyy-MM-ddTHH:mm:ss[.mmm]Z
:return: The timestamp of this IntegrationEvent.
:rtype: datetime
"""
return self._timestamp
@timestamp.setter
def timestamp(self, timestamp):
"""
Sets the timestamp of this IntegrationEvent.
Time the event occurred. Date time is represented as an ISO-8601 string. For example: yyyy-MM-ddTHH:mm:ss[.mmm]Z
:param timestamp: The timestamp of this IntegrationEvent.
:type: datetime
"""
self._timestamp = timestamp
@property
def level(self):
"""
Gets the level of this IntegrationEvent.
Indicates the severity of the event.
:return: The level of this IntegrationEvent.
:rtype: str
"""
return self._level
@level.setter
def level(self, level):
"""
Sets the level of this IntegrationEvent.
Indicates the severity of the event.
:param level: The level of this IntegrationEvent.
:type: str
"""
allowed_values = ["INFO", "WARN", "ERROR", "CRITICAL"]
if level.lower() not in map(str.lower, allowed_values):
# print("Invalid value for level -> " + level)
self._level = "outdated_sdk_version"
else:
self._level = level
@property
def event_code(self):
"""
Gets the event_code of this IntegrationEvent.
A classification for the event. Suitable for programmatic searching, sorting, or filtering
:return: The event_code of this IntegrationEvent.
:rtype: str
"""
return self._event_code
@event_code.setter
def event_code(self, event_code):
"""
Sets the event_code of this IntegrationEvent.
A classification for the event. Suitable for programmatic searching, sorting, or filtering
:param event_code: The event_code of this IntegrationEvent.
:type: str
"""
self._event_code = event_code
@property
def message(self):
"""
Gets the message of this IntegrationEvent.
Message indicating what happened
:return: The message of this IntegrationEvent.
:rtype: MessageInfo
"""
return self._message
@message.setter
def message(self, message):
"""
Sets the message of this IntegrationEvent.
Message indicating what happened
:param message: The message of this IntegrationEvent.
:type: MessageInfo
"""
self._message = message
@property
def entities(self):
"""
Gets the entities of this IntegrationEvent.
Collection of entities affected by or pertaining to the event (e.g. a list of Integrations or Bridge connectors)
:return: The entities of this IntegrationEvent.
:rtype: list[EventEntity]
"""
return self._entities
@entities.setter
def entities(self, entities):
"""
Sets the entities of this IntegrationEvent.
Collection of entities affected by or pertaining to the event (e.g. a list of Integrations or Bridge connectors)
:param entities: The entities of this IntegrationEvent.
:type: list[EventEntity]
"""
self._entities = entities
@property
def context_attributes(self):
"""
Gets the context_attributes of this IntegrationEvent.
Map of context attributes specific to this event.
:return: The context_attributes of this IntegrationEvent.
:rtype: dict(str, str)
"""
return self._context_attributes
@context_attributes.setter
def context_attributes(self, context_attributes):
"""
Sets the context_attributes of this IntegrationEvent.
Map of context attributes specific to this event.
:param context_attributes: The context_attributes of this IntegrationEvent.
:type: dict(str, str)
"""
self._context_attributes = context_attributes
@property
def detail_message(self):
"""
Gets the detail_message of this IntegrationEvent.
Message with additional details about the event. (e.g. an exception cause.)
:return: The detail_message of this IntegrationEvent.
:rtype: MessageInfo
"""
return self._detail_message
@detail_message.setter
def detail_message(self, detail_message):
"""
Sets the detail_message of this IntegrationEvent.
Message with additional details about the event. (e.g. an exception cause.)
:param detail_message: The detail_message of this IntegrationEvent.
:type: MessageInfo
"""
self._detail_message = detail_message
@property
def user(self):
"""
Gets the user of this IntegrationEvent.
User that took an action that resulted in the event.
:return: The user of this IntegrationEvent.
:rtype: User
"""
return self._user
@user.setter
def user(self, user):
"""
Sets the user of this IntegrationEvent.
User that took an action that resulted in the event.
:param user: The user of this IntegrationEvent.
:type: User
"""
self._user = user
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_json(self):
"""
Returns the model as raw JSON
"""
return json.dumps(sanitize_for_serialization(self.to_dict()))
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
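# Illustrative round-trip (field values invented):
#   ev = IntegrationEvent()
#   ev.level = "INFO"
#   ev.event_code = "EXAMPLE-0001"
#   ev.to_json()   # serializes via to_dict() + sanitize_for_serialization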
|
py | 1a3699df30db5049f17c68be11f61fca583072a6 | import logging
import logging.handlers
import argparse
import sys
import os
import time
from bluetooth import *
from . import gpioservice
from .powerControllerModule import PowerThread
from .configControllerModule import ConfigController
from . import stateControllerModule
from .libInstaller import LibInstaller
from subprocess import call
class LoggerHelper(object):
def __init__(self, logger, level):
self.logger = logger
self.level = level
def write(self, message):
if message.rstrip() != "":
self.logger.log(self.level, message.rstrip())
gpioservice.setup()
gpioservice.loadInitialData()
stateController = stateControllerModule.StateController()
powerThread = PowerThread()
client_sock = None
config = ConfigController()
def setup_logging():
# Default logging settings
LOG_FILE = "/var/log/bluetoothservice.log"
LOG_LEVEL = logging.INFO
# Define and parse command line arguments
argp = argparse.ArgumentParser(description="Raspberry PI Bluetooth Server")
argp.add_argument("-l", "--log", help="log (default '" + LOG_FILE + "')")
# Grab the log file from arguments
args = argp.parse_args()
if args.log:
LOG_FILE = args.log
# Setup the logger
logger = logging.getLogger(__name__)
# Set the log level
logger.setLevel(LOG_LEVEL)
    # Make a rolling event log that rolls over at midnight and keeps the last 3 daily backups
handler = logging.handlers.TimedRotatingFileHandler(LOG_FILE,
when="midnight",
backupCount=3)
# Log messages should include time stamp and log level
formatter = logging.Formatter('%(asctime)s %(levelname)-8s %(message)s')
# Attach the formatter to the handler
handler.setFormatter(formatter)
# Attach the handler to the logger
logger.addHandler(handler)
# Replace stdout with logging to file at INFO level
sys.stdout = LoggerHelper(logger, logging.INFO)
# Replace stderr with logging to file at ERROR level
sys.stderr = LoggerHelper(logger, logging.ERROR)
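# Effect sketch: once setup_logging() has run, ordinary print() output from the
# rest of this script goes through LoggerHelper into the rotating log file at
# INFO level instead of the console, e.g.
#   setup_logging()
#   print("service starting")   # written to /var/log/bluetoothservice.log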
def loadPersistentData():
if config.loadPreferences():
if not config.isSQLiteInstalled():
libInstaller = LibInstaller()
libInstaller.installSQLite()
if config.setSQLiteInstalled():
restart(None, None)
else:
shutDown(None)
# Main loop
def main():
powerThread.start()
loadPersistentData()
print ("Starting main")
# We need to wait until Bluetooth init is done
time.sleep(5)
    # print ("Bluetooth initialised")
print (read_local_bdaddr())
# Make device visible
call("sudo hciconfig hci0 piscan", shell=True)
# Create a new server socket using RFCOMM protocol
server_sock = BluetoothSocket(RFCOMM)
# Bind to any port
server_sock.bind(("", PORT_ANY))
# Start listening
server_sock.listen(1)
# Get the port the server socket is listening
port = server_sock.getsockname()[1]
# The service UUID to advertise
uuid = "aaabf455-b0e1-4b88-b9c8-184e53f15663"
# Start advertising the service
advertise_service( server_sock, "TrainmoteServer",
service_id=uuid,
service_classes=[uuid, SERIAL_PORT_CLASS],
profiles=[SERIAL_PORT_PROFILE])
# Main Bluetooth server loop
client_sock = None
while True:
try:
# This will block until we get a new connection
if client_sock is None:
stateController.setState(stateControllerModule.STATE_NOT_CONNECTED)
print ("Waiting for connection on RFCOMM channel %d" % port)
client_sock, client_info = server_sock.accept()
print ("Accepted connection from ", client_info)
stateController.setState(stateControllerModule.STATE_CONNECTED)
# Read the data sent by the client
data = client_sock.recv(1024)
if len(data) == 0:
break
print ("Received [%s]" % data)
# Handle the request
response = gpioservice.receivedMessage(data)
client_sock.send(response)
print ("Sent back [%s]" % response)
            # Check if the response is a firmware update; if so, pull the update from git and restart the script.
if 'PERFORM_GIT_UPDATE' in response and 'success' in response:
call('sudo sh ./scripts/updateScript.sh', shell=True)
restart(server_sock, client_sock)
break
except IOError:
            print ("Error occurred")
closeClientConnection(client_sock)
client_sock = None
pass
except KeyboardInterrupt:
closeClientConnection(client_sock)
shutDown(server_sock)
break
def restart(server_sock, client_sock):
closeClientConnection(client_sock)
shutDown(server_sock)
os.execv(sys.executable, ['python'] + sys.argv)
def shutDown(server_sock):
powerThread.kill.set()
powerThread.isTurningOff = True
powerThread.join()
stateController.setState(stateControllerModule.STATE_SHUTDOWN)
if server_sock is not None:
server_sock.close()
print ("Server going down")
stateController.stop()
def closeClientConnection(client_sock):
print ("Closing client socket")
if client_sock is not None:
client_sock.close()
client_sock = None
if __name__ == '__main__':
main() |
py | 1a369a5a5d96435b238f49a9e3f623ae6617e89c | import requests
def esmoneda(cripto, monedas):
    return cripto in monedas
def main():
    monedas_list = []
    data = requests.get("https://api.coinmarketcap.com/v2/listings/").json()
    for cripto in data["data"]:
        monedas_list.append(cripto["symbol"])
    monedas = tuple(monedas_list)
    moneda = input("Enter the symbol of the coin to check: ")
    while not esmoneda(moneda, monedas):
        print("Invalid coin.")
        moneda = input("Enter the symbol of the coin: ")
    else:
        print("The coin", moneda, "is valid because it exists on coinmarketcap.com")
main()
|
py | 1a369b4e1210549070b160c88b7f5f48d04633ba | import frappe
def execute():
frappe.reload_doc("contacts", "doctype", "contact_email")
frappe.reload_doc("contacts", "doctype", "contact_phone")
frappe.reload_doc("contacts", "doctype", "contact")
contact_details = frappe.db.sql(
"""
SELECT
`name`, `email_id`, `phone`, `mobile_no`, `modified_by`, `creation`, `modified`
FROM `tabContact`
where not exists (select * from `tabContact Email`
where `tabContact Email`.parent=`tabContact`.name
and `tabContact Email`.email_id=`tabContact`.email_id)
""",
as_dict=True,
)
email_values = []
phone_values = []
for count, contact_detail in enumerate(contact_details):
phone_counter = 1
is_primary = 1
if contact_detail.email_id:
email_values.append(
(
1,
frappe.generate_hash(contact_detail.email_id, 10),
contact_detail.email_id,
"email_ids",
"Contact",
contact_detail.name,
1,
contact_detail.creation,
contact_detail.modified,
contact_detail.modified_by,
)
)
if contact_detail.phone:
is_primary_phone = 1 if phone_counter == 1 else 0
phone_values.append(
(
phone_counter,
frappe.generate_hash(contact_detail.email_id, 10),
contact_detail.phone,
"phone_nos",
"Contact",
contact_detail.name,
is_primary_phone,
0,
contact_detail.creation,
contact_detail.modified,
contact_detail.modified_by,
)
)
phone_counter += 1
if contact_detail.mobile_no:
is_primary_mobile_no = 1 if phone_counter == 1 else 0
phone_values.append(
(
phone_counter,
frappe.generate_hash(contact_detail.email_id, 10),
contact_detail.mobile_no,
"phone_nos",
"Contact",
contact_detail.name,
0,
is_primary_mobile_no,
contact_detail.creation,
contact_detail.modified,
contact_detail.modified_by,
)
)
if email_values and (count % 10000 == 0 or count == len(contact_details) - 1):
frappe.db.sql(
"""
INSERT INTO `tabContact Email`
(`idx`, `name`, `email_id`, `parentfield`, `parenttype`, `parent`, `is_primary`, `creation`,
`modified`, `modified_by`)
VALUES {}
""".format(
", ".join(["%s"] * len(email_values))
),
tuple(email_values),
)
email_values = []
if phone_values and (count % 10000 == 0 or count == len(contact_details) - 1):
frappe.db.sql(
"""
INSERT INTO `tabContact Phone`
(`idx`, `name`, `phone`, `parentfield`, `parenttype`, `parent`, `is_primary_phone`, `is_primary_mobile_no`, `creation`,
`modified`, `modified_by`)
VALUES {}
""".format(
", ".join(["%s"] * len(phone_values))
),
tuple(phone_values),
)
phone_values = []
frappe.db.add_index("Contact Phone", ["phone"])
frappe.db.add_index("Contact Email", ["email_id"])
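# Shape of the generated SQL (sketch): with three rows collected in email_values
# the statement ends in "VALUES %s, %s, %s" and frappe.db.sql() expands each %s
# from the corresponding tuple, so every batch is inserted in a single round trip.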
|
py | 1a369b89bdbd451a6b2be27e18c60bf61bafff3c | """ abstract class for all kinds of simulated connection """
from copy import copy
from enum import Enum
from simulator.eventSimulator import TickListener, NOPREDICT
__author__ = "Mirko Palmer <[email protected]>, Philipp S. Tiesel <[email protected]>"
__copyright__ = "Copyright 2017, FG INET, TU Berlin"
__license__ = "RELAXED CRAPL v0 BETA 1"
class state(Enum):
IDLE = 1
BUSY = 2
CLOSED = 3
connectionCounterCounter = -1
def connectionCounter():
global connectionCounterCounter
connectionCounterCounter += 1
return connectionCounterCounter
class Connection(TickListener):
def __init__(self, idleTimeout, ssl, origin, transferManager, eventSimulator):
self.rStorage = Connection.ConnectionStorage()
self.pStorage = None
self.pRun = NOPREDICT
self.idleTimeout = idleTimeout
self.eventSimulator = eventSimulator
self.transferManager = transferManager
self.ssl = ssl
self.origin = origin
self.id = connectionCounter()
class ConnectionStorage(object):
def __init__(self):
self.desiredBw = 0.0
self.availableBw = 0.0
self.idleTimestamp = None
def clone(self):
clone = copy(self)
return clone
def _storageSwitch(self, pRun):
if self.pRun != pRun:
self.pStorage = self.rStorage.clone()
self.pRun = pRun
if pRun == NOPREDICT:
return self.rStorage
else:
return self.pStorage
def _notifyNew(self, storage, time, pRun):
assert storage.state == state.IDLE
def _notifyIdle(self, storage, time, pRun):
assert storage.state == state.IDLE
self.transferManager.idledConnection(self, time, pRun)
def _notifyBusy(self, storage, time, pRun):
assert storage.state == state.BUSY
self.transferManager.busiedConnection(self, time, pRun)
def _notifyClosed(self, storage, time, pRun):
assert storage.state == state.CLOSED
self.transferManager.closedConnection(self, time, pRun)
def getIdleTimestamp(self, pRun):
storage = self._storageSwitch(pRun)
return storage.idleTimestamp
def getDesiredBw(self, time, pRun):
storage = self._storageSwitch(pRun)
return storage.desiredBw
def getAvailableBw(self, time, pRun):
storage = self._storageSwitch(pRun)
return storage.availableBw
def setAvailableBw(self, availableBw, time, pRun):
storage = self._storageSwitch(pRun)
# only do something if bandwidth changed
if storage.availableBw != availableBw:
# update local cache
storage.availableBw = availableBw
def _tickTime(self, start, end, pRun):
pass
def __str__(self):
return ""
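# Minimal subclass sketch (assumptions: concrete connections initialise a state
# on their storage and only need per-tick bookkeeping of their own):
class DummyConnection(Connection):
    def __init__(self, idleTimeout, ssl, origin, transferManager, eventSimulator):
        Connection.__init__(self, idleTimeout, ssl, origin, transferManager, eventSimulator)
        self.rStorage.state = state.IDLE
    def _tickTime(self, start, end, pRun):
        # nothing to simulate for a dummy connection
        pass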
|
py | 1a369bffec34903e323d319d2b28167179962d07 | # Copyright 2011 OpenStack LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Index, MetaData, Table
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
instances = Table('instances', meta, autoload=True)
Index('uuid', instances.c.uuid, unique=True).create(migrate_engine)
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
instances = Table('instances', meta, autoload=True)
Index('uuid', instances.c.uuid, unique=True).drop(migrate_engine)
|
py | 1a369d1a568824687f37ec125030fd8e4d398b8d | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/5/22 18:19
# @FileName: config.py
import tensorflow as tf
import logging
logging.basicConfig(format="%(asctime)s : %(levelname)s : %(message)s", level=logging.INFO)
FLAGS = tf.app.flags.FLAGS
"""
Configuration for URL cleaning
"""
# File that stores the URL strings
tf.app.flags.DEFINE_string("train_src_file", "../data/source.txt", "train source file dir")
# File that stores the URL labels
tf.app.flags.DEFINE_string("train_tgt_file", "../data/target.txt", "train target file dir")
# Config files for the char2ve (char-to-vector) processing of URLs
tf.app.flags.DEFINE_string("word2vec_model_path", "../data/char2ve/vector.bin", "word to vector model path")
tf.app.flags.DEFINE_string("vocab_file", "../data/char2ve/vocab.txt", "vocab file dir")
tf.app.flags.DEFINE_string("vocab_vector_file", "../data/char2ve/vocab_vector.npy", "vocab vector file")
tf.app.flags.DEFINE_string("model_path", "../model_filt/", "model path")
tf.app.flags.DEFINE_string("test_src_file", "../data/source_test.txt", "test source file dir")
tf.app.flags.DEFINE_string("test_tgt_file", "../data/target_test.txt", "test target file dir")
tf.app.flags.DEFINE_string("model_pb_file", "../data/abnormal_detection_model.pb", "converted model file")
"""
Common configuration
"""
tf.app.flags.DEFINE_integer("embedding_size", 100, "vocab vector embedding size")
# Number of consecutive dataset elements combined into a single batch.
tf.app.flags.DEFINE_integer("batch_size", 100, "batch size")
tf.app.flags.DEFINE_integer("num_steps", 200, "number of input string max length")
# Number of training epochs
tf.app.flags.DEFINE_integer("epoch", 120, "number of training epoch")
"""
RNN layer configuration
"""
tf.app.flags.DEFINE_integer("num_layers", 3, "number of rnn layer")
tf.app.flags.DEFINE_integer("num_hidden", 15, "hidden layer output dimension")
tf.app.flags.DEFINE_float("input_keep_prob", 0.5, "input keep prob")
tf.app.flags.DEFINE_float("output_keep_prob", 0.5, "output keep prob")
tf.app.flags.DEFINE_float("state_keep_prob", 1.0, "state keep prob")
tf.app.flags.DEFINE_string("tb_path", "./tb/", "tensorboard file path")
"""
学习速率配置
"""
tf.app.flags.DEFINE_float("learning_rate", 0.01, "learning rate")
tf.app.flags.DEFINE_integer("decay_steps", 10, "decay steps")
tf.app.flags.DEFINE_float("decay_rate", 0.9, "decay rate")
"""
cpu core config
"""
tf.app.flags.DEFINE_integer("cpu_num", 4, "cpu core number")
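# Usage sketch: other modules import this module for its side effects and read
# the shared flag values by attribute; command-line arguments override the
# defaults above, e.g.
#   from config import FLAGS      # assumed import path
#   print(FLAGS.batch_size)       # 100 unless overridden with --batch_size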
|
py | 1a369d8ed0504df7cdc57ef633a1364eabff0da2 | # Copyright (c) 2016 CloudNative, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import unittest
import re
import time
from cruddy.calculatedvalue import CalculatedValue
class TestCalculatedValue(unittest.TestCase):
def setUp(self):
self.uuid_re = re.compile('[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}')
def tearDown(self):
pass
def test_uuid_create(self):
cv = CalculatedValue.check('<on-create:uuid>')
self.assertEqual(cv.operation, 'create')
self.assertTrue(self.uuid_re.match(cv.value))
def test_uuid_update(self):
cv = CalculatedValue.check('<on-update:uuid>')
self.assertEqual(cv.operation, 'update')
self.assertTrue(self.uuid_re.match(cv.value))
def test_ts_create(self):
cv = CalculatedValue.check('<on-create:timestamp>')
before = int(time.time() * 1000)
self.assertEqual(cv.operation, 'create')
self.assertGreaterEqual(cv.value, before)
self.assertLessEqual(cv.value, int(time.time() * 1000))
def test_bad_operation(self):
cv = CalculatedValue.check('<on-foobar:uuid>')
self.assertIsNone(cv)
def test_bad_token(self):
cv = CalculatedValue.check('<on-create:foobar>')
self.assertIsNone(cv)
|
py | 1a369d97727efa43eb92767ce55c9b528a6fad42 | """Summary
"""
from PyQt5.QtWidgets import QGraphicsRectItem
from . import slicestyles as styles
from .sliceextras import PreXoverItemGroup, WEDGE_RECT
_RADIUS = styles.SLICE_HELIX_RADIUS
class PreXoverManager(QGraphicsRectItem):
"""Summary
Attributes:
active_group (TYPE): Description
active_neighbor_group (TYPE): Description
groups (dict): Description
neighbor_pairs (tuple): Description
neighbor_prexover_items (dict): Description
part_item (TYPE): Description
prexover_item_map (dict): Description
virtual_helix_item (cadnano.views.sliceview.virtualhelixitem.VirtualHelixItem): Description
"""
def __init__(self, part_item):
"""Summary
Args:
part_item (TYPE): Description
"""
super(PreXoverManager, self).__init__(part_item)
self.part_item = part_item
self.virtual_helix_item = None
self.active_group = None
self.active_neighbor_group = None
self.groups = {}
# dictionary of tuple of a
# (PreXoverItemGroup, PreXoverItemGroup, List[PreXoverItem])
# tracks connections between prexovers
self.prexover_item_map = {}
self.neighbor_prexover_items = {} # just a dictionary of neighbors
self.neighbor_pairs = () # accounting for neighbor pairing
self._active_items = []
# end def
def partItem(self):
"""Summary
Returns:
TYPE: Description
"""
return self.part_item
# end def
def clearPreXoverItemGroups(self):
"""Summary
Returns:
TYPE: Description
"""
groups = self.groups
while groups:
k, item = groups.popitem()
item.remove()
if self.active_group is not None:
self.active_group.remove()
self.active_group = None
self._active_items = []
self.prexover_item_map = {}
self.neighbor_prexover_items = {}
if self.virtual_helix_item is not None:
self.virtual_helix_item.setZValue(styles.ZSLICEHELIX)
# end def
def hideGroups(self):
"""Summary
Returns:
TYPE: Description
"""
self.clearPreXoverItemGroups()
if self.active_group is not None:
self.active_group.hide()
for group in self.groups.values():
group.hide()
self.virtual_helix_item = None
# end def
def activateVirtualHelix(self, virtual_helix_item, idx, per_neighbor_hits, pairs):
"""Create PreXoverItemGroups for the active virtual_helix_item and its
neighbors and connect the neighboring bases
Args:
virtual_helix_item (cadnano.views.sliceview.virtualhelixitem.VirtualHelixItem): Description
idx (int): the base index within the virtual helix
per_neighbor_hits (TYPE): Description
pairs (TYPE): Description
"""
self.clearPreXoverItemGroups()
pxis = self.prexover_item_map
        neighbor_pxis_dict = self.neighbor_prexover_items  # for avoiding duplicates
self.neighbor_pairs = pairs
self.virtual_helix_item = virtual_helix_item
part_item = self.part_item
groups = self.groups
self.active_group = agroup = PreXoverItemGroup(_RADIUS, WEDGE_RECT,
virtual_helix_item, True)
id_num = virtual_helix_item.idNum()
virtual_helix_item.setZValue(styles.ZSLICEHELIX + 10)
fwd_st_type, rev_st_type = True, False # for clarity in the call to constructors
for neighbor_id, hits in per_neighbor_hits.items():
nvhi = part_item.idToVirtualHelixItem(neighbor_id)
ngroup = PreXoverItemGroup(_RADIUS, WEDGE_RECT, nvhi, False)
groups[neighbor_id] = ngroup
fwd_axis_hits, rev_axis_hits = hits
# n_step_size = nvhi.getProperty('bases_per_repeat')
for idx, fwd_idxs, rev_idxs in fwd_axis_hits:
neighbor_pxis = []
# print((id_num, fwd_st_type, idx))
pxis[(id_num, fwd_st_type, idx)] = (agroup.getItemIdx(fwd_st_type, idx),
ngroup,
neighbor_pxis
)
for j in fwd_idxs:
nkey = (neighbor_id, fwd_st_type, j)
npxi = neighbor_pxis_dict.get(nkey)
if npxi is None:
npxi = ngroup.getItemIdx(fwd_st_type, j)
neighbor_pxis_dict[nkey] = npxi
neighbor_pxis.append(npxi)
for j in rev_idxs:
nkey = (neighbor_id, rev_st_type, j)
npxi = neighbor_pxis_dict.get(nkey)
if npxi is None:
npxi = ngroup.getItemIdx(rev_st_type, j)
neighbor_pxis_dict[nkey] = npxi
neighbor_pxis.append(npxi)
for idx, fwd_idxs, rev_idxs in rev_axis_hits:
neighbor_pxis = []
# print((id_num, rev_st_type, idx))
pxis[(id_num, rev_st_type, idx)] = (agroup.getItemIdx(rev_st_type, idx),
ngroup,
neighbor_pxis
)
for j in fwd_idxs:
nkey = (neighbor_id, fwd_st_type, j)
npxi = neighbor_pxis_dict.get(nkey)
if npxi is None:
npxi = ngroup.getItemIdx(fwd_st_type, j)
neighbor_pxis_dict[nkey] = npxi
neighbor_pxis.append(npxi)
for j in rev_idxs:
nkey = (neighbor_id, rev_st_type, j)
npxi = neighbor_pxis_dict.get(nkey)
if npxi is None:
npxi = ngroup.getItemIdx(rev_st_type, j)
neighbor_pxis_dict[nkey] = npxi
neighbor_pxis.append(npxi)
# end for per_neighbor_hits
# end def
def activateNeighbors(self, id_num, is_fwd, idx):
"""Summary
Args:
id_num (int): VirtualHelix ID number. See `NucleicAcidPart` for description and related methods.
is_fwd (bool): True if fwd (top) strand, False if rev (bottom) strand
idx (int): the base index within the virtual helix
Returns:
TYPE: Description
Raises:
ValueError: Description
"""
# print("ACTIVATING neighbors", id_num, idx)
if self.active_group is None:
return
agroup = self.active_group
if id_num != agroup.id_num:
raise ValueError("not active id_num {} != {}".format(id_num, agroup.id_num))
active_items = self._active_items
item = self.prexover_item_map.get((id_num, is_fwd, idx))
if item is None:
apxi = agroup.getItemIdx(is_fwd, idx)
apxi.setActive5p(True) if is_fwd else apxi.setActive3p(True)
agroup.active_wedge_gizmo.pointToPreXoverItem(apxi)
active_items.append(apxi)
else:
apxi, npxig, neighbor_list = item
pairs = self.neighbor_pairs[0] if is_fwd else self.neighbor_pairs[1]
check_5prime = pairs.get(idx)
is_5prime_strand = None
if check_5prime is not None:
is_5prime_strand = check_5prime[0]
else:
if is_fwd and idx == 0:
is_5prime_strand = False
elif not is_5prime_strand and self.virtual_helix_item.getProperty('length') == idx + 1:
is_5prime_strand = False
else:
is_5prime_strand = True
agroup.active_wedge_gizmo.pointToPreXoverItem(apxi)
active_items.append(apxi)
self.active_neighbor_group = npxig
# print("Should have {} neighbors".format(len(neighbor_list)))
# color = neighbor_list[0].color if neighbor_list else '#aaaaa'
# angle = 0
for npxi in neighbor_list:
npxi.setActive3p(True, apxi) if is_5prime_strand else npxi.setActive5p(True, apxi)
active_items.append(npxi)
apxi.setActive5p(True, npxi) if is_5prime_strand else apxi.setActive3p(True, npxi)
# end def
def deactivateNeighbors(self):
"""Summary
Returns:
TYPE: Description
"""
while self._active_items:
npxi = self._active_items.pop()
npxi.setActive3p(False)
npxi.setActive5p(False)
if self.active_neighbor_group is None:
return
wg = self.active_neighbor_group.active_wedge_gizmo
if wg is not None:
wg.deactivate()
self.active_neighbor_group = None
# end def
# end class
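# Call-sequence sketch (derived from the methods above, not part of the original
# module): the slice view drives one PreXoverManager per part item roughly as
#   manager.activateVirtualHelix(vh_item, idx, per_neighbor_hits, pairs)
#   manager.activateNeighbors(id_num, is_fwd, idx)
#   manager.deactivateNeighbors()
#   manager.clearPreXoverItemGroups()
# where per_neighbor_hits and pairs come from the part's neighbor queries.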
|
py | 1a369e22fc802c776239bb6afaba9602715e0331 | #!/usr/local/bin/python
# -*- coding: utf-8 -*-
# file: main.py
from .sequiturpython.grammar import Grammar, Symbol
from .sequiturpython.symbol import RuleIndex, RULE_INDEX_STR
# Few constants for presentation logics
#RULE_INDEX_STR = "^%s"
SEQUENCE_KEY = "S"
ARROW = "→"
NEWLINE_REPLACEMENT = "↵"
SPACE_REPLACEMENT = "_"
TAB_REPLACEMENT = "↹"
class AlphabetsTransformer:
def __init__(self):
self.alphabets_encoder = [chr(num) for num in range(1000)]
self.alphabets_decoder = {key: idx for idx, key in enumerate(self.alphabets_encoder)}
def list_ids2alphabets(self, one_list):
return [self.alphabets_encoder[cur_ele] for cur_ele in one_list]
def list_alphabets2ids(self, one_list):
return [self.alphabets_decoder[cur_ele] for cur_ele in one_list]
class Rule(list):
""" Rule class keeps track of digrams on a list """
def __new__(cls, v=[]):
obj = list.__new__(cls, [v])
obj.c = 0 # set default counter value
obj.i = RuleIndex(0) # set default index value
return obj
def ind(self, i=None):
""" Set and get index """
if i is not None:
self.i = RuleIndex(i)
return self.i
def inc(self, n=1):
""" Increase counter """
self.c += n
return self.c
def dec(self, n=1):
""" Decrease counter """
self.c -= n
return self.c
def cnt(self):
""" Get counter """
return self.c
def replace(self, rule):
"""
        Replace this rule's digram values with another rule's digrams. This is not used by Sequencer2!
        For example, if this rule is [[1, 2], [2, 3]] and rule index 1 is replaced with the argument rule [['a', 'b'], ['b', 'c']],
        the result is [['a', 'b'], ['b', 'c'], ['c', 2], [2, 3]]
"""
for ind, digram in enumerate(self):
# Digram has two values, potentially rule indexes
# both of them must be compared with the given rule index
for j, el in enumerate(digram):
ind += j # j = 0 or 1
if isinstance(el, RuleIndex) and el == rule.ind():
if ind > 0:
self[ind-1][1] = rule[0][0]
if ind < len(self):
self[ind][0] = rule[-1][1]
self[ind:ind] = rule[:]
class Sequencer(list):
""" Main class to use algorithm. This implements the digram based approach for the algo. """
def __init__(self, seq=[], utilize=True):
self.first = None
if seq:
for c in seq:
self.stream(c, utilize)
    def utilize(self):
        """ Remove redundant rules, i.e. rules that are referenced only once by the other rules """
rules = self[1:]
for rule1 in rules:
# only rules with count = 1
if rule1 is None or rule1.cnt() != 1:
continue
for rule2 in rules:
# iterate over all rules except the excluded rule and None
if rule2 is None or rule2 is rule1:
continue
rule2.replace(rule1)
# free up the slot for the next reoccurring rule
self[rule1.ind()] = None
def find(self, digram):
""" Find given digram from main rule / sequence and rest of the rules """
for i, rule in enumerate(self):
if rule is None:
continue
# main rule
if i == 0:
j = rule.index(digram) if digram in rule[:-1] else None
if j is not None:
return 0, j, -1
# rules with one digram
elif len(rule) == 1:
if rule[0] == digram:
return i, 0, -1
# rules with multiple digrams
else:
j = rule.index(digram) if digram in rule else None
if j is not None:
return i, j, 1
return (-1, -1, -1)
def new_rule(self, rule):
""" New rule creator helper """
# get new index from empty slots if available
if None in self:
c = rule.ind(self.index(None))
self[c] = rule
# else get new index from total length of the sequence
else:
c = rule.ind(len(self))
self.append(rule)
return c
def stream(self, c, utilize=True):
""" Main sequence handler / algorithm """
# create first item, if not exists yet
if self.first is None:
self.first = c
r = [[None, c]]
self.append(Rule(r))
return
main = self[0]
util = False
# loop as many times as there are no more repeating digrams
while True:
# create a new digram from previous digram last item and coming item c
digram = [main[-1][1], c]
# search if main sequence of rest of the rules has the digram
ind, j, k = self.find(digram)
# rule is a list of digrams, the first digram is instantiated here
rule = Rule([digram])
# digram found from main rule
if ind == 0:
# increase potential previous rule index
if isinstance(c, RuleIndex):
self[c].inc()
# get a new item by rule creation
c = self.new_rule(rule)
# every new rule will get counter increased by two
self[c].inc(2)
# decrease counter of the replaced rules
if isinstance(main[j-1][1], RuleIndex):
self[main[j-1][1]].dec()
util = True
if isinstance(main[j+1][0], RuleIndex):
self[main[j+1][0]].dec()
util = True
# replace certain items with a new rule item: c
main[-1][1] = main[j+1][0] = main[j-1][1] = c
del main[j]
# break while loop
break
else:
# digram was not found from the main sequence, but is found from the other rules
if ind > 0:
# digram was found especially from longer rules, i.e. rules that are longer than one digram long
if k > 0:
# get a new item by rule creation
c = self.new_rule(rule)
# increase counter
rule.inc()
# change rule content by adding new index
if j < len(self[ind])-1:
self[ind][j+1][0] = c
if j-1 > -1:
self[ind][j-1][1] = c
# delete old rule digram
del self[ind][j]
else:
# create index for the next digram
c = RuleIndex(ind)
# remove last item from the main sequence
l = main.pop()
# if the rightmost value of the removed rule is a RuleIndex, decrease counter
if isinstance(l[1], RuleIndex):
self[l[1]].dec()
util = True
# digram was not found from the main sequence or from the rules
else:
# append new object to the main sequence
main.append(digram)
# if character is an index, increment counter
if isinstance(c, RuleIndex):
self[c].inc()
# break while loop
break
# if rule utility is on (as it is recommended by default), remove redundant rules
if utilize and util:
self.utilize()
def grammar_recursive(self, rule, recursive=False):
""" Grammar helper function """
if not isinstance(rule, list):
return str(rule)
s = ''
for i, r in enumerate(rule):
if isinstance(r, list):
if i == 0:
s += str(self.grammar_recursive(r, recursive))
elif isinstance(r[1], RuleIndex):
s += "%s" % (self.grammar_recursive(self[r[1]], recursive) if recursive else RULE_INDEX_STR % r[1])
else:
s += str(self.grammar_recursive(r[1], recursive))
elif isinstance(r, RuleIndex):
s += "%s" % (self.grammar_recursive(self[r], recursive) if recursive else RULE_INDEX_STR % r)
else:
s += str(r).replace("\r\n", NEWLINE_REPLACEMENT).\
replace("\n", NEWLINE_REPLACEMENT).\
replace("\r", "").\
replace("\t", TAB_REPLACEMENT).\
replace(" ", SPACE_REPLACEMENT)
return s
def grammar_sequence(self, join=False):
""" Retrieve the main sequence / rule from the sequencer """
x = [item[1] for item in self[0]]
return {SEQUENCE_KEY: self.grammar_recursive(x, False) if join else x}
def grammar_rules(self, join=False, recursive=False):
""" Retrieve rest of the rules from the sequencer """
return {x.ind(): self.grammar_recursive(x, recursive) if join else x for x in self[1:] if x}
def resolve(self, flatten=True):
"""
        When the sequencer has successfully created rules from the given input,
        the resolve method can be used to decode the compressed sequence back to the original input.
        The flatten argument controls whether the hierarchical structure of the returned list is collapsed or preserved.
"""
def _recur(ind):
if isinstance(ind, RuleIndex) and self[ind] is not None:
b = []
l = len(self[ind])-1
for i, item in enumerate(self[ind]):
if item is None:
continue
if i == 0:
b.append(_recur(item[0]))
b.append(_recur(item[1]))
elif i == l:
b.append(_recur(item[1]))
else:
b.append(_recur(item[1]))
return b
else:
return ind
# start from main sequence / first rule
items = [_recur(item[1]) for item in self[0]]
# should we flatten the result?
return flatten_list(items) if flatten else items
def get(self):
""" Getter for sequence """
return list(self)
def __str__(self):
"""
String representation of the sequencer.
This merges only the first of the rules i.e. the main sequence
"""
return ''.join([(RULE_INDEX_STR % i) if isinstance(i, RuleIndex) else str(i) for i in [item[1] for item in self[0]]])
class Sequencer2(list):
""" Main class to use algorithm. This implements the array slice based approach for the algo. """
def __init__(self, seq=[], utilize=True):
self += [Rule([])]
if seq:
for c in seq:
self.stream(c, utilize)
def find(self, rule):
ind, x, j = (-1, -1, -1)
i = 0
for x in self:
if x:
j = self.digram_index(x, rule)
if j > -1:
x = len(x)
ind = i
break
i += 1
return (ind, x, j)
def digram_index(self, target, digram):
l = len(target)-1
# target list length smaller than 2
if l < 1:
return -1
# if target and digram are equal in length, we can compare them directly
if l == 1:
return 0 if target == digram else -1
i = 0
while i < l:
# find "digrams" from target list and match with passed digram argument
if target[i:i+2] == digram:
return i
i += 1
return -1
def stream(self, c, utilize = True):
""" Main sequence handler / algorithm """
s = self
main = s[0]
if len(main) < 2:
main.append(c)
else:
util = False
            # loop until there are no more repeating digrams
while True:
# create new digram
rule = Rule(main[-1:]+[c])
# find digram from main sequence or other rules
ind, x, j = self.find(rule)
# if main sequence has digram
if ind == 0:
# reuse temporarily disabled index?
if None in s:
i = rule.ind(s.index(None))
s[i] = rule
else:
# create new unique index
i = rule.ind(len(s))
s.append(rule)
# increment rule counter
s[i].inc()
# replace digram left item
main[j] = i
# remove digram right item
del main[j+1]
else:
# main sequence didnt have digram, how about other rules?
if ind > 0:
# digram is found from long rules
if x > 2:
c = rule.ind(len(s))
s.append(rule)
rule.inc()
# change rule content by adding new index
c1 = s[ind][j+2:]
del s[ind][j:]
s[ind] += [c] + c1
else:
# lets try to retrieve index from all rules for the next digram
c = RuleIndex(s.index(rule))
# remove last item from main sequence
l = main.pop()
# if removed object is an index, decrease count
if isinstance(l, RuleIndex) and s[l] is not None:
s[l].dec()
util = True
else:
# append new object to the main sequence
main.append(c)
# if character is an index, increment count
if isinstance(c, RuleIndex):
s[c].inc()
break
if utilize and util:
self.utilize()
def utilize(self):
        # remove redundant rules, i.e. rules that are referenced only once on the right-hand side of other rules
for rule in self:
# only rules with count = 1
if rule is None or rule.cnt() != 1:
continue
self[rule.ind()] = None
for r in self:
# all rules except the excluded rule
if r is None or r is rule:
continue
ind = 0
l = len(r)
while ind < l:
if isinstance(r[ind], RuleIndex) and r[ind] == rule.ind():
c = r[ind+1:]
del r[ind:]
r += rule + c
ind += 1
def grammar_recursive(self, rule, recursive=False):
s = ''
for r in rule:
if isinstance(r, list):
s += str(self.grammar_recursive(r, recursive))
elif isinstance(r, RuleIndex):
s += "%s" % (self.grammar_recursive(self[r], recursive) if recursive else RULE_INDEX_STR % r)
else:
s += str(r).replace("\r\n", NEWLINE_REPLACEMENT).\
replace("\n", NEWLINE_REPLACEMENT).\
replace("\r", "").\
replace("\t", TAB_REPLACEMENT).\
replace(" ", SPACE_REPLACEMENT)
return s
def grammar_sequence(self, join=False):
""" Retrieve the main sequence / rule from the sequencer """
return {SEQUENCE_KEY: self.grammar_recursive(self[0]) if join else self[0]}
def grammar_rules(self, join=False, recursive=False):
""" Retrieve rest of the rules from the sequencer """
return {x.ind(): self.grammar_recursive(x, recursive) if join else x for x in self[1:] if x}
def resolve(self, flatten=True):
"""
        When the sequencer has successfully created rules from the given input,
        the resolve method can be used to decode the compressed sequence back to the original input.
        The flatten argument controls whether the hierarchical structure of the returned list is collapsed or preserved.
"""
def _recur(i):
if not isinstance(i, RuleIndex):
return i
return [_recur(x) for x in self[i]]
# start from main sequence / first rule
items = [_recur(item) for item in self[0]]
# should we flatten the result?
return flatten_list(items) if flatten else items
def get(self):
""" Getter for sequence """
return list(self)
def __str__(self):
"""
String representation of the sequencer.
This merges only the first of the rules i.e. the main sequence
"""
return ''.join([(RULE_INDEX_STR % i) if isinstance(i, RuleIndex) else str(i) for i in self[0]])
class Sequencer3():
"""
    Main class for using the algorithm.
    This implements Sequitur as a Python port of the JavaScript version:
https://github.com/mspandit/sequitur-python
"""
def __init__(self, seq = None, utilize = True):
self.first = None
self.grammar_cache = None
self.g = Grammar()
self.production = self.g.root_production
if seq:
for c in seq:
self.stream(c, utilize)
def stream(self, c, utilize = True):
self.production.last().insert_after(Symbol.factory(self.g, c))
if self.first is None:
self.first = True
return
match = self.g.get_index(self.production.last().prev)
if not match:
self.g.add_index(self.production.last().prev)
elif match.next != self.production.last().prev:
self.production.last().prev.process_match(match)
def grammar_recursive(self, rule, recursive=False):
s = ''
for r in rule:
if isinstance(r, list):
s += str(self.grammar_recursive(r, recursive))
elif isinstance(r, RuleIndex):
s += "%s" % (self.grammar_recursive(self.get(True)[r], recursive) if recursive else RULE_INDEX_STR % r)
else:
s += str(r).replace("\r\n", NEWLINE_REPLACEMENT).\
replace("\n", NEWLINE_REPLACEMENT).\
replace("\r", "").\
replace("\t", TAB_REPLACEMENT).\
replace(" ", SPACE_REPLACEMENT)
return s
def grammar_sequence(self, join=False):
""" Retrieve the main sequence / rule from the sequencer """
x = self.get(False)[0]
return {SEQUENCE_KEY: self.grammar_recursive(x, False) if join else x}
def grammar_rules(self, join=False, recursive=False):
""" Retrieve rest of the rules from the sequencer """
rules = self.get(False)[1:]
return {(i+1): self.grammar_recursive(x, recursive) if join else x for i, x in enumerate(rules)}
def resolve(self, flatten=True):
"""
        When the sequencer has successfully created rules from the given input,
        the resolve method can be used to decode the compressed sequence back to the original input.
        The flatten argument controls whether the hierarchical structure of the returned list is collapsed or preserved.
"""
def _recur(i):
if not isinstance(i, RuleIndex):
return i
return [_recur(x) for x in self.get()[i]]
# start from main sequence / first rule
items = [_recur(item) for item in self.get()[0]]
# should we flatten the result?
return flatten_list(items) if flatten else items
def get(self, cache=True):
if not self.grammar_cache or not cache:
self.grammar_cache = self.g.get_grammar()
return self.grammar_cache
def __str__(self):
"""
String representation of the sequencer.
This merges only the first of the rules i.e. the main sequence
"""
return ''.join([(RULE_INDEX_STR % i) if isinstance(i, RuleIndex) else str(i) for i in self.get(False)[0]])
def flatten_list(items):
""" List flattener helper function """
for i, x in enumerate(items):
while isinstance(items[i], list):
items[i:i+1] = items[i]
return items
def print_grammar(seguencer, join=True, recursive=False):
""" Nicely output grammar of the sequencer """
# main sequence only
for i, item in seguencer.grammar_sequence(join).items():
print ("%s%s" % ("%s " % i, ARROW), item)
# rules only
for i, item in seguencer.grammar_rules(join, recursive).items():
print ("%s%s" % ("%s " % i, ARROW), item)
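def demo(text="abcabcabc"):
    """ Minimal usage sketch (the function name and sample input are illustrative,
    not part of the original module): build a grammar for a repetitive string with
    Sequencer2, print it, and decode it back with resolve(). """
    seq = Sequencer2(text)
    print_grammar(seq)
    return ''.join(str(c) for c in seq.resolve())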
|
py | 1a369e6a9f86d3c96098c1f52bfeedbbe727f3dc | import sys
import pandas as pd
def loadcsv() -> pd.DataFrame:
srag_2013 = pd.read_csv(
"https://opendatasus.saude.gov.br/dataset/e6b03178-551c-495c-9935-adaab4b2f966/resource/4919f202-083a-4fac-858d-99fdf1f1d765/download/influd13_limpo_final.csv",
sep=';', encoding='cp1252', dtype=str)
srag_2014 = pd.read_csv(
"https://opendatasus.saude.gov.br/dataset/e6b03178-551c-495c-9935-adaab4b2f966/resource/2182aff1-4e8b-4aee-84fc-8c9f66378a2b/download/influd14_limpo-final.csv",
sep=';', encoding='cp1252', dtype=str)
srag_2015 = pd.read_csv(
"https://opendatasus.saude.gov.br/dataset/e6b03178-551c-495c-9935-adaab4b2f966/resource/97cabeb6-f09e-47a5-8358-4036fb10b535/download/influd15_limpo-final.csv",
sep=';', encoding='cp1252', dtype=str)
srag_2016 = pd.read_csv(
"https://opendatasus.saude.gov.br/dataset/e6b03178-551c-495c-9935-adaab4b2f966/resource/dbb0fd9b-1345-47a5-86db-d3d2f4868a11/download/influd16_limpo-final.csv",
sep=';', encoding='cp1252', dtype=str)
srag_2017 = pd.read_csv(
"https://opendatasus.saude.gov.br/dataset/e6b03178-551c-495c-9935-adaab4b2f966/resource/aab28b3c-f6b8-467f-af0b-44889a062ac6/download/influd17_limpo-final.csv",
sep=';', encoding='cp1252', dtype=str)
srag_2018 = pd.read_csv(
"https://opendatasus.saude.gov.br/dataset/e6b03178-551c-495c-9935-adaab4b2f966/resource/a7b19adf-c6e6-4349-a309-7a1ec0f016a4/download/influd18_limpo-final.csv",
sep=';', encoding='cp1252', dtype=str)
srag_201314 = srag_2013.merge(srag_2014, how='outer')
srag_20131415 = srag_201314.merge(srag_2015, how='outer')
srag_2013141516 = srag_20131415.merge(srag_2016, how='outer')
srag_201314151617 = srag_2013141516.merge(srag_2017, how='outer')
srag_20131415161718 = srag_201314151617.merge(srag_2018, how='outer')
return srag_20131415161718
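# Usage sketch: the merged 2013-2018 dataframe is re-downloaded on every call and
# is fairly large, so callers may want to cache it locally, e.g.
#   df = loadcsv()
#   df.to_csv("srag_2013_2018.csv", sep=';', index=False)   # file name is illustrative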
|
py | 1a369f676cc6db4953c8149c47ec6309bcaec900 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetVirtualNetworkPeeringResult',
'AwaitableGetVirtualNetworkPeeringResult',
'get_virtual_network_peering',
]
@pulumi.output_type
class GetVirtualNetworkPeeringResult:
"""
Peerings in a virtual network resource.
"""
def __init__(__self__, allow_forwarded_traffic=None, allow_gateway_transit=None, allow_virtual_network_access=None, etag=None, id=None, name=None, peering_state=None, provisioning_state=None, remote_address_space=None, remote_bgp_communities=None, remote_virtual_network=None, resource_guid=None, type=None, use_remote_gateways=None):
if allow_forwarded_traffic and not isinstance(allow_forwarded_traffic, bool):
raise TypeError("Expected argument 'allow_forwarded_traffic' to be a bool")
pulumi.set(__self__, "allow_forwarded_traffic", allow_forwarded_traffic)
if allow_gateway_transit and not isinstance(allow_gateway_transit, bool):
raise TypeError("Expected argument 'allow_gateway_transit' to be a bool")
pulumi.set(__self__, "allow_gateway_transit", allow_gateway_transit)
if allow_virtual_network_access and not isinstance(allow_virtual_network_access, bool):
raise TypeError("Expected argument 'allow_virtual_network_access' to be a bool")
pulumi.set(__self__, "allow_virtual_network_access", allow_virtual_network_access)
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if peering_state and not isinstance(peering_state, str):
raise TypeError("Expected argument 'peering_state' to be a str")
pulumi.set(__self__, "peering_state", peering_state)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if remote_address_space and not isinstance(remote_address_space, dict):
raise TypeError("Expected argument 'remote_address_space' to be a dict")
pulumi.set(__self__, "remote_address_space", remote_address_space)
if remote_bgp_communities and not isinstance(remote_bgp_communities, dict):
raise TypeError("Expected argument 'remote_bgp_communities' to be a dict")
pulumi.set(__self__, "remote_bgp_communities", remote_bgp_communities)
if remote_virtual_network and not isinstance(remote_virtual_network, dict):
raise TypeError("Expected argument 'remote_virtual_network' to be a dict")
pulumi.set(__self__, "remote_virtual_network", remote_virtual_network)
if resource_guid and not isinstance(resource_guid, str):
raise TypeError("Expected argument 'resource_guid' to be a str")
pulumi.set(__self__, "resource_guid", resource_guid)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
if use_remote_gateways and not isinstance(use_remote_gateways, bool):
raise TypeError("Expected argument 'use_remote_gateways' to be a bool")
pulumi.set(__self__, "use_remote_gateways", use_remote_gateways)
@property
@pulumi.getter(name="allowForwardedTraffic")
def allow_forwarded_traffic(self) -> Optional[bool]:
"""
Whether the forwarded traffic from the VMs in the local virtual network will be allowed/disallowed in remote virtual network.
"""
return pulumi.get(self, "allow_forwarded_traffic")
@property
@pulumi.getter(name="allowGatewayTransit")
def allow_gateway_transit(self) -> Optional[bool]:
"""
If gateway links can be used in remote virtual networking to link to this virtual network.
"""
return pulumi.get(self, "allow_gateway_transit")
@property
@pulumi.getter(name="allowVirtualNetworkAccess")
def allow_virtual_network_access(self) -> Optional[bool]:
"""
Whether the VMs in the local virtual network space would be able to access the VMs in remote virtual network space.
"""
return pulumi.get(self, "allow_virtual_network_access")
@property
@pulumi.getter
def etag(self) -> str:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> Optional[str]:
"""
The name of the resource that is unique within a resource group. This name can be used to access the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="peeringState")
def peering_state(self) -> Optional[str]:
"""
The status of the virtual network peering.
"""
return pulumi.get(self, "peering_state")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
The provisioning state of the virtual network peering resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="remoteAddressSpace")
def remote_address_space(self) -> Optional['outputs.AddressSpaceResponse']:
"""
The reference to the remote virtual network address space.
"""
return pulumi.get(self, "remote_address_space")
@property
@pulumi.getter(name="remoteBgpCommunities")
def remote_bgp_communities(self) -> Optional['outputs.VirtualNetworkBgpCommunitiesResponse']:
"""
The reference to the remote virtual network's Bgp Communities.
"""
return pulumi.get(self, "remote_bgp_communities")
@property
@pulumi.getter(name="remoteVirtualNetwork")
def remote_virtual_network(self) -> Optional['outputs.SubResourceResponse']:
"""
The reference to the remote virtual network. The remote virtual network can be in the same or different region (preview). See here to register for the preview and learn more (https://docs.microsoft.com/en-us/azure/virtual-network/virtual-network-create-peering).
"""
return pulumi.get(self, "remote_virtual_network")
@property
@pulumi.getter(name="resourceGuid")
def resource_guid(self) -> str:
"""
The resourceGuid property of the Virtual Network peering resource.
"""
return pulumi.get(self, "resource_guid")
@property
@pulumi.getter
def type(self) -> Optional[str]:
"""
Resource type.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="useRemoteGateways")
def use_remote_gateways(self) -> Optional[bool]:
"""
If remote gateways can be used on this virtual network. If the flag is set to true, and allowGatewayTransit on remote peering is also true, virtual network will use gateways of remote virtual network for transit. Only one peering can have this flag set to true. This flag cannot be set if virtual network already has a gateway.
"""
return pulumi.get(self, "use_remote_gateways")
class AwaitableGetVirtualNetworkPeeringResult(GetVirtualNetworkPeeringResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetVirtualNetworkPeeringResult(
allow_forwarded_traffic=self.allow_forwarded_traffic,
allow_gateway_transit=self.allow_gateway_transit,
allow_virtual_network_access=self.allow_virtual_network_access,
etag=self.etag,
id=self.id,
name=self.name,
peering_state=self.peering_state,
provisioning_state=self.provisioning_state,
remote_address_space=self.remote_address_space,
remote_bgp_communities=self.remote_bgp_communities,
remote_virtual_network=self.remote_virtual_network,
resource_guid=self.resource_guid,
type=self.type,
use_remote_gateways=self.use_remote_gateways)
def get_virtual_network_peering(resource_group_name: Optional[str] = None,
virtual_network_name: Optional[str] = None,
virtual_network_peering_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetVirtualNetworkPeeringResult:
"""
Peerings in a virtual network resource.
:param str resource_group_name: The name of the resource group.
:param str virtual_network_name: The name of the virtual network.
:param str virtual_network_peering_name: The name of the virtual network peering.
"""
__args__ = dict()
__args__['resourceGroupName'] = resource_group_name
__args__['virtualNetworkName'] = virtual_network_name
__args__['virtualNetworkPeeringName'] = virtual_network_peering_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:network/v20200701:getVirtualNetworkPeering', __args__, opts=opts, typ=GetVirtualNetworkPeeringResult).value
return AwaitableGetVirtualNetworkPeeringResult(
allow_forwarded_traffic=__ret__.allow_forwarded_traffic,
allow_gateway_transit=__ret__.allow_gateway_transit,
allow_virtual_network_access=__ret__.allow_virtual_network_access,
etag=__ret__.etag,
id=__ret__.id,
name=__ret__.name,
peering_state=__ret__.peering_state,
provisioning_state=__ret__.provisioning_state,
remote_address_space=__ret__.remote_address_space,
remote_bgp_communities=__ret__.remote_bgp_communities,
remote_virtual_network=__ret__.remote_virtual_network,
resource_guid=__ret__.resource_guid,
type=__ret__.type,
use_remote_gateways=__ret__.use_remote_gateways)
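# Usage sketch (the resource names are placeholders for an existing peering in
# the target subscription):
#   peering = get_virtual_network_peering(
#       resource_group_name="example-rg",
#       virtual_network_name="example-vnet",
#       virtual_network_peering_name="example-peering")
#   pulumi.export("peeringState", peering.peering_state)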
|
py | 1a369f7b2e0fbf2fb53995ac163bbd749868ba84 | #!/usr/bin/env python
"""
Perform cleanup actions
"""
import time
import random
import threading
from Utils.Timers import timeFunction
from WMCore.WorkerThreads.BaseWorkerThread import BaseWorkerThread
from WMCore.Services.ReqMgr.ReqMgr import ReqMgr
from WMCore.DAOFactory import DAOFactory
class WorkQueueManagerCleaner(BaseWorkerThread):
"""
Cleans expired items, updates element status.
"""
def __init__(self, queue, config):
"""
Initialise class members
"""
BaseWorkerThread.__init__(self)
self.forbiddenStatus = ["aborted", "aborted-completed", "force-complete", "completed"]
self.queue = queue
self.config = config
self.reqmgr2Svc = ReqMgr(self.config.General.ReqMgr2ServiceURL)
myThread = threading.currentThread()
daoFactory = DAOFactory(package="WMCore.WMBS",
logger=myThread.logger,
dbinterface=myThread.dbi)
self.finishedWorflowCheck = daoFactory(classname="Subscriptions.CountFinishedSubscriptionsByWorkflow")
def setup(self, parameters):
"""
Called at startup - introduce random delay
to avoid workers all starting at once
"""
t = random.randrange(self.idleTime)
self.logger.info('Sleeping for %d seconds before 1st loop' % t)
time.sleep(t)
@timeFunction
def algorithm(self, parameters):
"""
Check & expire negotiation failures
"""
self.queue.logger.info("Start updating & cleaning...")
try:
self.queue.performQueueCleanupActions()
            # this will clean up whatever is left over from the cleanup above.
            # also, if the workqueue replication has problems, it won't delay killing the jobs in condor
            # or updating the wmbs status.
            # these are the states which shouldn't be populated in wmbs (to prevent creating work before the WQE status is updated);
            # the completed status was added to the list due to a race condition.
requests = self.reqmgr2Svc.getRequestByStatusFromMemoryCache(self.forbiddenStatus).getData()
results = self.finishedWorflowCheck.execute(workflowNames=requests)
requestsToKill = [reqInfo["workflow"] for reqInfo in results if reqInfo["open"] > 0]
self.queue.logger.info("Killing %d requests in WMBS ...", len(requestsToKill))
self.queue.killWMBSWorkflows(requestsToKill)
except Exception as ex:
self.queue.logger.exception("Error cleaning queue: %s", str(ex))
self.queue.logger.info("Finished updating & cleaning.")
|
py | 1a369fe55b72671aa8f70bad4127d7fb528a5163 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from datetime import datetime, timedelta
import copy
import logging
import os
import subprocess
import threading
import time
from .constants import PIPE, STDOUT, DEVNULL
from .exceptions import TimeoutExpired
logger = logging.getLogger(__name__)
class Popen:
'''
    It wraps multiple subprocess.Popen objects and provides an interface similar to subprocess.Popen.
'''
polling_interval = 0.1
'''
Parameters
----------
popen_args_list
The list of pipechildren.PopenArgs
stderr
        Specify one of pipechildren.DEVNULL, pipechildren.STDOUT, or a file-like object
'''
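    # Usage sketch (assumption: PopenArgs bundles a human-readable name with the
    # usual subprocess.Popen keyword arguments), mirroring "grep foo | sort":
    #   p = Popen([PopenArgs(name="grep", args=["grep", "foo"]),
    #              PopenArgs(name="sort", args=["sort"])],
    #             stdin=PIPE, stdout=PIPE, text=True)
    #   outs, errs = p.communicate(input="foo bar\nbaz\nfoo baz\n")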
def __init__(self, popen_args_list, stdin=None, stdout=None, stderr=None, universal_newlines=None, encoding=None, errors=None, text=None, _debug_communicate_io=False):
self.text = universal_newlines or encoding or errors or text
self.encoding = encoding
self.popen_args_list = popen_args_list
self.processes = []
self.stdin = None
self.stdout = None
self.stderr = None
self.stderr_write_end = None
self.outs = None
self.errs = None
self.pids = []
self.returncodes = []
self._debug_communicate_io = _debug_communicate_io
self._communicate_called = False
self._workers = {
"stderr_drainer": None,
"close_stderr_write_end_worker": None,
"waiter": None,
"stdin_worker": None,
"stdout_worker": None,
"stderr_worker": None
}
self._stop_workers = False
'''
Call popen with each popen_args and connect stdout -> stdin
between subprocesses.
'''
# previous stdout goes into current stdin
prev_out = stdin
for i in range(len(self.popen_args_list)):
pa = self.popen_args_list[i]
if i == len(self.popen_args_list) - 1:
# Last
_stdout = stdout
else:
_stdout = subprocess.PIPE
_stderr = pa.stderr if pa.stderr else stderr
p = subprocess.Popen(stdout=_stdout,
stdin=prev_out,
stderr=_stderr,
text=self.text,
encoding=self.encoding,
**pa.popen_kwargs)
setattr(p, "name", pa.name)
logger.info(f"Popening({pa.fullname})")
if i > 0:
"""
                piped stdout/stdin objects are connected between subprocesses and used in the
                forked sub-processes. We should release our references so they do not keep the pipes open.
"""
self.processes[-1].stdout.close()
self.processes[-1].stdout = None
self.processes.append(p)
self.pids.append(p.pid)
prev_out = p.stdout
#self._start_pipe_closer()
if stdin is PIPE:
self.stdin = self.processes[0].stdin
else:
self.stdin = None
if stdout is PIPE:
self.stdout = self.processes[-1].stdout
else:
self.stdout = None
if stderr is PIPE:
logger.debug("stderr is PIPE")
if len(self.processes) == 1:
self.stderr = self.processes[0].stderr
else:
r, w = os.pipe()
if self.text:
self.stderr = os.fdopen(r, 'r')
self.stderr_write_end = os.fdopen(w, 'w')
else:
self.stderr = os.fdopen(r, 'rb')
self.stderr_write_end = os.fdopen(w, 'wb')
self._start_stderr_drainer()
else:
self.stderr = None
self.stderr_write_end = stderr
if stderr:
self._start_stderr_drainer()
@staticmethod
def _work_text_drainer(_self, name, reader, data_writer):
'''
Generic thread reader to read data from <reader> and write
data to callback data_writer(data).
NOTE: It is STATIC method.
Called like self._work_text_drainer(self)
        data_writer() gets a line of text as its 1st argument and needs to return
        False if the writer is no longer available.
'''
logger.debug(f"_work_text_drainer {name} started")
while (not _self._stop_workers):
line = reader.readline()
if not line:
break
if _self._debug_communicate_io:
logger.debug(f"{name} -> {line}")
if not data_writer(line):
break
logger.debug(f"_work_text_drainer {name} finished.")
@staticmethod
def _work_binary_drainer(_self, name, reader, data_writer):
'''
Generic thread reader to read data from <reader> and write
data to callback data_writer(data).
NOTE: It is STATIC method.
Called like self._work_binary_drainer(self)
        data_writer() gets binary data as its 1st argument and needs to return
        False if the writer is no longer available.
'''
logger.debug(f"_work_binary_drainer {name} started")
while (not _self._stop_workers):
data = reader.read(4096)
if not data:
break
if _self._debug_communicate_io:
logger.debug(f"{name} -> {data}")
if not data_writer(data):
logger.debug(f"{name} -> EOF")
break
logger.debug(f"_work_binary_drainer {name} finished.")
def _start_stderr_drainer(self):
'''
drain stderr from all sub-processes and gather to one piped stderr
'''
stderr_drainer = []
def stderr_write_end_writer(data):
if self.stderr_write_end.closed:
return False
else:
self.stderr_write_end.write(data)
return True
for p in self.processes:
name=f"{p.name}_stderr_drainer"
if self.text:
drainer = lambda: self._work_text_drainer(self,
name,
p.stderr,
stderr_write_end_writer)
else:
drainer = lambda: self._work_binary_drainer(self,
name,
p.stderr,
stderr_write_end_writer)
t = threading.Thread(name=name, target=drainer)
t.start()
stderr_drainer.append(t)
self._workers["stderr_drainer"] = stderr_drainer
if self.stderr:
            # We need a close worker, otherwise the reader cannot finish reading.
def work_close_stderr_write_end():
logger.debug(f"work_close_stderr_write_end started")
drainers = self._workers["stderr_drainer"]
while not self._stop_workers:
alive = False
for t in drainers:
if t.is_alive():
alive = True
break
if not alive:
break
logger.debug(f"work_close_stderr_write_end finished")
self.stderr_write_end.close()
close_stderr_write_end_worker = threading.Thread(
target=work_close_stderr_write_end,
                name="close_stderr_write_end_worker")
close_stderr_write_end_worker.start()
self._workers["close_stderr_write_end_worker"] = close_stderr_write_end_worker
def __enter__(self):
return self
def __exit__(self):
# To support "with pipechildren.Popen() as p:"
self.wait()
def poll(self):
'''
        Check if the child processes have terminated. If they all have, set and return the returncodes list attribute; otherwise, return None.
Returns
----------
returncode
list of returncode of subprocesses.
'''
self.returncodes = [p.poll() for p in self.processes]
if None in self.returncodes:
return None
return self.returncodes
def wait(self, timeout=None):
'''
Wait for child processes to terminate. Set and return returncode attribute.
If the process does not terminate after timeout seconds,
raise a TimeoutExpired exception.
It is safe to catch this exception and retry the wait.
Returns
----------
returncodes
list of returncodes of subprocesses.
'''
logger.debug("wait started")
def work_wait(name, p, timeout):
logger.debug(f"waiter {name} started")
ret = None
try:
ret = p.wait(timeout=timeout)
except subprocess.TimeoutExpired:
logger.debug(f"waiter {name} timed out.")
else:
logger.debug(f"waiter {name} finished")
return ret
waiter = []
for p in self.processes:
name = f"{p.name}_waiter"
t = threading.Thread(
target=lambda: work_wait(name, p, timeout),
name=name)
t.start()
waiter.append(t)
self._workers["waiter"] = waiter
for t in waiter:
t.join()
self._workers["waiter"] = None
returncodes = self.poll()
if returncodes is None:
raise TimeoutExpired(self.popen_args_list, timeout, stdout=self.outs, stderr=self.errs)
logger.debug("wait finished")
return returncodes
def _time_left_sec(self, timeout_at):
if timeout_at:
time_left_sec = (timeout_at - datetime.now()).total_seconds()
if time_left_sec < 0:
return 0
else:
return time_left_sec
return None
def get_timeout_at(self, timeout):
return datetime.now() + timedelta(seconds=timeout)
def _start_communicate_pipes(self, input=input):
'''
Start threads below. It's called only once when communicate is called first time.
- Thread1: write <input> to stdin if stdin is PIPE and <input> is given.
- Thread2: read stdout to outs if stdout is PIPE
- Thread3: read stderr to errs if stderr is PIPE
'''
logger.debug("_start_communicate_pipes called")
def work_stdin(input=None):
'''
Thread worker to write <input> to stdin
'''
logger.debug("stdin_worker started")
start = 0
step = 4096
end = start + step
while not self._stop_workers and not self.stdin.closed:
if len(input) > end:
if self._debug_communicate_io:
logger.debug(f"->stdin {input[start:end]}")
self.stdin.write(input[start:end])
else:
if self._debug_communicate_io:
logger.debug(f"->stdin {input[start:]}")
self.stdin.write(input[start:])
break
start += step
end += step
self.stdin.close()
logger.debug("stdin_worker finished")
def add_to_outs_writer(data):
'''
Writer used by stdout drainer thread
'''
self.outs += data
return True
def add_to_errs_writer(data):
'''
Writer used by stderr drainer thread
'''
self.errs += data
return True
if input and self.stdin:
stdin_worker = threading.Thread(
target=lambda: work_stdin(input=input),
name="stdin_worker")
stdin_worker.start()
self._workers["stdin_worker"] = stdin_worker
elif self.stdin:
self.stdin.close()
if self.stdout:
if self.text:
self.outs = ''
drainer = lambda: self._work_text_drainer(self,
'stdout_drainer',
self.stdout,
add_to_outs_writer)
else:
self.outs = b''
drainer = lambda: self._work_binary_drainer(self,
'stdout_drainer',
self.stdout,
add_to_outs_writer)
stdout_worker = threading.Thread(
target=drainer,
name="stdout_worker")
stdout_worker.start()
self._workers["stdout_worker"] = stdout_worker
if self.stderr:
if self.text:
self.errs = ''
drainer = lambda: self._work_text_drainer(self,
'stderr_drainer',
self.stderr,
add_to_errs_writer)
else:
self.errs = b''
drainer = lambda: self._work_binary_drainer(self,
'stderr_drainer',
self.stderr,
add_to_errs_writer)
stderr_worker = threading.Thread(
target=drainer,
name="stderr_worker")
stderr_worker.start()
self._workers["stderr_worker"] = stderr_worker
def communicate(self, input=None, timeout=None):
'''
Send data to stdin. Read data from stdout and stderr, until end-of-file is reached.
Wait for the processes to terminate. The optional input argument should be data to be sent
to the first (most upstream) child process, or None if no data should be sent.
If streams were opened in text mode, input must be a string. Otherwise, it must be bytes.
Returns
----------
stdout_data
stdout of the most downstream (last) process
stderr_data
stderr of the whole pipeline if pipechildren.PIPE is specified.
The data will be strings if streams were opened in text mode; otherwise, bytes.
'''
logger.debug("communicate called")
if len(self.processes) == 1:
# In this case, just call subprocess.communicate
self.outs, self.errs = self.processes[0].communicate(input=input, timeout=timeout)
return self.outs, self.errs
firsttime = True
if self._communicate_called:
firsttime = False
self._communicate_called = True
if firsttime:
self._start_communicate_pipes(input=input)
timeout_at = None
if timeout:
timeout_at = self.get_timeout_at(timeout)
self.wait(timeout=timeout)
# If self.wait() timed out, it raises TimeoutExpired to the caller out of this method.
# If we reach here, all processes have finished.
# Close stdin first then wait for the end of output workers.
if self.stdin:
self.stdin.close()
timedout = False
if self._workers["stdin_worker"]:
timeout_left = self._time_left_sec(timeout_at)
self._workers["stdin_worker"].join(timeout=timeout_left)
timedout = self._workers["stdin_worker"].is_alive()
if self._workers["stdout_worker"] and not timedout:
timeout_left = self._time_left_sec(timeout_at)
self._workers["stdout_worker"].join(timeout=timeout_left)
timedout = self._workers["stdout_worker"].is_alive()
if self._workers["stderr_worker"] and not timedout:
timeout_left = self._time_left_sec(timeout_at)
self._workers["stderr_worker"].join(timeout=timeout_left)
if not timedout:
timedout = self._workers["stderr_worker"].is_alive()
if timedout:
raise TimeoutExpired(self.popen_args_list, timeout, stdout=self.outs, stderr=self.errs)
# Guard all workers from running just in case.
self._stop_workers = True
# Close up pipes
if self.stdout:
self.stdout.close()
if self.stderr:
self.stderr.close()
for p in self.processes:
if p.stderr:
p.stderr.close()
return self.outs, self.errs
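# Usage sketch (the exact constructor signature is not shown here, so the
# argument layout below is an assumption, not the documented API):
#   p = pipechildren.Popen([["grep", "-v", "^#"], ["sort"]],
#                          stdin=pipechildren.PIPE, stdout=pipechildren.PIPE)
#   outs, errs = p.communicate(input=b"b\n#comment\na\n", timeout=10)
# On timeout a TimeoutExpired is raised carrying the partial outs/errs
# collected so far.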
def kill(self, *args):
if args and isinstance(args[0], list):
for i in args[0]:
self.processes[i].kill()
else:
for p in self.processes:
p.kill()
def terminate(self, *args):
if args and isinstance(args[0], list):
for i in args[0]:
self.processes[i].terminate()
else:
for p in self.processes:
p.terminate()
def send_signal(self, signal, *args):
if args and isinstance(args[0], list):
for i in args[0]:
self.processes[i].send_signal(signal)
else:
for p in self.processes:
p.send_signal(signal)
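# The signal helpers above fan out to every child process by default; passing a
# list of indices restricts them to those children, e.g. (continuing the sketch,
# assuming the caller has imported signal):
#   p.terminate()                        # terminate every child
#   p.kill([0])                          # kill only the first child
#   p.send_signal(signal.SIGINT, [1])    # SIGINT to the second child only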
|
py | 1a36a00f1fba90f0525d96633f1d422d1c2c6092 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the PyMVPA package for the
# copyright and license terms.
#
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""Import helper for FSL"""
if __debug__:
from mvpa2.base import debug
debug('INIT', 'mvpa2.misc.fsl')
from mvpa2.misc.fsl.base import *
from mvpa2.misc.fsl.flobs import *
if __debug__:
debug('INIT', 'mvpa2.misc.fsl end')
|
py | 1a36a07f8c0860e9f3be851c2a6ca3f7f8996dd3 | # Copyright 2021 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pathlib
import tempfile
import unittest
from unittest.mock import patch
import click.testing
import cpo.config
import cpo.utils.file
import cpo.utils.operating_system
from cpo.cpo import cli
from cpo.lib.dependency_manager.plugins.ibm_cloud_terraform_provider_plugin import (
IBMCloudTerraformProviderPlugIn,
)
class TestDownloadDependencies(unittest.TestCase):
def add_os_specific_executable_extension(self, executable_name: str) -> str:
operating_system = cpo.utils.operating_system.get_operating_system()
if operating_system == cpo.utils.operating_system.OperatingSystem.WINDOWS:
executable_name += ".exe"
return executable_name
def check_executable_exists(self, bin_directory_path: pathlib.Path, executable_name: str):
self.assertTrue(
(pathlib.Path(bin_directory_path) / self.add_os_specific_executable_extension(executable_name)).exists()
)
@patch(
"cpo.config.binaries_manager.configuration_manager.get_home_directory_path",
return_value=pathlib.Path(tempfile.gettempdir()),
)
def test_command(self, test_mock):
"""Tests that cpo adm download-dependencies downloads
dependencies"""
bin_directory_path = cpo.config.configuration_manager.get_bin_directory_path()
terraform_plugins_directory_path = IBMCloudTerraformProviderPlugIn().get_terraform_plugins_directory_path()
for entry in bin_directory_path.glob("*"):
if entry.is_file():
os.remove(entry)
runner = click.testing.CliRunner()
result = runner.invoke(cli, ["adm", "download-dependencies"])
self.assertEqual(result.exit_code, 0)
self.assertGreaterEqual(
len(list(terraform_plugins_directory_path.glob("terraform-provider-ibm*"))),
1,
)
self.check_executable_exists(bin_directory_path, "ibmcloud")
self.check_executable_exists(bin_directory_path, "oc")
self.check_executable_exists(bin_directory_path, "terraform")
if __name__ == "__main__":
unittest.main()
|
py | 1a36a16e881162ab0e38cd6a9dd8dbd1a827b8a8 | # -*- coding: utf-8 -*-
# Copyright (c) The python-semanticversion project
# This code is distributed under the two-clause BSD License.
from __future__ import unicode_literals
import functools
import re
from .compat import base_cmp
def _to_int(value):
try:
return int(value), True
except ValueError:
return value, False
def _has_leading_zero(value):
return (value
and value[0] == '0'
and value.isdigit()
and value != '0')
def identifier_cmp(a, b):
"""Compare two identifier (for pre-release/build components)."""
a_cmp, a_is_int = _to_int(a)
b_cmp, b_is_int = _to_int(b)
if a_is_int and b_is_int:
# Numeric identifiers are compared as integers
return base_cmp(a_cmp, b_cmp)
elif a_is_int:
# Numeric identifiers have lower precedence
return -1
elif b_is_int:
return 1
else:
# Non-numeric identifiers are compared by a natural comparison
# adapted from https://stackoverflow.com/questions/8408125/python-natural-comparison-between-strings
convert = lambda text: int(text) if text.isdigit() else text.lower()
alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key) ]
return base_cmp(alphanum_key(a_cmp), alphanum_key(b_cmp))
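# A few concrete orderings produced by identifier_cmp (note the natural,
# case-insensitive comparison used for non-numeric identifiers):
#   identifier_cmp('1', '2')            -> -1   # integer comparison
#   identifier_cmp('alpha', '1')        ->  1   # numeric identifiers sort lower
#   identifier_cmp('alpha2', 'alpha10') -> -1   # 'alpha2' precedes 'alpha10'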
def identifier_list_cmp(a, b):
"""Compare two identifier list (pre-release/build components).
The rule is:
- Identifiers are paired between lists
- They are compared from left to right
- If all first identifiers match, the longest list is greater.
>>> identifier_list_cmp(['1', '2'], ['1', '2'])
0
>>> identifier_list_cmp(['1', '2a'], ['1', '2b'])
-1
>>> identifier_list_cmp(['1'], ['1', '2'])
-1
"""
identifier_pairs = zip(a, b)
for id_a, id_b in identifier_pairs:
cmp_res = identifier_cmp(id_a, id_b)
if cmp_res != 0:
return cmp_res
# alpha1.3 < alpha1.3.1
return base_cmp(len(a), len(b))
class Version(object):
version_re = re.compile(r'^(\d+)\.(\d+)\.(\d+)(?:-([0-9a-zA-Z.-]+))?(?:\+([0-9a-zA-Z.-]+))?$')
partial_version_re = re.compile(r'^(\d+)(?:\.(\d+)(?:\.(\d+))?)?(?:-([0-9a-zA-Z.-]*))?(?:\+([0-9a-zA-Z.-]*))?$')
def __init__(self, version_string, partial=False):
major, minor, patch, prerelease, build = self.parse(version_string, partial)
self.major = major
self.minor = minor
self.patch = patch
self.prerelease = prerelease
self.build = build
self.partial = partial
@classmethod
def _coerce(cls, value, allow_none=False):
if value is None and allow_none:
return value
return int(value)
def next_major(self):
if self.prerelease and self.minor == 0 and self.patch == 0:
return Version('.'.join(str(x) for x in [self.major, self.minor, self.patch]))
else:
return Version('.'.join(str(x) for x in [self.major + 1, 0, 0]))
def next_minor(self):
if self.prerelease and self.patch == 0:
return Version('.'.join(str(x) for x in [self.major, self.minor, self.patch]))
else:
return Version(
'.'.join(str(x) for x in [self.major, self.minor + 1, 0]))
def next_patch(self):
if self.prerelease:
return Version('.'.join(str(x) for x in [self.major, self.minor, self.patch]))
else:
return Version(
'.'.join(str(x) for x in [self.major, self.minor, self.patch + 1]))
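# Behaviour of the bump helpers above, for example:
#   Version('1.4.2').next_minor()     -> Version('1.5.0')
#   Version('1.4.2').next_major()     -> Version('2.0.0')
#   Version('1.4.2-rc1').next_patch() -> Version('1.4.2')  # a pre-release bumps
#                                                          # to its own release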
@classmethod
def coerce(cls, version_string, partial=False):
"""Coerce an arbitrary version string into a semver-compatible one.
The rule is:
- If not enough components, fill minor/patch with zeroes; unless
partial=True
- If more than 3 dot-separated components, extra components are "build"
data. If some "build" data already appeared, append it to the
extra components
Examples:
>>> Version.coerce('0.1')
Version(0, 1, 0)
>>> Version.coerce('0.1.2.3')
Version(0, 1, 2, (), ('3',))
>>> Version.coerce('0.1.2.3+4')
Version(0, 1, 2, (), ('3', '4'))
>>> Version.coerce('0.1+2-3+4_5')
Version(0, 1, 0, (), ('2-3', '4-5'))
"""
base_re = re.compile(r'^\d+(?:\.\d+(?:\.\d+)?)?')
match = base_re.match(version_string)
if not match:
raise ValueError(
"Version string lacks a numerical component: %r"
% version_string
)
version = version_string[:match.end()]
if not partial:
# We need a not-partial version.
while version.count('.') < 2:
version += '.0'
if match.end() == len(version_string):
return Version(version, partial=partial)
rest = version_string[match.end():]
# Cleanup the 'rest'
rest = re.sub(r'[^a-zA-Z0-9+.-]', '-', rest)
if rest[0] == '+':
# A 'build' component
prerelease = ''
build = rest[1:]
elif rest[0] == '.':
# An extra version component, probably 'build'
prerelease = ''
build = rest[1:]
elif rest[0] == '-':
rest = rest[1:]
if '+' in rest:
prerelease, build = rest.split('+', 1)
else:
prerelease, build = rest, ''
elif '+' in rest:
prerelease, build = rest.split('+', 1)
else:
prerelease, build = rest, ''
build = build.replace('+', '.')
if prerelease:
version = '%s-%s' % (version, prerelease)
if build:
version = '%s+%s' % (version, build)
return cls(version, partial=partial)
@classmethod
def parse(cls, version_string, partial=False, coerce=False):
"""Parse a version string into a Version() object.
Args:
version_string (str), the version string to parse
partial (bool), whether to accept incomplete input
coerce (bool), whether to try to map the passed in string into a
valid Version.
"""
if not version_string:
raise ValueError('Invalid empty version string: %r' % version_string)
if partial:
version_re = cls.partial_version_re
else:
version_re = cls.version_re
match = version_re.match(version_string)
if not match:
raise ValueError('Invalid version string: %r' % version_string)
major, minor, patch, prerelease, build = match.groups()
if _has_leading_zero(major):
raise ValueError("Invalid leading zero in major: %r" % version_string)
if _has_leading_zero(minor):
raise ValueError("Invalid leading zero in minor: %r" % version_string)
if _has_leading_zero(patch):
raise ValueError("Invalid leading zero in patch: %r" % version_string)
major = int(major)
minor = cls._coerce(minor, partial)
patch = cls._coerce(patch, partial)
if prerelease is None:
if partial and (build is None):
# No build info, strip here
return (major, minor, patch, None, None)
else:
prerelease = ()
elif prerelease == '':
prerelease = ()
else:
prerelease = tuple(prerelease.split('.'))
cls._validate_identifiers(prerelease, allow_leading_zeroes=False)
if build is None:
if partial:
build = None
else:
build = ()
elif build == '':
build = ()
else:
build = tuple(build.split('.'))
cls._validate_identifiers(build, allow_leading_zeroes=True)
return (major, minor, patch, prerelease, build)
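# For reference, a fully specified string parses into its five components:
#   Version.parse('1.2.3-alpha.1+build.5') -> (1, 2, 3, ('alpha', '1'), ('build', '5'))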
@classmethod
def _validate_identifiers(cls, identifiers, allow_leading_zeroes=False):
for item in identifiers:
if not item:
raise ValueError(
"Invalid empty identifier %r in %r"
% (item, '.'.join(identifiers))
)
if item[0] == '0' and item.isdigit() and item != '0' and not allow_leading_zeroes:
raise ValueError("Invalid leading zero in identifier %r" % item)
def __iter__(self):
return iter((self.major, self.minor, self.patch, self.prerelease, self.build))
def __str__(self):
version = '%d' % self.major
if self.minor is not None:
version = '%s.%d' % (version, self.minor)
if self.patch is not None:
version = '%s.%d' % (version, self.patch)
if self.prerelease or (self.partial and self.prerelease == () and self.build is None):
version = '%s-%s' % (version, '.'.join(self.prerelease))
if self.build or (self.partial and self.build == ()):
version = '%s+%s' % (version, '.'.join(self.build))
return version
def __repr__(self):
return '%s(%r%s)' % (
self.__class__.__name__,
str(self),
', partial=True' if self.partial else '',
)
@classmethod
def _comparison_functions(cls, partial=False):
"""Retrieve comparison methods to apply on version components.
This is a private API.
Args:
partial (bool): whether to provide 'partial' or 'strict' matching.
Returns:
5-tuple of cmp-like functions.
"""
def prerelease_cmp(a, b):
"""Compare prerelease components.
Special rule: a version without prerelease component has higher
precedence than one with a prerelease component.
"""
if a and b:
return identifier_list_cmp(a, b)
elif a:
# Versions with prerelease field have lower precedence
return -1
elif b:
return 1
else:
return 0
def build_cmp(a, b):
"""Compare build metadata.
Special rule: there is no ordering on build metadata.
"""
if a == b:
return 0
else:
return NotImplemented
def make_optional(orig_cmp_fun):
"""Convert a cmp-like function to consider 'None == *'."""
@functools.wraps(orig_cmp_fun)
def alt_cmp_fun(a, b):
if a is None or b is None:
return 0
return orig_cmp_fun(a, b)
return alt_cmp_fun
if partial:
return [
base_cmp, # Major is still mandatory
make_optional(base_cmp),
make_optional(base_cmp),
make_optional(prerelease_cmp),
make_optional(build_cmp),
]
else:
return [
base_cmp,
base_cmp,
base_cmp,
prerelease_cmp,
build_cmp,
]
def __compare(self, other):
comparison_functions = self._comparison_functions(partial=self.partial or other.partial)
comparisons = zip(comparison_functions, self, other)
for cmp_fun, self_field, other_field in comparisons:
cmp_res = cmp_fun(self_field, other_field)
if cmp_res != 0:
return cmp_res
return 0
def __hash__(self):
return hash((self.major, self.minor, self.patch, self.prerelease, self.build))
def __cmp__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return self.__compare(other)
def __compare_helper(self, other, condition, notimpl_target):
"""Helper for comparison.
Allows the caller to provide:
- The condition
- The return value if the comparison is meaningless (ie versions with
build metadata).
"""
if not isinstance(other, self.__class__):
return NotImplemented
cmp_res = self.__cmp__(other)
if cmp_res is NotImplemented:
return notimpl_target
return condition(cmp_res)
def __eq__(self, other):
return self.__compare_helper(other, lambda x: x == 0, notimpl_target=False)
def __ne__(self, other):
return self.__compare_helper(other, lambda x: x != 0, notimpl_target=True)
def __lt__(self, other):
return self.__compare_helper(other, lambda x: x < 0, notimpl_target=False)
def __le__(self, other):
return self.__compare_helper(other, lambda x: x <= 0, notimpl_target=False)
def __gt__(self, other):
return self.__compare_helper(other, lambda x: x > 0, notimpl_target=False)
def __ge__(self, other):
return self.__compare_helper(other, lambda x: x >= 0, notimpl_target=False)
class SpecItem(object):
"""A requirement specification."""
KIND_ANY = '*'
KIND_LT = '<'
KIND_LTE = '<='
KIND_EQUAL = '=='
KIND_SHORTEQ = '='
KIND_EMPTY = ''
KIND_GTE = '>='
KIND_GT = '>'
KIND_NEQ = '!='
KIND_CARET = '^'
KIND_TILDE = '~'
KIND_COMPATIBLE = '~='
# Map a kind alias to its full version
KIND_ALIASES = {
KIND_SHORTEQ: KIND_EQUAL,
KIND_EMPTY: KIND_EQUAL,
}
re_spec = re.compile(r'^(<|<=||=|==|>=|>|!=|\^|~|~=)(\d.*)$')
def __init__(self, requirement_string):
kind, spec = self.parse(requirement_string)
self.kind = kind
self.spec = spec
@classmethod
def parse(cls, requirement_string):
if not requirement_string:
raise ValueError("Invalid empty requirement specification: %r" % requirement_string)
# Special case: the 'any' version spec.
if requirement_string == '*':
return (cls.KIND_ANY, '')
match = cls.re_spec.match(requirement_string)
if not match:
raise ValueError("Invalid requirement specification: %r" % requirement_string)
kind, version = match.groups()
if kind in cls.KIND_ALIASES:
kind = cls.KIND_ALIASES[kind]
spec = Version(version, partial=True)
if spec.build is not None and kind not in (cls.KIND_EQUAL, cls.KIND_NEQ):
raise ValueError(
"Invalid requirement specification %r: build numbers have no ordering."
% requirement_string
)
return (kind, spec)
def match(self, version):
if self.kind == self.KIND_ANY:
return True
elif self.kind == self.KIND_LT:
return version < self.spec
elif self.kind == self.KIND_LTE:
return version <= self.spec
elif self.kind == self.KIND_EQUAL:
return version == self.spec
elif self.kind == self.KIND_GTE:
return version >= self.spec
elif self.kind == self.KIND_GT:
return version > self.spec
elif self.kind == self.KIND_NEQ:
return version != self.spec
elif self.kind == self.KIND_CARET:
if self.spec.major != 0:
upper = self.spec.next_major()
elif self.spec.minor != 0:
upper = self.spec.next_minor()
else:
upper = self.spec.next_patch()
return self.spec <= version < upper
elif self.kind == self.KIND_TILDE:
return self.spec <= version < self.spec.next_minor()
elif self.kind == self.KIND_COMPATIBLE:
if self.spec.patch is not None:
upper = self.spec.next_minor()
else:
upper = self.spec.next_major()
return self.spec <= version < upper
else: # pragma: no cover
raise ValueError('Unexpected match kind: %r' % self.kind)
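# Range-operator semantics implemented above, for example:
#   SpecItem('^1.2.3').match(Version('1.9.0'))   -> True   (upper bound 2.0.0)
#   SpecItem('~1.2.3').match(Version('1.3.0'))   -> False  (upper bound 1.3.0, exclusive)
#   SpecItem('~=1.4.5').match(Version('1.5.0'))  -> False  (patch given, so upper bound 1.5.0)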
def __str__(self):
return '%s%s' % (self.kind, self.spec)
def __repr__(self):
return '<SpecItem: %s %r>' % (self.kind, self.spec)
def __eq__(self, other):
if not isinstance(other, SpecItem):
return NotImplemented
return self.kind == other.kind and self.spec == other.spec
def __hash__(self):
return hash((self.kind, self.spec))
class Spec(object):
def __init__(self, *specs_strings):
subspecs = [self.parse(spec) for spec in specs_strings]
self.specs = sum(subspecs, ())
@classmethod
def parse(cls, specs_string):
spec_texts = specs_string.split(',')
return tuple(SpecItem(spec_text) for spec_text in spec_texts)
def match(self, version):
"""Check whether a Version satisfies the Spec."""
return all(spec.match(version) for spec in self.specs)
def filter(self, versions):
"""Filter an iterable of versions satisfying the Spec."""
for version in versions:
if self.match(version):
yield version
def select(self, versions):
"""Select the best compatible version among an iterable of options."""
options = list(self.filter(versions))
if options:
return max(options)
return None
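# e.g. Spec('>=1.0.0,<2.0.0').select([Version('0.9.0'), Version('1.2.0'), Version('1.4.0')])
# returns Version('1.4.0'): filter() keeps the matching candidates and select()
# picks the highest one, or None when nothing matches.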
def __contains__(self, version):
if isinstance(version, Version):
return self.match(version)
return False
def __iter__(self):
return iter(self.specs)
def __str__(self):
return ','.join(str(spec) for spec in self.specs)
def __repr__(self):
return '<Spec: %r>' % (self.specs,)
def __eq__(self, other):
if not isinstance(other, Spec):
return NotImplemented
return set(self.specs) == set(other.specs)
def __hash__(self):
return hash(self.specs)
def compare(v1, v2):
return base_cmp(Version(v1), Version(v2))
def match(spec, version):
return Spec(spec).match(Version(version))
def validate(version_string):
"""Validates a version string againt the SemVer specification."""
try:
Version.parse(version_string)
return True
except ValueError:
return False
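# Module-level convenience helpers in action:
#   compare('1.2.0', '1.3.0')        -> -1
#   match('>=1.0.0,<2.0.0', '1.2.3') -> True
#   validate('1.2.3-alpha+build.1')  -> True
#   validate('1.2')                  -> False (not a full major.minor.patch)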
|
py | 1a36a22852b7abed44fd77678103a95ac8ca055a | import unittest
from polynomials_on_simplices.geometry.primitives.simplex import inside_simplex, unit
from polynomials_on_simplices.probability_theory.uniform_sampling import (
closed_unit_interval_sample, left_closed_interval_sample, nsimplex_sampling, open_unit_interval_sample,
right_closed_interval_sample)
class TestUnitIntervalSample(unittest.TestCase):
def test_closed_unit_interval_sample(self):
s = closed_unit_interval_sample()
self.assertTrue(s >= 0.0)
self.assertTrue(s <= 1.0)
def test_left_closed_interval_sample(self):
s = left_closed_interval_sample()
self.assertTrue(s >= 0.0)
self.assertTrue(s < 1.0)
def test_right_closed_interval_sample(self):
s = right_closed_interval_sample()
self.assertTrue(s > 0.0)
self.assertTrue(s <= 1.0)
def test_open_unit_interval_sample(self):
s = open_unit_interval_sample()
self.assertTrue(s > 0.0)
self.assertTrue(s < 1.0)
class TestNSimplexSampling(unittest.TestCase):
def test_inside(self):
points = nsimplex_sampling(3, 3)
for i in range(3):
self.assertTrue(inside_simplex(points[i], unit(3)))
if __name__ == '__main__':
unittest.main()
|
py | 1a36a24c80e2198e1fcafb92db85bd8ff4640a72 | """Fully connected layer."""
import numpy as np
import theano
import theano.tensor as T
from athenet.layers import WeightedLayer
class FullyConnectedLayer(WeightedLayer):
"""Fully connected layer."""
def __init__(self, n_out, n_in=None, input_layer_name=None, name='fc'):
"""Create fully connected layer.
:param integer n_out: Number of output neurons.
:param integer n_in: Number of input neurons.
"""
super(FullyConnectedLayer, self).__init__(input_layer_name, name)
self._n_in = None
self.W_shared = None
self.n_out = n_out
self.n_in = n_in
@property
def n_in(self):
"""Number of input neurons."""
return self._n_in
@n_in.setter
def n_in(self, value):
if not value or self._n_in == value:
return
self._n_in = value
W_value = np.asarray(
np.random.normal(
loc=0.,
scale=np.sqrt(1. / self.n_out),
size=(self.n_in, self.n_out)
),
dtype=theano.config.floatX
)
self.W_shared = theano.shared(W_value, borrow=True)
b_value = np.zeros((self.n_out,), dtype=theano.config.floatX)
self.b_shared = theano.shared(b_value, borrow=True)
@property
def input_shape(self):
return self.n_in
@input_shape.setter
def input_shape(self, value):
self.n_in = np.prod(value)
@property
def output_shape(self):
return self.n_out
def _reshape_input(self, raw_layer_input):
"""Return input in the correct format for fully connected layer.
:param raw_layer_input: Input in the format (n_batches, n_in) or
compatible.
:type raw_layer_input: pair of integers
"""
return raw_layer_input.flatten(2)
def _get_output(self, layer_input):
"""Return layer's output.
:param layer_input: Input in the format (n_batches, n_in).
:return: Layer output.
"""
return T.dot(self.input, self.W_shared) + self.b_shared
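# Shape sketch for the layer above (layer sizes are illustrative):
#   fc = FullyConnectedLayer(n_out=10, n_in=784)
#   # fc.W_shared has shape (784, 10) and fc.b_shared has shape (10,).
#   # Assigning input_shape = (28, 28) instead would set n_in = prod((28, 28)) = 784.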
|
py | 1a36a2c0cd5c5d5863310e4b5f5715d933d13f48 | #!/usr/bin/env python3
"""
TODO
USAGE:
yb_create_log_query_history.py [options]
PURPOSE:
Build/update long term history db table/views sourced from the sys.log_query view.
OPTIONS:
See the command line help message for all options.
(yb_create_log_query_history.py --help)
Output:
Action taken, like:
--created log_query_history table, log_query_history_text table and log_query_history_v view
--inserted X queries into log_query_history and log_query_history_text
"""
import getpass, re
from yb_common import Common, DBConnect, Text, Util
class create_log_query_history(Util):
"""Build/update long term history db table/views sourced from the sys.log_query view.
"""
config = {
'description': (
'Build/update long term history db table/views sourced from the sys.log_query view.'
'\n'
'\nnote:'
'\n On the first execution the create_log_query_history will;'
'\n 1. request super user credentials to create supporting stored procs.'
'\n 2. create the history query table, query_text table and query view.'
'\n Every run inserts new log queries into the history query and query_text tables.')
, 'optional_args_single': []
, 'usage_example': {
'cmd_line_args': """@$HOME/conn.args --log_table_name user_log_query_hist --where_clause "username NOT LIKE 'sys_ybd_%'" """
, 'file_args': [Util.conn_args_file] } }
def additional_args(self):
log_query_hist_grp = self.args_handler.args_parser.add_argument_group(
'log query history arguments')
log_query_hist_grp.add_argument("--log_table_name", default="log_query_history"
, help="the object name prefix used for the 2 log tables and view, defaults to 'log_query_history'")
log_query_hist_grp.add_argument("--where_clause", default="TRUE"
, help=("where clause applied to sys.log_query to limit the queries for which history is maintained,"
" defaults to 'TRUE' meaning all queries") )
def complete_db_conn(self):
if self.db_conn.ybdb['is_super_user']:
self.args_handler.args_parser.error("dbuser '%s' must not be a db super user..." % self.db_conn.ybdb['user'])
return
def create_log_query_history(self):
result = self.db_conn.ybsql_query("""
SELECT create_log_query_history_p(
'{log_table_name}'
, $${where_clause}$$);""".format(
log_table_name=Common.quote_object_paths(self.args_handler.args.log_table_name)
, where_clause=self.args_handler.args.where_clause) )
return(result)
def create_su_db_conn(self):
su_env = self.db_conn.env.copy()
su_env['conn_db'] = self.db_conn.database
su_env['dbuser'] = input("Enter the super user name to create required stored procs with: ")
prompt = ("Enter the password for cluster %s, user %s: "
% (Text.color(su_env['host'], fg='cyan')
, Text.color(su_env['dbuser'], fg='cyan')))
su_env['pwd'] = getpass.getpass(prompt)
DBConnect.set_env(su_env)
self.su_db_conn = DBConnect(env=su_env, conn_type='su')
DBConnect.set_env(self.db_conn.env_pre)
if not self.su_db_conn.ybdb['is_super_user']:
Common.error("dbuser '%s' is not a super user..." % su_env['dbuser'])
def create_stored_procs(self):
filename = '%s/sql/log_query_history/materialize_sys_log_query_p.sql' % Common.util_dir_path
sql = open(filename).read()
sql = ("""SET SCHEMA '%s';
%s;
GRANT EXECUTE ON PROCEDURE materialize_sys_log_query_p(VARCHAR, VARCHAR, VARCHAR, BOOLEAN) TO %s;"""
% (self.db_conn.schema, sql, self.db_conn.env['dbuser']) )
result = self.su_db_conn.ybsql_query(sql)
result.on_error_exit()
filename = '%s/sql/log_query_history/create_log_query_history_p.sql' % Common.util_dir_path
sql = open(filename).read()
result = self.db_conn.ybsql_query(sql)
result.on_error_exit()
def fix_stored_proc_stdout(self, result):
"""stored procs print everything to stderr. This routine moves all stderr
lines starting with 'INFO: --' to stdout."""
matches = re.finditer(r"^(INFO:\s*)?(--.*)$", result.stderr, re.MULTILINE)
stdout = ''
stderr = ''
for matchNum, match in enumerate(matches, start=1):
if (match.group(1)):
stdout = ('%s\n%s' % (stdout, match.group(2))) if len(stdout) else match.group(2)
else:
stderr = ('%s\n%s' % (stderr, match.group(2))) if len(stderr) else match.group(2)
result.proc_return = result.stdout
result.stdout = stdout if len(stdout.strip()) else ''
result.stderr = stderr if len(stderr.strip()) else ''
def execute(self):
self.complete_db_conn()
result = self.create_log_query_history()
if re.search(r"create_log_query_history_p.*does not exist", result.stderr):
self.create_su_db_conn()
self.create_stored_procs()
result = self.create_log_query_history()
self.fix_stored_proc_stdout(result)
result.on_error_exit()
result.write()
exit(result.exit_code)
def main():
clqh = create_log_query_history()
clqh.execute()
if __name__ == "__main__":
main()
|