the-stack_0_15519 | #!/usr/bin/python3.6
import sys, os, importlib
from .system import console, execute
from .util import glob_with_extensions, glob_folders_with_name_match
from .build_config import BuildConfig
from .build_target import BuildTarget
from .build_dependency import BuildDependency
from .dependency_chain import load_dependency_chain, execute_task_chain, find_dependency, get_full_flattened_deps
from .init_project import mama_init_project
def print_title():
console(f'========= Mama Build Tool ==========')
def print_usage():
console('mama [actions...] [args...]')
console(' actions:')
console(' init - create initial mamafile.py and CMakeLists.txt')
console(' list - list all mama dependencies on this project')
console(' build - configure and build main project or specific target, this can clone, but does not pull')
console(' update - update and build target dependencies after calling git pull')
console(' deploy - runs PAPA deploy stage by gathering all libraries and assets')
console(' serve - Equivalent of `update build deploy`')
console(' clean - clean main project or specific target')
console(' rebuild - clean, update and build main project or specific target')
console(' reclone - wipe specific target dependency and clone it again')
console(' wipe - alias of reclone')
console(' test - run tests for main project or specific target')
console(' start=arg - start a specific tool via mamafile.start(args)')
console(' add - add new dependency')
console(' new - create new mama build file')
console(' open=<tgt> - open a project file')
console(' help - shows this help list')
console(' install utils:')
console(' install-clang6 - configures and installs clang6 for linux')
console(' install-msbuild - configures and installs MSBuild for linux')
console(' args:')
console(' windows - build for windows')
console(' linux - build for linux')
console(' macos - build for macos')
console(' ios - build for ios')
console(' android - build for android')
console(' android-N - build for android targeting specific API level, ex: android-26')
console(' clang - prefer clang for linux (default on linux/macos/ios/android)')
console(' gcc - prefer gcc for linux')
console(' fortran - enable automatic fortran detection (or configure this in mamafile)')
console(' release - (default) CMake configuration RelWithDebInfo')
console(' debug - CMake configuration Debug')
console(' arch=x86 - Override cross-compiling architecture: (x86, x64, arm, arm64)')
console(' x86 - Shorthand for arch=x86, all shorthands: x86 x64 arm arm64')
console(' jobs=N - Max number of parallel compilations. (default=system.core.count)')
console(' target=P - Name of the target')
console(' all - Short for target=all')
console(' silent - Greatly reduces verbosity')
console(' verbose - Greatly increases verbosity for build dependencies and cmake')
console(' examples:')
console(' mama init Initialize a new project. Tries to create mamafile.py and CMakeLists.txt')
console(' mama build Update and build main project only. This only clones, but does not update!')
console(' mama build x86 opencv Cross compile build target opencv to x86 architecture')
console(' mama build android Cross compile to arm64 android NDK')
console(' mama build android-26 arm Cross compile to armv7 android NDK API level 26')
console(' mama update Update all dependencies by doing git pull and build.')
console(' mama clean Cleans main project only.')
console(' mama clean x86 opencv Cleans the opencv target for x86 architecture.')
console(' mama clean all Cleans EVERYTHING in the dependency chain for current arch.')
console(' mama rebuild Cleans, update and build main project only.')
console(' mama build dep1 Update and build dep1 only.')
console(' mama update dep1 Update and build the specified target.')
console(' mama serve android Update, build and deploy for Android')
console(' mama wipe dep1 Wipe target dependency completely and clone again.')
console(' mama test Run tests on main project.')
console(' mama test=arg Run tests on main project with an argument.')
console(' mama test="arg1 arg2" Run tests on main project with multiple arguments.')
console(' mama test dep1 Run tests on target dependency project.')
console(' mama start=dbtool Call main project mamafile start() with args [`dbtool`].')
console(' environment:')
console(' setenv("NINJA") Path to NINJA build executable')
console(' setenv("ANDROID_HOME") Path to Android SDK if auto-detect fails')
def open_project(config: BuildConfig, root_dependency: BuildDependency):
name = config.target if config.target and config.target != 'all' else config.open
found = root_dependency if name == 'root' else find_dependency(root_dependency, name)
if not found:
raise KeyError(f'No project named {name}')
if config.windows:
solutions = glob_with_extensions(found.build_dir, ['.sln'])
if not solutions:
raise EnvironmentError('Could not find any Visual Studio solutions!')
execute(f'start {solutions[0]}', echo=True)
elif config.macos or config.ios:
projects = glob_folders_with_name_match(found.build_dir, ['.xcodeproj'])
if not projects:
raise EnvironmentError('Could not find any Xcode projects!')
execute(f'open {projects[0]}', echo=True)
elif config.linux:
raise EnvironmentError('Linux IDE selection not implemented. Try opening this folder with CLion.')
#execute(f'xdg-open', echo=True)
elif config.android:
raise EnvironmentError('Android IDE selection not implemented. Try opening this folder with Android Studio.')
def set_target_from_unused_args(config: BuildConfig):
for arg in config.unused_args:
if config.target:
console(f"ERROR: Deduced Target='{arg}' from unused argument, but target is already set to '{config.target}'")
exit(-1)
else:
config.target = arg
def check_config_target(config: BuildConfig, root: BuildDependency):
if config.target and config.target != 'all':
dep = find_dependency(root, config.target)
if dep is None:
console(f"ERROR: specified target='{config.target}' not found!")
exit(-1)
def main():
if sys.version_info < (3, 6):
console('FATAL ERROR: MamaBuild requires Python 3.6')
exit(-1)
if len(sys.argv) == 1 or 'help' in sys.argv:
print_title()
print_usage()
exit(-1)
config = BuildConfig(sys.argv[1:])
if config.print:
print_title()
source_dir = os.getcwd()
name = os.path.basename(source_dir)
root = BuildDependency(name, config, BuildTarget, src=source_dir, is_root=True)
if config.mama_init:
mama_init_project(root)
return
if config.convenient_install:
config.run_convenient_installs()
return
has_cmake = root.cmakelists_exists()
if not root.mamafile_exists() and not has_cmake:
console('FATAL ERROR: mamafile.py not found and CMakeLists.txt not found')
exit(-1)
if config.unused_args:
set_target_from_unused_args(config)
if config.update:
if not config.target:
config.target = 'all'
if config.print: console(f'Updating all targets')
else:
if config.print: console(f'Updating {config.target} target')
if config.rebuild:
config.build = True
config.clean = True
if config.clean and not config.target:
root.clean()
load_dependency_chain(root)
check_config_target(config, root)
if config.list:
print(f'Dependency List: {get_full_flattened_deps(root)}')
if config.target:
dep = find_dependency(root, config.target)
if dep:
target:BuildTarget = dep.target
target.package()
inc, libs = target.get_target_products(config.target)
inc, libs = '\n '.join(inc.split(';')), '\n '.join(libs.split(';'))
print(f"target {config.target} includes:\n {inc}")
print(f"target {config.target} libraries:\n {libs}")
return
if config.android: config.init_ndk_path()
if config.raspi: config.init_raspi_path()
execute_task_chain(root)
if config.open:
open_project(config, root)
def __main__():
main()
if __name__ == '__main__':
main()
the-stack_0_15520 | while True:
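# Computes the length of the chain obtained by repeatedly replacing a number with its
# digit count until 1 is reached (the original number included); an input of "1" is
# answered with 1 directly and "END" stops the program.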
n = input()
if n == "END": break
if n == "1":
print(1)
continue
ans = 1
p = len(n)
tmp = 0
while True:
if tmp == p:
break
tmp = p
p = len(str(p))
ans += 1
print(ans)
the-stack_0_15522 | import numpy as np
import os.path
def subset_x_y(target, features, start_index:int, end_index:int):
"""Keep only the rows for X and y sets from the specified indexes
Parameters
----------
target : pd.DataFrame
Dataframe containing the target
features : pd.DataFrame
Dataframe containing all features
start_index : int
Index of the starting observation
end_index : int
Index of the ending observation
Returns
-------
pd.DataFrame
Subsetted Pandas dataframe containing all features
pd.DataFrame
Subsetted Pandas dataframe containing the target
"""
return features[start_index:end_index], target[start_index:end_index]
def split_sets_random(df, target_col, test_ratio=0.2, to_numpy=False):
"""Split sets randomly
Parameters
----------
df : pd.DataFrame
Input dataframe
target_col : str
Name of the target column
test_ratio : float
Ratio used for the validation and testing sets (default: 0.2)
Returns
-------
Numpy Array
Features for the training set
Numpy Array
Target for the training set
Numpy Array
Features for the validation set
Numpy Array
Target for the validation set
Numpy Array
Features for the testing set
Numpy Array
Target for the testing set
"""
from sklearn.model_selection import train_test_split
features, target = pop_target(df=df, target_col=target_col, to_numpy=to_numpy)
X_data, X_test, y_data, y_test = train_test_split(features, target, test_size=test_ratio, random_state=8)
val_ratio = test_ratio / (1 - test_ratio)
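# e.g. test_ratio=0.2 gives val_ratio=0.25: a quarter of the remaining 80% equals 20%
# of the full dataset, so the final split is 60% train / 20% validation / 20% test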
X_train, X_val, y_train, y_val = train_test_split(X_data, y_data, test_size=val_ratio, random_state=8)
return X_train, y_train, X_val, y_val, X_test, y_test
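# Example usage (illustrative; assumes a dataframe with a 'target' column):
#   X_train, y_train, X_val, y_val, X_test, y_test = split_sets_random(df, target_col='target')
#   save_sets(X_train, y_train, X_val, y_val, X_test, y_test, path='../data/processed/')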
def save_sets(X_train=None, y_train=None, X_val=None, y_val=None, X_test=None, y_test=None, path='../data/processed/'):
import numpy as np
if X_train is not None:
np.save(f'{path}X_train', X_train)
if X_val is not None:
np.save(f'{path}X_val', X_val)
if X_test is not None:
np.save(f'{path}X_test', X_test)
if y_train is not None:
np.save(f'{path}y_train', y_train)
if y_val is not None:
np.save(f'{path}y_val', y_val)
if y_test is not None:
np.save(f'{path}y_test', y_test)
def load_sets(path='../data/processed/', val=False):
import numpy as np
import os.path
X_train = np.load(f'{path}X_train.npy') if os.path.isfile(f'{path}X_train.npy') else None
X_val = np.load(f'{path}X_val.npy' ) if os.path.isfile(f'{path}X_val.npy') else None
X_test = np.load(f'{path}X_test.npy' ) if os.path.isfile(f'{path}X_test.npy') else None
y_train = np.load(f'{path}y_train.npy') if os.path.isfile(f'{path}y_train.npy') else None
y_val = np.load(f'{path}y_val.npy' ) if os.path.isfile(f'{path}y_val.npy') else None
y_test = np.load(f'{path}y_test.npy' ) if os.path.isfile(f'{path}y_test.npy') else None
return X_train, y_train, X_val, y_val, X_test, y_test
def pop_target(df, target_col, to_numpy=False):
"""Extract target variable from dataframe and convert to nympy arrays if required
Parameters
----------
df : pd.DataFrame
Dataframe
target_col : str
Name of the target variable
to_numpy : bool
Flag stating to convert to numpy array or not
Returns
-------
pd.DataFrame/Numpy array
Subsetted Pandas dataframe containing all features
pd.DataFrame/Numpy array
Subsetted Pandas dataframe containing the target
"""
df_copy = df.copy()
target = df_copy.pop(target_col)
if to_numpy:
df_copy = df_copy.to_numpy()
target = target.to_numpy()
return df_copy, target
the-stack_0_15524 | # pylint: disable=missing-docstring,no-self-use,no-member,misplaced-comparison-constant,expression-not-assigned
import logging
from unittest.mock import patch, Mock
import pytest
from expecter import expect
import yorm
from yorm import common
from yorm.decorators import attr
from yorm.types import Dictionary, List
from yorm.types import String, Integer
from . import strip
log = logging.getLogger(__name__)
# CLASSES #####################################################################
@attr(abc=Integer)
class SampleDictionary(Dictionary):
"""Sample dictionary container."""
@attr(var1=Integer)
@attr(var2=String)
class SampleDictionaryWithInitialization(Dictionary):
"""Sample dictionary container with initialization."""
def __init__(self, var1, var2, var3):
super().__init__()
self.var1 = var1
self.var2 = var2
self.var3 = var3
@attr(all=String)
class StringList(List):
"""Sample list container."""
class UnknownList(List):
"""Sample list container."""
# TESTS #######################################################################
class TestDictionary:
"""Unit tests for the `Dictionary` container."""
obj = {'abc': 123}
class SampleClass:
def __init__(self):
self.abc = 42
class SampleClass2:
def __init__(self):
self.unmapped = Mock()
data_value = [
(obj, obj),
(None, {'abc': 0}),
("key=value", {'key': "value", 'abc': 0}),
("key=", {'key': "", 'abc': 0}),
("key", {'key': None, 'abc': 0}),
]
value_data = [
(obj, obj),
(SampleClass(), {'abc': 42}),
(SampleClass2(), {'abc': 0}),
([], {'abc': 0}),
]
def setup_method(self, _):
"""Reset the class' mapped attributes before each test."""
common.attrs[SampleDictionary] = {'abc': Integer}
@pytest.mark.parametrize("data,value", data_value)
def test_to_value(self, data, value):
"""Verify input data is converted to values."""
assert value == SampleDictionary.to_value(data)
@pytest.mark.parametrize("value,data", value_data)
def test_to_data(self, value, data):
"""Verify values are converted to output data."""
assert data == SampleDictionary.to_data(value)
def test_not_implemented(self):
"""Verify `Dictionary` cannot be used directly."""
with pytest.raises(NotImplementedError):
Dictionary()
def test_dict_as_object(self):
"""Verify a `Dictionary` can be used as an attribute."""
dictionary = SampleDictionaryWithInitialization(1, 2, 3.0)
value = {'var1': 1, 'var2': '2'}
value2 = dictionary.to_value(dictionary)
assert value == value2
# keys are not accessible as attributes
assert not hasattr(value2, 'var1')
assert not hasattr(value2, 'var2')
assert not hasattr(value2, 'var3')
def test_unknown_attributes_are_ignored(self):
obj = SampleDictionary.create_default()
obj.update_value({'key': "value", 'abc': 7}, auto_track=False)
assert {'abc': 7} == obj
class TestList:
"""Unit tests for the `List` container."""
obj = ["a", "b", "c"]
data_value = [
(obj, obj),
(None, []),
([None], []),
("a b c", ["a", "b", "c"]),
("a,b,c", ["a", "b", "c"]),
("abc", ["abc"]),
("a\nb\nc", ["a", "b", "c"]),
(4.2, ['4.2']),
(("a", "b"), ["a", "b"]),
]
value_data = [
(obj, obj),
([], [None]),
]
@pytest.mark.parametrize("data,value", data_value)
def test_to_value(self, data, value):
"""Verify input data is converted to values."""
assert value == StringList.to_value(data)
@pytest.mark.parametrize("value,data", value_data)
def test_to_data(self, value, data):
"""Verify values are converted to output data."""
assert data == StringList.to_data(value)
def test_item_type(self):
"""Verify list item type can be determined."""
assert String == StringList.item_type
def test_item_type_none(self):
"""Verify list item type defaults to None."""
assert None is UnknownList.item_type
def test_not_implemented(self):
"""Verify `List` cannot be used directly."""
with pytest.raises(NotImplementedError):
List()
with pytest.raises(NotImplementedError):
UnknownList()
def test_shortened_syntax(self):
cls = List.of_type(Integer)
expect(cls.__name__) == "IntegerList"
expect(common.attrs[cls]) == {'all': Integer}
class TestExtensions:
"""Unit tests for extensions to the container classes."""
class FindMixin:
def find(self, value):
for value2 in self:
if value.lower() == value2.lower():
return value2
return None
@yorm.attr(a=yorm.types.String)
class MyDictionary(Dictionary, FindMixin):
pass
@yorm.attr(all=yorm.types.String)
class MyList(List, FindMixin):
pass
def test_converted_dict_keeps_type(self):
my_dict = self.MyDictionary()
my_dict['a'] = 1
my_dict2 = self.MyDictionary.to_value(my_dict)
assert 'a' == my_dict2.find('A')
assert None is my_dict2.find('B')
def test_converted_list_keeps_type(self):
my_list = self.MyList()
my_list.append('a')
my_list2 = self.MyList.to_value(my_list)
assert 'a' == my_list2.find('A')
assert None is my_list2.find('B')
@patch('yorm.settings.fake', True)
class TestReservedNames:
class MyObject:
def __init__(self, items=None):
self.items = items or []
def __repr__(self):
return "<my_object>"
def test_list_named_items(self):
my_object = self.MyObject()
yorm.sync_object(my_object, "fake/path", {'items': StringList})
log.info("Appending value to list of items...")
my_object.items.append('foo')
log.info("Checking object contents...")
assert strip("""
items:
- foo
""") == my_object.__mapper__.text
log.info("Writing new file contents...")
my_object.__mapper__.text = strip("""
items:
- bar
""")
log.info("Checking file contents...")
assert ['bar'] == my_object.items
the-stack_0_15525 | '''
Created on 2015/12/29
:author: hubo
'''
from __future__ import print_function
from vlcp.utils.connector import async_processor, async_to_async, Connector,\
generator_to_async
from vlcp.event.event import withIndices, Event, M_
from vlcp.config import defaultconfig
from vlcp.server.module import Module, call_api
import functools
import threading
import signal
from vlcp.event.runnable import RoutineContainer
from vlcp.event.runnable import RoutineException
import pdb
import code
from vlcp.config.config import manager
from vlcp.protocol.protocol import Protocol
from vlcp.event.connection import Client
import os
import socket
import re
from vlcp.event.core import InterruptedBySignalException
from queue import Queue, PriorityQueue
import traceback
import sys
import _thread as thread
def console_help():
print(Console._full_help)
def restore_console():
if not hasattr(Console, '_instance') or not Console._instance:
raise ValueError('Console is not loaded')
Console._instance.restore_console()
@withIndices('type')
class ConsoleEvent(Event):
canignore = False
@withIndices()
class ConsoleServiceCall(Event):
pass
@withIndices('waiter')
class ConsoleServiceCancel(Event):
pass
@withIndices('socket')
class SocketInjectDone(Event):
pass
@withIndices()
class InterruptPoller(Event):
pass
class Waiter(object):
def __init__(self):
self.event = threading.Event()
self.event.clear()
self.exception = None
self.result = None
def wait(self, timeout = None):
self.event.wait(timeout)
if self.exception:
raise self.exception
else:
return self.result
def raise_exception(self, exc):
self.exception = exc
self.event.set()
def send_result(self, val):
self.result = val
self.event.set()
@defaultconfig
class Console(Module):
'''
VLCP debugging console.
Besides the normal functions of the Python interactive console,
the following variables are provided for debugging purposes:
server, manager, container
The following functions can be used to control VLCP while it is running:
callapi, capture, sendevent, subroutine, execute, breakpoint, syscall,
resume, debug, restore_console, console_help
For details call console_help()
'''
_full_help = '''
VLCP debugging console.
Besides the normal functions of the Python interactive console,
the following variables are provided for debugging purposes:
server - current running VLCP server
manager - current configuration manager
container - internal used routine container
The following functions can be used to control VLCP while it is running:
callapi(modulename, functionname, **kwargs)
- Call module API modulename/functionname with kwargs, return result
capture(matchers, blocking = False, breakpoint = False, captureonce = False, callback = None)
- Capture events matched with specified matchers and print the event. Other parameters:
- blocking: if True, wait until the events are captured
- breakpoint: if True, suspend the event loop and wait for resume()
- captureonce: if True, remove the matchers on first capture
- callback: func(event, matcher) called on every capture if specified
sendevent(event, emerge = False)
- Send the specified event to the scheduler. If emerge = True, send it immediately without blocking
subroutine(routine)
- create a new routine in container.
execute(routine)
- execute the routine in container, and return the return value
breakpoint()
- stop running and wait for resume().
syscall(syscall_func)
- execute syscall_func in syscall context
resume()
- resume from breakpoint
debug()
- resume from breakpoint with pdb.set_trace() to enter pdb debugging. Suspend the interactive console
to work with pdb.
restore_console()
- Prepare to continue in pdb and resume the console. Type in pdb:
clear
import vlcp.service.debugging.console
vlcp.service.debugging.console.restore_console()
continue
console_help()
- show this help
'''
service = False
# Directly start VLCP in the console mode. By default, the console module creates a
# telnet server and wait for a connection. The console can be used in the telnet session.
# With startinconsole = True, the module uses stdin/stdout to create the console.
_default_startinconsole = False
# Default telnet connection URL, this is a passive connection on port 9923, so use::
#
# telnet localhost 9923
#
# to connect to the console.
_default_telnetconsole = 'ptcp://localhost:9923/'
# If SSL is configured (with pssl://...), specify the private key file
_default_key = None
# If SSL is configured, specify the certificate file
_default_certificate = None
# If SSL is configured, specify the CA file
_default_ca_certs = None
async def _service_routine(self):
self.apiroutine.subroutine(self._intercept_main())
csc = ConsoleServiceCall.createMatcher()
while True:
ev = await csc
self.apiroutine.subroutine(ev.routine, True)
async def _service_call_routine(self, waiter, call):
try:
r = await self.apiroutine.with_exception(call, ConsoleServiceCancel.createMatcher(waiter))
except RoutineException:
pass
except Exception as exc:
waiter.raise_exception(exc)
else:
waiter.send_result(r)
async def _intercept_main(self):
cr = self.apiroutine.currentroutine
self.sendEventQueue = Queue()
_console_connect_event = threading.Event()
_console_connect_event.clear()
await self.apiroutine.wait_for_send(ConsoleEvent('initproxy'))
if not self.startinconsole:
p = Protocol()
p.persist = True
p.createqueue = False
async def init(connection):
sock = connection.socket
self.telnet_socket = sock
self.scheduler.unregisterPolling(connection.socket)
connection.socket = None
connection.connected = False
_console_connect_event.set()
await SocketInjectDone.createMatcher(sock)
p.init = init
p.reconnect_init = init
Client(self.telnetconsole, p, self.scheduler, self.key, self.certificate, self.ca_certs).start()
def syscall_threaded_main(scheduler, processor):
# Detach self
scheduler.unregisterall(cr)
scheduler.syscallfunc = None
scheduler.syscallrunnable = None
self._threaded_main_quit = False
def threaded_main():
try:
scheduler.main(False, False)
finally:
self._threaded_main_quit = True
_console_connect_event.set()
t = threading.Thread(target=threaded_main)
t.daemon = True
t.start()
try:
if self.startinconsole:
self._interactive()
else:
while not self._threaded_main_quit:
try:
while not _console_connect_event.is_set():
# There is a bug in Python 2.x that wait without timeout cannot be
# interrupted by signal
_console_connect_event.wait(3600)
if self._threaded_main_quit:
break
except InterruptedBySignalException:
# This signal should interrupt the poller, but poller is not in the main thread
# Send an event through the proxy will do the trick
self.sendEventQueue.put((InterruptPoller(),))
continue
pstdin_r, pstdin_w = os.pipe()
pstdout_r, pstdout_w = os.pipe()
orig_stdin = sys.stdin
orig_stdout = sys.stdout
orig_stderr = sys.stderr
try:
pstdin = os.fdopen(pstdin_r, 'rU')
pstdout = os.fdopen(pstdout_w, 'w')
sys.stdin = pstdin
sys.stdout = pstdout
sys.stderr = pstdout
sock = self.telnet_socket
sock.setblocking(True)
self.telnet_socket = None
_console_connect_event.clear()
t = threading.Thread(target=self._telnet_server, args=(pstdin_w, pstdout_r, sock, orig_stdout))
t.daemon = True
t.start()
try:
self._interactive()
except SystemExit:
pass
if not t.is_alive():
break
self.sendEventQueue.put((SocketInjectDone(sock),))
finally:
try:
sock.shutdown(socket.SHUT_RDWR)
except Exception:
pass
try:
pstdin.close()
except Exception:
pass
try:
pstdout.close()
except Exception:
pass
sys.stdin = orig_stdin
sys.stdout = orig_stdout
sys.stderr = orig_stderr
except SystemExit:
pass
finally:
async def _quit():
scheduler.quit()
self.sendEventQueue.put((ConsoleServiceCall(routine=_quit()),))
self.sendEventQueue.put(None)
if self.startinconsole:
print('Wait for scheduler end, this may take some time...')
t.join()
# Cannot inject the event loop from yield_()
await self.apiroutine.do_events()
await self.apiroutine.syscall(syscall_threaded_main, True)
def _telnet_server_writer(self, queue, sock):
lastseq = -1
while True:
t, seq, val = queue.get()
if t < 0:
break
if t != 2 or seq >= lastseq:
try:
sock.sendall(val)
except Exception:
break
if t == 0:
lastseq = seq
def _telnet_server_writer2(self, pstdout_r, queue, lock, orig_stdout):
while True:
data = os.read(pstdout_r, 1024)
if not data:  # os.read() returns b'' at EOF
os.close(pstdout_r)
break
data, _ = re.subn(br'\r?\n', b'\r\n', data)
lock.acquire()
try:
self._telnet_seq += 1
seq = self._telnet_seq
finally:
lock.release()
queue.put((2, seq, data))
def _telnet_server(self, pstdin_w, pstdout_r, sock, orig_stdout):
queue = PriorityQueue()
inputbuffer = b''
self._telnet_seq = 0
try:
t = threading.Thread(target=self._telnet_server_writer, args=(queue, sock))
t.daemon = True
t.start()
lock = threading.Lock()
def writeall(data):
start = 0
while start < len(data):
size = os.write(pstdin_w, data[start:])
start += size
def sendcontrol(t, data):
lock.acquire()
try:
self._telnet_seq += 1
seq = self._telnet_seq
finally:
lock.release()
queue.put((t, seq, data))
t2 = threading.Thread(target=self._telnet_server_writer2, args=(pstdout_r, queue, lock, orig_stdout))
t2.daemon = True
t2.start()
escaping = False
option = None
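# Minimal Telnet (RFC 854) negotiation: 0xff is IAC, 0xfb-0xfe are WILL/WONT/DO/DONT.
# Only option 0x06 (Timing Mark) is acknowledged, every other option is refused;
# 0xf3 (Break) and 0xf4 (Interrupt Process) trigger a KeyboardInterrupt in the main thread.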
while True:
newdata = sock.recv(1024)
if newdata == b'':
break
for i in range(0, len(newdata)):
c = newdata[i:i+1]
if escaping:
if option == b'\xfd' and c == b'\x06':
sendcontrol(1, b'\xff\xfb\x06')
option = None
escaping = False
elif option == b'\xfd' or option == b'\xfe':
sendcontrol(1, b'\xff\xfc' + c)
option = None
escaping = False
elif option == b'\xfb' or option == b'\xfc':
sendcontrol(1, b'\xff\xfe' + c)
option = None
escaping = False
elif c in (b'\xfb', b'\xfc', b'\xfd', b'\xfe'):
option = c
else:
option = None
if c == b'\xf3' or c == b'\xf4':
thread.interrupt_main()
escaping = False
else:
if c == b'\x03':
thread.interrupt_main()
elif c == b'\x08':
inputbuffer = inputbuffer[:-1]
elif c == b'\x00':
inputbuffer += b'\n'
writeall(inputbuffer)
inputbuffer = b''
elif c == b'\r' or c == b'\n':
inputbuffer += c
writeall(inputbuffer)
inputbuffer = b''
elif c == b'\xff':
escaping = True
else:
inputbuffer += c
except OSError:
pass
except IOError:
pass
finally:
try:
os.close(pstdin_w)
except Exception:
pass
queue.put((-1, -1, -1))
def _interactive(self):
lsignal = signal.signal(signal.SIGINT, signal.default_int_handler)
try:
_breakpoint_event = threading.Event()
_current_thread = threading.current_thread().ident
_enter_pdb = [False]
def _async_run(call):
self.sendEventQueue.put((ConsoleServiceCall(routine = call),))
def _async(func):
@functools.wraps(func)
def f(*args, **kwargs):
_async_run(func(*args, **kwargs))
return f
def _service_call_customized(factory):
waiter = Waiter()
self.sendEventQueue.put((ConsoleServiceCall(routine=factory(waiter)),))
try:
return waiter.wait()
except:
self.sendEventQueue.put((ConsoleServiceCancel(waiter),))
raise
def execute(call):
return _service_call_customized(lambda waiter: self._service_call_routine(waiter, call))
def _service(func):
@functools.wraps(func)
def f(*args, **kwargs):
return execute(func(*args, **kwargs))
return f
@_service
def callapi(modulename, functionname, **kwargs):
return call_api(self.apiroutine, modulename, functionname, kwargs)
@_service
async def sendevent(event, emerge = False):
if emerge:
self.apiroutine.scheduler.emergesend(event)
else:
await self.apiroutine.wait_for_send(event)
@_service
async def subroutine(routine):
return self.apiroutine.subroutine(routine)
@_service
async def syscall(syscall_func):
return self.apiroutine.syscall(syscall_func)
def breakpoint():
in_thread = threading.current_thread().ident
if in_thread == _current_thread:
_breakpoint()
else:
print('Enter VLCP debugging breakpoint:')
traceback.print_stack()
print('Call resume() to continue the event loop, or debug() to enter pdb')
_breakpoint_event.clear()
_breakpoint_event.wait()
if _enter_pdb[0]:
pdb.set_trace()
else:
print('Resume from breakpoint.')
@_async
async def _breakpoint():
breakpoint()
def resume():
_enter_pdb[0] = False
_breakpoint_event.set()
@_async
async def restore_console():
self._restore_console_event.set()
self.restore_console = restore_console
def debug():
_enter_pdb[0] = True
self._restore_console_event.clear()
_breakpoint_event.set()
# Switch to event loop thread, suspend the main thread, wait for restore_console
self._restore_console_event.wait()
_capture_breakpoint = breakpoint
def capture(matchers, blocking = False, breakpoint = False, captureonce = False, callback = None):
async def _capture_service(waiter):
if blocking:
csm = ConsoleServiceCancel.createMatcher(waiter)
else:
waiter.send_result(self.apiroutine.currentroutine)
firsttime = True
while firsttime or not captureonce:
if blocking:
ev, m = await M_(*(tuple(matchers) + (csm,)))
else:
ev, m = await M_(*matchers)
if blocking and m is csm:
# Cancelled
return
print('Event Captured: Capture %r with %r' % (ev, m))
if firsttime and blocking:
waiter.send_result((ev, m, self.apiroutine.currentroutine))
firsttime = False
if callback:
try:
callback(ev, m)
except Exception:
print('Exception while running callback:')
traceback.print_exc()
if breakpoint:
_capture_breakpoint()
return _service_call_customized(_capture_service)
code.interact(self.__doc__ + '\n' + 'Python ' + str(sys.version) + ' on ' + str(sys.platform),
None,
{'server':self.server,'manager':manager, 'container':self.apiroutine,
'callapi':callapi, 'capture':capture, 'sendevent':sendevent,
'subroutine':subroutine, 'breakpoint':breakpoint, 'syscall':syscall,
'resume':resume, 'debug':debug, 'restore_console':restore_console,
'console_help':console_help,'execute':execute})
finally:
signal.signal(signal.SIGINT, lsignal)
def __init__(self, server):
'''
Constructor
'''
Module.__init__(self, server)
self._ce_matcher = ConsoleEvent.createMatcher()
self.apiroutine = RoutineContainer(self.scheduler)
self.apiroutine.main = self._service_routine
self._restore_console_event = threading.Event()
@generator_to_async(True, False)
def proxy(event, matcher):
while True:
events = self.sendEventQueue.get()
if events is None:
break
yield events
@async_to_async(True, False)
@async_processor
def processor(event, matcher, queueout):
if event.type == 'initproxy':
proxy(event, matcher, queueout)
self.connector = Connector(processor, (self._ce_matcher,), self.scheduler, False)
self.routines.append(self.apiroutine)
self.routines.append(self.connector)
if __name__ == '__main__':
from vlcp.server import main
manager['module.console.startinconsole'] = True
modules = list(sys.argv[1:]) + ['__main__.Console']
main(None, modules)
the-stack_0_15526 | import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import os
import pickle
import pdb
def find_and_print_lowest_value(x,y, x_key, y_key):
idx = np.argmin(y)
x_min = x[idx]
y_min = y[idx]
print('The lowest value of %s is %.8f at %s %.2f' % (y_key, y_min, x_key, x_min), flush=True)
def plot_result(result, plot_key, save_dir, plot_values=None, print_lowest_value=False):
# result is a dictionary of lists
# result[plot_key] is the horizontal axis
# result[key] is vertical axis
# we plot all other keys except plot_key against plot_key in result if plot_values is None
# plot_values could also be a list of keys
# we only plot those keys specified in plot_values against plot_key
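# Example (illustrative): result = {'iter': [1000, 2000], 'loss': [0.52, 0.31]}
#   plot_result(result, plot_key='iter', save_dir='./figures', print_lowest_value=True)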
# print('\n Comparing current ckpt with previous saved ckpts', flush=True)
os.makedirs(save_dir, exist_ok=True)
x = np.array(result[plot_key])
order = np.argsort(x)
x = x[order]
if len(result[plot_key]) > 1:
for key in result.keys():
plot = not key == plot_key
if plot_values is not None:
plot = plot and key in plot_values
if plot:
plt.figure()
if isinstance(result[key], dict):
for sub_key in result[key].keys():
y = np.array(result[key][sub_key])
y = y[order]
plt.plot(x, y, marker = '.', label=sub_key)
if print_lowest_value:
find_and_print_lowest_value(x, y, plot_key, key+'-'+sub_key)
plt.xlabel(plot_key)
plt.legend()
else:
y = np.array(result[key])
y = y[order]
plt.plot(x, y, marker = '.')
plt.xlabel(plot_key)
plt.ylabel(key)
if print_lowest_value:
find_and_print_lowest_value(x, y, plot_key, key)
save_file = os.path.join(save_dir, key+'.png')
plt.savefig(save_file)
plt.close()
print('Saved the figure for %s to the file %s' % (key, save_file), flush=True)
else:
print('Skipping plot because there is only 1 value for the plot key', flush=True)
return 0
if __name__ == '__main__':
file_name = '../exp_shapenet/T1000_betaT0.02_shape_generation_noise_reduce_factor_5_corrected/eval_results/total_eval_result.pkl'
handle = open(file_name, 'rb')
result = pickle.load(handle)
handle.close()
plot_key = 'iter'
save_dir = './'
plot_result(result, plot_key, save_dir)
# pdb.set_trace()
the-stack_0_15528 | """
Regularizer class for that also supports GPU code
Michael Chen [email protected]
David Ren [email protected]
March 04, 2018
"""
import arrayfire as af
import numpy as np
from opticaltomography import settings
np_complex_datatype = settings.np_complex_datatype
np_float_datatype = settings.np_float_datatype
af_float_datatype = settings.af_float_datatype
af_complex_datatype = settings.af_complex_datatype
class Regularizer:
"""
Highest-level Regularizer class that is responsible for parsing user arguments to create proximal operators
All proximal operators operate on complex variables (real & imaginary part separately)
Pure Real:
pure_real: boolean, whether or not to enforce object to be purely real
Pure imaginary:
pure_imag: boolean, whether or not to enforce object to be purely imaginary
Positivity:
positivity_real(positivity_imag): boolean, whether or not to enforce positivity for real(imaginary) part
Negativity:
negativity_real(negativity_imag): boolean, whether or not to enforce negativity for real(imaginary) part
LASSO (L1 regularizer):
lasso: boolean, whether or not to use LASSO proximal operator
lasso_parameter: threshold for LASSO
Total variation (3D only):
total_variation: boolean, whether or not to use total variation regularization
total_variation_gpu: boolean, whether or not to use GPU implementation
total_variation_parameter: scalar, regularization parameter (lambda)
total_variation_maxitr: integer, maximum number of iterations for the total variation proximal solve
"""
def __init__(self, configs = None, verbose = True, **kwargs):
#Given all parameters, construct all proximal operators
self.prox_list = []
reg_params = kwargs
if configs != None:
reg_params = self._parseConfigs(configs)
#Purely real
if reg_params.get("pure_real", False):
self.prox_list.append(PureReal())
#Purely imaginary
if reg_params.get("pure_imag", False):
self.prox_list.append(Pureimag())
#Total Variation
if reg_params.get("total_variation", False):
if reg_params.get("total_variation_gpu", False):
self.prox_list.append(TotalVariationGPU(**reg_params))
else:
self.prox_list.append(TotalVariationCPU(**reg_params))
#L1 Regularizer (LASSO)
elif reg_params.get("lasso", False):
self.prox_list.append(Lasso(reg_params.get("lasso_parameter", 1.0)))
#Others
else:
#Positivity
positivity_real = reg_params.get("positivity_real", False)
positivity_imag = reg_params.get("positivity_imag", False)
if positivity_real or positivity_imag:
self.prox_list.append(Positivity(positivity_real, positivity_imag))
#Negativity
negativity_real = reg_params.get("negativity_real", False)
negativity_imag = reg_params.get("negativity_imag", False)
if negativity_real or negativity_imag:
self.prox_list.append(Negativity(negativity_real, negativity_imag))
if verbose:
for prox_op in self.prox_list:
print("Regularizer -", prox_op.proximal_name)
def _parseConfigs(self, configs):
params = {}
params["pure_real"] = configs.pure_real
params["pure_imag"] = configs.pure_imag
#Total variation
params["total_variation"] = configs.total_variation
params["total_variation_gpu"] = configs.total_variation_gpu
params["total_variation_maxitr"] = configs.max_iter_tv
params["total_variation_order"] = configs.order_tv
params["total_variation_parameter"] = configs.reg_tv
#LASSO
params["lasso"] = configs.lasso
params["lasso_parameter"] = configs.reg_lasso
#Positivity/Negativity
if configs.positivity_real[0]:
if configs.positivity_real[1] == "larger":
params["positivity_real"] = True
else:
params["negativity_real"] = True
if configs.positivity_imag[0]:
if configs.positivity_imag[1] == "larger":
params["positivity_imag"] = True
else:
params["negativity_imag"] = True
return params
def computeCost(self, x):
cost = 0.0
for prox_op in self.prox_list:
cost_temp = prox_op.computeCost(x)
if cost_temp != None:
cost += cost_temp
return cost
def applyRegularizer(self, x):
for prox_op in self.prox_list:
x = prox_op.computeProx(x)
return x
class ProximalOperator():
def __init__(self, proximal_name):
self.proximal_name = proximal_name
def computeCost(self):
pass
def computeProx(self):
pass
def setParameter(self):
pass
def _boundRealValue(self, x, value = 0, flag_project = True):
"""If flag is true, only values that are greater than 'value' are preserved"""
if flag_project:
x[x < value] = 0
return x
class TotalVariationGPU(ProximalOperator):
def __init__(self, **kwargs):
proximal_name = "Total Variation"
parameter = kwargs.get("total_variation_parameter", 1.0)
maxitr = kwargs.get("total_variation_maxitr", 15)
self.order = kwargs.get("total_variation_order", 1)
self.pure_real = kwargs.get("pure_real", False)
self.pure_imag = kwargs.get("pure_imag", False)
#real part
if kwargs.get("positivity_real", False):
self.realProjector = lambda x: self._boundRealValue(x, 0, True)
proximal_name = "%s+%s" % (proximal_name, "positivity_real")
elif kwargs.get("negativity_real", False):
self.realProjector = lambda x: -1.0 * self._boundRealValue(-1.0 * x, 0, True)
proximal_name = "%s+%s" % (proximal_name, "negativity_real")
else:
self.realProjector = lambda x: x
#imaginary part
if kwargs.get("positivity_imag", False):
self.imagProjector = lambda x: self._boundRealValue(x, 0, True)
proximal_name = "%s+%s" % (proximal_name, "positivity_imag")
elif kwargs.get("negativity_imag", False):
self.imagProjector = lambda x: -1.0 * self._boundRealValue(-1.0 * x, 0, True)
proximal_name = "%s+%s" % (proximal_name, "negativity_imag")
else:
self.imagProjector = lambda x: x
self.setParameter(parameter, maxitr)
super().__init__(proximal_name)
def setParameter(self, parameter, maxitr):
self.parameter = parameter
self.maxitr = maxitr
def computeCost(self, x):
return None
def _computeTVNorm(self, x):
x_norm = x**2
x_norm = af.sum(x_norm, dim = 3)**0.5
x_norm[x_norm<1.0] = 1.0
return x_norm
def computeProx(self, x):
if self.pure_real:
x = self._computeProxReal(af.real(x), self.realProjector) + 1.0j * 0.0
elif self.pure_imag:
x = 1.0j *self._computeProxReal(af.imag(x), self.imagProjector)
else:
x = self._computeProxReal(af.real(x), self.realProjector) \
+ 1.0j * self._computeProxReal(af.imag(x), self.imagProjector)
return x
def _filterD(self, x, axis):
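# Forward finite difference of order self.order (1-3) along `axis`, built from circular
# shifts (af.shift wraps around); _filterDT below applies the corresponding transpose.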
assert axis<3, "This function only supports matrix up to 3 dimension!"
if self.order == 1:
if axis == 0:
Dx = x - af.shift(x, 1, 0, 0)
elif axis == 1:
Dx = x - af.shift(x, 0, 1, 0)
else:
Dx = x - af.shift(x, 0, 0, 1)
elif self.order == 2:
if axis == 0:
Dx = x - 2*af.shift(x, 1, 0, 0) + af.shift(x, 2, 0, 0)
elif axis == 1:
Dx = x - 2*af.shift(x, 0, 1, 0) + af.shift(x, 0, 2, 0)
else:
Dx = x - 2*af.shift(x, 0, 0, 1) + af.shift(x, 0, 0, 2)
elif self.order == 3:
if axis == 0:
Dx = x - 3*af.shift(x, 1, 0, 0) + 3*af.shift(x, 2, 0, 0) - af.shift(x, 3, 0, 0)
elif axis == 1:
Dx = x - 3*af.shift(x, 0, 1, 0) + 3*af.shift(x, 0, 2, 0) - af.shift(x, 0, 3, 0)
else:
Dx = x - 3*af.shift(x, 0, 0, 1) + 3*af.shift(x, 0, 0, 2) - af.shift(x, 0, 0, 3)
else:
raise NotImplementedError("filter orders larger than 1 are not implemented!")
return Dx
def _filterDT(self, x):
if self.order == 1:
DTx = x[:, :, :, 0] - af.shift(x[ :, :, :, 0], -1, 0, 0) + \
x[:, :, :, 1] - af.shift(x[ :, :, :, 1], 0, -1, 0) + \
x[:, :, :, 2] - af.shift(x[ :, :, :, 2], 0, 0, -1)
elif self.order == 2:
DTx = x[:, :, :, 0] - 2*af.shift(x[ :, :, :, 0], -1, 0, 0) + af.shift(x[ :, :, :, 0], -2, 0, 0) + \
x[:, :, :, 1] - 2*af.shift(x[ :, :, :, 1], 0, -1, 0) + af.shift(x[ :, :, :, 1], 0, -2, 0) + \
x[:, :, :, 2] - 2*af.shift(x[ :, :, :, 2], 0, 0, -1) + af.shift(x[ :, :, :, 2], 0, 0, -2)
elif self.order == 3:
DTx = x[:, :, :, 0] - 3*af.shift(x[ :, :, :, 0], -1, 0, 0) + 3*af.shift(x[ :, :, :, 0], -2, 0, 0) - af.shift(x[ :, :, :, 0], -3, 0, 0) + \
x[:, :, :, 1] - 3*af.shift(x[ :, :, :, 1], 0, -1, 0) + 3*af.shift(x[ :, :, :, 1], 0, -2, 0) - af.shift(x[ :, :, :, 1], 0, -3, 0) + \
x[:, :, :, 2] - 3*af.shift(x[ :, :, :, 2], 0, 0, -1) + 3*af.shift(x[ :, :, :, 2], 0, 0, -2) - af.shift(x[ :, :, :, 2], 0, 0, -3)
else:
raise NotImplementedError("filter orders larger than 1 are not implemented!")
return DTx
def _computeProxReal(self, x, projector):
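# Accelerated (FISTA-style) projected gradient on the dual TV problem: the dual variable
# u is updated with step 1/(12**order * parameter), projected voxel-wise onto the unit
# ball via _computeTVNorm, and extrapolated with the t_k momentum sequence; the primal
# estimate x - parameter * D^T(u) is re-projected by `projector` before being returned.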
t_k = 1.0
u_k = af.constant(0.0, x.shape[0], x.shape[1], x.shape[2], 3, dtype = af_float_datatype)
u_k1 = af.constant(0.0, x.shape[0], x.shape[1], x.shape[2], 3, dtype = af_float_datatype)
u_hat = af.constant(0.0, x.shape[0], x.shape[1], x.shape[2], 3, dtype = af_float_datatype)
grad_u_hat = af.constant(0.0, x.shape[0], x.shape[1], x.shape[2], dtype = af_float_datatype)
def _gradUpdate():
grad_u_hat = x - self.parameter * self._filterDT(u_hat)
return grad_u_hat
for iteration in range(self.maxitr):
if iteration > 0:
grad_u_hat = _gradUpdate()
else:
grad_u_hat[:, :, :] = x
grad_u_hat = projector(grad_u_hat)
u_k1[ :, :, :, 0] = u_hat[ :, :, :, 0] + (1.0/(12.0)**self.order/self.parameter) * self._filterD(grad_u_hat, axis=0)
u_k1[ :, :, :, 1] = u_hat[ :, :, :, 1] + (1.0/(12.0)**self.order/self.parameter) * self._filterD(grad_u_hat, axis=1)
u_k1[ :, :, :, 2] = u_hat[ :, :, :, 2] + (1.0/(12.0)**self.order/self.parameter) * self._filterD(grad_u_hat, axis=2)
u_k1_norm = self._computeTVNorm(u_k1)
u_k1[ :, :, :, 0] /= u_k1_norm
u_k1[ :, :, :, 1] /= u_k1_norm
u_k1[ :, :, :, 2] /= u_k1_norm
t_k1 = 0.5 * (1.0 + (1.0 + 4.0*t_k**2)**0.5)
beta = (t_k - 1.0)/t_k1
u_hat = (1.0 + beta)*u_k1 - beta*u_k
if iteration < self.maxitr - 1:
u_k = u_k1.copy()
return projector(_gradUpdate())
class TotalVariationCPU(TotalVariationGPU):
def _computeTVNorm(self, x):
u_k1_norm = af.to_array(x)
u_k1_norm[:, :, :, :] *= u_k1_norm
u_k1_norm = af.sum(u_k1_norm, dim = 3)**0.5
u_k1_norm[u_k1_norm<1.0] = 1.0
return np.array(u_k1_norm)
def computeProx(self, x):
if self.pure_real:
x = self._computeProxReal(np.real(x), self.realProjector) + 1.0j * 0.0
elif self.pure_imag:
x = 1.0j *self._computeProxReal(np.imag(x), self.imagProjector)
else:
x = self._computeProxReal(np.real(x), self.realProjector) \
+ 1.0j * self._computeProxReal(np.imag(x), self.imagProjector)
return af.to_array(x)
def _computeProxReal(self, x, projector):
t_k = 1.0
u_k = np.zeros(x.shape + (3,), dtype = np_float_datatype);
u_k1 = u_k.copy()
u_hat = u_k.copy()
def _gradUpdate():
u_hat_af = af.to_array(u_hat)
DTu_hat = u_hat_af[:, :, :, 0] - af.shift(u_hat_af[ :, :, :, 0], -1, 0, 0) + \
u_hat_af[:, :, :, 1] - af.shift(u_hat_af[ :, :, :, 1], 0, -1, 0) + \
u_hat_af[:, :, :, 2] - af.shift(u_hat_af[ :, :, :, 2], 0, 0, -1)
grad_u_hat = x - np.array(self.parameter * DTu_hat)
return grad_u_hat
for iteration in range(self.maxitr):
if iteration > 0:
grad_u_hat = _gradUpdate()
else:
grad_u_hat = x.copy()
grad_u_hat = projector(grad_u_hat)
u_k1[ :, :, :, 0] = u_hat[ :, :, :, 0] + (1.0/12.0/self.parameter) * (grad_u_hat-np.roll(grad_u_hat, 1, axis = 0))
u_k1[ :, :, :, 1] = u_hat[ :, :, :, 1] + (1.0/12.0/self.parameter) * (grad_u_hat-np.roll(grad_u_hat, 1, axis = 1))
u_k1[ :, :, :, 2] = u_hat[ :, :, :, 2] + (1.0/12.0/self.parameter) * (grad_u_hat-np.roll(grad_u_hat, 1, axis = 2))
u_k1_norm = self._computeTVNorm(u_k1)
u_k1[ :, :, :] /= u_k1_norm[:, :, :, np.newaxis]
t_k1 = 0.5 * (1.0 + (1.0 + 4.0*t_k**2)**0.5)
beta = (t_k - 1.0)/t_k1
u_hat = (1.0 + beta)*u_k1 - beta*u_k
if iteration < self.maxitr - 1:
u_k = u_k1.copy()
return projector(_gradUpdate())
class Positivity(ProximalOperator):
"""Enforce positivity constraint on a complex variable's real & imaginary part."""
def __init__(self, positivity_real, positivity_imag, proximal_name = "Positivity"):
super().__init__(proximal_name)
self.real = positivity_real
self.imag = positivity_imag
def computeCost(self, x):
return None
def computeProx(self, x):
if type(x).__module__ == "arrayfire.array":
x = self._boundRealValue(af.real(x), 0, self.real) +\
1.0j * self._boundRealValue(af.imag(x), 0, self.imag)
else:
x = self._boundRealValue(np.real(x), 0, self.real) +\
1.0j * self._boundRealValue(np.imag(x), 0, self.imag)
return x
class Negativity(Positivity):
"""Enforce positivity constraint on a complex variable's real & imaginary part."""
def __init__(self, negativity_real, negativity_imag):
super().__init__(negativity_real, negativity_imag, "Negativity")
def computeProx(self, x):
return (-1.) * super().computeProx((-1.) * x)
class PureReal(ProximalOperator):
"""Enforce real constraint on a complex, imaginary part will be cleared"""
def __init__(self):
super().__init__("Pure real")
def computeCost(self, x):
return None
def computeProx(self, x):
if type(x).__module__ == "arrayfire.array":
x = af.real(x) + 1j*0.0
else:
x = np.real(x) + 1j*0.0
return x
class Pureimag(ProximalOperator):
"""Enforce imaginary constraint on a complex, real part will be cleared"""
def __init__(self):
super().__init__("Pure imaginary")
def computeCost(self, x):
return None
def computeProx(self, x):
if type(x).__module__ == "arrayfire.array":
x = 1j*af.imag(x)
else:
x = 1j*x.imag
return x
class Lasso(ProximalOperator):
"""||x||_1 regularizer, soft thresholding with certain parameter"""
def __init__(self, parameter):
super().__init__("LASSO")
self.setParameter(parameter)
def _softThreshold(self, x):
if type(x).__module__ == "arrayfire.array":
#POTENTIAL BUG: af.sign implementation does not agree with documentation
x = (af.sign(x)-0.5)*(-2.0) * (af.abs(x) - self.parameter) * (af.abs(x) > self.parameter)
else:
x = np.sign(x) * (np.abs(x) - self.parameter) * (np.abs(x) > self.parameter)
return x
def setParameter(self, parameter):
self.parameter = parameter
def computeCost(self, x):
return af.norm(af.moddims(x, np.prod(x.shape)), norm_type = af.NORM.VECTOR_1)
def computeProx(self, x):
if type(x).__module__ == "arrayfire.array":
x = self._softThreshold(af.real(x)) + 1.0j * self._softThreshold(af.imag(x))
else:
x = self._softThreshold(np.real(x)) + 1.0j * self._softThreshold(np.imag(x))
return x
#TODO: implement Tikhonov
class Tikhonov(ProximalOperator):
def __init__(self):
pass
def setParameter(self, parameter):
self.parameter = parameter
def computeCost(self, x):
pass
def computeProx(self, x):
return x
#TODO: implement pure amplitude constraint
class PureAmplitude(ProximalOperator):
def computeCost(self, x):
return None
def computeProx(self, x):
return x
#TODO: implement pure phase constraint
class PurePhase(ProximalOperator):
def computeCost(self, x):
return None
def computeProx(self, x):
return x
the-stack_0_15529 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Element handle module."""
import copy
import logging
import math
import os.path
from typing import Any, Dict, List, Optional, TYPE_CHECKING
from pyppeteer.connection import CDPSession
from pyppeteer.execution_context import ExecutionContext, JSHandle
from pyppeteer.errors import ElementHandleError, NetworkError
from pyppeteer.helper import debugError
from pyppeteer.util import merge_dict
if TYPE_CHECKING:
from pyppeteer.frame_manager import Frame, FrameManager # noqa: F401
logger = logging.getLogger(__name__)
class ElementHandle(JSHandle):
"""ElementHandle class.
This class represents an in-page DOM element. ElementHandle can be created
by the :meth:`pyppeteer.page.Page.querySelector` method.
ElementHandle prevents DOM element from garbage collection unless the
handle is disposed. ElementHandles are automatically disposed when their
origin frame gets navigated.
ElementHandle instances can be used as arguments in
:meth:`pyppeteer.page.Page.querySelectorEval` and
:meth:`pyppeteer.page.Page.evaluate` methods.
"""
def __init__(self, context: ExecutionContext, client: CDPSession,
remoteObject: dict, page: Any,
frameManager: 'FrameManager') -> None:
super().__init__(context, client, remoteObject)
self._client = client
self._remoteObject = remoteObject
self._page = page
self._frameManager = frameManager
self._disposed = False
def asElement(self) -> 'ElementHandle':
"""Return this ElementHandle."""
return self
async def contentFrame(self) -> Optional['Frame']:
"""Return the content frame for the element handle.
Return ``None`` if this handle is not referencing iframe.
"""
nodeInfo = await self._client.send('DOM.describeNode', {
'objectId': self._remoteObject.get('objectId'),
})
node_obj = nodeInfo.get('node', {})
if not isinstance(node_obj.get('frameId'), str):
return None
return self._frameManager.frame(node_obj['frameId'])
async def _scrollIntoViewIfNeeded(self) -> None:
error = await self.executionContext.evaluate('''
async element => {
if (!element.isConnected)
return 'Node is detached from document';
if (element.nodeType !== Node.ELEMENT_NODE)
return 'Node is not of type HTMLElement';
const visibleRatio = await new Promise(resolve => {
const observer = new IntersectionObserver(entries => {
resolve(entries[0].intersectionRatio);
observer.disconnect();
});
observer.observe(element);
});
if (visibleRatio !== 1.0)
element.scrollIntoView({
block: 'center',
inline: 'center',
behavior: 'instant',
});
return false;
}''', self)
if error:
raise ElementHandleError(error)
async def _clickablePoint(self) -> Dict[str, float]: # noqa: C901
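# Ask the browser for the element's content quads, drop quads with (near) zero area,
# and return the centroid (average of the four corners) of the first remaining quad.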
result = None
try:
result = await self._client.send('DOM.getContentQuads', {
'objectId': self._remoteObject.get('objectId'),
})
except Exception as e:
debugError(logger, e)
if not result or not result.get('quads'):
raise ElementHandleError(
'Node is either not visible or not an HTMLElement')
quads = []
for _quad in result.get('quads'):
_q = self._fromProtocolQuad(_quad)
if _computeQuadArea(_q) > 1:
quads.append(_q)
if not quads:
raise ElementHandleError(
'Node is either not visible or not an HTMLElement')
quad = quads[0]
x = 0
y = 0
for point in quad:
x += point['x']
y += point['y']
return {'x': x / 4, 'y': y / 4}
async def _getBoxModel(self) -> Optional[Dict]:
try:
result: Optional[Dict] = await self._client.send(
'DOM.getBoxModel',
{'objectId': self._remoteObject.get('objectId')},
)
except NetworkError as e:
debugError(logger, e)
result = None
return result
def _fromProtocolQuad(self, quad: List[int]) -> List[Dict[str, int]]:
return [
{'x': quad[0], 'y': quad[1]},
{'x': quad[2], 'y': quad[3]},
{'x': quad[4], 'y': quad[5]},
{'x': quad[6], 'y': quad[7]},
]
async def hover(self) -> None:
"""Move mouse over to center of this element.
If needed, this method scrolls element into view. If this element is
detached from DOM tree, the method raises an ``ElementHandleError``.
"""
await self._scrollIntoViewIfNeeded()
obj = await self._clickablePoint()
x = obj.get('x', 0)
y = obj.get('y', 0)
await self._page.mouse.move(x, y)
async def click(self, options: dict = None, **kwargs: Any) -> None:
"""Click the center of this element.
If needed, this method scrolls element into view. If the element is
detached from DOM, the method raises ``ElementHandleError``.
``options`` can contain the following fields:
* ``button`` (str): ``left``, ``right``, or ``middle``, defaults to
``left``.
* ``clickCount`` (int): Defaults to 1.
* ``delay`` (int|float): Time to wait between ``mousedown`` and
``mouseup`` in milliseconds. Defaults to 0.
"""
options = merge_dict(options, kwargs)
await self._scrollIntoViewIfNeeded()
obj = await self._clickablePoint()
x = obj.get('x', 0)
y = obj.get('y', 0)
await self._page.mouse.click(x, y, options)
async def uploadFile(self, *filePaths: str) -> dict:
"""Upload files."""
files = [os.path.abspath(p) for p in filePaths]
objectId = self._remoteObject.get('objectId')
return await self._client.send(
'DOM.setFileInputFiles',
{'objectId': objectId, 'files': files}
)
async def tap(self) -> None:
"""Tap the center of this element.
If needed, this method scrolls element into view. If the element is
detached from DOM, the method raises ``ElementHandleError``.
"""
await self._scrollIntoViewIfNeeded()
center = await self._clickablePoint()
x = center.get('x', 0)
y = center.get('y', 0)
await self._page.touchscreen.tap(x, y)
async def focus(self) -> None:
"""Focus on this element."""
await self.executionContext.evaluate(
'element => element.focus()', self)
async def type(self, text: str, options: Dict = None, **kwargs: Any
) -> None:
"""Focus the element and then type text.
Details see :meth:`pyppeteer.input.Keyboard.type` method.
"""
options = merge_dict(options, kwargs)
await self.focus()
await self._page.keyboard.type(text, options)
async def press(self, key: str, options: Dict = None, **kwargs: Any
) -> None:
"""Press ``key`` onto the element.
This method focuses the element, and then uses
:meth:`pyppeteer.input.keyboard.down` and
:meth:`pyppeteer.input.keyboard.up`.
:arg str key: Name of key to press, such as ``ArrowLeft``.
This method accepts the following options:
* ``text`` (str): If specified, generates an input event with this
text.
* ``delay`` (int|float): Time to wait between ``keydown`` and
``keyup``. Defaults to 0.
"""
options = merge_dict(options, kwargs)
await self.focus()
await self._page.keyboard.press(key, options)
async def boundingBox(self) -> Optional[Dict[str, float]]:
"""Return bounding box of this element.
If the element is not visible, return ``None``.
This method returns dictionary of bounding box, which contains:
* ``x`` (int): The X coordinate of the element in pixels.
* ``y`` (int): The Y coordinate of the element in pixels.
* ``width`` (int): The width of the element in pixels.
* ``height`` (int): The height of the element in pixels.
"""
result = await self._getBoxModel()
if not result:
return None
quad = result['model']['border']
x = min(quad[0], quad[2], quad[4], quad[6])
y = min(quad[1], quad[3], quad[5], quad[7])
width = max(quad[0], quad[2], quad[4], quad[6]) - x
height = max(quad[1], quad[3], quad[5], quad[7]) - y
return {'x': x, 'y': y, 'width': width, 'height': height}
async def boxModel(self) -> Optional[Dict]:
"""Return boxes of element.
Return ``None`` if the element is not visible. Boxes are represented as a
list of points; each Point is a dictionary ``{x, y}``. Box points are
sorted clock-wise.
Returned value is a dictionary with the following fields:
* ``content`` (List[Dict]): Content box.
* ``padding`` (List[Dict]): Padding box.
* ``border`` (List[Dict]): Border box.
* ``margin`` (List[Dict]): Margin box.
* ``width`` (int): Element's width.
* ``height`` (int): Element's height.
"""
result = await self._getBoxModel()
if not result:
return None
model = result.get('model', {})
return {
'content': self._fromProtocolQuad(model.get('content')),
'padding': self._fromProtocolQuad(model.get('padding')),
'border': self._fromProtocolQuad(model.get('border')),
'margin': self._fromProtocolQuad(model.get('margin')),
'width': model.get('width'),
'height': model.get('height'),
}
async def screenshot(self, options: Dict = None, **kwargs: Any) -> bytes:
"""Take a screenshot of this element.
If the element is detached from DOM, this method raises an
``ElementHandleError``.
Available options are same as :meth:`pyppeteer.page.Page.screenshot`.
"""
options = merge_dict(options, kwargs)
needsViewportReset = False
boundingBox = await self.boundingBox()
if not boundingBox:
raise ElementHandleError(
'Node is either not visible or not an HTMLElement')
original_viewport = copy.deepcopy(self._page.viewport)
if (boundingBox['width'] > original_viewport['width'] or
boundingBox['height'] > original_viewport['height']):
newViewport = {
'width': max(
original_viewport['width'],
math.ceil(boundingBox['width'])
),
'height': max(
original_viewport['height'],
math.ceil(boundingBox['height'])
),
}
new_viewport = copy.deepcopy(original_viewport)
new_viewport.update(newViewport)
await self._page.setViewport(new_viewport)
needsViewportReset = True
await self._scrollIntoViewIfNeeded()
boundingBox = await self.boundingBox()
if not boundingBox:
raise ElementHandleError(
'Node is either not visible or not an HTMLElement')
_obj = await self._client.send('Page.getLayoutMetrics')
pageX = _obj['layoutViewport']['pageX']
pageY = _obj['layoutViewport']['pageY']
clip = {}
clip.update(boundingBox)
clip['x'] = clip['x'] + pageX
clip['y'] = clip['y'] + pageY
opt = {'clip': clip}
opt.update(options)
imageData = await self._page.screenshot(opt)
if needsViewportReset:
await self._page.setViewport(original_viewport)
return imageData
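# Illustrative usage sketch (assumed): element screenshots return raw bytes,
# which can be written straight to disk.
#
#     png_bytes = await handle.screenshot()
#     with open('element.png', 'wb') as f:
#         f.write(png_bytes)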
async def querySelector(self, selector: str) -> Optional['ElementHandle']:
"""Return first element which matches ``selector`` under this element.
If no element matches the ``selector``, returns ``None``.
"""
handle = await self.executionContext.evaluateHandle(
'(element, selector) => element.querySelector(selector)',
self, selector,
)
element = handle.asElement()
if element:
return element
await handle.dispose()
return None
async def querySelectorAll(self, selector: str) -> List['ElementHandle']:
"""Return all elements which match ``selector`` under this element.
If no element matches the ``selector``, returns an empty list (``[]``).
"""
arrayHandle = await self.executionContext.evaluateHandle(
'(element, selector) => element.querySelectorAll(selector)',
self, selector,
)
properties = await arrayHandle.getProperties()
await arrayHandle.dispose()
result = []
for prop in properties.values():
elementHandle = prop.asElement()
if elementHandle:
result.append(elementHandle)
return result # type: ignore
async def querySelectorEval(self, selector: str, pageFunction: str,
*args: Any) -> Any:
"""Run ``Page.querySelectorEval`` within the element.
This method runs ``document.querySelector`` within the element and
passes it as the first argument to ``pageFunction``. If there is no
element matching ``selector``, the method raises
``ElementHandleError``.
If ``pageFunction`` returns a promise, then wait for the promise to
resolve and return its value.
``ElementHandle.Jeval`` is a shortcut of this method.
Example:
.. code:: python
tweetHandle = await page.querySelector('.tweet')
assert (await tweetHandle.querySelectorEval('.like', 'node => node.innerText')) == 100
assert (await tweetHandle.Jeval('.retweets', 'node => node.innerText')) == 10
""" # noqa: E501
elementHandle = await self.querySelector(selector)
if not elementHandle:
raise ElementHandleError(
f'Error: failed to find element matching selector "{selector}"'
)
result = await self.executionContext.evaluate(
pageFunction, elementHandle, *args)
await elementHandle.dispose()
return result
async def querySelectorAllEval(self, selector: str, pageFunction: str,
*args: Any) -> Any:
"""Run ``Page.querySelectorAllEval`` within the element.
This method runs ``Array.from(document.querySelectorAll)`` within the
element and passes it as the first argument to ``pageFunction``. If
there is no element matching ``selector``, the method raises
``ElementHandleError``.
If ``pageFunction`` returns a promise, then wait for the promise to
resolve and return its value.
Example:
.. code:: html
<div class="feed">
<div class="tweet">Hello!</div>
<div class="tweet">Hi!</div>
</div>
.. code:: python
feedHandle = await page.J('.feed')
assert (await feedHandle.JJeval('.tweet', '(nodes => nodes.map(n => n.innerText))')) == ['Hello!', 'Hi!']
""" # noqa: E501
arrayHandle = await self.executionContext.evaluateHandle(
'(element, selector) => Array.from(element.querySelectorAll(selector))', # noqa: E501
self, selector
)
result = await self.executionContext.evaluate(
pageFunction, arrayHandle, *args)
await arrayHandle.dispose()
return result
#: alias to :meth:`querySelector`
J = querySelector
#: alias to :meth:`querySelectorAll`
JJ = querySelectorAll
#: alias to :meth:`querySelectorEval`
Jeval = querySelectorEval
#: alias to :meth:`querySelectorAllEval`
JJeval = querySelectorAllEval
async def xpath(self, expression: str) -> List['ElementHandle']:
"""Evaluate the XPath expression relative to this elementHandle.
If there are no such elements, return an empty list.
:arg str expression: XPath string to be evaluated.
"""
arrayHandle = await self.executionContext.evaluateHandle(
'''(element, expression) => {
const document = element.ownerDocument || element;
const iterator = document.evaluate(expression, element, null,
XPathResult.ORDERED_NODE_ITERATOR_TYPE);
const array = [];
let item;
while ((item = iterator.iterateNext()))
array.push(item);
return array;
}''', self, expression)
properties = await arrayHandle.getProperties()
await arrayHandle.dispose()
result = []
for property in properties.values():
elementHandle = property.asElement()
if elementHandle:
result.append(elementHandle)
return result
#: alias to :meth:`xpath`
Jx = xpath
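# Illustrative usage sketch (assumed; the expression is hypothetical): evaluate
# an XPath expression relative to this element and read each match's text.
#
#     rows = await handle.xpath('.//tr[@class="result"]')
#     for row in rows:
#         text = await page.evaluate('(node) => node.textContent', row)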
async def isIntersectingViewport(self) -> bool:
"""Return ``True`` if the element is visible in the viewport."""
return await self.executionContext.evaluate('''async element => {
const visibleRatio = await new Promise(resolve => {
const observer = new IntersectionObserver(entries => {
resolve(entries[0].intersectionRatio);
observer.disconnect();
});
observer.observe(element);
});
return visibleRatio > 0;
}''', self)
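# Illustrative usage sketch (assumed): only interact with the element when it
# is actually visible in the viewport.
#
#     if await handle.isIntersectingViewport():
#         await handle.click()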
def _computeQuadArea(quad: List[Dict]) -> float:
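# Signed area via the shoelace formula; the sign depends on the winding
# order of the quad's points.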
area = 0
for i, _ in enumerate(quad):
p1 = quad[i]
p2 = quad[(i + 1) % len(quad)]
area += (p1['x'] * p2['y'] - p2['x'] * p1['y']) / 2
return area
|
the-stack_0_15530 | from abc import ABC, abstractmethod
from collections import Counter
from functools import reduce
from re import split
from sys import version_info
import pandas as pd
from flashtext import KeywordProcessor
from scattertext.ScatterChart import check_topic_model_string_format
from scattertext.features.FeatsFromSpacyDoc import FeatsFromSpacyDoc
class FeatsFromTopicModelBase(ABC):
def __init__(self, topic_model):
self._topic_model = topic_model
self._lexicon_df = self._get_lexicon_df_from_topic_model(topic_model)
def _get_lexicon_df_from_topic_model(self, topic_model):
return (pd.DataFrame(pd.Series(topic_model)
.apply(pd.Series)
.reset_index())
.melt(id_vars=['index'])
[['index', 'value']]
.rename(columns={'index': 'cat', 'value': 'term'})
.set_index('term'))
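# Illustrative sketch of the reshaping above (example values are hypothetical):
# a topic model such as {'pos': ['good', 'great'], 'neg': ['bad']} becomes a
# DataFrame indexed by term with a single 'cat' column, roughly:
#
#     term   cat
#     good   pos
#     great  pos
#     bad    neg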
def _analyze(self, doc):
text_df = (pd.DataFrame(pd.Series(self._get_terms_from_doc(doc)))
.join(self._lexicon_df)
.dropna()
.groupby('cat')
.sum())
return text_df
def get_doc_metadata(self, doc, prefix=''):
feature_counter = Counter()
if version_info[0] >= 3:
doc = str(doc)
for category, score in self._analyze(doc).to_dict()[0].items():
feature_counter[prefix + category] = int(score)
return feature_counter
@abstractmethod
def _get_terms_from_doc(self, doc):
pass
class FeatsFromTopicModel(FeatsFromTopicModelBase, FeatsFromSpacyDoc):
def __init__(self,
topic_model,
use_lemmas=False,
entity_types_to_censor=set(),
entity_types_to_use=None,
tag_types_to_censor=set(),
strip_final_period=False,
keyword_processor_args={'case_sensitive': False}):
self._keyword_processor = KeywordProcessor(**keyword_processor_args)
self._topic_model = topic_model.copy()
if keyword_processor_args.get('case_sensitive', None) is False:
for k, v in self._topic_model.items():
self._topic_model[k] = [e.lower() for e in v]
for keyphrase in reduce(lambda x, y: set(x) | set(y), self._topic_model.values()):
self._keyword_processor.add_keyword(keyphrase)
FeatsFromSpacyDoc.__init__(self, use_lemmas, entity_types_to_censor,
tag_types_to_censor, strip_final_period)
FeatsFromTopicModelBase.__init__(self, topic_model)
def get_top_model_term_lists(self):
return self._topic_model
def _get_terms_from_doc(self, doc):
return Counter(self._keyword_processor.extract_keywords(str(doc)))
def get_feats(self, doc):
return Counter(self._get_terms_from_doc(str(doc)))
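# Illustrative usage sketch (assumed; the topic model and document below are
# hypothetical, and exact counts depend on flashtext keyword extraction):
#
#     feats = FeatsFromTopicModel({'finance': ['stock', 'market'],
#                                  'sentiment': ['good', 'bad']})
#     feats.get_doc_metadata('The stock market looks good')
#     # roughly -> Counter({'finance': 2, 'sentiment': 1})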
"""
class FeatsFromTopicModel(FeatsFromSpacyDoc, FeatsFromTopicModelBase):
def __init__(self,
topic_model,
use_lemmas=False,
entity_types_to_censor=set(),
tag_types_to_censor=set(),
strip_final_period=False,
**kwargs):
'''
Parameters
----------
topic_model : dict
{topicmodelname: [term1, term2, ....], ...}
Other parameters from FeatsFromSpacyDoc.__init__
'''
check_topic_model_string_format(topic_model)
self._topic_model = topic_model
self._lexicon_df = self._get_lexicon_df_from_topic_model(topic_model)
super(FeatsFromTopicModel, self).__init__(use_lemmas,
entity_types_to_censor,
tag_types_to_censor,
strip_final_period)
def _get_terms_from_doc(self, doc):
return Counter(t for t in split(r"(\W)", doc.lower()) if t.strip())
def has_metadata_term_list(self):
return True
def get_top_model_term_lists(self):
return self._topic_model
"""
|
the-stack_0_15532 | # Copyright DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import unittest2 as unittest
except ImportError:
import unittest # noqa
import difflib
import six
import sys
import time
from packaging.version import Version
from mock import Mock, patch
from cassandra import AlreadyExists, SignatureDescriptor, UserFunctionDescriptor, UserAggregateDescriptor
from cassandra.cluster import Cluster
from cassandra.encoder import Encoder
from cassandra.metadata import (IndexMetadata, Token, murmur3, Function, Aggregate, protect_name, protect_names,
RegisteredTableExtension, _RegisteredExtensionType, get_schema_parser,
group_keys_by_replica, NO_VALID_REPLICA)
from tests.integration import (get_cluster, use_singledc, PROTOCOL_VERSION, execute_until_pass,
BasicSegregatedKeyspaceUnitTestCase, BasicSharedKeyspaceUnitTestCase,
BasicExistingKeyspaceUnitTestCase, drop_keyspace_shutdown_cluster, CASSANDRA_VERSION,
get_supported_protocol_versions, greaterthanorequalcass30, lessthancass30, local,
greaterthancass20)
from tests.integration import greaterthancass21
def setup_module():
use_singledc()
class HostMetatDataTests(BasicExistingKeyspaceUnitTestCase):
@local
def test_broadcast_listen_address(self):
"""
Check to ensure that the broadcast and listen addresses are populated correctly
@since 3.3
@jira_ticket PYTHON-332
@expected_result They are populated for C*> 2.1.6, 2.2.0
@test_category metadata
"""
# All nodes should have the broadcast_address set
for host in self.cluster.metadata.all_hosts():
self.assertIsNotNone(host.broadcast_address)
con = self.cluster.control_connection.get_connections()[0]
local_host = con.host
# The control connection node should have the listen address set.
listen_addrs = [host.listen_address for host in self.cluster.metadata.all_hosts()]
self.assertTrue(local_host in listen_addrs)
def test_host_release_version(self):
"""
Checks each host's release version and validates that it matches the
Cassandra version we are using in our test harness.
@since 3.3
@jira_ticket PYTHON-301
@expected_result host.release version should match our specified Cassandra version.
@test_category metadata
"""
for host in self.cluster.metadata.all_hosts():
self.assertTrue(host.release_version.startswith(CASSANDRA_VERSION.base_version))
@local
class MetaDataRemovalTest(unittest.TestCase):
def setUp(self):
self.cluster = Cluster(protocol_version=PROTOCOL_VERSION, contact_points=['127.0.0.1','127.0.0.2', '127.0.0.3', '126.0.0.186'])
self.cluster.connect()
def tearDown(self):
self.cluster.shutdown()
def test_bad_contact_point(self):
"""
Checks to ensure that hosts that are not resolvable are excluded from the contact point list.
@since 3.6
@jira_ticket PYTHON-549
@expected_result Invalid hosts on the contact list should be excluded
@test_category metadata
"""
self.assertEqual(len(self.cluster.metadata.all_hosts()), 3)
class SchemaMetadataTests(BasicSegregatedKeyspaceUnitTestCase):
def test_schema_metadata_disable(self):
"""
Checks to ensure that the schema_metadata_enabled and token_metadata_enabled
flags work correctly.
@since 3.3
@jira_ticket PYTHON-327
@expected_result schema metadata will not be populated when schema_metadata_enabled is false
token_metadata will be missing when token_metadata_enabled is set to false
@test_category metadata
"""
# Validate metadata is missing where appropriate
no_schema = Cluster(schema_metadata_enabled=False)
no_schema_session = no_schema.connect()
self.assertEqual(len(no_schema.metadata.keyspaces), 0)
self.assertEqual(no_schema.metadata.export_schema_as_string(), '')
no_token = Cluster(token_metadata_enabled=False)
no_token_session = no_token.connect()
self.assertEqual(len(no_token.metadata.token_map.token_to_host_owner), 0)
# Do a simple query to ensure queries are working
query = "SELECT * FROM system.local"
no_schema_rs = no_schema_session.execute(query)
no_token_rs = no_token_session.execute(query)
self.assertIsNotNone(no_schema_rs[0])
self.assertIsNotNone(no_token_rs[0])
no_schema.shutdown()
no_token.shutdown()
def make_create_statement(self, partition_cols, clustering_cols=None, other_cols=None):
clustering_cols = clustering_cols or []
other_cols = other_cols or []
statement = "CREATE TABLE %s.%s (" % (self.keyspace_name, self.function_table_name)
if len(partition_cols) == 1 and not clustering_cols:
statement += "%s text PRIMARY KEY, " % protect_name(partition_cols[0])
else:
statement += ", ".join("%s text" % protect_name(col) for col in partition_cols)
statement += ", "
statement += ", ".join("%s text" % protect_name(col) for col in clustering_cols + other_cols)
if len(partition_cols) != 1 or clustering_cols:
statement += ", PRIMARY KEY ("
if len(partition_cols) > 1:
statement += "(" + ", ".join(protect_names(partition_cols)) + ")"
else:
statement += protect_name(partition_cols[0])
if clustering_cols:
statement += ", "
statement += ", ".join(protect_names(clustering_cols))
statement += ")"
statement += ")"
return statement
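# Illustrative examples of the statements the helper above builds (keyspace and
# table names are placeholders):
#
#     make_create_statement(["a"], ["b"], ["c"])
#     # -> CREATE TABLE ks.tbl (a text, b text, c text, PRIMARY KEY (a, b))
#     make_create_statement(["a", "b"], [], ["c"])
#     # -> CREATE TABLE ks.tbl (a text, b text, c text, PRIMARY KEY ((a, b)))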
def check_create_statement(self, tablemeta, original):
recreate = tablemeta.as_cql_query(formatted=False)
self.assertEqual(original, recreate[:len(original)])
execute_until_pass(self.session, "DROP TABLE {0}.{1}".format(self.keyspace_name, self.function_table_name))
execute_until_pass(self.session, recreate)
# create the table again, but with formatting enabled
execute_until_pass(self.session, "DROP TABLE {0}.{1}".format(self.keyspace_name, self.function_table_name))
recreate = tablemeta.as_cql_query(formatted=True)
execute_until_pass(self.session, recreate)
def get_table_metadata(self):
self.cluster.refresh_table_metadata(self.keyspace_name, self.function_table_name)
return self.cluster.metadata.keyspaces[self.keyspace_name].tables[self.function_table_name]
def test_basic_table_meta_properties(self):
create_statement = self.make_create_statement(["a"], [], ["b", "c"])
self.session.execute(create_statement)
self.cluster.refresh_schema_metadata()
meta = self.cluster.metadata
self.assertNotEqual(meta.cluster_name, None)
self.assertTrue(self.keyspace_name in meta.keyspaces)
ksmeta = meta.keyspaces[self.keyspace_name]
self.assertEqual(ksmeta.name, self.keyspace_name)
self.assertTrue(ksmeta.durable_writes)
self.assertEqual(ksmeta.replication_strategy.name, 'SimpleStrategy')
self.assertEqual(ksmeta.replication_strategy.replication_factor, 1)
self.assertTrue(self.function_table_name in ksmeta.tables)
tablemeta = ksmeta.tables[self.function_table_name]
self.assertEqual(tablemeta.keyspace_name, ksmeta.name)
self.assertEqual(tablemeta.name, self.function_table_name)
self.assertEqual([u'a'], [c.name for c in tablemeta.partition_key])
self.assertEqual([], tablemeta.clustering_key)
self.assertEqual([u'a', u'b', u'c'], sorted(tablemeta.columns.keys()))
cc = self.cluster.control_connection._connection
parser = get_schema_parser(cc, CASSANDRA_VERSION.base_version, 1)
for option in tablemeta.options:
self.assertIn(option, parser.recognized_table_options)
self.check_create_statement(tablemeta, create_statement)
def test_compound_primary_keys(self):
create_statement = self.make_create_statement(["a"], ["b"], ["c"])
create_statement += " WITH CLUSTERING ORDER BY (b ASC)"
self.session.execute(create_statement)
tablemeta = self.get_table_metadata()
self.assertEqual([u'a'], [c.name for c in tablemeta.partition_key])
self.assertEqual([u'b'], [c.name for c in tablemeta.clustering_key])
self.assertEqual([u'a', u'b', u'c'], sorted(tablemeta.columns.keys()))
self.check_create_statement(tablemeta, create_statement)
def test_compound_primary_keys_protected(self):
create_statement = self.make_create_statement(["Aa"], ["Bb"], ["Cc"])
create_statement += ' WITH CLUSTERING ORDER BY ("Bb" ASC)'
self.session.execute(create_statement)
tablemeta = self.get_table_metadata()
self.assertEqual([u'Aa'], [c.name for c in tablemeta.partition_key])
self.assertEqual([u'Bb'], [c.name for c in tablemeta.clustering_key])
self.assertEqual([u'Aa', u'Bb', u'Cc'], sorted(tablemeta.columns.keys()))
self.check_create_statement(tablemeta, create_statement)
def test_compound_primary_keys_more_columns(self):
create_statement = self.make_create_statement(["a"], ["b", "c"], ["d", "e", "f"])
create_statement += " WITH CLUSTERING ORDER BY (b ASC, c ASC)"
self.session.execute(create_statement)
tablemeta = self.get_table_metadata()
self.assertEqual([u'a'], [c.name for c in tablemeta.partition_key])
self.assertEqual([u'b', u'c'], [c.name for c in tablemeta.clustering_key])
self.assertEqual(
[u'a', u'b', u'c', u'd', u'e', u'f'],
sorted(tablemeta.columns.keys()))
self.check_create_statement(tablemeta, create_statement)
def test_composite_primary_key(self):
create_statement = self.make_create_statement(["a", "b"], [], ["c"])
self.session.execute(create_statement)
tablemeta = self.get_table_metadata()
self.assertEqual([u'a', u'b'], [c.name for c in tablemeta.partition_key])
self.assertEqual([], tablemeta.clustering_key)
self.assertEqual([u'a', u'b', u'c'], sorted(tablemeta.columns.keys()))
self.check_create_statement(tablemeta, create_statement)
def test_composite_in_compound_primary_key(self):
create_statement = self.make_create_statement(["a", "b"], ["c"], ["d", "e"])
create_statement += " WITH CLUSTERING ORDER BY (c ASC)"
self.session.execute(create_statement)
tablemeta = self.get_table_metadata()
self.assertEqual([u'a', u'b'], [c.name for c in tablemeta.partition_key])
self.assertEqual([u'c'], [c.name for c in tablemeta.clustering_key])
self.assertEqual([u'a', u'b', u'c', u'd', u'e'], sorted(tablemeta.columns.keys()))
self.check_create_statement(tablemeta, create_statement)
def test_compound_primary_keys_compact(self):
create_statement = self.make_create_statement(["a"], ["b"], ["c"])
create_statement += " WITH CLUSTERING ORDER BY (b ASC)"
self.session.execute(create_statement)
tablemeta = self.get_table_metadata()
self.assertEqual([u'a'], [c.name for c in tablemeta.partition_key])
self.assertEqual([u'b'], [c.name for c in tablemeta.clustering_key])
self.assertEqual([u'a', u'b', u'c'], sorted(tablemeta.columns.keys()))
self.check_create_statement(tablemeta, create_statement)
def test_cluster_column_ordering_reversed_metadata(self):
"""
Simple test to ensure that the metadata associated with clustering order is surfaced correctly.
Creates a table with a few clustering keys. Then checks the clustering order associated with clustering columns
and ensure it's set correctly.
@since 3.0.0
@jira_ticket PYTHON-402
@expected_result is_reversed is set on DESC order, and is False on ASC
@test_category metadata
"""
create_statement = self.make_create_statement(["a"], ["b", "c"], ["d"])
create_statement += " WITH CLUSTERING ORDER BY (b ASC, c DESC)"
self.session.execute(create_statement)
tablemeta = self.get_table_metadata()
b_column = tablemeta.columns['b']
self.assertFalse(b_column.is_reversed)
c_column = tablemeta.columns['c']
self.assertTrue(c_column.is_reversed)
def test_compound_primary_keys_more_columns_compact(self):
create_statement = self.make_create_statement(["a"], ["b", "c"], ["d"])
create_statement += " WITH CLUSTERING ORDER BY (b ASC, c ASC)"
self.session.execute(create_statement)
tablemeta = self.get_table_metadata()
self.assertEqual([u'a'], [c.name for c in tablemeta.partition_key])
self.assertEqual([u'b', u'c'], [c.name for c in tablemeta.clustering_key])
self.assertEqual([u'a', u'b', u'c', u'd'], sorted(tablemeta.columns.keys()))
self.check_create_statement(tablemeta, create_statement)
def test_composite_primary_key_compact(self):
create_statement = self.make_create_statement(["a", "b"], [], ["c"])
self.session.execute(create_statement)
tablemeta = self.get_table_metadata()
self.assertEqual([u'a', u'b'], [c.name for c in tablemeta.partition_key])
self.assertEqual([], tablemeta.clustering_key)
self.assertEqual([u'a', u'b', u'c'], sorted(tablemeta.columns.keys()))
self.check_create_statement(tablemeta, create_statement)
def test_composite_in_compound_primary_key_compact(self):
create_statement = self.make_create_statement(["a", "b"], ["c"], ["d"])
create_statement += " WITH CLUSTERING ORDER BY (c ASC)"
self.session.execute(create_statement)
tablemeta = self.get_table_metadata()
self.assertEqual([u'a', u'b'], [c.name for c in tablemeta.partition_key])
self.assertEqual([u'c'], [c.name for c in tablemeta.clustering_key])
self.assertEqual([u'a', u'b', u'c', u'd'], sorted(tablemeta.columns.keys()))
self.check_create_statement(tablemeta, create_statement)
@lessthancass30
def test_cql_compatibility(self):
# having more than one non-PK column is okay if there aren't any
# clustering columns
create_statement = self.make_create_statement(["a"], [], ["b", "c", "d"])
self.session.execute(create_statement)
tablemeta = self.get_table_metadata()
self.assertEqual([u'a'], [c.name for c in tablemeta.partition_key])
self.assertEqual([], tablemeta.clustering_key)
self.assertEqual([u'a', u'b', u'c', u'd'], sorted(tablemeta.columns.keys()))
self.assertTrue(tablemeta.is_cql_compatible)
# It will be cql compatible after CASSANDRA-10857
# since compact storage is being dropped
tablemeta.clustering_key = ["foo", "bar"]
tablemeta.columns["foo"] = None
tablemeta.columns["bar"] = None
self.assertTrue(tablemeta.is_cql_compatible)
def test_compound_primary_keys_ordering(self):
create_statement = self.make_create_statement(["a"], ["b"], ["c"])
create_statement += " WITH CLUSTERING ORDER BY (b DESC)"
self.session.execute(create_statement)
tablemeta = self.get_table_metadata()
self.check_create_statement(tablemeta, create_statement)
def test_compound_primary_keys_more_columns_ordering(self):
create_statement = self.make_create_statement(["a"], ["b", "c"], ["d", "e", "f"])
create_statement += " WITH CLUSTERING ORDER BY (b DESC, c ASC)"
self.session.execute(create_statement)
tablemeta = self.get_table_metadata()
self.check_create_statement(tablemeta, create_statement)
def test_composite_in_compound_primary_key_ordering(self):
create_statement = self.make_create_statement(["a", "b"], ["c"], ["d", "e"])
create_statement += " WITH CLUSTERING ORDER BY (c DESC)"
self.session.execute(create_statement)
tablemeta = self.get_table_metadata()
self.check_create_statement(tablemeta, create_statement)
def test_indexes(self):
create_statement = self.make_create_statement(["a"], ["b", "c"], ["d", "e", "f"])
create_statement += " WITH CLUSTERING ORDER BY (b ASC, c ASC)"
execute_until_pass(self.session, create_statement)
d_index = "CREATE INDEX d_index ON %s.%s (d)" % (self.keyspace_name, self.function_table_name)
e_index = "CREATE INDEX e_index ON %s.%s (e)" % (self.keyspace_name, self.function_table_name)
execute_until_pass(self.session, d_index)
execute_until_pass(self.session, e_index)
tablemeta = self.get_table_metadata()
statements = tablemeta.export_as_string().strip()
statements = [s.strip() for s in statements.split(';')]
statements = list(filter(bool, statements))
self.assertEqual(3, len(statements))
self.assertIn(d_index, statements)
self.assertIn(e_index, statements)
# make sure indexes are included in KeyspaceMetadata.export_as_string()
ksmeta = self.cluster.metadata.keyspaces[self.keyspace_name]
statement = ksmeta.export_as_string()
self.assertIn('CREATE INDEX d_index', statement)
self.assertIn('CREATE INDEX e_index', statement)
@greaterthancass21
def test_collection_indexes(self):
self.session.execute("CREATE TABLE %s.%s (a int PRIMARY KEY, b map<text, text>)"
% (self.keyspace_name, self.function_table_name))
self.session.execute("CREATE INDEX index1 ON %s.%s (keys(b))"
% (self.keyspace_name, self.function_table_name))
tablemeta = self.get_table_metadata()
self.assertIn('(keys(b))', tablemeta.export_as_string())
self.session.execute("DROP INDEX %s.index1" % (self.keyspace_name,))
self.session.execute("CREATE INDEX index2 ON %s.%s (b)"
% (self.keyspace_name, self.function_table_name))
tablemeta = self.get_table_metadata()
target = ' (b)' if CASSANDRA_VERSION < Version("3.0") else 'values(b))' # explicit values in C* 3+
self.assertIn(target, tablemeta.export_as_string())
# test full indexes on frozen collections, if available
if CASSANDRA_VERSION >= Version("2.1.3"):
self.session.execute("DROP TABLE %s.%s" % (self.keyspace_name, self.function_table_name))
self.session.execute("CREATE TABLE %s.%s (a int PRIMARY KEY, b frozen<map<text, text>>)"
% (self.keyspace_name, self.function_table_name))
self.session.execute("CREATE INDEX index3 ON %s.%s (full(b))"
% (self.keyspace_name, self.function_table_name))
tablemeta = self.get_table_metadata()
self.assertIn('(full(b))', tablemeta.export_as_string())
def test_compression_disabled(self):
create_statement = self.make_create_statement(["a"], ["b"], ["c"])
create_statement += " WITH compression = {}"
self.session.execute(create_statement)
tablemeta = self.get_table_metadata()
expected = "compression = {}" if CASSANDRA_VERSION < Version("3.0") else "compression = {'enabled': 'false'}"
self.assertIn(expected, tablemeta.export_as_string())
def test_non_size_tiered_compaction(self):
"""
test options for non-size-tiered compaction strategy
Creates a table with LeveledCompactionStrategy, specifying one non-default option. Verifies that the option is
present in generated CQL, and that other legacy table parameters (min_threshold, max_threshold) are not included.
@since 2.6.0
@jira_ticket PYTHON-352
@expected_result the options map for LeveledCompactionStrategy does not contain min_threshold, max_threshold
@test_category metadata
"""
create_statement = self.make_create_statement(["a"], [], ["b", "c"])
create_statement += "WITH COMPACTION = {'class': 'LeveledCompactionStrategy', 'tombstone_threshold': '0.3'}"
self.session.execute(create_statement)
table_meta = self.get_table_metadata()
cql = table_meta.export_as_string()
self.assertIn("'tombstone_threshold': '0.3'", cql)
self.assertIn("LeveledCompactionStrategy", cql)
self.assertNotIn("min_threshold", cql)
self.assertNotIn("max_threshold", cql)
def test_refresh_schema_metadata(self):
"""
test for synchronously refreshing all cluster metadata
test_refresh_schema_metadata tests all cluster metadata is refreshed when calling refresh_schema_metadata().
It creates a second cluster object with schema_event_refresh_window=-1 such that schema refreshes are disabled
for schema change push events. It then alters the cluster, creating a new keyspace, using the first cluster
object, and verifies that the cluster metadata has not changed in the second cluster object. It then calls
refresh_schema_metadata() and verifies that the cluster metadata is updated in the second cluster object.
Similarly, it then proceeds to altering keyspace, table, UDT, UDF, and UDA metadata and subsequently verifies
that this metadata is updated when refresh_schema_metadata() is called.
@since 2.6.0
@jira_ticket PYTHON-291
@expected_result Cluster, keyspace, table, UDT, UDF, and UDA metadata should be refreshed when refresh_schema_metadata() is called.
@test_category metadata
"""
cluster2 = Cluster(protocol_version=PROTOCOL_VERSION, schema_event_refresh_window=-1)
cluster2.connect()
self.assertNotIn("new_keyspace", cluster2.metadata.keyspaces)
# Cluster metadata modification
self.session.execute("CREATE KEYSPACE new_keyspace WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '1'}")
self.assertNotIn("new_keyspace", cluster2.metadata.keyspaces)
cluster2.refresh_schema_metadata()
self.assertIn("new_keyspace", cluster2.metadata.keyspaces)
# Keyspace metadata modification
self.session.execute("ALTER KEYSPACE {0} WITH durable_writes = false".format(self.keyspace_name))
self.assertTrue(cluster2.metadata.keyspaces[self.keyspace_name].durable_writes)
cluster2.refresh_schema_metadata()
self.assertFalse(cluster2.metadata.keyspaces[self.keyspace_name].durable_writes)
# Table metadata modification
table_name = "test"
self.session.execute("CREATE TABLE {0}.{1} (a int PRIMARY KEY, b text)".format(self.keyspace_name, table_name))
cluster2.refresh_schema_metadata()
self.session.execute("ALTER TABLE {0}.{1} ADD c double".format(self.keyspace_name, table_name))
self.assertNotIn("c", cluster2.metadata.keyspaces[self.keyspace_name].tables[table_name].columns)
cluster2.refresh_schema_metadata()
self.assertIn("c", cluster2.metadata.keyspaces[self.keyspace_name].tables[table_name].columns)
if PROTOCOL_VERSION >= 3:
# UDT metadata modification
self.session.execute("CREATE TYPE {0}.user (age int, name text)".format(self.keyspace_name))
self.assertEqual(cluster2.metadata.keyspaces[self.keyspace_name].user_types, {})
cluster2.refresh_schema_metadata()
self.assertIn("user", cluster2.metadata.keyspaces[self.keyspace_name].user_types)
if PROTOCOL_VERSION >= 4:
# UDF metadata modification
self.session.execute("""CREATE FUNCTION {0}.sum_int(key int, val int)
RETURNS NULL ON NULL INPUT
RETURNS int
LANGUAGE javascript AS 'key + val';""".format(self.keyspace_name))
self.assertEqual(cluster2.metadata.keyspaces[self.keyspace_name].functions, {})
cluster2.refresh_schema_metadata()
self.assertIn("sum_int(int,int)", cluster2.metadata.keyspaces[self.keyspace_name].functions)
# UDA metadata modification
self.session.execute("""CREATE AGGREGATE {0}.sum_agg(int)
SFUNC sum_int
STYPE int
INITCOND 0"""
.format(self.keyspace_name))
self.assertEqual(cluster2.metadata.keyspaces[self.keyspace_name].aggregates, {})
cluster2.refresh_schema_metadata()
self.assertIn("sum_agg(int)", cluster2.metadata.keyspaces[self.keyspace_name].aggregates)
# Cluster metadata modification
self.session.execute("DROP KEYSPACE new_keyspace")
self.assertIn("new_keyspace", cluster2.metadata.keyspaces)
cluster2.refresh_schema_metadata()
self.assertNotIn("new_keyspace", cluster2.metadata.keyspaces)
cluster2.shutdown()
def test_refresh_keyspace_metadata(self):
"""
test for synchronously refreshing keyspace metadata
test_refresh_keyspace_metadata tests that keyspace metadata is refreshed when calling refresh_keyspace_metadata().
It creates a second cluster object with schema_event_refresh_window=-1 such that schema refreshes are disabled
for schema change push events. It then alters the keyspace, disabling durable_writes, using the first cluster
object, and verifies that the keyspace metadata has not changed in the second cluster object. Finally, it calls
refresh_keyspace_metadata() and verifies that the keyspace metadata is updated in the second cluster object.
@since 2.6.0
@jira_ticket PYTHON-291
@expected_result Keyspace metadata should be refreshed when refresh_keyspace_metadata() is called.
@test_category metadata
"""
cluster2 = Cluster(protocol_version=PROTOCOL_VERSION, schema_event_refresh_window=-1)
cluster2.connect()
self.assertTrue(cluster2.metadata.keyspaces[self.keyspace_name].durable_writes)
self.session.execute("ALTER KEYSPACE {0} WITH durable_writes = false".format(self.keyspace_name))
self.assertTrue(cluster2.metadata.keyspaces[self.keyspace_name].durable_writes)
cluster2.refresh_keyspace_metadata(self.keyspace_name)
self.assertFalse(cluster2.metadata.keyspaces[self.keyspace_name].durable_writes)
cluster2.shutdown()
def test_refresh_table_metadata(self):
"""
test for synchronously refreshing table metadata
test_refresh_table_metadata tests that table metadata is refreshed when calling refresh_table_metadata().
It creates a second cluster object with schema_event_refresh_window=-1 such that schema refreshes are disabled
for schema change push events. It then alters the table, adding a new column, using the first cluster
object, and verifies that the table metadata has not changed in the second cluster object. Finally, it calls
refresh_table_metadata() and verifies that the table metadata is updated in the second cluster object.
@since 2.6.0
@jira_ticket PYTHON-291
@expected_result Table metadata should be refreshed when refresh_table_metadata() is called.
@test_category metadata
"""
table_name = "test"
self.session.execute("CREATE TABLE {0}.{1} (a int PRIMARY KEY, b text)".format(self.keyspace_name, table_name))
cluster2 = Cluster(protocol_version=PROTOCOL_VERSION, schema_event_refresh_window=-1)
cluster2.connect()
self.assertNotIn("c", cluster2.metadata.keyspaces[self.keyspace_name].tables[table_name].columns)
self.session.execute("ALTER TABLE {0}.{1} ADD c double".format(self.keyspace_name, table_name))
self.assertNotIn("c", cluster2.metadata.keyspaces[self.keyspace_name].tables[table_name].columns)
cluster2.refresh_table_metadata(self.keyspace_name, table_name)
self.assertIn("c", cluster2.metadata.keyspaces[self.keyspace_name].tables[table_name].columns)
cluster2.shutdown()
@greaterthanorequalcass30
def test_refresh_metadata_for_mv(self):
"""
test for synchronously refreshing materialized view metadata
test_refresh_metadata_for_mv tests that materialized view metadata is refreshed when calling
refresh_table_metadata() with the materialized view name as the table. It creates a second cluster object
with schema_event_refresh_window=-1 such that schema refreshes are disabled for schema change push events.
It then creates a new materialized view, using the first cluster object, and verifies that the materialized view
metadata has not changed in the second cluster object. Finally, it calls refresh_table_metadata() with the
materialized view name as the table name, and verifies that the materialized view metadata is updated in the
second cluster object.
@since 3.0.0
@jira_ticket PYTHON-371
@expected_result Materialized view metadata should be refreshed when refresh_table_metadata() is called.
@test_category metadata
"""
self.session.execute("CREATE TABLE {0}.{1} (a int PRIMARY KEY, b text)".format(self.keyspace_name, self.function_table_name))
cluster2 = Cluster(protocol_version=PROTOCOL_VERSION, schema_event_refresh_window=-1)
cluster2.connect()
try:
self.assertNotIn("mv1", cluster2.metadata.keyspaces[self.keyspace_name].tables[self.function_table_name].views)
self.session.execute("CREATE MATERIALIZED VIEW {0}.mv1 AS SELECT b FROM {0}.{1} WHERE b IS NOT NULL PRIMARY KEY (a, b)"
.format(self.keyspace_name, self.function_table_name))
self.assertNotIn("mv1", cluster2.metadata.keyspaces[self.keyspace_name].tables[self.function_table_name].views)
cluster2.refresh_table_metadata(self.keyspace_name, "mv1")
self.assertIn("mv1", cluster2.metadata.keyspaces[self.keyspace_name].tables[self.function_table_name].views)
finally:
cluster2.shutdown()
original_meta = self.cluster.metadata.keyspaces[self.keyspace_name].views['mv1']
self.assertIs(original_meta, self.session.cluster.metadata.keyspaces[self.keyspace_name].tables[self.function_table_name].views['mv1'])
self.cluster.refresh_materialized_view_metadata(self.keyspace_name, 'mv1')
current_meta = self.cluster.metadata.keyspaces[self.keyspace_name].views['mv1']
self.assertIsNot(current_meta, original_meta)
self.assertIsNot(original_meta, self.session.cluster.metadata.keyspaces[self.keyspace_name].tables[self.function_table_name].views['mv1'])
self.assertEqual(original_meta.as_cql_query(), current_meta.as_cql_query())
cluster3 = Cluster(protocol_version=PROTOCOL_VERSION, schema_event_refresh_window=-1)
cluster3.connect()
try:
self.assertNotIn("mv2", cluster3.metadata.keyspaces[self.keyspace_name].tables[self.function_table_name].views)
self.session.execute("CREATE MATERIALIZED VIEW {0}.mv2 AS SELECT b FROM {0}.{1} WHERE b IS NOT NULL PRIMARY KEY (a, b)"
.format(self.keyspace_name, self.function_table_name))
self.assertNotIn("mv2", cluster3.metadata.keyspaces[self.keyspace_name].tables[self.function_table_name].views)
cluster3.refresh_materialized_view_metadata(self.keyspace_name, 'mv2')
self.assertIn("mv2", cluster3.metadata.keyspaces[self.keyspace_name].tables[self.function_table_name].views)
finally:
cluster3.shutdown()
def test_refresh_user_type_metadata(self):
"""
test for synchronously refreshing UDT metadata in keyspace
test_refresh_user_type_metadata tests that UDT metadata in a keyspace is refreshed when calling refresh_user_type_metadata().
It creates a second cluster object with schema_event_refresh_window=-1 such that schema refreshes are disabled
for schema change push events. It then alters the keyspace, creating a new UDT, using the first cluster
object, and verifies that the UDT metadata has not changed in the second cluster object. Finally, it calls
refresh_user_type_metadata() and verifies that the UDT metadata in the keyspace is updated in the second cluster object.
@since 2.6.0
@jira_ticket PYTHON-291
@expected_result UDT metadata in the keyspace should be refreshed when refresh_user_type_metadata() is called.
@test_category metadata
"""
if PROTOCOL_VERSION < 3:
raise unittest.SkipTest("Protocol 3+ is required for UDTs, currently testing against {0}".format(PROTOCOL_VERSION))
cluster2 = Cluster(protocol_version=PROTOCOL_VERSION, schema_event_refresh_window=-1)
cluster2.connect()
self.assertEqual(cluster2.metadata.keyspaces[self.keyspace_name].user_types, {})
self.session.execute("CREATE TYPE {0}.user (age int, name text)".format(self.keyspace_name))
self.assertEqual(cluster2.metadata.keyspaces[self.keyspace_name].user_types, {})
cluster2.refresh_user_type_metadata(self.keyspace_name, "user")
self.assertIn("user", cluster2.metadata.keyspaces[self.keyspace_name].user_types)
cluster2.shutdown()
@greaterthancass20
def test_refresh_user_type_metadata_proto_2(self):
"""
Test to ensure that protocol v1/v2 surface UDT metadata changes
@since 3.7.0
@jira_ticket PYTHON-106
@expected_result UDT metadata in the keyspace should be updated regardless of protocol version
@test_category metadata
"""
supported_versions = get_supported_protocol_versions()
if 2 not in supported_versions: # 1 and 2 were dropped in the same version
raise unittest.SkipTest("Protocol versions 1 and 2 are not supported in Cassandra version ".format(CASSANDRA_VERSION))
for protocol_version in (1, 2):
cluster = Cluster(protocol_version=protocol_version)
session = cluster.connect()
self.assertEqual(cluster.metadata.keyspaces[self.keyspace_name].user_types, {})
session.execute("CREATE TYPE {0}.user (age int, name text)".format(self.keyspace_name))
self.assertIn("user", cluster.metadata.keyspaces[self.keyspace_name].user_types)
self.assertIn("age", cluster.metadata.keyspaces[self.keyspace_name].user_types["user"].field_names)
self.assertIn("name", cluster.metadata.keyspaces[self.keyspace_name].user_types["user"].field_names)
session.execute("ALTER TYPE {0}.user ADD flag boolean".format(self.keyspace_name))
self.assertIn("flag", cluster.metadata.keyspaces[self.keyspace_name].user_types["user"].field_names)
session.execute("ALTER TYPE {0}.user RENAME flag TO something".format(self.keyspace_name))
self.assertIn("something", cluster.metadata.keyspaces[self.keyspace_name].user_types["user"].field_names)
session.execute("DROP TYPE {0}.user".format(self.keyspace_name))
self.assertEqual(cluster.metadata.keyspaces[self.keyspace_name].user_types, {})
cluster.shutdown()
def test_refresh_user_function_metadata(self):
"""
test for synchronously refreshing UDF metadata in keyspace
test_refresh_user_function_metadata tests that UDF metadata in a keyspace is refreshed when calling
refresh_user_function_metadata(). It creates a second cluster object with schema_event_refresh_window=-1 such
that schema refreshes are disabled for schema change push events. It then alters the keyspace, creating a new
UDF, using the first cluster object, and verifies that the UDF metadata has not changed in the second cluster
object. Finally, it calls refresh_user_function_metadata() and verifies that the UDF metadata in the keyspace
is updated in the second cluster object.
@since 2.6.0
@jira_ticket PYTHON-291
@expected_result UDF metadata in the keyspace should be refreshed when refresh_user_function_metadata() is called.
@test_category metadata
"""
if PROTOCOL_VERSION < 4:
raise unittest.SkipTest("Protocol 4+ is required for UDFs, currently testing against {0}".format(PROTOCOL_VERSION))
cluster2 = Cluster(protocol_version=PROTOCOL_VERSION, schema_event_refresh_window=-1)
cluster2.connect()
self.assertEqual(cluster2.metadata.keyspaces[self.keyspace_name].functions, {})
self.session.execute("""CREATE FUNCTION {0}.sum_int(key int, val int)
RETURNS NULL ON NULL INPUT
RETURNS int
LANGUAGE javascript AS 'key + val';""".format(self.keyspace_name))
self.assertEqual(cluster2.metadata.keyspaces[self.keyspace_name].functions, {})
cluster2.refresh_user_function_metadata(self.keyspace_name, UserFunctionDescriptor("sum_int", ["int", "int"]))
self.assertIn("sum_int(int,int)", cluster2.metadata.keyspaces[self.keyspace_name].functions)
cluster2.shutdown()
def test_refresh_user_aggregate_metadata(self):
"""
test for synchronously refreshing UDA metadata in keyspace
test_refresh_user_aggregate_metadata tests that UDA metadata in a keyspace is refreshed when calling
refresh_user_aggregate_metadata(). It creates a second cluster object with schema_event_refresh_window=-1 such
that schema refreshes are disabled for schema change push events. It then alters the keyspace, creating a new
UDA, using the first cluster object, and verifies that the UDA metadata has not changed in the second cluster
object. Finally, it calls refresh_user_aggregate_metadata() and verifies that the UDA metadata in the keyspace
is updated in the second cluster object.
@since 2.6.0
@jira_ticket PYTHON-291
@expected_result UDA metadata in the keyspace should be refreshed when refresh_user_aggregate_metadata() is called.
@test_category metadata
"""
if PROTOCOL_VERSION < 4:
raise unittest.SkipTest("Protocol 4+ is required for UDAs, currently testing against {0}".format(PROTOCOL_VERSION))
cluster2 = Cluster(protocol_version=PROTOCOL_VERSION, schema_event_refresh_window=-1)
cluster2.connect()
self.assertEqual(cluster2.metadata.keyspaces[self.keyspace_name].aggregates, {})
self.session.execute("""CREATE FUNCTION {0}.sum_int(key int, val int)
RETURNS NULL ON NULL INPUT
RETURNS int
LANGUAGE javascript AS 'key + val';""".format(self.keyspace_name))
self.session.execute("""CREATE AGGREGATE {0}.sum_agg(int)
SFUNC sum_int
STYPE int
INITCOND 0"""
.format(self.keyspace_name))
self.assertEqual(cluster2.metadata.keyspaces[self.keyspace_name].aggregates, {})
cluster2.refresh_user_aggregate_metadata(self.keyspace_name, UserAggregateDescriptor("sum_agg", ["int"]))
self.assertIn("sum_agg(int)", cluster2.metadata.keyspaces[self.keyspace_name].aggregates)
cluster2.shutdown()
@greaterthanorequalcass30
def test_multiple_indices(self):
"""
test multiple indices on the same column.
Creates a table and two indices. Ensures that the metadata for both indices is surfaced appropriately.
@since 3.0.0
@jira_ticket PYTHON-276
@expected_result IndexMetadata is appropriately surfaced
@test_category metadata
"""
self.session.execute("CREATE TABLE {0}.{1} (a int PRIMARY KEY, b map<text, int>)".format(self.keyspace_name, self.function_table_name))
self.session.execute("CREATE INDEX index_1 ON {0}.{1}(b)".format(self.keyspace_name, self.function_table_name))
self.session.execute("CREATE INDEX index_2 ON {0}.{1}(keys(b))".format(self.keyspace_name, self.function_table_name))
indices = self.cluster.metadata.keyspaces[self.keyspace_name].tables[self.function_table_name].indexes
self.assertEqual(len(indices), 2)
index_1 = indices["index_1"]
index_2 = indices['index_2']
self.assertEqual(index_1.table_name, "test_multiple_indices")
self.assertEqual(index_1.name, "index_1")
self.assertEqual(index_1.kind, "COMPOSITES")
self.assertEqual(index_1.index_options["target"], "values(b)")
self.assertEqual(index_1.keyspace_name, "schemametadatatests")
self.assertEqual(index_2.table_name, "test_multiple_indices")
self.assertEqual(index_2.name, "index_2")
self.assertEqual(index_2.kind, "COMPOSITES")
self.assertEqual(index_2.index_options["target"], "keys(b)")
self.assertEqual(index_2.keyspace_name, "schemametadatatests")
@greaterthanorequalcass30
def test_table_extensions(self):
s = self.session
ks = self.keyspace_name
ks_meta = s.cluster.metadata.keyspaces[ks]
t = self.function_table_name
v = t + 'view'
s.execute("CREATE TABLE %s.%s (k text PRIMARY KEY, v int)" % (ks, t))
s.execute("CREATE MATERIALIZED VIEW %s.%s AS SELECT * FROM %s.%s WHERE v IS NOT NULL PRIMARY KEY (v, k)" % (ks, v, ks, t))
table_meta = ks_meta.tables[t]
view_meta = table_meta.views[v]
self.assertFalse(table_meta.extensions)
self.assertFalse(view_meta.extensions)
original_table_cql = table_meta.export_as_string()
original_view_cql = view_meta.export_as_string()
# extensions registered, not present
# --------------------------------------
class Ext0(RegisteredTableExtension):
name = t
@classmethod
def after_table_cql(cls, table_meta, ext_key, ext_blob):
return "%s %s %s %s" % (cls.name, table_meta.name, ext_key, ext_blob)
class Ext1(Ext0):
name = t + '##'
self.assertFalse(table_meta.extensions)
self.assertFalse(view_meta.extensions)
self.assertIn(Ext0.name, _RegisteredExtensionType._extension_registry)
self.assertIn(Ext1.name, _RegisteredExtensionType._extension_registry)
self.assertEqual(len(_RegisteredExtensionType._extension_registry), 2)
self.cluster.refresh_table_metadata(ks, t)
table_meta = ks_meta.tables[t]
view_meta = table_meta.views[v]
self.assertEqual(table_meta.export_as_string(), original_table_cql)
self.assertEqual(view_meta.export_as_string(), original_view_cql)
update_t = s.prepare('UPDATE system_schema.tables SET extensions=? WHERE keyspace_name=? AND table_name=?') # for blob type coercing
update_v = s.prepare('UPDATE system_schema.views SET extensions=? WHERE keyspace_name=? AND view_name=?')
# extensions registered, one present
# --------------------------------------
ext_map = {Ext0.name: six.b("THA VALUE")}
[(s.execute(update_t, (ext_map, ks, t)), s.execute(update_v, (ext_map, ks, v)))
for _ in self.cluster.metadata.all_hosts()] # we're manipulating metadata - do it on all hosts
self.cluster.refresh_table_metadata(ks, t)
self.cluster.refresh_materialized_view_metadata(ks, v)
table_meta = ks_meta.tables[t]
view_meta = table_meta.views[v]
self.assertIn(Ext0.name, table_meta.extensions)
new_cql = table_meta.export_as_string()
self.assertNotEqual(new_cql, original_table_cql)
self.assertIn(Ext0.after_table_cql(table_meta, Ext0.name, ext_map[Ext0.name]), new_cql)
self.assertNotIn(Ext1.name, new_cql)
self.assertIn(Ext0.name, view_meta.extensions)
new_cql = view_meta.export_as_string()
self.assertNotEqual(new_cql, original_view_cql)
self.assertIn(Ext0.after_table_cql(view_meta, Ext0.name, ext_map[Ext0.name]), new_cql)
self.assertNotIn(Ext1.name, new_cql)
# extensions registered, both present
# --------------------------------------
ext_map = {Ext0.name: six.b("THA VALUE"),
Ext1.name: six.b("OTHA VALUE")}
[(s.execute(update_t, (ext_map, ks, t)), s.execute(update_v, (ext_map, ks, v)))
for _ in self.cluster.metadata.all_hosts()] # we're manipulating metadata - do it on all hosts
self.cluster.refresh_table_metadata(ks, t)
self.cluster.refresh_materialized_view_metadata(ks, v)
table_meta = ks_meta.tables[t]
view_meta = table_meta.views[v]
self.assertIn(Ext0.name, table_meta.extensions)
self.assertIn(Ext1.name, table_meta.extensions)
new_cql = table_meta.export_as_string()
self.assertNotEqual(new_cql, original_table_cql)
self.assertIn(Ext0.after_table_cql(table_meta, Ext0.name, ext_map[Ext0.name]), new_cql)
self.assertIn(Ext1.after_table_cql(table_meta, Ext1.name, ext_map[Ext1.name]), new_cql)
self.assertIn(Ext0.name, view_meta.extensions)
self.assertIn(Ext1.name, view_meta.extensions)
new_cql = view_meta.export_as_string()
self.assertNotEqual(new_cql, original_view_cql)
self.assertIn(Ext0.after_table_cql(view_meta, Ext0.name, ext_map[Ext0.name]), new_cql)
self.assertIn(Ext1.after_table_cql(view_meta, Ext1.name, ext_map[Ext1.name]), new_cql)
class TestCodeCoverage(unittest.TestCase):
def test_export_schema(self):
"""
Test export schema functionality
"""
cluster = Cluster(protocol_version=PROTOCOL_VERSION)
cluster.connect()
self.assertIsInstance(cluster.metadata.export_schema_as_string(), six.string_types)
cluster.shutdown()
def test_export_keyspace_schema(self):
"""
Test export keyspace schema functionality
"""
cluster = Cluster(protocol_version=PROTOCOL_VERSION)
cluster.connect()
for keyspace in cluster.metadata.keyspaces:
keyspace_metadata = cluster.metadata.keyspaces[keyspace]
self.assertIsInstance(keyspace_metadata.export_as_string(), six.string_types)
self.assertIsInstance(keyspace_metadata.as_cql_query(), six.string_types)
cluster.shutdown()
def assert_equal_diff(self, received, expected):
if received != expected:
diff_string = '\n'.join(difflib.unified_diff(expected.split('\n'),
received.split('\n'),
'EXPECTED', 'RECEIVED',
lineterm=''))
self.fail(diff_string)
def assert_startswith_diff(self, received, prefix):
if not received.startswith(prefix):
prefix_lines = prefix.split('\n')
diff_string = '\n'.join(difflib.unified_diff(prefix_lines,
received.split('\n')[:len(prefix_lines)],
'EXPECTED', 'RECEIVED',
lineterm=''))
self.fail(diff_string)
@greaterthancass20
def test_export_keyspace_schema_udts(self):
"""
Test udt exports
"""
if PROTOCOL_VERSION < 3:
raise unittest.SkipTest(
"Protocol 3.0+ is required for UDT change events, currently testing against %r"
% (PROTOCOL_VERSION,))
if sys.version_info[0:2] != (2, 7):
raise unittest.SkipTest('This test compares static strings generated from dict items, which may change orders. Test with 2.7.')
cluster = Cluster(protocol_version=PROTOCOL_VERSION)
session = cluster.connect()
session.execute("""
CREATE KEYSPACE export_udts
WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '1'}
AND durable_writes = true;
""")
session.execute("""
CREATE TYPE export_udts.street (
street_number int,
street_name text)
""")
session.execute("""
CREATE TYPE export_udts.zip (
zipcode int,
zip_plus_4 int)
""")
session.execute("""
CREATE TYPE export_udts.address (
street_address frozen<street>,
zip_code frozen<zip>)
""")
session.execute("""
CREATE TABLE export_udts.users (
user text PRIMARY KEY,
addresses map<text, frozen<address>>)
""")
expected_prefix = """CREATE KEYSPACE export_udts WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '1'} AND durable_writes = true;
CREATE TYPE export_udts.street (
street_number int,
street_name text
);
CREATE TYPE export_udts.zip (
zipcode int,
zip_plus_4 int
);
CREATE TYPE export_udts.address (
street_address frozen<street>,
zip_code frozen<zip>
);
CREATE TABLE export_udts.users (
user text PRIMARY KEY,
addresses map<text, frozen<address>>"""
self.assert_startswith_diff(cluster.metadata.keyspaces['export_udts'].export_as_string(), expected_prefix)
table_meta = cluster.metadata.keyspaces['export_udts'].tables['users']
expected_prefix = """CREATE TABLE export_udts.users (
user text PRIMARY KEY,
addresses map<text, frozen<address>>"""
self.assert_startswith_diff(table_meta.export_as_string(), expected_prefix)
cluster.shutdown()
@greaterthancass21
def test_case_sensitivity(self):
"""
Test that names that need to be escaped in CREATE statements are escaped correctly
"""
cluster = Cluster(protocol_version=PROTOCOL_VERSION)
session = cluster.connect()
ksname = 'AnInterestingKeyspace'
cfname = 'AnInterestingTable'
session.execute("DROP KEYSPACE IF EXISTS {0}".format(ksname))
session.execute("""
CREATE KEYSPACE "%s"
WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '1'}
""" % (ksname,))
session.execute("""
CREATE TABLE "%s"."%s" (
k int,
"A" int,
"B" int,
"MyColumn" int,
PRIMARY KEY (k, "A"))
WITH CLUSTERING ORDER BY ("A" DESC)
""" % (ksname, cfname))
session.execute("""
CREATE INDEX myindex ON "%s"."%s" ("MyColumn")
""" % (ksname, cfname))
session.execute("""
CREATE INDEX "AnotherIndex" ON "%s"."%s" ("B")
""" % (ksname, cfname))
ksmeta = cluster.metadata.keyspaces[ksname]
schema = ksmeta.export_as_string()
self.assertIn('CREATE KEYSPACE "AnInterestingKeyspace"', schema)
self.assertIn('CREATE TABLE "AnInterestingKeyspace"."AnInterestingTable"', schema)
self.assertIn('"A" int', schema)
self.assertIn('"B" int', schema)
self.assertIn('"MyColumn" int', schema)
self.assertIn('PRIMARY KEY (k, "A")', schema)
self.assertIn('WITH CLUSTERING ORDER BY ("A" DESC)', schema)
self.assertIn('CREATE INDEX myindex ON "AnInterestingKeyspace"."AnInterestingTable" ("MyColumn")', schema)
self.assertIn('CREATE INDEX "AnotherIndex" ON "AnInterestingKeyspace"."AnInterestingTable" ("B")', schema)
cluster.shutdown()
def test_already_exists_exceptions(self):
"""
Ensure AlreadyExists exception is thrown when hit
"""
cluster = Cluster(protocol_version=PROTOCOL_VERSION)
session = cluster.connect()
ksname = 'test3rf'
cfname = 'test'
ddl = '''
CREATE KEYSPACE %s
WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '3'}'''
self.assertRaises(AlreadyExists, session.execute, ddl % ksname)
ddl = '''
CREATE TABLE %s.%s (
k int PRIMARY KEY,
v int )'''
self.assertRaises(AlreadyExists, session.execute, ddl % (ksname, cfname))
cluster.shutdown()
@local
def test_replicas(self):
"""
Ensure cluster.metadata.get_replicas behaves correctly when not attached to a keyspace
"""
if murmur3 is None:
raise unittest.SkipTest('the murmur3 extension is not available')
cluster = Cluster(protocol_version=PROTOCOL_VERSION)
self.assertEqual(cluster.metadata.get_replicas('test3rf', 'key'), [])
cluster.connect('test3rf')
self.assertNotEqual(list(cluster.metadata.get_replicas('test3rf', six.b('key'))), [])
host = list(cluster.metadata.get_replicas('test3rf', six.b('key')))[0]
self.assertEqual(host.datacenter, 'dc1')
self.assertEqual(host.rack, 'r1')
cluster.shutdown()
def test_token_map(self):
"""
Test token mappings
"""
cluster = Cluster(protocol_version=PROTOCOL_VERSION)
cluster.connect('test3rf')
ring = cluster.metadata.token_map.ring
owners = list(cluster.metadata.token_map.token_to_host_owner[token] for token in ring)
get_replicas = cluster.metadata.token_map.get_replicas
for ksname in ('test1rf', 'test2rf', 'test3rf'):
self.assertNotEqual(list(get_replicas(ksname, ring[0])), [])
for i, token in enumerate(ring):
self.assertEqual(set(get_replicas('test3rf', token)), set(owners))
self.assertEqual(set(get_replicas('test2rf', token)), set([owners[i], owners[(i + 1) % 3]]))
self.assertEqual(set(get_replicas('test1rf', token)), set([owners[i]]))
cluster.shutdown()
class TokenMetadataTest(unittest.TestCase):
"""
Test of TokenMap creation and other behavior.
"""
@local
def test_token(self):
expected_node_count = len(get_cluster().nodes)
cluster = Cluster(protocol_version=PROTOCOL_VERSION)
cluster.connect()
tmap = cluster.metadata.token_map
self.assertTrue(issubclass(tmap.token_class, Token))
self.assertEqual(expected_node_count, len(tmap.ring))
cluster.shutdown()
class KeyspaceAlterMetadata(unittest.TestCase):
"""
Test verifies that table metadata is preserved on keyspace alter
"""
def setUp(self):
self.cluster = Cluster(protocol_version=PROTOCOL_VERSION)
self.session = self.cluster.connect()
name = self._testMethodName.lower()
crt_ks = '''
CREATE KEYSPACE %s WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1} AND durable_writes = true''' % name
self.session.execute(crt_ks)
def tearDown(self):
name = self._testMethodName.lower()
self.session.execute('DROP KEYSPACE %s' % name)
self.cluster.shutdown()
def test_keyspace_alter(self):
"""
Table info is preserved upon keyspace alter:
Create table
Verify schema
Alter ks
Verify that table metadata is still present
PYTHON-173
"""
name = self._testMethodName.lower()
self.session.execute('CREATE TABLE %s.d (d INT PRIMARY KEY)' % name)
original_keyspace_meta = self.cluster.metadata.keyspaces[name]
self.assertEqual(original_keyspace_meta.durable_writes, True)
self.assertEqual(len(original_keyspace_meta.tables), 1)
self.session.execute('ALTER KEYSPACE %s WITH durable_writes = false' % name)
new_keyspace_meta = self.cluster.metadata.keyspaces[name]
self.assertNotEqual(original_keyspace_meta, new_keyspace_meta)
self.assertEqual(new_keyspace_meta.durable_writes, False)
class IndexMapTests(unittest.TestCase):
keyspace_name = 'index_map_tests'
@property
def table_name(self):
return self._testMethodName.lower()
@classmethod
def setup_class(cls):
cls.cluster = Cluster(protocol_version=PROTOCOL_VERSION)
cls.session = cls.cluster.connect()
try:
if cls.keyspace_name in cls.cluster.metadata.keyspaces:
cls.session.execute("DROP KEYSPACE %s" % cls.keyspace_name)
cls.session.execute(
"""
CREATE KEYSPACE %s
WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '1'};
""" % cls.keyspace_name)
cls.session.set_keyspace(cls.keyspace_name)
except Exception:
cls.cluster.shutdown()
raise
@classmethod
def teardown_class(cls):
try:
cls.session.execute("DROP KEYSPACE %s" % cls.keyspace_name)
finally:
cls.cluster.shutdown()
def create_basic_table(self):
self.session.execute("CREATE TABLE %s (k int PRIMARY KEY, a int)" % self.table_name)
def drop_basic_table(self):
self.session.execute("DROP TABLE %s" % self.table_name)
def test_index_updates(self):
self.create_basic_table()
ks_meta = self.cluster.metadata.keyspaces[self.keyspace_name]
table_meta = ks_meta.tables[self.table_name]
self.assertNotIn('a_idx', ks_meta.indexes)
self.assertNotIn('b_idx', ks_meta.indexes)
self.assertNotIn('a_idx', table_meta.indexes)
self.assertNotIn('b_idx', table_meta.indexes)
self.session.execute("CREATE INDEX a_idx ON %s (a)" % self.table_name)
self.session.execute("ALTER TABLE %s ADD b int" % self.table_name)
self.session.execute("CREATE INDEX b_idx ON %s (b)" % self.table_name)
ks_meta = self.cluster.metadata.keyspaces[self.keyspace_name]
table_meta = ks_meta.tables[self.table_name]
self.assertIsInstance(ks_meta.indexes['a_idx'], IndexMetadata)
self.assertIsInstance(ks_meta.indexes['b_idx'], IndexMetadata)
self.assertIsInstance(table_meta.indexes['a_idx'], IndexMetadata)
self.assertIsInstance(table_meta.indexes['b_idx'], IndexMetadata)
# both indexes updated when index dropped
self.session.execute("DROP INDEX a_idx")
# temporarily synchronously refresh the schema metadata, until CASSANDRA-9391 is merged in
self.cluster.refresh_table_metadata(self.keyspace_name, self.table_name)
ks_meta = self.cluster.metadata.keyspaces[self.keyspace_name]
table_meta = ks_meta.tables[self.table_name]
self.assertNotIn('a_idx', ks_meta.indexes)
self.assertIsInstance(ks_meta.indexes['b_idx'], IndexMetadata)
self.assertNotIn('a_idx', table_meta.indexes)
self.assertIsInstance(table_meta.indexes['b_idx'], IndexMetadata)
# keyspace index updated when table dropped
self.drop_basic_table()
ks_meta = self.cluster.metadata.keyspaces[self.keyspace_name]
self.assertNotIn(self.table_name, ks_meta.tables)
self.assertNotIn('a_idx', ks_meta.indexes)
self.assertNotIn('b_idx', ks_meta.indexes)
def test_index_follows_alter(self):
self.create_basic_table()
idx = self.table_name + '_idx'
self.session.execute("CREATE INDEX %s ON %s (a)" % (idx, self.table_name))
ks_meta = self.cluster.metadata.keyspaces[self.keyspace_name]
table_meta = ks_meta.tables[self.table_name]
self.assertIsInstance(ks_meta.indexes[idx], IndexMetadata)
self.assertIsInstance(table_meta.indexes[idx], IndexMetadata)
self.session.execute('ALTER KEYSPACE %s WITH durable_writes = false' % self.keyspace_name)
old_meta = ks_meta
ks_meta = self.cluster.metadata.keyspaces[self.keyspace_name]
self.assertIsNot(ks_meta, old_meta)
table_meta = ks_meta.tables[self.table_name]
self.assertIsInstance(ks_meta.indexes[idx], IndexMetadata)
self.assertIsInstance(table_meta.indexes[idx], IndexMetadata)
self.drop_basic_table()
class FunctionTest(unittest.TestCase):
"""
Base functionality for Function and Aggregate metadata test classes
"""
def setUp(self):
"""
Tests are skipped if run with native protocol version < 4
"""
if PROTOCOL_VERSION < 4:
raise unittest.SkipTest("Function metadata requires native protocol version 4+")
@property
def function_name(self):
return self._testMethodName.lower()
@classmethod
def setup_class(cls):
if PROTOCOL_VERSION >= 4:
cls.cluster = Cluster(protocol_version=PROTOCOL_VERSION)
cls.keyspace_name = cls.__name__.lower()
cls.session = cls.cluster.connect()
cls.session.execute("CREATE KEYSPACE IF NOT EXISTS %s WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}" % cls.keyspace_name)
cls.session.set_keyspace(cls.keyspace_name)
cls.keyspace_function_meta = cls.cluster.metadata.keyspaces[cls.keyspace_name].functions
cls.keyspace_aggregate_meta = cls.cluster.metadata.keyspaces[cls.keyspace_name].aggregates
@classmethod
def teardown_class(cls):
if PROTOCOL_VERSION >= 4:
cls.session.execute("DROP KEYSPACE IF EXISTS %s" % cls.keyspace_name)
cls.cluster.shutdown()
class Verified(object):
def __init__(self, test_case, meta_class, element_meta, **function_kwargs):
self.test_case = test_case
self.function_kwargs = dict(function_kwargs)
self.meta_class = meta_class
self.element_meta = element_meta
def __enter__(self):
tc = self.test_case
expected_meta = self.meta_class(**self.function_kwargs)
tc.assertNotIn(expected_meta.signature, self.element_meta)
tc.session.execute(expected_meta.as_cql_query())
tc.assertIn(expected_meta.signature, self.element_meta)
generated_meta = self.element_meta[expected_meta.signature]
self.test_case.assertEqual(generated_meta.as_cql_query(), expected_meta.as_cql_query())
return self
def __exit__(self, exc_type, exc_val, exc_tb):
tc = self.test_case
tc.session.execute("DROP %s %s.%s" % (self.meta_class.__name__, tc.keyspace_name, self.signature))
tc.assertNotIn(self.signature, self.element_meta)
@property
def signature(self):
return SignatureDescriptor.format_signature(self.function_kwargs['name'],
self.function_kwargs['argument_types'])
class VerifiedFunction(Verified):
def __init__(self, test_case, **kwargs):
super(FunctionTest.VerifiedFunction, self).__init__(test_case, Function, test_case.keyspace_function_meta, **kwargs)
class VerifiedAggregate(Verified):
def __init__(self, test_case, **kwargs):
super(FunctionTest.VerifiedAggregate, self).__init__(test_case, Aggregate, test_case.keyspace_aggregate_meta, **kwargs)
class FunctionMetadata(FunctionTest):
def make_function_kwargs(self, called_on_null=True):
return {'keyspace': self.keyspace_name,
'name': self.function_name,
'argument_types': ['double', 'int'],
'argument_names': ['d', 'i'],
'return_type': 'double',
'language': 'java',
'body': 'return new Double(0.0);',
'called_on_null_input': called_on_null}
def test_functions_after_udt(self):
"""
Test to ensure functions come after UDTs in the keyspace dump
test_functions_after_udt creates a basic function, then dumps the keyspace and makes sure
that UDTs are listed before any corresponding functions in the output
Ideally we would make a function that takes a udt type, but this presently fails because C* c059a56 requires
udt to be frozen to create, but does not store meta indicating frozen
SEE https://issues.apache.org/jira/browse/CASSANDRA-9186
Maybe update this after release. The ideal version would look roughly like:
    kwargs = self.make_function_kwargs()
    kwargs['argument_types'][0] = "frozen<%s>" % udt_name
    expected_meta = Function(**kwargs)
    with self.VerifiedFunction(self, **kwargs):
        ...
@since 2.6.0
@jira_ticket PYTHON-211
@expected_result UDT's should come before any functions
@test_category function
"""
self.assertNotIn(self.function_name, self.keyspace_function_meta)
udt_name = 'udtx'
self.session.execute("CREATE TYPE %s (x int)" % udt_name)
with self.VerifiedFunction(self, **self.make_function_kwargs()):
# udts must come before functions in keyspace dump
keyspace_cql = self.cluster.metadata.keyspaces[self.keyspace_name].export_as_string()
type_idx = keyspace_cql.rfind("CREATE TYPE")
func_idx = keyspace_cql.find("CREATE FUNCTION")
self.assertNotIn(-1, (type_idx, func_idx), "TYPE or FUNCTION not found in keyspace_cql: " + keyspace_cql)
self.assertGreater(func_idx, type_idx)
def test_function_same_name_diff_types(self):
"""
Test to verify that functions with different signatures are differentiated in metadata
test_function_same_name_diff_types creates two functions with the same name but slightly different
signatures, then ensures that both are surfaced separately in our metadata.
@since 2.6.0
@jira_ticket PYTHON-211
@expected_result function with the same name but different signatures should be surfaced separately
@test_category function
"""
# Create a function
kwargs = self.make_function_kwargs()
with self.VerifiedFunction(self, **kwargs):
# another function: same name, different type sig.
self.assertGreater(len(kwargs['argument_types']), 1)
self.assertGreater(len(kwargs['argument_names']), 1)
kwargs['argument_types'] = kwargs['argument_types'][:1]
kwargs['argument_names'] = kwargs['argument_names'][:1]
# Ensure they are surfaced separately
with self.VerifiedFunction(self, **kwargs):
functions = [f for f in self.keyspace_function_meta.values() if f.name == self.function_name]
self.assertEqual(len(functions), 2)
self.assertNotEqual(functions[0].argument_types, functions[1].argument_types)
def test_function_no_parameters(self):
"""
Test to verify CQL output for functions with zero parameters
Creates a function with no input parameters, verify that CQL output is correct.
@since 2.7.1
@jira_ticket PYTHON-392
@expected_result function with no parameters should generate proper CQL
@test_category function
"""
kwargs = self.make_function_kwargs()
kwargs['argument_types'] = []
kwargs['argument_names'] = []
kwargs['return_type'] = 'bigint'
kwargs['body'] = 'return System.currentTimeMillis() / 1000L;'
with self.VerifiedFunction(self, **kwargs) as vf:
fn_meta = self.keyspace_function_meta[vf.signature]
self.assertRegexpMatches(fn_meta.as_cql_query(), "CREATE FUNCTION.*%s\(\) .*" % kwargs['name'])
def test_functions_follow_keyspace_alter(self):
"""
Test to verify that functions maintain equality after a keyspace is altered
test_functions_follow_keyspace_alter creates a function, then alters the keyspace associated with that function.
After the alter we validate that the function maintains the same metadata
@since 2.6.0
@jira_ticket PYTHON-211
@expected_result functions are the same after parent keyspace is altered
@test_category function
"""
# Create function
with self.VerifiedFunction(self, **self.make_function_kwargs()):
original_keyspace_meta = self.cluster.metadata.keyspaces[self.keyspace_name]
self.session.execute('ALTER KEYSPACE %s WITH durable_writes = false' % self.keyspace_name)
# After keyspace alter ensure that we maintain function equality.
try:
new_keyspace_meta = self.cluster.metadata.keyspaces[self.keyspace_name]
self.assertNotEqual(original_keyspace_meta, new_keyspace_meta)
self.assertIs(original_keyspace_meta.functions, new_keyspace_meta.functions)
finally:
self.session.execute('ALTER KEYSPACE %s WITH durable_writes = true' % self.keyspace_name)
def test_function_cql_called_on_null(self):
"""
Test to verify that the called-on-null argument is honored on function creation.
test_function_cql_called_on_null creates two functions: one with called_on_null_input set to true,
the other with it set to false. We then verify that the metadata constructed from those functions
correctly reflects the flag.
@since 2.6.0
@jira_ticket PYTHON-211
@expected_result functions metadata correctly reflects called_on_null_input flag.
@test_category function
"""
kwargs = self.make_function_kwargs()
kwargs['called_on_null_input'] = True
with self.VerifiedFunction(self, **kwargs) as vf:
fn_meta = self.keyspace_function_meta[vf.signature]
self.assertRegexpMatches(fn_meta.as_cql_query(), "CREATE FUNCTION.*\) CALLED ON NULL INPUT RETURNS .*")
kwargs['called_on_null_input'] = False
with self.VerifiedFunction(self, **kwargs) as vf:
fn_meta = self.keyspace_function_meta[vf.signature]
self.assertRegexpMatches(fn_meta.as_cql_query(), "CREATE FUNCTION.*\) RETURNS NULL ON NULL INPUT RETURNS .*")
class AggregateMetadata(FunctionTest):
@classmethod
def setup_class(cls):
if PROTOCOL_VERSION >= 4:
super(AggregateMetadata, cls).setup_class()
cls.session.execute("""CREATE OR REPLACE FUNCTION sum_int(s int, i int)
RETURNS NULL ON NULL INPUT
RETURNS int
LANGUAGE javascript AS 's + i';""")
cls.session.execute("""CREATE OR REPLACE FUNCTION sum_int_two(s int, i int, j int)
RETURNS NULL ON NULL INPUT
RETURNS int
LANGUAGE javascript AS 's + i + j';""")
cls.session.execute("""CREATE OR REPLACE FUNCTION "List_As_String"(l list<text>)
RETURNS NULL ON NULL INPUT
RETURNS int
LANGUAGE javascript AS ''''' + l';""")
cls.session.execute("""CREATE OR REPLACE FUNCTION extend_list(s list<text>, i int)
CALLED ON NULL INPUT
RETURNS list<text>
LANGUAGE java AS 'if (i != null) s.add(i.toString()); return s;';""")
cls.session.execute("""CREATE OR REPLACE FUNCTION update_map(s map<int, int>, i int)
RETURNS NULL ON NULL INPUT
RETURNS map<int, int>
LANGUAGE java AS 's.put(new Integer(i), new Integer(i)); return s;';""")
cls.session.execute("""CREATE TABLE IF NOT EXISTS t
(k int PRIMARY KEY, v int)""")
for x in range(4):
cls.session.execute("INSERT INTO t (k,v) VALUES (%s, %s)", (x, x))
cls.session.execute("INSERT INTO t (k) VALUES (%s)", (4,))
def make_aggregate_kwargs(self, state_func, state_type, final_func=None, init_cond=None):
return {'keyspace': self.keyspace_name,
'name': self.function_name + '_aggregate',
'argument_types': ['int'],
'state_func': state_func,
'state_type': state_type,
'final_func': final_func,
'initial_condition': init_cond,
'return_type': "does not matter for creation"}
def test_return_type_meta(self):
"""
Test to verify that the return type of an aggregate is honored in the metadata
test_return_type_meta creates an aggregate then ensures the return type of the created
aggregate is correctly surfaced in the metadata
@since 2.6.0
@jira_ticket PYTHON-211
@expected_result aggregate has the correct return type in the metadata
@test_category aggregate
"""
with self.VerifiedAggregate(self, **self.make_aggregate_kwargs('sum_int', 'int', init_cond='1')) as va:
self.assertEqual(self.keyspace_aggregate_meta[va.signature].return_type, 'int')
def test_init_cond(self):
"""
Test to verify that various initial conditions are correctly surfaced in various aggregate functions
test_init_cond creates several different types of aggregates, and given various initial conditions it verifies that
they correctly impact the aggregate's execution
@since 2.6.0
@jira_ticket PYTHON-211
@expected_result initial conditions are correctly evaluated as part of the aggregates
@test_category aggregate
"""
# This is required until the java driver bundled with C* is updated to support v4
c = Cluster(protocol_version=3)
s = c.connect(self.keyspace_name)
encoder = Encoder()
expected_values = range(4)
# int32
for init_cond in (-1, 0, 1):
cql_init = encoder.cql_encode_all_types(init_cond)
with self.VerifiedAggregate(self, **self.make_aggregate_kwargs('sum_int', 'int', init_cond=cql_init)) as va:
sum_res = s.execute("SELECT %s(v) AS sum FROM t" % va.function_kwargs['name'])[0].sum
self.assertEqual(sum_res, int(init_cond) + sum(expected_values))
# list<text>
for init_cond in ([], ['1', '2']):
cql_init = encoder.cql_encode_all_types(init_cond)
with self.VerifiedAggregate(self, **self.make_aggregate_kwargs('extend_list', 'list<text>', init_cond=cql_init)) as va:
list_res = s.execute("SELECT %s(v) AS list_res FROM t" % va.function_kwargs['name'])[0].list_res
self.assertListEqual(list_res[:len(init_cond)], init_cond)
self.assertEqual(set(i for i in list_res[len(init_cond):]),
set(str(i) for i in expected_values))
# map<int,int>
expected_map_values = dict((i, i) for i in expected_values)
expected_key_set = set(expected_values)
for init_cond in ({}, {1: 2, 3: 4}, {5: 5}):
cql_init = encoder.cql_encode_all_types(init_cond)
with self.VerifiedAggregate(self, **self.make_aggregate_kwargs('update_map', 'map<int, int>', init_cond=cql_init)) as va:
map_res = s.execute("SELECT %s(v) AS map_res FROM t" % va.function_kwargs['name'])[0].map_res
self.assertDictContainsSubset(expected_map_values, map_res)
init_not_updated = dict((k, init_cond[k]) for k in set(init_cond) - expected_key_set)
self.assertDictContainsSubset(init_not_updated, map_res)
c.shutdown()
def test_aggregates_after_functions(self):
"""
Test to verify that aggregates are listed after functions in metadata
test_aggregates_after_functions creates an aggregate, and then verifies that it is listed
after any function creations when the keyspace dump is performed
@since 2.6.0
@jira_ticket PYTHON-211
@expected_result aggregates are declared after any functions
@test_category aggregate
"""
# functions must come before aggregates in the keyspace dump
with self.VerifiedAggregate(self, **self.make_aggregate_kwargs('extend_list', 'list<text>')):
keyspace_cql = self.cluster.metadata.keyspaces[self.keyspace_name].export_as_string()
func_idx = keyspace_cql.find("CREATE FUNCTION")
aggregate_idx = keyspace_cql.rfind("CREATE AGGREGATE")
self.assertNotIn(-1, (aggregate_idx, func_idx), "AGGREGATE or FUNCTION not found in keyspace_cql: " + keyspace_cql)
self.assertGreater(aggregate_idx, func_idx)
def test_same_name_diff_types(self):
"""
Test to verify that aggregates with different signatures are differentiated in metadata
test_same_name_diff_types creates two aggregates with the same name but slightly different
signatures, then ensures that both are surfaced separately in our metadata.
@since 2.6.0
@jira_ticket PYTHON-211
@expected_result aggregates with the same name but different signatures should be surfaced separately
@test_category function
"""
kwargs = self.make_aggregate_kwargs('sum_int', 'int', init_cond='0')
with self.VerifiedAggregate(self, **kwargs):
kwargs['state_func'] = 'sum_int_two'
kwargs['argument_types'] = ['int', 'int']
with self.VerifiedAggregate(self, **kwargs):
aggregates = [a for a in self.keyspace_aggregate_meta.values() if a.name == kwargs['name']]
self.assertEqual(len(aggregates), 2)
self.assertNotEqual(aggregates[0].argument_types, aggregates[1].argument_types)
def test_aggregates_follow_keyspace_alter(self):
"""
Test to verify that aggregates maintain equality after a keyspace is altered
test_aggregates_follow_keyspace_alter creates an aggregate, then alters the keyspace associated with that
aggregate. After the alter we validate that the aggregate maintains the same metadata
@since 2.6.0
@jira_ticket PYTHON-211
@expected_result aggregates are the same after parent keyspace is altered
@test_category function
"""
with self.VerifiedAggregate(self, **self.make_aggregate_kwargs('sum_int', 'int', init_cond='0')):
original_keyspace_meta = self.cluster.metadata.keyspaces[self.keyspace_name]
self.session.execute('ALTER KEYSPACE %s WITH durable_writes = false' % self.keyspace_name)
try:
new_keyspace_meta = self.cluster.metadata.keyspaces[self.keyspace_name]
self.assertNotEqual(original_keyspace_meta, new_keyspace_meta)
self.assertIs(original_keyspace_meta.aggregates, new_keyspace_meta.aggregates)
finally:
self.session.execute('ALTER KEYSPACE %s WITH durable_writes = true' % self.keyspace_name)
def test_cql_optional_params(self):
"""
Test to verify that the initial_cond and final_func parameters are correctly honored
test_cql_optional_params creates various aggregates with different combinations of initial_condition,
and final_func parameters set. It then ensures they are correctly honored.
@since 2.6.0
@jira_ticket PYTHON-211
@expected_result initial_condition and final_func parameters are honored correctly
@test_category function
"""
kwargs = self.make_aggregate_kwargs('extend_list', 'list<text>')
encoder = Encoder()
# no initial condition, no final func
self.assertIsNone(kwargs['initial_condition'])
self.assertIsNone(kwargs['final_func'])
with self.VerifiedAggregate(self, **kwargs) as va:
meta = self.keyspace_aggregate_meta[va.signature]
self.assertIsNone(meta.initial_condition)
self.assertIsNone(meta.final_func)
cql = meta.as_cql_query()
self.assertEqual(cql.find('INITCOND'), -1)
self.assertEqual(cql.find('FINALFUNC'), -1)
# initial condition, no final func
kwargs['initial_condition'] = encoder.cql_encode_all_types(['init', 'cond'])
with self.VerifiedAggregate(self, **kwargs) as va:
meta = self.keyspace_aggregate_meta[va.signature]
self.assertEqual(meta.initial_condition, kwargs['initial_condition'])
self.assertIsNone(meta.final_func)
cql = meta.as_cql_query()
search_string = "INITCOND %s" % kwargs['initial_condition']
self.assertGreater(cql.find(search_string), 0, '"%s" search string not found in cql:\n%s' % (search_string, cql))
self.assertEqual(cql.find('FINALFUNC'), -1)
# no initial condition, final func
kwargs['initial_condition'] = None
kwargs['final_func'] = 'List_As_String'
with self.VerifiedAggregate(self, **kwargs) as va:
meta = self.keyspace_aggregate_meta[va.signature]
self.assertIsNone(meta.initial_condition)
self.assertEqual(meta.final_func, kwargs['final_func'])
cql = meta.as_cql_query()
self.assertEqual(cql.find('INITCOND'), -1)
search_string = 'FINALFUNC "%s"' % kwargs['final_func']
self.assertGreater(cql.find(search_string), 0, '"%s" search string not found in cql:\n%s' % (search_string, cql))
# both
kwargs['initial_condition'] = encoder.cql_encode_all_types(['init', 'cond'])
kwargs['final_func'] = 'List_As_String'
with self.VerifiedAggregate(self, **kwargs) as va:
meta = self.keyspace_aggregate_meta[va.signature]
self.assertEqual(meta.initial_condition, kwargs['initial_condition'])
self.assertEqual(meta.final_func, kwargs['final_func'])
cql = meta.as_cql_query()
init_cond_idx = cql.find("INITCOND %s" % kwargs['initial_condition'])
final_func_idx = cql.find('FINALFUNC "%s"' % kwargs['final_func'])
self.assertNotIn(-1, (init_cond_idx, final_func_idx))
self.assertGreater(init_cond_idx, final_func_idx)
class BadMetaTest(unittest.TestCase):
"""
Test behavior when metadata has unexpected form
Verify that new cluster/session can still connect, and the CQL output indicates the exception with a warning.
PYTHON-370
"""
class BadMetaException(Exception):
pass
@property
def function_name(self):
return self._testMethodName.lower()
@classmethod
def setup_class(cls):
cls.cluster = Cluster(protocol_version=PROTOCOL_VERSION)
cls.keyspace_name = cls.__name__.lower()
cls.session = cls.cluster.connect()
cls.session.execute("CREATE KEYSPACE %s WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}" % cls.keyspace_name)
cls.session.set_keyspace(cls.keyspace_name)
connection = cls.cluster.control_connection._connection
cls.parser_class = get_schema_parser(connection, CASSANDRA_VERSION.base_version, timeout=20).__class__
cls.cluster.control_connection.reconnect = Mock()
@classmethod
def teardown_class(cls):
drop_keyspace_shutdown_cluster(cls.keyspace_name, cls.session, cls.cluster)
def test_bad_keyspace(self):
with patch.object(self.parser_class, '_build_keyspace_metadata_internal', side_effect=self.BadMetaException):
self.cluster.refresh_keyspace_metadata(self.keyspace_name)
m = self.cluster.metadata.keyspaces[self.keyspace_name]
self.assertIs(m._exc_info[0], self.BadMetaException)
self.assertIn("/*\nWarning:", m.export_as_string())
def test_bad_table(self):
self.session.execute('CREATE TABLE %s (k int PRIMARY KEY, v int)' % self.function_name)
with patch.object(self.parser_class, '_build_column_metadata', side_effect=self.BadMetaException):
self.cluster.refresh_table_metadata(self.keyspace_name, self.function_name)
m = self.cluster.metadata.keyspaces[self.keyspace_name].tables[self.function_name]
self.assertIs(m._exc_info[0], self.BadMetaException)
self.assertIn("/*\nWarning:", m.export_as_string())
def test_bad_index(self):
self.session.execute('CREATE TABLE %s (k int PRIMARY KEY, v int)' % self.function_name)
self.session.execute('CREATE INDEX ON %s(v)' % self.function_name)
with patch.object(self.parser_class, '_build_index_metadata', side_effect=self.BadMetaException):
self.cluster.refresh_table_metadata(self.keyspace_name, self.function_name)
m = self.cluster.metadata.keyspaces[self.keyspace_name].tables[self.function_name]
self.assertIs(m._exc_info[0], self.BadMetaException)
self.assertIn("/*\nWarning:", m.export_as_string())
@greaterthancass20
def test_bad_user_type(self):
self.session.execute('CREATE TYPE %s (i int, d double)' % self.function_name)
with patch.object(self.parser_class, '_build_user_type', side_effect=self.BadMetaException):
self.cluster.refresh_schema_metadata() # presently do not capture these errors on udt direct refresh -- make sure it's contained during full refresh
m = self.cluster.metadata.keyspaces[self.keyspace_name]
self.assertIs(m._exc_info[0], self.BadMetaException)
self.assertIn("/*\nWarning:", m.export_as_string())
@greaterthancass21
def test_bad_user_function(self):
self.session.execute("""CREATE FUNCTION IF NOT EXISTS %s (key int, val int)
RETURNS NULL ON NULL INPUT
RETURNS int
LANGUAGE javascript AS 'key + val';""" % self.function_name)
with patch.object(self.parser_class, '_build_function', side_effect=self.BadMetaException):
self.cluster.refresh_schema_metadata() # presently do not capture these errors on udt direct refresh -- make sure it's contained during full refresh
m = self.cluster.metadata.keyspaces[self.keyspace_name]
self.assertIs(m._exc_info[0], self.BadMetaException)
self.assertIn("/*\nWarning:", m.export_as_string())
@greaterthancass21
def test_bad_user_aggregate(self):
self.session.execute("""CREATE FUNCTION IF NOT EXISTS sum_int (key int, val int)
RETURNS NULL ON NULL INPUT
RETURNS int
LANGUAGE javascript AS 'key + val';""")
self.session.execute("""CREATE AGGREGATE %s(int)
SFUNC sum_int
STYPE int
INITCOND 0""" % self.function_name)
with patch.object(self.parser_class, '_build_aggregate', side_effect=self.BadMetaException):
self.cluster.refresh_schema_metadata() # presently do not capture these errors on udt direct refresh -- make sure it's contained during full refresh
m = self.cluster.metadata.keyspaces[self.keyspace_name]
self.assertIs(m._exc_info[0], self.BadMetaException)
self.assertIn("/*\nWarning:", m.export_as_string())
class DynamicCompositeTypeTest(BasicSharedKeyspaceUnitTestCase):
def test_dct_alias(self):
"""
Tests to make sure DCTs have correct string formatting
Constructs a DCT and checks the generated format to ensure it matches what is expected
@since 3.6.0
@jira_ticket PYTHON-579
@expected_result DCT subtypes should always have fully qualified names
@test_category metadata
"""
self.session.execute("CREATE TABLE {0}.{1} ("
"k int PRIMARY KEY,"
"c1 'DynamicCompositeType(s => UTF8Type, i => Int32Type)',"
"c2 Text)".format(self.ks_name, self.function_table_name))
dct_table = self.cluster.metadata.keyspaces.get(self.ks_name).tables.get(self.function_table_name)
# Format can vary slightly between versions; strip out whitespace for consistency's sake
self.assertTrue("c1'org.apache.cassandra.db.marshal.DynamicCompositeType(s=>org.apache.cassandra.db.marshal.UTF8Type,i=>org.apache.cassandra.db.marshal.Int32Type)'" in dct_table.as_cql_query().replace(" ", ""))
@greaterthanorequalcass30
class MaterializedViewMetadataTestSimple(BasicSharedKeyspaceUnitTestCase):
def setUp(self):
self.session.execute("CREATE TABLE {0}.{1} (pk int PRIMARY KEY, c int)".format(self.keyspace_name, self.function_table_name))
self.session.execute("CREATE MATERIALIZED VIEW {0}.mv1 AS SELECT c FROM {0}.{1} WHERE c IS NOT NULL PRIMARY KEY (pk, c)".format(self.keyspace_name, self.function_table_name))
def tearDown(self):
self.session.execute("DROP MATERIALIZED VIEW {0}.mv1".format(self.keyspace_name))
self.session.execute("DROP TABLE {0}.{1}".format(self.keyspace_name, self.function_table_name))
def test_materialized_view_metadata_creation(self):
"""
test for materialized view metadata creation
test_materialized_view_metadata_creation tests that materialized view metadata is properly created implicitly in
both keyspace and table metadata under "views". It creates a simple base table and then creates a view based
on that table. It then checks that the materialized view metadata is contained in the keyspace and table
metadata. Finally, it checks that the keyspace_name and the base_table_name in the view metadata are properly set.
@since 3.0.0
@jira_ticket PYTHON-371
@expected_result Materialized view metadata in both the ks and table should be created when a new view is created.
@test_category metadata
"""
self.assertIn("mv1", self.cluster.metadata.keyspaces[self.keyspace_name].views)
self.assertIn("mv1", self.cluster.metadata.keyspaces[self.keyspace_name].tables[self.function_table_name].views)
self.assertEqual(self.keyspace_name, self.cluster.metadata.keyspaces[self.keyspace_name].tables[self.function_table_name].views["mv1"].keyspace_name)
self.assertEqual(self.function_table_name, self.cluster.metadata.keyspaces[self.keyspace_name].tables[self.function_table_name].views["mv1"].base_table_name)
def test_materialized_view_metadata_alter(self):
"""
test for materialized view metadata alteration
test_materialized_view_metadata_alter tests that materialized view metadata is properly updated implicitly in the
table metadata once that view is updated. It creates a simple base table and then creates a view based
on that table. It then alters that materialized view and checks that the materialized view metadata is altered in
the table metadata.
@since 3.0.0
@jira_ticket PYTHON-371
@expected_result Materialized view metadata should be updated when the view is altered.
@test_category metadata
"""
self.assertIn("SizeTieredCompactionStrategy", self.cluster.metadata.keyspaces[self.keyspace_name].tables[self.function_table_name].views["mv1"].options["compaction"]["class"] )
self.session.execute("ALTER MATERIALIZED VIEW {0}.mv1 WITH compaction = {{ 'class' : 'LeveledCompactionStrategy' }}".format(self.keyspace_name))
self.assertIn("LeveledCompactionStrategy", self.cluster.metadata.keyspaces[self.keyspace_name].tables[self.function_table_name].views["mv1"].options["compaction"]["class"])
def test_materialized_view_metadata_drop(self):
"""
test for materialized view metadata dropping
test_materialized_view_metadata_drop tests that materialized view metadata is properly removed implicitly in
both keyspace and table metadata once that view is dropped. It creates a simple base table and then creates a view
based on that table. It then drops that materialized view and checks that the materialized view metadata is removed
from the keyspace and table metadata.
@since 3.0.0
@jira_ticket PYTHON-371
@expected_result Materialized view metadata in both the ks and table should be removed when the view is dropped.
@test_category metadata
"""
self.session.execute("DROP MATERIALIZED VIEW {0}.mv1".format(self.keyspace_name))
self.assertNotIn("mv1", self.cluster.metadata.keyspaces[self.keyspace_name].tables[self.function_table_name].views)
self.assertNotIn("mv1", self.cluster.metadata.keyspaces[self.keyspace_name].views)
self.assertDictEqual({}, self.cluster.metadata.keyspaces[self.keyspace_name].tables[self.function_table_name].views)
self.assertDictEqual({}, self.cluster.metadata.keyspaces[self.keyspace_name].views)
self.session.execute("CREATE MATERIALIZED VIEW {0}.mv1 AS SELECT c FROM {0}.{1} WHERE c IS NOT NULL PRIMARY KEY (pk, c)".format(self.keyspace_name, self.function_table_name))
@greaterthanorequalcass30
class MaterializedViewMetadataTestComplex(BasicSegregatedKeyspaceUnitTestCase):
def test_create_view_metadata(self):
"""
test to ensure that materialized view metadata is properly constructed
test_create_view_metadata tests that materialized views metadata is properly constructed. It runs a simple
query to construct a materialized view, then proceeds to inspect the metadata associated with that MV.
Columns are inspected to ensure that all are of the proper type and in the proper order.
@since 3.0.0
@jira_ticket PYTHON-371
@expected_result Materialized view metadata should be constructed appropriately.
@test_category metadata
"""
create_table = """CREATE TABLE {0}.scores(
user TEXT,
game TEXT,
year INT,
month INT,
day INT,
score INT,
PRIMARY KEY (user, game, year, month, day)
)""".format(self.keyspace_name)
self.session.execute(create_table)
create_mv = """CREATE MATERIALIZED VIEW {0}.monthlyhigh AS
SELECT game, year, month, score, user, day FROM {0}.scores
WHERE game IS NOT NULL AND year IS NOT NULL AND month IS NOT NULL AND score IS NOT NULL AND user IS NOT NULL AND day IS NOT NULL
PRIMARY KEY ((game, year, month), score, user, day)
WITH CLUSTERING ORDER BY (score DESC, user ASC, day ASC)""".format(self.keyspace_name)
self.session.execute(create_mv)
score_table = self.cluster.metadata.keyspaces[self.keyspace_name].tables['scores']
mv = self.cluster.metadata.keyspaces[self.keyspace_name].views['monthlyhigh']
self.assertIsNotNone(score_table.views["monthlyhigh"])
self.assertEqual(len(score_table.views), 1)
# Make sure user is a partition key, and not null
self.assertEqual(len(score_table.partition_key), 1)
self.assertIsNotNone(score_table.columns['user'])
self.assertEqual(score_table.columns['user'], score_table.partition_key[0])
# Validate clustering keys
self.assertEqual(len(score_table.clustering_key), 4)
self.assertIsNotNone(score_table.columns['game'])
self.assertEqual(score_table.columns['game'], score_table.clustering_key[0])
self.assertIsNotNone(score_table.columns['year'])
self.assertEqual(score_table.columns['year'], score_table.clustering_key[1])
self.assertIsNotNone(score_table.columns['month'])
self.assertEqual(score_table.columns['month'], score_table.clustering_key[2])
self.assertIsNotNone(score_table.columns['day'])
self.assertEqual(score_table.columns['day'], score_table.clustering_key[3])
self.assertIsNotNone(score_table.columns['score'])
# Validate basic mv information
self.assertEqual(mv.keyspace_name, self.keyspace_name)
self.assertEqual(mv.name, "monthlyhigh")
self.assertEqual(mv.base_table_name, "scores")
self.assertFalse(mv.include_all_columns)
# Validate that all columns are preset and correct
mv_columns = list(mv.columns.values())
self.assertEqual(len(mv_columns), 6)
game_column = mv_columns[0]
self.assertIsNotNone(game_column)
self.assertEqual(game_column.name, 'game')
self.assertEqual(game_column, mv.partition_key[0])
year_column = mv_columns[1]
self.assertIsNotNone(year_column)
self.assertEqual(year_column.name, 'year')
self.assertEqual(year_column, mv.partition_key[1])
month_column = mv_columns[2]
self.assertIsNotNone(month_column)
self.assertEqual(month_column.name, 'month')
self.assertEqual(month_column, mv.partition_key[2])
def compare_columns(a, b, name):
self.assertEqual(a.name, name)
self.assertEqual(a.name, b.name)
self.assertEqual(a.table, b.table)
self.assertEqual(a.cql_type, b.cql_type)
self.assertEqual(a.is_static, b.is_static)
self.assertEqual(a.is_reversed, b.is_reversed)
score_column = mv_columns[3]
compare_columns(score_column, mv.clustering_key[0], 'score')
user_column = mv_columns[4]
compare_columns(user_column, mv.clustering_key[1], 'user')
day_column = mv_columns[5]
compare_columns(day_column, mv.clustering_key[2], 'day')
def test_base_table_column_addition_mv(self):
"""
test to ensure that materialized view metadata is properly updated when base columns are added
test_base_table_column_addition_mv tests that materialized view metadata is properly updated when columns are added to
the base table.
@since 3.0.0
@jira_ticket PYTHON-419
@expected_result Materialized view metadata should be updated correctly
@test_category metadata
"""
create_table = """CREATE TABLE {0}.scores(
user TEXT,
game TEXT,
year INT,
month INT,
day INT,
score TEXT,
PRIMARY KEY (user, game, year, month, day)
)""".format(self.keyspace_name)
self.session.execute(create_table)
create_mv = """CREATE MATERIALIZED VIEW {0}.monthlyhigh AS
SELECT game, year, month, score, user, day FROM {0}.scores
WHERE game IS NOT NULL AND year IS NOT NULL AND month IS NOT NULL AND score IS NOT NULL AND user IS NOT NULL AND day IS NOT NULL
PRIMARY KEY ((game, year, month), score, user, day)
WITH CLUSTERING ORDER BY (score DESC, user ASC, day ASC)""".format(self.keyspace_name)
create_mv_alltime = """CREATE MATERIALIZED VIEW {0}.alltimehigh AS
SELECT * FROM {0}.scores
WHERE game IS NOT NULL AND score IS NOT NULL AND user IS NOT NULL AND year IS NOT NULL AND month IS NOT NULL AND day IS NOT NULL
PRIMARY KEY (game, score, user, year, month, day)
WITH CLUSTERING ORDER BY (score DESC)""".format(self.keyspace_name)
self.session.execute(create_mv)
self.session.execute(create_mv_alltime)
score_table = self.cluster.metadata.keyspaces[self.keyspace_name].tables['scores']
self.assertIsNotNone(score_table.views["monthlyhigh"])
self.assertIsNotNone(score_table.views["alltimehigh"])
self.assertEqual(len(self.cluster.metadata.keyspaces[self.keyspace_name].views), 2)
insert_fouls = """ALTER TABLE {0}.scores ADD fouls INT""".format((self.keyspace_name))
self.session.execute(insert_fouls)
self.assertEqual(len(self.cluster.metadata.keyspaces[self.keyspace_name].views), 2)
score_table = self.cluster.metadata.keyspaces[self.keyspace_name].tables['scores']
self.assertIn("fouls", score_table.columns)
# This is a workaround for mv notifications being separate from base table schema responses.
# This may be fixed with future protocol changes
for i in range(10):
mv_alltime = self.cluster.metadata.keyspaces[self.keyspace_name].views["alltimehigh"]
if("fouls" in mv_alltime.columns):
break
time.sleep(.2)
self.assertIn("fouls", mv_alltime.columns)
mv_alltime_fouls_column = self.cluster.metadata.keyspaces[self.keyspace_name].views["alltimehigh"].columns['fouls']
self.assertEqual(mv_alltime_fouls_column.cql_type, 'int')
@lessthancass30
def test_base_table_type_alter_mv(self):
"""
test to ensure that materialized view metadata is properly updated when a type in the base table
is updated.
test_base_table_type_alter_mv tests that materialized view metadata is properly updated when the type of a base table
column is changed.
@since 3.0.0
@jira_ticket CASSANDRA-10424
@expected_result Materialized view metadata should be updated correctly
@test_category metadata
"""
create_table = """CREATE TABLE {0}.scores(
user TEXT,
game TEXT,
year INT,
month INT,
day INT,
score TEXT,
PRIMARY KEY (user, game, year, month, day)
)""".format(self.keyspace_name)
self.session.execute(create_table)
create_mv = """CREATE MATERIALIZED VIEW {0}.monthlyhigh AS
SELECT game, year, month, score, user, day FROM {0}.scores
WHERE game IS NOT NULL AND year IS NOT NULL AND month IS NOT NULL AND score IS NOT NULL AND user IS NOT NULL AND day IS NOT NULL
PRIMARY KEY ((game, year, month), score, user, day)
WITH CLUSTERING ORDER BY (score DESC, user ASC, day ASC)""".format(self.keyspace_name)
self.session.execute(create_mv)
self.assertEqual(len(self.cluster.metadata.keyspaces[self.keyspace_name].views), 1)
alter_scores = """ALTER TABLE {0}.scores ALTER score TYPE blob""".format((self.keyspace_name))
self.session.execute(alter_scores)
self.assertEqual(len(self.cluster.metadata.keyspaces[self.keyspace_name].views), 1)
score_column = self.cluster.metadata.keyspaces[self.keyspace_name].tables['scores'].columns['score']
self.assertEqual(score_column.cql_type, 'blob')
# until CASSANDRA-9920+CASSANDRA-10500 MV updates are only available later with an async event
for i in range(10):
score_mv_column = self.cluster.metadata.keyspaces[self.keyspace_name].views["monthlyhigh"].columns['score']
if "blob" == score_mv_column.cql_type:
break
time.sleep(.2)
self.assertEqual(score_mv_column.cql_type, 'blob')
def test_metadata_with_quoted_identifiers(self):
"""
test to ensure that materialized view metadata is properly constructed when quoted identifiers are used
test_metadata_with_quoted_identifiers tests that materialized views metadata is properly constructed.
It runs a simple query to construct a materialized view, then proceeds to inspect the metadata associated with
that MV. The caveat here is that the tables and the materialized view both have quoted identifiers
Columns are inspected to ensure that all are of the proper type and in the proper order.
@since 3.0.0
@jira_ticket PYTHON-371
@expected_result Materialized view metadata should be constructed appropriately even with quoted identifiers.
@test_category metadata
"""
create_table = """CREATE TABLE {0}.t1 (
"theKey" int,
"the;Clustering" int,
"the Value" int,
PRIMARY KEY ("theKey", "the;Clustering"))""".format(self.keyspace_name)
self.session.execute(create_table)
create_mv = """CREATE MATERIALIZED VIEW {0}.mv1 AS
SELECT "theKey", "the;Clustering", "the Value"
FROM {0}.t1
WHERE "theKey" IS NOT NULL AND "the;Clustering" IS NOT NULL AND "the Value" IS NOT NULL
PRIMARY KEY ("theKey", "the;Clustering")""".format(self.keyspace_name)
self.session.execute(create_mv)
t1_table = self.cluster.metadata.keyspaces[self.keyspace_name].tables['t1']
mv = self.cluster.metadata.keyspaces[self.keyspace_name].views['mv1']
self.assertIsNotNone(t1_table.views["mv1"])
self.assertEqual(len(t1_table.views), 1)
# Validate partition key, and not null
self.assertEqual(len(t1_table.partition_key), 1)
self.assertIsNotNone(t1_table.columns['theKey'])
self.assertEqual(t1_table.columns['theKey'], t1_table.partition_key[0])
# Validate clustering key column
self.assertEqual(len(t1_table.clustering_key), 1)
self.assertIsNotNone(t1_table.columns['the;Clustering'])
self.assertEqual(t1_table.columns['the;Clustering'], t1_table.clustering_key[0])
# Validate regular column
self.assertIsNotNone(t1_table.columns['the Value'])
# Validate basic mv information
self.assertEqual(mv.keyspace_name, self.keyspace_name)
self.assertEqual(mv.name, "mv1")
self.assertEqual(mv.base_table_name, "t1")
self.assertFalse(mv.include_all_columns)
# Validate that all columns are preset and correct
mv_columns = list(mv.columns.values())
self.assertEqual(len(mv_columns), 3)
theKey_column = mv_columns[0]
self.assertIsNotNone(theKey_column)
self.assertEqual(theKey_column.name, 'theKey')
self.assertEqual(theKey_column, mv.partition_key[0])
cluster_column = mv_columns[1]
self.assertIsNotNone(cluster_column)
self.assertEqual(cluster_column.name, 'the;Clustering')
self.assertEqual(cluster_column.name, mv.clustering_key[0].name)
self.assertEqual(cluster_column.table, mv.clustering_key[0].table)
self.assertEqual(cluster_column.is_static, mv.clustering_key[0].is_static)
self.assertEqual(cluster_column.is_reversed, mv.clustering_key[0].is_reversed)
value_column = mv_columns[2]
self.assertIsNotNone(value_column)
self.assertEqual(value_column.name, 'the Value')
class GroupPerHost(BasicSharedKeyspaceUnitTestCase):
@classmethod
def setUpClass(cls):
cls.common_setup(rf=1, create_class_table=True)
cls.table_two_pk = "table_with_two_pk"
cls.session.execute(
'''
CREATE TABLE {0}.{1} (
k_one int,
k_two int,
v int,
PRIMARY KEY ((k_one, k_two))
)'''.format(cls.ks_name, cls.table_two_pk)
)
def test_group_keys_by_host(self):
"""
Test to ensure group_keys_by_host functions as expected. It is tried
with a table with a single field for the partition key and a table
with two fields for the partition key
@since 3.13
@jira_ticket PYTHON-647
@expected_result group_keys_by_host returns the expected value
@test_category metadata
"""
stmt = """SELECT * FROM {}.{}
WHERE k_one = ? AND k_two = ? """.format(self.ks_name, self.table_two_pk)
keys = ((1, 2), (2, 2), (2, 3), (3, 4))
self._assert_group_keys_by_host(keys, self.table_two_pk, stmt)
stmt = """SELECT * FROM {}.{}
WHERE k = ? """.format(self.ks_name, self.ks_name)
keys = ((1, ), (2, ), (2, ), (3, ))
self._assert_group_keys_by_host(keys, self.ks_name, stmt)
def _assert_group_keys_by_host(self, keys, table_name, stmt):
keys_per_host = group_keys_by_replica(self.session, self.ks_name, table_name, keys)
self.assertNotIn(NO_VALID_REPLICA, keys_per_host)
prepared_stmt = self.session.prepare(stmt)
for key in keys:
routing_key = prepared_stmt.bind(key).routing_key
hosts = self.cluster.metadata.get_replicas(self.ks_name, routing_key)
self.assertEqual(1, len(hosts)) # RF is 1 for this keyspace
self.assertIn(key, keys_per_host[hosts[0]])
|
the-stack_0_15533 | # Download images and labels related to the validation/test set in the dataset
import os
import cv2
import shutil
import argparse
from PIL import Image
parser = argparse.ArgumentParser()
parser.add_argument('--inp', type = str, help = 'Input path.')
parser.add_argument('--out', type = str, help = 'Output path.')
parser.add_argument('--label', type = str, help = 'Image labels.')
opt = parser.parse_args()
print(opt)
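# Example invocation (illustrative only; the script name, paths and label file below are hypothetical):
#   python download_subset.py --inp ./dataset/images/ --out ./dataset/valid/ --label valid.txt
# The label file is expected to list one image path per line; only the images named in it are
# converted to RGB and copied to the output folder together with their matching .txt label files.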
file = open(opt.label)
valid_set = list()
for line in file.readlines():
valid_set.append(line.split('/')[-1].split('\n')[0])
n_images = 0
try:
os.mkdir(opt.out)
except:
pass
for subdir, dirs, files in os.walk(opt.inp):
for file in sorted(files):
if file.endswith(('.jpg')) and file in valid_set:
img = Image.open(opt.inp + os.sep + file)
img = img.convert('RGB')
img.save(opt.out + os.sep + file)
shutil.copy(opt.inp + os.sep + str(file).split('.')[0] + '.txt', opt.out)
n_images += 1
print(n_images, 'images.') |
the-stack_0_15535 |
import random
import sys
from datetime import datetime
import torch
import numpy as np
import os
import logging
import torch.utils.data as data
import json
def seed_all_rng(seed=None):
"""
Set the random seed for the RNG in torch, numpy and python.
Args:
seed (int): if None, will use a strong random seed.
"""
if seed is None:
seed = (
os.getpid()
+ int(datetime.now().strftime("%S%f"))
+ int.from_bytes(os.urandom(2), "big")
)
logger = logging.getLogger(__name__)
logger.info("Using a generated random seed {}".format(seed))
np.random.seed(seed)
torch.set_rng_state(torch.manual_seed(seed).get_state())
random.seed(seed)
def worker_init_reset_seed(worker_id):
seed_all_rng(np.random.randint(2 ** 31) + worker_id)
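# Minimal usage sketch (assumes an existing `dataset` object; not part of the original module).
# Passing worker_init_reset_seed as worker_init_fn gives every DataLoader worker a distinct,
# freshly generated seed instead of an inherited copy of the parent's RNG state:
#
#   loader = data.DataLoader(dataset, batch_size=4, num_workers=2,
#                            worker_init_fn=worker_init_reset_seed)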
class Label(object):
def __init__(self, gt_bboxes, gt_classes):
self.gt_classes = gt_classes
self.gt_bboxes = gt_bboxes
def __len__(self):
if isinstance(self.gt_classes, list):
return len(self.gt_classes)
elif isinstance(self.gt_classes, torch.Tensor):
return list(self.gt_classes.size())[0]
elif type(self.gt_classes) is np.ndarray:
return self.gt_classes.shape[0]
else:
return 0
# class AspectRatioGroupedDataset(object):
# """
# Batch data that have similar aspect ratio together.
# In this implementation, images whose aspect ratio < (or >) 1 will
# be batched together.
#
# It assumes the underlying dataset produces dicts with "width" and "height" keys.
# It will then produce a list of original dicts with length = batch_size,
# all with similar aspect ratios.
# """
#
# def __init__(self, dataset):
# """
# Args:
# dataset: an iterable. Each element must be a dict with keys
# "width" and "height", which will be used to batch data.
# batch_size (int):
# """
# self.dataset = dataset
# self.batch_size = dataset.batch_size
# self._buckets = [[] for _ in range(2)]
# # Hard-coded two aspect ratio groups: w > h and w < h.
# # Can add support for more aspect ratio groups, but doesn't seem useful
#
# def __iter__(self):
# for d in self.dataset:
# _, h, w = list(d["image"].size())
# bucket_id = 0 if w > h else 1
# bucket = self._buckets[bucket_id]
# bucket.append(d)
# if len(bucket) == self.batch_size:
# yield bucket[:]
# del bucket[:]
class AspectRatioGroupedDataset(object):
"""
Batch data that have similar aspect ratio together.
In this implementation, images whose aspect ratio < (or >) 1 will
be batched together.
It assumes the underlying dataset produces dicts with "width" and "height" keys.
It will then produce a list of original dicts with length = batch_size,
all with similar aspect ratios.
"""
def __init__(self, dataset, batch_size):
"""
Args:
dataset: an iterable. Each element must be a dict with keys
"width" and "height", which will be used to batch data.
batch_size (int):
"""
self.dataset = dataset
self.batch_size = batch_size
# Hard-coded two aspect ratio groups: w > h and w < h.
# Can add support for more aspect ratio groups, but doesn't seem useful
def __iter__(self):
bucket = []
for d in self.dataset:
bucket.append(d)
if len(bucket) == self.batch_size:
yield bucket[:]
bucket = []
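# Rough usage sketch (assumes `mapped_dataset` is any iterable of per-sample dicts; not part of
# the original file). The active implementation simply re-batches the stream into lists of
# `batch_size` items, so it can sit between a streaming dataset and the training loop:
#
#   grouped = AspectRatioGroupedDataset(mapped_dataset, batch_size=8)
#   for batch in grouped:
#       process(batch)  # `process` is a placeholder for the training step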
"""
Enables writing json with numpy arrays to file
"""
class NumpyEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.ndarray):
return obj.tolist()
return json.JSONEncoder.default(self,obj)
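# Usage sketch (not in the original module): pass the encoder class to json.dumps so that
# numpy arrays are serialized as plain lists.
#
#   json.dumps({'total': np.zeros(3)}, cls=NumpyEncoder)  # -> '{"total": [0.0, 0.0, 0.0]}'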
"""
Class will hold the average dimension for a class, regressed value is the residual
"""
class ClassAverages:
def __init__(self, classes=[]):
self.dimension_map = {}
self.filename = os.path.abspath(os.path.dirname(__file__)) + '/class_averages.txt'
if len(classes) == 0: # eval mode
self.load_items_from_file()
for detection_class in classes:
class_ = detection_class.lower()
if class_ in self.dimension_map.keys():
continue
self.dimension_map[class_] = {}
self.dimension_map[class_]['count'] = 0
self.dimension_map[class_]['total'] = np.zeros(3, dtype=np.double)
def add_item(self, class_, dimension):
class_ = class_.lower()
self.dimension_map[class_]['count'] += 1
self.dimension_map[class_]['total'] += dimension
# self.dimension_map[class_]['total'] /= self.dimension_map[class_]['count']
def get_item(self, class_):
class_ = class_.lower()
return self.dimension_map[class_]['total'] / self.dimension_map[class_]['count']
def dump_to_file(self):
f = open(self.filename, "w")
f.write(json.dumps(self.dimension_map, cls=NumpyEncoder))
f.close()
def load_items_from_file(self):
f = open(self.filename, 'r')
dimension_map = json.load(f)
for class_ in dimension_map:
dimension_map[class_]['total'] = np.asarray(dimension_map[class_]['total'])
self.dimension_map = dimension_map
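# Rough usage sketch (the class names and dimensions below are made up, not from the original code):
#   averages = ClassAverages(['car', 'pedestrian'])
#   averages.add_item('Car', np.array([1.5, 1.6, 3.9]))
#   mean_dims = averages.get_item('car')   # running average dimensions for the class
#   averages.dump_to_file()                # persists the map to class_averages.txt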
def recognized_class(self, class_):
return class_.lower() in self.dimension_map |
the-stack_0_15536 | import setuptools
# Reads the content of your README.md into a variable to be used in the setup below
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setuptools.setup(
name='jv_toolbox', # should match the package folder
packages=['jv_toolbox'], # should match the package folder
version='0.0.1', # important for updates
license='MIT', # should match your chosen license
description='Testing installation of Package',
long_description=long_description, # loads your README.md
long_description_content_type="text/markdown", # README.md is of type 'markdown'
author='Jonathan Vlk',
author_email='[email protected]',
url='https://github.com/jgvlk/toolbox_project',
project_urls = { # Optional
"Bug Tracker": "https://github.com/jgvlk/toolbox_project/issues"
},
install_requires=['requests'], # list all packages that your package uses
keywords=["pypi", "jv_toolbox", "tutorial"], #descriptive meta-data
classifiers=[ # https://pypi.org/classifiers
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Topic :: Software Development :: Documentation',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
],
download_url="https://github.com/jgvlk/toolbox_project/archive/refs/tags/0.0.1.tar.gz",
) |
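# Typical usage of this setup script (commands are illustrative; the wheel filename depends on the build):
#   python -m build                     # or: python setup.py sdist bdist_wheel
#   pip install dist/jv_toolbox-0.0.1-py3-none-any.whl
#   pip install -e .                    # editable install for development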
the-stack_0_15538 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2013-2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import socket
import struct
import logging
import hashlib
import hmac
ONETIMEAUTH_BYTES = 10
ONETIMEAUTH_CHUNK_BYTES = 12
ONETIMEAUTH_CHUNK_DATA_LEN = 2
def sha1_hmac(secret, data):
return hmac.new(secret, data, hashlib.sha1).digest()
def onetimeauth_verify(_hash, data, key):
return _hash == sha1_hmac(key, data)[:ONETIMEAUTH_BYTES]
def onetimeauth_gen(data, key):
return sha1_hmac(key, data)[:ONETIMEAUTH_BYTES]
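# Illustrative round trip for the one-time-auth helpers (key and payload values are made up):
#   key = b'secret-key'
#   chunk = b'payload'
#   tag = onetimeauth_gen(chunk, key)           # 10-byte truncated HMAC-SHA1
#   assert onetimeauth_verify(tag, chunk, key)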
def compat_ord(s):
if type(s) == int:
return s
return _ord(s)
def compat_chr(d):
if bytes == str:
return _chr(d)
return bytes([d])
_ord = ord
_chr = chr
ord = compat_ord
chr = compat_chr
def to_bytes(s):
if bytes != str:
if type(s) == str:
return s.encode('utf-8')
return s
def to_str(s):
if bytes != str:
if type(s) == bytes:
return s.decode('utf-8')
return s
def inet_ntop(family, ipstr):
if family == socket.AF_INET:
return to_bytes(socket.inet_ntoa(ipstr))
elif family == socket.AF_INET6:
import re
v6addr = ':'.join(('%02X%02X' % (ord(i), ord(j))).lstrip('0')
for i, j in zip(ipstr[::2], ipstr[1::2]))
v6addr = re.sub('::+', '::', v6addr, count=1)
return to_bytes(v6addr)
def inet_pton(family, addr):
addr = to_str(addr)
if family == socket.AF_INET:
return socket.inet_aton(addr)
elif family == socket.AF_INET6:
if '.' in addr: # a v4 addr
v4addr = addr[addr.rindex(':') + 1:]
v4addr = socket.inet_aton(v4addr)
v4addr = list(map(lambda x: ('%02X' % ord(x)), v4addr))  # list() so insert() below works on Python 3
v4addr.insert(2, ':')
newaddr = addr[:addr.rindex(':') + 1] + ''.join(v4addr)
return inet_pton(family, newaddr)
dbyts = [0] * 8 # 8 groups
grps = addr.split(':')
for i, v in enumerate(grps):
if v:
dbyts[i] = int(v, 16)
else:
for j, w in enumerate(grps[::-1]):
if w:
dbyts[7 - j] = int(w, 16)
else:
break
break
return b''.join((chr(i // 256) + chr(i % 256)) for i in dbyts)
else:
raise RuntimeError("What family?")
def is_ip(address):
for family in (socket.AF_INET, socket.AF_INET6):
try:
if type(address) != str:
address = address.decode('utf8')
inet_pton(family, address)
return family
except (TypeError, ValueError, OSError, IOError):
pass
return False
def patch_socket():
if not hasattr(socket, 'inet_pton'):
socket.inet_pton = inet_pton
if not hasattr(socket, 'inet_ntop'):
socket.inet_ntop = inet_ntop
patch_socket()
ADDRTYPE_IPV4 = 0x01
ADDRTYPE_IPV6 = 0x04
ADDRTYPE_HOST = 0x03
ADDRTYPE_AUTH = 0x10
ADDRTYPE_MASK = 0xF
def pack_addr(address):
address_str = to_str(address)
address = to_bytes(address)
for family in (socket.AF_INET, socket.AF_INET6):
try:
r = socket.inet_pton(family, address_str)
if family == socket.AF_INET6:
return b'\x04' + r
else:
return b'\x01' + r
except (TypeError, ValueError, OSError, IOError):
pass
if len(address) > 255:
address = address[:255] # TODO
return b'\x03' + chr(len(address)) + address
# add ss header
def add_header(address, port, data=b''):
_data = b''
_data = pack_addr(address) + struct.pack('>H', port) + data
return _data
def parse_header(data):
addrtype = ord(data[0])
dest_addr = None
dest_port = None
header_length = 0
if addrtype & ADDRTYPE_MASK == ADDRTYPE_IPV4:
if len(data) >= 7:
dest_addr = socket.inet_ntoa(data[1:5])
dest_port = struct.unpack('>H', data[5:7])[0]
header_length = 7
else:
logging.warn('header is too short')
elif addrtype & ADDRTYPE_MASK == ADDRTYPE_HOST:
if len(data) > 2:
addrlen = ord(data[1])
if len(data) >= 4 + addrlen:
dest_addr = data[2:2 + addrlen]
dest_port = struct.unpack('>H', data[2 + addrlen:4 +
addrlen])[0]
header_length = 4 + addrlen
else:
logging.warn('header is too short')
else:
logging.warn('header is too short')
elif addrtype & ADDRTYPE_MASK == ADDRTYPE_IPV6:
if len(data) >= 19:
dest_addr = socket.inet_ntop(socket.AF_INET6, data[1:17])
dest_port = struct.unpack('>H', data[17:19])[0]
header_length = 19
else:
logging.warn('header is too short')
else:
logging.warn('unsupported addrtype %d, maybe wrong password or '
'encryption method' % addrtype)
if dest_addr is None:
return None
return addrtype, to_bytes(dest_addr), dest_port, header_length
class IPNetwork(object):
ADDRLENGTH = {socket.AF_INET: 32, socket.AF_INET6: 128, False: 0}
def __init__(self, addrs):
self._network_list_v4 = []
self._network_list_v6 = []
if type(addrs) == str:
addrs = addrs.split(',')
list(map(self.add_network, addrs))
def add_network(self, addr):
if addr is "":
return
block = addr.split('/')
addr_family = is_ip(block[0])
addr_len = IPNetwork.ADDRLENGTH[addr_family]
if addr_family is socket.AF_INET:
ip, = struct.unpack("!I", socket.inet_aton(block[0]))
elif addr_family is socket.AF_INET6:
hi, lo = struct.unpack("!QQ", inet_pton(addr_family, block[0]))
ip = (hi << 64) | lo
else:
raise Exception("Not a valid CIDR notation: %s" % addr)
if len(block) == 1:
prefix_size = 0
while (ip & 1) == 0 and ip != 0:
ip >>= 1
prefix_size += 1
logging.warn("You did't specify CIDR routing prefix size for %s, "
"implicit treated as %s/%d" % (addr, addr, addr_len))
elif block[1].isdigit() and int(block[1]) <= addr_len:
prefix_size = addr_len - int(block[1])
ip >>= prefix_size
else:
raise Exception("Not a valid CIDR notation: %s" % addr)
if addr_family is socket.AF_INET:
self._network_list_v4.append((ip, prefix_size))
else:
self._network_list_v6.append((ip, prefix_size))
def __contains__(self, addr):
addr_family = is_ip(addr)
if addr_family is socket.AF_INET:
ip, = struct.unpack("!I", socket.inet_aton(addr))
return any(map(lambda n_ps: n_ps[0] == ip >> n_ps[1],
self._network_list_v4))
elif addr_family is socket.AF_INET6:
hi, lo = struct.unpack("!QQ", inet_pton(addr_family, addr))
ip = (hi << 64) | lo
return any(map(lambda n_ps: n_ps[0] == ip >> n_ps[1],
self._network_list_v6))
else:
return False
def test_inet_conv():
ipv4 = b'8.8.4.4'
b = inet_pton(socket.AF_INET, ipv4)
assert inet_ntop(socket.AF_INET, b) == ipv4
ipv6 = b'2404:6800:4005:805::1011'
b = inet_pton(socket.AF_INET6, ipv6)
assert inet_ntop(socket.AF_INET6, b) == ipv6
def test_parse_header():
assert parse_header(b'\x03\x0ewww.google.com\x00\x50') == \
(3, b'www.google.com', 80, 18)
assert parse_header(b'\x01\x08\x08\x08\x08\x00\x35') == \
(1, b'8.8.8.8', 53, 7)
assert parse_header((b'\x04$\x04h\x00@\x05\x08\x05\x00\x00\x00\x00\x00'
b'\x00\x10\x11\x00\x50')) == \
(4, b'2404:6800:4005:805::1011', 80, 19)
def test_pack_header():
assert pack_addr(b'8.8.8.8') == b'\x01\x08\x08\x08\x08'
assert pack_addr(b'2404:6800:4005:805::1011') == \
b'\x04$\x04h\x00@\x05\x08\x05\x00\x00\x00\x00\x00\x00\x10\x11'
assert pack_addr(b'www.google.com') == b'\x03\x0ewww.google.com'
def test_ip_network():
ip_network = IPNetwork('127.0.0.0/24,::ff:1/112,::1,192.168.1.1,192.0.2.0')
assert '127.0.0.1' in ip_network
assert '127.0.1.1' not in ip_network
    assert '::ff:ffff' in ip_network
assert '::ffff:1' not in ip_network
assert '::1' in ip_network
assert '::2' not in ip_network
assert '192.168.1.1' in ip_network
assert '192.168.1.2' not in ip_network
assert '192.0.2.1' in ip_network
assert '192.0.3.1' in ip_network # 192.0.2.0 is treated as 192.0.2.0/23
assert 'www.google.com' not in ip_network
if __name__ == '__main__':
test_inet_conv()
test_parse_header()
test_pack_header()
test_ip_network()
|
the-stack_0_15539 | # Copyright DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cassandra.graph import Vertex, Edge
from tests.integration.advanced.graph import (
validate_classic_vertex, validate_classic_edge, validate_generic_vertex_result_type,
validate_classic_edge_properties, validate_line_edge,
validate_generic_edge_result_type, validate_path_result_type)
from tests.integration import requiredse, DSE_VERSION
from tests.integration.advanced import use_single_node_with_graph
from tests.integration.advanced.graph import GraphTestConfiguration
from tests.integration.advanced.graph.fluent import (
BaseExplicitExecutionTest, _AbstractTraversalTest, _validate_prop)
def setup_module():
if DSE_VERSION:
dse_options = {'graph': {'realtime_evaluation_timeout_in_seconds': 60}}
use_single_node_with_graph(dse_options=dse_options)
@requiredse
@GraphTestConfiguration.generate_tests(traversal=True)
class ExplicitExecutionTest(BaseExplicitExecutionTest, _AbstractTraversalTest):
"""
This test class will execute all tests of the AbstractTraversalTestClass using Explicit execution
All queries will be run by converting them to byte code, and calling execute graph explicitly with a generated ep.
"""
@staticmethod
def fetch_key_from_prop(property):
return property.label
def _validate_classic_vertex(self, g, vertex):
validate_classic_vertex(self, vertex)
def _validate_generic_vertex_result_type(self, g, vertex):
validate_generic_vertex_result_type(self, vertex)
def _validate_classic_edge_properties(self, g, edge):
validate_classic_edge_properties(self, edge)
def _validate_classic_edge(self, g, edge):
validate_classic_edge(self, edge)
def _validate_line_edge(self, g, edge):
validate_line_edge(self, edge)
def _validate_generic_edge_result_type(self, edge):
validate_generic_edge_result_type(self, edge)
def _validate_type(self, g, vertex):
for key in vertex.properties:
value = vertex.properties[key][0].value
_validate_prop(key, value, self)
def _validate_path_result_type(self, g, path_obj):
# This pre-processing is due to a change in TinkerPop
# properties are not returned automatically anymore
# with some queries.
for obj in path_obj.objects:
if not obj.properties:
props = []
if isinstance(obj, Edge):
obj.properties = {
p.key: p.value
for p in self.fetch_edge_props(g, obj)
}
elif isinstance(obj, Vertex):
obj.properties = {
p.label: p.value
for p in self.fetch_vertex_props(g, obj)
}
validate_path_result_type(self, path_obj)
def _validate_meta_property(self, g, vertex):
self.assertEqual(len(vertex.properties), 1)
self.assertEqual(len(vertex.properties['key']), 1)
p = vertex.properties['key'][0]
self.assertEqual(p.label, 'key')
self.assertEqual(p.value, 'meta_prop')
self.assertEqual(p.properties, {'k0': 'v0', 'k1': 'v1'})
|
the-stack_0_15540 | from builtins import range
import tensorflow as tf
import numpy as np
import math
import sys
import os
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
sys.path.append(os.path.join(BASE_DIR, "../utils"))
import tf_util
from structural_losses.tf_nndistance import nn_distance
from structural_losses.tf_approxmatch import approx_match
def placeholder_inputs(batch_size, num_point):
pointclouds_pl = tf.placeholder(tf.float32, shape=(batch_size, num_point, 3))
labels_pl = tf.placeholder(tf.int32, shape=(batch_size))
return pointclouds_pl, labels_pl
def get_model(
point_cloud, is_training, num_output_points, bottleneck_size, bn_decay=None
):
""" Classification PointNet, input is BxNx3, output Bx40 """
batch_size = point_cloud.get_shape()[0].value
num_point = point_cloud.get_shape()[1].value
input_image = tf.expand_dims(point_cloud, -1)
# Point functions (MLP implemented as conv2d)
net = tf_util.conv2d(
input_image,
64,
[1, 3],
padding="VALID",
stride=[1, 1],
bn=True,
is_training=is_training,
scope="conv1",
bn_decay=bn_decay,
)
net = tf_util.conv2d(
net,
64,
[1, 1],
padding="VALID",
stride=[1, 1],
bn=True,
is_training=is_training,
scope="conv2",
bn_decay=bn_decay,
)
net = tf_util.conv2d(
net,
64,
[1, 1],
padding="VALID",
stride=[1, 1],
bn=True,
is_training=is_training,
scope="conv3",
bn_decay=bn_decay,
)
net = tf_util.conv2d(
net,
128,
[1, 1],
padding="VALID",
stride=[1, 1],
bn=True,
is_training=is_training,
scope="conv4",
bn_decay=bn_decay,
)
net = tf_util.conv2d(
net,
bottleneck_size,
[1, 1],
padding="VALID",
stride=[1, 1],
bn=True,
is_training=is_training,
scope="conv5",
bn_decay=bn_decay,
)
net = tf_util.max_pool2d(net, [num_point, 1], padding="VALID", scope="maxpool")
net = tf.reshape(net, [batch_size, -1])
net = tf_util.fully_connected(
net, 256, bn=True, is_training=is_training, scope="fc11b", bn_decay=bn_decay
)
net = tf_util.fully_connected(
net, 256, bn=True, is_training=is_training, scope="fc12b", bn_decay=bn_decay
)
net = tf_util.fully_connected(
net, 256, bn=True, is_training=is_training, scope="fc13b", bn_decay=bn_decay
)
net = tf_util.fully_connected(
net,
3 * num_output_points,
bn=True,
is_training=is_training,
scope="fc14b",
bn_decay=bn_decay,
activation_fn=None,
)
out_point_cloud = tf.reshape(net, [batch_size, -1, 3])
return out_point_cloud
def calc_distances(p0, points):
return ((p0 - points) ** 2).sum(axis=1)
def fps_from_given_pc(pts, k, given_pc):
farthest_pts = np.zeros((k, 3))
t = np.size(given_pc) // 3
farthest_pts[0:t] = given_pc
distances = calc_distances(farthest_pts[0], pts)
for i in range(1, t):
distances = np.minimum(distances, calc_distances(farthest_pts[i], pts))
for i in range(t, k):
farthest_pts[i] = pts[np.argmax(distances)]
distances = np.minimum(distances, calc_distances(farthest_pts[i], pts))
return farthest_pts
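# Usage sketch for fps_from_given_pc (illustrative only; assumes an (N, 3) numpy array):
#   pts = np.random.rand(1024, 3)
#   sampled = fps_from_given_pc(pts, 64, pts[:1])  # complete the single seed point to
#                                                  # 64 points via farthest point sampling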
def unique(arr):
_, idx = np.unique(arr, return_index=True)
return arr[np.sort(idx)]
def nn_matching(full_pc, idx, k, complete_fps=True):
batch_size = np.size(full_pc, 0)
out_pc = np.zeros((full_pc.shape[0], k, 3))
for ii in range(0, batch_size):
best_idx = idx[ii]
if complete_fps:
best_idx = unique(best_idx)
out_pc[ii] = fps_from_given_pc(full_pc[ii], k, full_pc[ii][best_idx])
else:
out_pc[ii] = full_pc[ii][best_idx]
return out_pc[:, 0:k, :]
def emd_matching(full_pc, gen_pc, sess):
batch_size = np.size(full_pc, 0)
k = np.size(gen_pc, 1)
out_pc = np.zeros_like(gen_pc)
match_mat_tensor = approx_match(
tf.convert_to_tensor(full_pc), tf.convert_to_tensor(gen_pc)
)
pc1_match_idx_tensor = tf.cast(tf.argmax(match_mat_tensor, axis=2), dtype=tf.int32)
pc1_match_idx = pc1_match_idx_tensor.eval(session=sess)
for ii in range(0, batch_size):
best_idx = unique(pc1_match_idx[ii])
out_pc[ii] = fps_from_given_pc(full_pc[ii], k, full_pc[ii][best_idx])
return out_pc
def get_nn_indices(ref_pc, samp_pc):
_, idx, _, _ = nn_distance(samp_pc, ref_pc)
return idx
def get_simplification_loss(ref_pc, samp_pc, pc_size, gamma=1, delta=0):
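    """Chamfer-style simplification loss (description inferred from the terms below):
    cost_p1_p2 is the mean nearest-neighbor distance from the sampled points to the
    reference cloud (its per-cloud maximum is added as a separate term), while
    cost_p2_p1 is the mean distance from the reference cloud back to the samples
    (coverage), weighted by (gamma + delta * pc_size).
    """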
cost_p1_p2, _, cost_p2_p1, _ = nn_distance(samp_pc, ref_pc)
max_cost = tf.reduce_max(cost_p1_p2, axis=1)
max_cost = tf.reduce_mean(max_cost)
cost_p1_p2 = tf.reduce_mean(cost_p1_p2)
cost_p2_p1 = tf.reduce_mean(cost_p2_p1)
loss = cost_p1_p2 + max_cost + (gamma + delta * pc_size) * cost_p2_p1
tf.summary.scalar("cost_p1_p2", cost_p1_p2)
tf.summary.scalar("cost_p2_p1", cost_p2_p1)
tf.summary.scalar("max_cost", max_cost)
return loss
|
the-stack_0_15544 | import os
import warnings
import numpy as np
import pytorch_lightning as pl
import toml
import torch
import torch.nn.functional as F
import wandb
from pytorch_lightning.callbacks import ModelCheckpoint
from torch import nn
from torch import optim
from torchvision import models
from torchvision.models._utils import IntermediateLayerGetter
from torchvision.models.segmentation.deeplabv3 import DeepLabHead, DeepLabV3
from torchvision.models.segmentation.fcn import FCNHead, FCN
from data.data_palm import get_palm_loaders
from models.resnet_unet import UNetWithResnet50Encoder
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
torch.multiprocessing.set_sharing_strategy("file_system")
warnings.filterwarnings("ignore", category=UserWarning)
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# set to False if not using wandb
WANDB = True
if WANDB:
from pytorch_lightning.loggers import WandbLogger
CHECKPOINT_PATH = None
CHECKPOINTS_BASE_PATH = toml.load("paths.toml")["CHECKPOINTS_BASE_PATH"]
# CHECKPOINT_PATH = CHECKPOINTS_BASE_PATH + "supervised_baseline/last.ckpt"
# CHECKPOINT_PATH = CHECKPOINTS_BASE_PATH + "cm_r50_raw_risks_burdens_inner_none/last.ckpt"
# CHECKPOINT_PATH = CHECKPOINTS_BASE_PATH + "cm_r50_raw_risks_burdens_inner_h1/last.ckpt"
# CHECKPOINT_PATH = CHECKPOINTS_BASE_PATH + "cm_r50_raw_risks_burdens_outer_none/last.ckpt"
# CHECKPOINT_PATH = CHECKPOINTS_BASE_PATH + "cm_r50_raw_risks_burdens_outer_h1/last.ckpt"
# CHECKPOINT_PATH = CHECKPOINTS_BASE_PATH + "cm_r50_raw_risks_burdens_outer_h12/last.ckpt"
# CHECKPOINT_PATH = CHECKPOINTS_BASE_PATH + "cm_r50_raw_snps_none/last.ckpt"
# CHECKPOINT_PATH = CHECKPOINTS_BASE_PATH + "cm_r50_raw_snps_h1/last.ckpt"
# CHECKPOINT_PATH = CHECKPOINTS_BASE_PATH + "cm_r50_raw_snps_h12/last.ckpt"
# CHECKPOINT_PATH = CHECKPOINTS_BASE_PATH + "cm_r50_risk_scores_gen_none/last.ckpt"
# CHECKPOINT_PATH = CHECKPOINTS_BASE_PATH + "cm_r50_risk_scores_gen_h1/last.ckpt"
# CHECKPOINT_PATH = CHECKPOINTS_BASE_PATH + "cm_r50_risk_scores_gen_h12/last.ckpt"
# CHECKPOINT_PATH = CHECKPOINTS_BASE_PATH + "cm_r50_burden_scores_gen_none/last.ckpt"
# CHECKPOINT_PATH = CHECKPOINTS_BASE_PATH + "cm_r50_burden_scores_gen_h1/last.ckpt"
# CHECKPOINT_PATH = CHECKPOINTS_BASE_PATH + "cm_r50_burden_scores_gen_h12/last.ckpt"
# CHECKPOINT_PATH = CHECKPOINTS_BASE_PATH + "barlow_r50_proj128/epoch_99-step_170399.ckpt"
# CHECKPOINT_PATH = CHECKPOINTS_BASE_PATH + "byol_r50_proj128/epoch_99-step_170399.ckpt"
# CHECKPOINT_PATH = CHECKPOINTS_BASE_PATH + "simsiam_r50_proj128/epoch_99-step_170399.ckpt"
# CHECKPOINT_PATH = CHECKPOINTS_BASE_PATH + "simclr_r50_proj128/epoch_99-step_170399.ckpt"
# CHECKPOINT_PATH = CHECKPOINTS_BASE_PATH + "nnclr_r50_proj128/epoch_99-step_170399.ckpt"
train_pct = 0.6
val_pct = 0.8 - train_pct
loader_param = {
"batch_size": 4,
"size": 448,
"joint_mask": True,
"train_pct": train_pct,
"val_pct": val_pct,
}
accumulate_grad_batches = 16
n_classes = 2
epochs = 50
warmup_epochs = 10 # if set to 0, fine-tune in all epochs
lr = 1e-3
dice_weight = 0.8
bce_weight = 0.2
seg_model_name = "unet" # "fcn" or "deeplabv3" or "unet"
basemodel = models.resnet50
pretrained_imagenet = False
set_scheduler = "none" # "none" or "steplr" or "onecycle" or "reduceplat"
# optimizer = "sgd"
# optimizer_dict = dict(weight_decay=5e-4, momentum=0.9, nesterov=True)
optimizer = "adam"
optimizer_dict = dict(weight_decay=1e-5)
pl.seed_everything(42, workers=True)
def dice(y, y_pred):
intersection = np.sum(y_pred * y) * 2.0
return intersection / (np.sum(y_pred) + np.sum(y))
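# Note: `dice` above is the (soft) Sørensen-Dice coefficient on flattened arrays; the
# predictions passed in are sigmoid probabilities rather than hard masks, and the ratio
# is undefined when both inputs sum to zero (assumed not to happen for these masks).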
def load_from_state_dict_supervised(model, state_dict):
"""Loads the model weights from the state dictionary."""
# step 1: filter state dict
model_keys_prefixes = []
for okey, oitem in model.state_dict().items():
model_keys_prefixes.append(okey.split(".")[0])
new_state_dict = {}
index = 0
for key, item in state_dict.items():
# remove the "model." prefix from the state dict key
all_key_parts = [model_keys_prefixes[index]]
all_key_parts.extend(key.split(".")[2:])
index += 1
new_key = ".".join(all_key_parts)
if new_key in model.state_dict() and "fc" not in new_key:
new_state_dict[new_key] = item
# step 2: load from checkpoint
model.load_state_dict(new_state_dict, strict=False)
def load_from_state_dict_gen_img(model, state_dict):
"""Loads the model weights from the state dictionary."""
# step 1: filter state dict
model_keys_prefixes = []
for okey, oitem in model.state_dict().items():
model_keys_prefixes.append(okey.split(".")[0])
new_state_dict = {}
index = 0
for key, item in state_dict.items():
if (
key.startswith("imaging_model")
or key.startswith("model.imaging_model")
or key.startswith("models.0.imaging_model")
):
# remove the "model." prefix from the state dict key
all_key_parts = [model_keys_prefixes[index]]
if key.startswith("imaging_model"):
all_key_parts.extend(key.split(".")[2:])
elif key.startswith("model.imaging_model"):
all_key_parts.extend(key.split(".")[3:])
else:
all_key_parts.extend(key.split(".")[4:])
index += 1
new_key = ".".join(all_key_parts)
if new_key in model.state_dict():
new_state_dict[new_key] = item
# step 2: load from checkpoint
model.load_state_dict(new_state_dict, strict=False)
def load_from_state_dict_img_only(model, state_dict):
"""Loads the model weights from the state dictionary."""
# step 1: filter state dict
model_keys_prefixes = []
for okey, oitem in model.state_dict().items():
model_keys_prefixes.append(okey.split(".")[0])
new_state_dict = {}
index = 0
for key, item in state_dict.items():
if (
(
key.startswith("resnet_simclr")
or key.startswith("resnet_simsiam")
or key.startswith("resnet_barlow_twins")
or key.startswith("resnet_byol")
or key.startswith("resnet_nnclr")
)
and "projection" not in key
and "prediction" not in key
and "momentum" not in key
):
# remove the "model." prefix from the state dict key
all_key_parts = [model_keys_prefixes[index]]
all_key_parts.extend(key.split(".")[3:])
index += 1
new_key = ".".join(all_key_parts)
if new_key in model.state_dict():
new_state_dict[new_key] = item
# step 2: load from checkpoint
model.load_state_dict(new_state_dict, strict=False)
class Model(pl.LightningModule):
def __init__(
self,
n_output,
loss_fct,
base_model=models.resnet50,
seg_model_name="fcn", # can be "fcn" or "deeplabv3" or "unet"
pretrained=True,
lr=1e-3,
total_steps=0,
set_scheduler="none",
opt_method="adam",
opt_param=dict(),
):
super().__init__()
self.lr = lr
self.total_steps = total_steps
self.loss_fct = loss_fct
self.set_scheduler = set_scheduler
if CHECKPOINT_PATH is None:
backbone = base_model(pretrained=pretrained)
else:
backbone = base_model(pretrained=pretrained)
state_dict = torch.load(CHECKPOINT_PATH, map_location=DEVICE)
if (
"simclr" in CHECKPOINT_PATH
or "byol" in CHECKPOINT_PATH
or "barlow" in CHECKPOINT_PATH
or "simsiam" in CHECKPOINT_PATH
or "nnclr" in CHECKPOINT_PATH
):
load_from_state_dict_img_only(backbone, state_dict["state_dict"])
elif "supervised" in CHECKPOINT_PATH:
if "state_dict" in state_dict:
load_from_state_dict_supervised(backbone, state_dict["state_dict"])
else:
load_from_state_dict_supervised(backbone, state_dict)
else:
if "state_dict" in state_dict:
load_from_state_dict_gen_img(backbone, state_dict["state_dict"])
else:
load_from_state_dict_gen_img(backbone, state_dict)
if warmup_epochs > 0 and CHECKPOINT_PATH is not None:
for param in backbone.parameters():
param.requires_grad = False
if seg_model_name == "fcn" or seg_model_name == "deeplabv3":
out_layer = "layer4"
out_inplanes = 2048
return_layers = {out_layer: "out"}
backbone = IntermediateLayerGetter(backbone, return_layers=return_layers)
model_map = {
"deeplabv3": (DeepLabHead, DeepLabV3),
"fcn": (FCNHead, FCN),
}
classifier = model_map[seg_model_name][0](out_inplanes, n_output)
base_model = model_map[seg_model_name][1]
self.model = base_model(backbone, classifier, aux_classifier=None)
else:
self.model = UNetWithResnet50Encoder(backbone, n_classes=n_output)
self.opt_method = opt_method
self.opt_param = opt_param
self.labels = []
self.preds = []
def forward(self, x):
return self.model(x)
def configure_optimizers(self):
if self.opt_method == "adam":
optimizer = optim.Adam(self.parameters(), lr=self.lr, **self.opt_param)
elif self.opt_method == "sgd":
optimizer = optim.SGD(self.parameters(), lr=self.lr, **self.opt_param)
else:
raise NotImplementedError(
f"optimization method {self.opt_method} not set up"
)
if self.set_scheduler == "none":
return optimizer
elif self.set_scheduler == "steplr":
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.5)
elif self.set_scheduler == "onecycle":
scheduler = optim.lr_scheduler.OneCycleLR(
optimizer,
max_lr=self.lr,
total_steps=self.total_steps,
)
elif self.set_scheduler == "reduceplat":
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer)
return {
"optimizer": optimizer,
"scheduler": scheduler,
"monitor": "valid_loss",
}
return [optimizer], [scheduler]
def on_train_epoch_start(self) -> None:
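        # Warm-up sketch: when a pretrained checkpoint was loaded, the encoder starts out
        # frozen; once `warmup_epochs` epochs have passed, every parameter is unfrozen and
        # the optimizer is rebuilt with a 10x smaller learning rate for fine-tuning.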
if warmup_epochs > 0 and self.current_epoch == warmup_epochs:
if CHECKPOINT_PATH is not None:
for param in self.parameters():
param.requires_grad = True
self.trainer.optimizers[0] = optim.Adam(
self.parameters(), lr=self.lr / 10, **self.opt_param
)
def training_step(self, batch, idx):
x, y = batch
if seg_model_name == "fcn" or seg_model_name == "deeplabv3":
y_hat = self(x)["out"]
else:
y_hat = self(x)
loss = self.loss_fct(y_hat, y)
self.log("train_loss", loss, on_epoch=True)
return loss
def validation_step(self, batch, idx):
x, y = batch
if seg_model_name == "fcn" or seg_model_name == "deeplabv3":
y_hat = self(x)["out"]
else:
y_hat = self(x)
loss = self.loss_fct(y_hat, y)
y_np = y.detach().cpu().numpy()
y_hat_np = F.sigmoid(y_hat).detach().cpu().numpy()
self.store_predictions_labels(y_np, y_hat_np)
self.log("valid_loss", loss, on_epoch=True, prog_bar=True)
if idx == 0:
self.display_batch_imgs(x, y_hat_np, y_np, title="val images")
return loss
def test_step(self, batch, idx):
x, y = batch
if seg_model_name == "fcn" or seg_model_name == "deeplabv3":
y_hat = self(x)["out"]
else:
y_hat = self(x)
loss = self.loss_fct(y_hat, y)
y_np = y.detach().cpu().numpy()
y_hat_np = F.sigmoid(y_hat).detach().cpu().numpy()
self.store_predictions_labels(y_np, y_hat_np)
self.log("test_loss", loss, on_epoch=True, prog_bar=True)
self.display_batch_imgs(x, y_hat_np, y_np, title="test images")
return loss
def on_validation_epoch_end(self) -> None:
y = np.concatenate(self.labels).ravel()
y_hat = np.concatenate(self.preds).ravel()
self.log(
"valid_dice",
dice(y, y_hat),
)
self.labels = []
self.preds = []
def on_test_epoch_end(self) -> None:
y = np.concatenate(self.labels).ravel()
y_hat = np.concatenate(self.preds).ravel()
self.log(
"test_dice",
dice(y, y_hat),
)
self.labels = []
self.preds = []
def store_predictions_labels(self, y, y_hat):
self.labels.append(y)
self.preds.append(y_hat)
def display_batch_imgs(self, x, y_hat_np, y_np, title="val images"):
mask_list = []
for original_image, true_mask, prediction_mask in zip(x, y_np, y_hat_np):
mask_list.append(
wandb.Image(
original_image.cpu(),
masks={
"prediction": {
"mask_data": np.argmax(prediction_mask, axis=0),
"class_labels": {0: "background", 1: "foreground"},
},
"ground truth": {
"mask_data": np.argmax(true_mask, axis=0),
"class_labels": {0: "background", 1: "foreground"},
},
},
)
)
self.logger.experiment.log({title: mask_list})
class DiceLoss(nn.Module):
def __init__(self):
super(DiceLoss, self).__init__()
def forward(self, inputs, targets, smooth=1):
# comment out if your model contains a sigmoid or equivalent activation layer
inputs = F.sigmoid(inputs)
# flatten label and prediction tensors
inputs = inputs.view(-1)
targets = targets.view(-1)
intersection = (inputs * targets).sum()
dice = (2.0 * intersection + smooth) / (inputs.sum() + targets.sum() + smooth)
return 1 - dice
loaders = get_palm_loaders(**loader_param)
tl, vl, ttl = loaders
bce_fn = torch.nn.BCEWithLogitsLoss()
dice_fn = DiceLoss()
def loss_fn(y_pred, y_true):
bce = bce_fn(y_pred, y_true)
dice = dice_fn(y_pred, y_true)
return bce_weight * bce + dice_weight * dice
use_sch = set_scheduler != "none"
total_steps = epochs * len(tl) if use_sch else 0
model = (
Model(
n_classes,
loss_fct=loss_fn,
base_model=basemodel,
lr=lr,
total_steps=total_steps,
pretrained=pretrained_imagenet,
set_scheduler=set_scheduler,
opt_method=optimizer,
opt_param=optimizer_dict,
seg_model_name=seg_model_name,
)
.cuda()
.train()
)
logger = None
if WANDB:
logger = WandbLogger(project="PALM_myopia_segmentation")
params = {
"epochs": epochs,
"train_pct": train_pct,
"lr": lr,
"scheduler": set_scheduler,
"base_model": basemodel.__name__,
"img_size": tl.dataset[0][0].shape[-1],
"bs": tl.batch_size,
"accumulate_grad_batches": accumulate_grad_batches,
"seg_model_name": seg_model_name,
}
logger.log_hyperparams(params)
trainer = pl.Trainer(
gpus=1,
deterministic=True,
max_epochs=epochs,
logger=logger if WANDB else True,
accumulate_grad_batches=accumulate_grad_batches,
callbacks=[
ModelCheckpoint(
monitor="valid_loss",
filename="model-{epoch:02d}-{valid_dice:.2f}",
save_top_k=1,
),
],
)
trainer.validate(model, dataloaders=vl)
trainer.fit(model, tl, vl)
result = trainer.test(dataloaders=ttl, ckpt_path="best")
|
the-stack_0_15547 | ############################################################################## Setup
"""
1D Bayesian Optimization Test:
(1) Gemerate 1D objective.
(2) Initialize with data.
(3) Test predictions, variance estimation, and sampling.
(4) Run single iteration of each acquisition function.
"""
# Imports
import numpy as np
import pandas as pd
from edbo.bro import BO_express
from edbo.pd_utils import to_torch, torch_to_numpy
import matplotlib.pyplot as plt
import random
############################################################################## Test Functions
# Objective
def random_result(*args):
"""Random objective."""
return round(random.random(),3) * 100
# Test BO_express with a computational (random) objective over an encoded reaction space
def BO_pred(acq_func, plot=False, return_='pred', append=False, init='rand'):
# Define reaction space and auto-encode
n_ligands = random.sample([3,4,5,6,7,8], 1)[0]
ligands = pd.read_csv('ligands.csv').sample(n_ligands).values.flatten()
bases = ['DBU', 'MTBD', 'potassium carbonate', 'potassium phosphate', 'potassium tert-butoxide']
reaction_components={'aryl_halide':['chlorobenzene','iodobenzene','bromobenzene'],
'base':bases,
'solvent':['THF', 'Toluene', 'DMSO', 'DMAc'],
'ligand':ligands,
'concentration':[0.1, 0.2, 0.3],
'temperature': [20, 30, 40]
}
encoding={
'aryl_halide':'resolve',
'base':'resolve',
'solvent':'resolve',
'ligand':'mordred',
'concentration':'numeric',
'temperature':'numeric'}
    # Instantiate BO class
bo = BO_express(reaction_components=reaction_components,
encoding=encoding,
acquisition_function=acq_func,
init_method=init,
batch_size=random.sample(range(30),1)[0],
computational_objective=random_result,
target='yield')
bo.init_sample(append=True)
bo.run(append=append)
bo.save()
bo = BO_express()
bo.load()
# Check prediction
if return_ == 'pred':
try:
bo.model.predict(to_torch(bo.obj.domain)) # torch.tensor
bo.model.predict(bo.obj.domain.values) # numpy.array
bo.model.predict(list(bo.obj.domain.values)) # list
bo.model.predict(bo.obj.domain) # pandas.DataFrame
except:
return False
return True
    # Check predictive posterior variance
    elif return_ == 'var':
        try:
            bo.model.variance(to_torch(bo.obj.domain)) # torch.tensor
            bo.model.variance(bo.obj.domain.values) # numpy.array
            bo.model.variance(list(bo.obj.domain.values)) # list
            bo.model.variance(bo.obj.domain) # pandas.DataFrame
        except:
            return False
        return True
# Make sure sampling works with tensors, arrays, lists, and DataFrames
elif return_ == 'sample':
try:
bo.model.sample_posterior(to_torch(bo.obj.domain)) # torch.tensor
bo.model.sample_posterior(bo.obj.domain.values) # numpy.array
bo.model.sample_posterior(list(bo.obj.domain.values)) # list
bo.model.sample_posterior(bo.obj.domain) # pandas.DataFrame
return True
except:
return False
# Plot model
elif return_ == 'plot':
mean = bo.obj.scaler.unstandardize(bo.model.predict(bo.obj.domain))
std = np.sqrt(bo.model.variance(bo.obj.domain)) * bo.obj.scaler.std * 2
samples = bo.obj.scaler.unstandardize(bo.model.sample_posterior(bo.obj.domain, batch_size=3))
plt.figure(1, figsize=(6,6))
# Model mean and standard deviation
plt.subplot(211)
plt.plot(range(len(mean)), mean, label='GP')
plt.fill_between(range(len(mean)), mean-std, mean+std, alpha=0.4)
# Known results and next selected point
plt.scatter(bo.obj.results_input().index.values, bo.obj.results_input()['yield'], color='black', label='known')
plt.ylabel('f(x)')
# Samples
plt.subplot(212)
for sample in samples:
plt.plot(range(len(mean)), torch_to_numpy(sample))
plt.xlabel('x')
plt.ylabel('Posterior Samples')
plt.show()
return True
elif return_ == 'simulate':
if init != 'external':
bo.init_seq.batch_size = random.sample([2,3,4,5,6,7,8,9,10],1)[0]
bo.simulate(iterations=3)
bo.plot_convergence()
bo.model.regression()
return True
############################################################################## Tests
# Test predicted mean and variance, sampling, and ploting
def test_BO_pred_mean_TS():
assert BO_pred('TS', return_='pred')
def test_BO_var():
assert BO_pred('TS', return_='var')
def test_BO_sample():
assert BO_pred('TS', return_='sample')
def test_BO_plot():
assert BO_pred('TS', return_='plot')
# Test simulations
def test_BO_simulate_TS():
assert BO_pred('TS', return_='simulate')
def test_BO_simulate_EI():
assert BO_pred('EI', return_='simulate')
|
the-stack_0_15548 | """I/O for UCSC Browser Extensible Data (BED)."""
from __future__ import absolute_import, division, print_function
from builtins import map, next
import shlex
import pandas as pd
from Bio.File import as_handle
from .util import report_bad_line
def read_bed(infile):
"""UCSC Browser Extensible Data (BED) format.
A BED file has these columns:
chromosome, start position, end position, [gene, strand, other stuff...]
Coordinate indexing is from 0.
Sets of regions are separated by "track" lines. This function stops reading
after encountering a track line other than the first one in the file.
"""
# ENH: just pd.read_table, skip 'track'
@report_bad_line
def _parse_line(line):
fields = line.split('\t', 6)
chrom, start, end = fields[:3]
gene = (fields[3].rstrip()
if len(fields) >= 4 else '-')
strand = (fields[5].rstrip()
if len(fields) >= 6 else '.')
return chrom, int(start), int(end), gene, strand
def track2track(handle):
try:
firstline = next(handle)
if firstline.startswith("browser "):
# UCSC Genome Browser feature -- ignore it
firstline = next(handle)
except StopIteration:
pass
else:
if not firstline.startswith("track"):
yield firstline
for line in handle:
if line.startswith("track"):
break
yield line
with as_handle(infile, 'rU') as handle:
rows = map(_parse_line, track2track(handle))
return pd.DataFrame.from_records(rows, columns=["chromosome", "start",
"end", "gene", "strand"])
def read_bed3(infile):
"""3-column BED format: chromosome, start, end."""
table = read_bed(infile)
return table.loc[:, ['chromosome', 'start', 'end']]
def read_bed4(infile):
"""4-column BED format: chromosome, start, end, name."""
table = read_bed(infile)
return table.loc[:, ['chromosome', 'start', 'end', 'gene']]
def read_bed6(infile):
"""6-column BED format: chromosome, start, end, name, score, strand."""
return NotImplemented
def parse_bed_track(line):
"""Parse the "name" field of a BED track definition line.
Example:
track name=146793_BastianLabv2_P2_target_region description="146793_BastianLabv2_P2_target_region"
"""
fields = shlex.split(line) # raises ValueError if line is corrupted
assert fields[0] == 'track'
for field in fields[1:]:
if '=' in field:
key, val = field.split('=', 1)
if key == 'name':
return val
raise ValueError("No name defined for this track")
def group_bed_tracks(bedfile):
"""Group the parsed rows in a BED file by track.
Yields (track_name, iterable_of_lines), much like itertools.groupby.
"""
# ENH - make this memory-efficient w/ generators or something
with as_handle(bedfile, 'r') as handle:
curr_track = 'DEFAULT'
curr_lines = []
for line in handle:
if line.startswith('track'):
if curr_lines:
yield curr_track, curr_lines
curr_lines = []
curr_track = parse_bed_track(line)
else:
curr_lines.append(line)
yield curr_track, curr_lines
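# Usage sketch for group_bed_tracks (illustrative; "targets.bed" is a hypothetical path,
# and read_bed also accepts file-like handles via Bio.File.as_handle):
#   from io import StringIO
#   for track_name, lines in group_bed_tracks("targets.bed"):
#       regions = read_bed(StringIO("".join(lines)))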
# _____________________________________________________________________
def write_bed(dframe):
if len(dframe.columns) == 3:
return write_bed3(dframe)
    elif len(dframe.columns) == 4:
return write_bed4(dframe)
else:
# Default: BED-like, keep all trailing columns
return dframe
def write_bed3(dframe):
return dframe.loc[:, ["chromosome", "start", "end"]]
def write_bed4(dframe):
dframe = dframe.copy()
if "gene" not in dframe:
dframe["gene"] = '-'
return dframe.loc[:, ["chromosome", "start", "end", "gene"]]
|
the-stack_0_15549 | from __future__ import absolute_import, division, print_function
from xfel.ui import settings_dir
from xfel.ui.db import db_proxy, get_run_path
import os, shutil
known_job_statuses = ["DONE", "ERR", "PEND", "RUN", "SUSP", "PSUSP", "SSUSP", "UNKWN", "EXIT", "DONE", "ZOMBI", "DELETED", "SUBMIT_FAIL", "SUBMITTED", "HOLD"]
finished_job_statuses = ["DONE", "EXIT", "DELETED", "UNKWN", "ERR", "SUBMIT_FAIL"]
class JobFactory(object):
@staticmethod
def from_job(job):
if job.task_id is None:
return IndexingJob(job.app, job.id, **job._db_dict)
task = job.app.get_task(job.task_id)
if task.type == "indexing":
return IndexingJob(job.app, job.id, **job._db_dict)
if task.type == "ensemble_refinement":
return EnsembleRefinementJob(job.app, job.id, **job._db_dict)
if task.type == "scaling":
return ScalingJob(job.app, job.id, **job._db_dict)
if task.type == "merging":
return MergingJob(job.app, job.id, **job._db_dict)
@staticmethod
def from_args(app, job_id = None, **kwargs):
return JobFactory.from_job(Job(app, job_id, **kwargs))
class Job(db_proxy):
def __init__(self, app, job_id = None, **kwargs):
db_proxy.__init__(self, app, "%s_job" % app.params.experiment_tag, id = job_id, **kwargs)
self.job_id = self.id
self._run = None
self._rungroup = None
self._trial = None
self._task = None
self._dataset = None
self._dataset_version = None
def __getattr__(self, name):
# Called only if the property cannot be found
if name in ["run", "rungroup", "trial", "task", "dataset", "dataset_version"]:
_name = "_" + name
name_id = name + "_id"
if getattr(self, _name) is None:
if name == "dataset_version":
if self.dataset_id is not None:
self._dataset_version = self.dataset.latest_version # todo bug fix: add this to get_all_jobs
elif getattr(self, name_id) is not None:
          setattr(self, _name, getattr(self.app, "get_" + name)(**{name_id: getattr(self, name_id)}))
return getattr(self, _name)
elif name == "scope":
return task_scope[task_types.index(self.type)]
else:
return super(Job, self).__getattr__(name)
def __setattr__(self, name, value):
if name in ["run", "rungroup", "trial", "task", "dataset", "dataset_version"]:
setattr(self, "_"+name, value)
else:
super(Job, self).__setattr__(name, value)
def get_log_path(self):
run_path = get_run_path(self.app.params.output_folder, self.trial, self.rungroup, self.run)
return os.path.join(run_path, "stdout", "log.out")
def submit(self, previous_job = None):
raise NotImplementedError("Override me!")
def delete(self, output_only=False):
raise NotImplementedError("Override me!")
def get_output_files(self):
    # Return the output folder and the experiment/reflection table suffixes
raise NotImplementedError("Override me!")
def remove_from_db(self):
assert self.status == "DELETED"
print("Removing job %d from the db"%self.id, end=' ')
tag = self.app.params.experiment_tag
query = """DELETE job FROM `%s_job` job
WHERE job.id = %d""" % (
tag, self.id)
cursor = self.app.execute_query(query, commit=True)
print("(%d)"%cursor.rowcount)
def get_identifier_string(self):
if self.app.params.facility.name == 'lcls':
s = "%s_%s_r%04d_t%03d_rg%03d"% \
(self.app.params.facility.lcls.experiment, self.app.params.experiment_tag, int(self.run.run), self.trial.trial, self.rungroup.id)
else:
s = "%s_%s_t%03d_rg%03d"% \
(self.app.params.experiment_tag, self.run.run, self.trial.trial, self.rungroup.id)
if self.task is not None:
s += "_task%03d"%self.task.id
return s
class IndexingJob(Job):
def get_output_files(self):
run_path = str(get_run_path(self.app.params.output_folder, self.trial, self.rungroup, self.run))
return os.path.join(run_path, 'out'), '_integrated.expt', '_integrated.refl'
def submit(self, previous_job = None):
import libtbx.load_env
configs_dir = os.path.join(settings_dir, "cfgs")
if not os.path.exists(configs_dir):
os.makedirs(configs_dir)
identifier_string = self.get_identifier_string()
target_phil_path = os.path.join(configs_dir, identifier_string + "_params.phil")
dispatcher = self.app.params.dispatcher
phil_str = self.trial.target_phil_str
if phil_str is None: phil_str = ""
if self.rungroup.extra_phil_str is not None:
phil_str += "\n" + self.rungroup.extra_phil_str
from xfel.ui import load_phil_scope_from_dispatcher
if dispatcher == "cxi.xtc_process":
image_format = 'pickle'
else:
orig_phil_scope = load_phil_scope_from_dispatcher(dispatcher)
if os.path.isfile(dispatcher):
dispatcher = 'libtbx.python ' + dispatcher
from iotbx.phil import parse
if self.rungroup.two_theta_low is not None or self.rungroup.two_theta_high is not None:
override_str = """
radial_average {
enable = True
show_plots = False
verbose = False
output_bins = False
}
"""
phil_scope = orig_phil_scope.fetch(parse(override_str))
else:
phil_scope = orig_phil_scope
trial_params = phil_scope.fetch(parse(phil_str)).extract()
image_format = self.rungroup.format
if image_format == 'cbf':
if "rayonix" in self.rungroup.detector_address.lower():
mode = "rayonix"
elif "cspad" in self.rungroup.detector_address.lower():
mode = "cspad"
elif "jungfrau" in self.rungroup.detector_address.lower():
mode = "jungfrau"
else:
mode = "other"
if hasattr(trial_params, 'format'):
trial_params.format.file_format = image_format
trial_params.format.cbf.mode = mode
if self.rungroup.calib_dir is not None or self.rungroup.config_str is not None or dispatcher == 'cxi.xtc_process' or image_format == 'pickle':
config_path = os.path.join(configs_dir, identifier_string + ".cfg")
else:
config_path = None
if hasattr(trial_params.dispatch, 'process_percent'):
trial_params.dispatch.process_percent = self.trial.process_percent
# Dictionary for formating the submit phil and, if used, the labelit cfg file
d = dict(
# Generally for the LABELIT backend or image pickles
address = self.rungroup.detector_address,
default_calib_dir = libtbx.env.find_in_repositories("xfel/metrology/CSPad/run4/CxiDs1.0_Cspad.0"),
dark_avg_path = self.rungroup.dark_avg_path,
dark_stddev_path = self.rungroup.dark_stddev_path,
untrusted_pixel_mask_path = self.rungroup.untrusted_pixel_mask_path,
detz_parameter = self.rungroup.detz_parameter,
gain_map_path = self.rungroup.gain_map_path,
gain_mask_level = self.rungroup.gain_mask_level,
beamx = self.rungroup.beamx,
beamy = self.rungroup.beamy,
energy = self.rungroup.energy,
binning = self.rungroup.binning,
two_theta_low = self.rungroup.two_theta_low,
two_theta_high = self.rungroup.two_theta_high,
# Generally for job submission
dry_run = self.app.params.dry_run,
dispatcher = dispatcher,
cfg = config_path,
experiment = self.app.params.facility.lcls.experiment, # LCLS specific parameter
run_num = self.run.run,
output_dir = self.app.params.output_folder,
use_ffb = self.app.params.facility.lcls.use_ffb, # LCLS specific parameter
# Generally for both
trial = self.trial.trial,
rungroup = self.rungroup.rungroup_id,
experiment_tag = self.app.params.experiment_tag,
calib_dir = self.rungroup.calib_dir,
nproc = self.app.params.mp.nproc,
nproc_per_node = self.app.params.mp.nproc_per_node,
queue = self.app.params.mp.queue or None,
env_script = self.app.params.mp.env_script[0] if self.app.params.mp.env_script is not None and len(self.app.params.mp.env_script) > 0 and len(self.app.params.mp.env_script[0]) > 0 else None,
method = self.app.params.mp.method,
htcondor_executable_path = self.app.params.mp.htcondor.executable_path,
target = target_phil_path,
host = self.app.params.db.host,
dbname = self.app.params.db.name,
user = self.app.params.db.user,
port = self.app.params.db.port,
# always use mpi for 'lcls'
use_mpi = self.app.params.mp.method != 'local' or (self.app.params.mp.method == 'local' and self.app.params.facility.name == 'lcls')
)
if self.app.params.db.password is not None and len(self.app.params.db.password) == 0:
d['password'] = None
else:
d['password'] = self.app.params.db.password
phil = open(target_phil_path, "w")
if dispatcher == 'cxi.xtc_process':
phil.write(phil_str)
else:
extra_scope = None
if hasattr(trial_params, 'format'):
if image_format == "cbf":
trial_params.input.address = self.rungroup.detector_address
trial_params.format.cbf.detz_offset = self.rungroup.detz_parameter
trial_params.format.cbf.override_energy = self.rungroup.energy
trial_params.format.cbf.invalid_pixel_mask = self.rungroup.untrusted_pixel_mask_path
if mode == 'cspad':
trial_params.format.cbf.cspad.gain_mask_value = self.rungroup.gain_mask_level
elif mode == 'rayonix':
trial_params.format.cbf.rayonix.bin_size = self.rungroup.binning
trial_params.format.cbf.rayonix.override_beam_x = self.rungroup.beamx
trial_params.format.cbf.rayonix.override_beam_y = self.rungroup.beamy
if trial_params.input.known_orientations_folder is not None:
trial_params.input.known_orientations_folder = trial_params.input.known_orientations_folder.format(run=self.run.run)
else:
if trial_params.spotfinder.lookup.mask is None:
trial_params.spotfinder.lookup.mask = self.rungroup.untrusted_pixel_mask_path
if trial_params.integration.lookup.mask is None:
trial_params.integration.lookup.mask = self.rungroup.untrusted_pixel_mask_path
if self.app.params.facility.name == 'lcls':
locator_path = os.path.join(configs_dir, identifier_string + ".loc")
locator = open(locator_path, 'w')
locator.write("experiment=%s\n"%self.app.params.facility.lcls.experiment) # LCLS specific parameter
locator.write("run=%s\n"%self.run.run)
locator.write("detector_address=%s\n"%self.rungroup.detector_address)
if self.rungroup.wavelength_offset:
locator.write("wavelength_offset=%s\n"%self.rungroup.wavelength_offset)
if self.app.params.facility.lcls.use_ffb:
locator.write("use_ffb=True\n")
if image_format == "cbf":
if mode == 'rayonix':
from xfel.cxi.cspad_ana import rayonix_tbx
pixel_size = rayonix_tbx.get_rayonix_pixel_size(self.rungroup.binning)
extra_scope = parse("geometry { detector { panel { origin = (%f, %f, %f) } } }"%(-self.rungroup.beamx * pixel_size,
self.rungroup.beamy * pixel_size,
-self.rungroup.detz_parameter))
locator.write("rayonix.bin_size=%s\n"%self.rungroup.binning)
elif mode == 'cspad':
locator.write("cspad.detz_offset=%s\n"%self.rungroup.detz_parameter)
locator.close()
d['locator'] = locator_path
else:
d['locator'] = None
if self.rungroup.two_theta_low is not None or self.rungroup.two_theta_high is not None:
try:
trial_params.radial_average.two_theta_low = self.rungroup.two_theta_low
trial_params.radial_average.two_theta_high = self.rungroup.two_theta_high
except AttributeError:
pass # not all dispatchers support radial averaging
working_phil = phil_scope.format(python_object=trial_params)
if extra_scope:
working_phil = working_phil.fetch(extra_scope)
diff_phil = orig_phil_scope.fetch_diff(source=working_phil)
phil.write(diff_phil.as_str())
phil.close()
if config_path is not None:
if dispatcher != 'cxi.xtc_process':
d['untrusted_pixel_mask_path'] = None # Don't pass a pixel mask to mod_image_dict as it will
# will be used during dials processing directly
config_str = "[psana]\n"
if self.rungroup.calib_dir is not None:
config_str += "calib-dir=%s\n"%self.rungroup.calib_dir
modules = []
if self.rungroup.config_str is not None:
for line in self.rungroup.config_str.split("\n"):
if line.startswith('['):
modules.append(line.lstrip('[').rstrip(']'))
if dispatcher == 'cxi.xtc_process':
modules.insert(0, 'my_ana_pkg.mod_radial_average')
modules.extend(['my_ana_pkg.mod_hitfind:index','my_ana_pkg.mod_dump:index'])
elif image_format == 'pickle':
modules.insert(0, 'my_ana_pkg.mod_radial_average')
modules.extend(['my_ana_pkg.mod_image_dict'])
if self.app.params.facility.lcls.dump_shots:
modules.insert(0, 'my_ana_pkg.mod_dump:shot')
if len(modules) > 0:
config_str += "modules = %s\n"%(" ".join(modules))
if self.rungroup.config_str is not None:
config_str += self.rungroup.config_str + "\n"
if dispatcher == 'cxi.xtc_process' or image_format == 'pickle':
d['address'] = d['address'].replace('.','-').replace(':','|') # old style address
if dispatcher == 'cxi.xtc_process':
template = open(os.path.join(libtbx.env.find_in_repositories("xfel/ui/db/cfgs"), "index_all.cfg"))
elif image_format == 'pickle':
template = open(os.path.join(libtbx.env.find_in_repositories("xfel/ui/db/cfgs"), "image_dict.cfg"))
for line in template.readlines():
config_str += line.format(**d)
template.close()
d['address'] = self.rungroup.detector_address
cfg = open(config_path, 'w')
cfg.write(config_str)
cfg.close()
if dispatcher != 'cxi.xtc_process':
d['untrusted_pixel_mask_path'] = self.rungroup.untrusted_pixel_mask_path
submit_phil_path = os.path.join(configs_dir, identifier_string + "_submit.phil")
submit_root = libtbx.env.find_in_repositories("xfel/ui/db/cfgs")
if dispatcher in ['cxi.xtc_process', 'cctbx.xfel.xtc_process']:
template = open(os.path.join(submit_root, "submit_xtc_process.phil"))
else:
test_root = os.path.join(submit_root, "submit_" + dispatcher + ".phil")
if os.path.exists(test_root):
template = open(test_root)
else:
if hasattr(trial_params, 'format'):
template = open(os.path.join(submit_root, "submit_xtc_process.phil"))
else:
template = open(os.path.join(submit_root, "submit_xfel_process.phil"))
phil = open(submit_phil_path, "w")
if dispatcher == 'cxi.xtc_process':
d['target'] = None # any target phil will be in mod_hitfind
for line in template.readlines():
phil.write(line.format(**d))
d['target'] = target_phil_path
template.close()
phil.close()
from xfel.command_line.cxi_mpi_submit import Script as submit_script
args = [submit_phil_path]
if self.app.params.facility.name not in ['lcls']:
args.append(self.run.path)
return submit_script().run(args)
def delete(self, output_only=False):
if self.status not in finished_job_statuses:
print("Job is not finished (status = %s)"%self.status)
return
if self.status == "DELETED":
return
job_folder = get_run_path(self.app.params.output_folder, self.trial, self.rungroup, self.run)
if os.path.exists(job_folder):
print("Deleting job folder for job", self.id)
shutil.rmtree(job_folder)
else:
print("Cannot find job folder (%s)"%job_folder)
# Have to be careful to delete from the tables in the right order
tag = self.app.params.experiment_tag
def delete_and_commit(query):
cursor = self.app.execute_query(query, commit=True)
print("(%d)"%cursor.rowcount)
print("Deleting cell_bin entries", end=' ')
query = """DELETE cell_bin FROM `%s_cell_bin` cell_bin
JOIN `%s_crystal` crystal ON crystal.id = cell_bin.crystal_id
JOIN `%s_experiment` expr ON expr.crystal_id = crystal.id
JOIN `%s_imageset` imgset ON imgset.id = expr.imageset_id
JOIN `%s_imageset_event` ie_e ON ie_e.imageset_id = imgset.id
JOIN `%s_event` evt ON evt.id = ie_e.event_id
WHERE evt.run_id = %d AND evt.trial_id = %d AND evt.rungroup_id = %d""" % (
tag, tag, tag, tag, tag, tag, self.run.id, self.trial.id, self.rungroup.id)
delete_and_commit(query)
ids = {}
for item in "crystal", "beam", "detector":
print("Listing %s ids"%item, end=' ')
query = """SELECT %s.id FROM `%s_%s` %s
JOIN `%s_experiment` expr ON expr.%s_id = %s.id
JOIN `%s_imageset` imgset ON imgset.id = expr.imageset_id
JOIN `%s_imageset_event` ie_e ON ie_e.imageset_id = imgset.id
JOIN `%s_event` evt ON evt.id = ie_e.event_id
WHERE evt.run_id = %d AND evt.trial_id = %d AND evt.rungroup_id = %d""" % (
item, tag, item, item, tag, item, item, tag, tag, tag, self.run.id, self.trial.id, self.rungroup.id)
cursor = self.app.execute_query(query)
item_ids = ["%d"%i[0] for i in cursor.fetchall()]
print("(%d)"%len(item_ids))
ids[item] = ",".join(item_ids)
if len(self.trial.isoforms) == 0:
print("Listing bin entries", end=' ')
query = """SELECT bin.id FROM `%s_bin` bin
JOIN `%s_cell` cell ON bin.cell_id = cell.id
JOIN `%s_crystal` crystal ON crystal.cell_id = cell.id
JOIN `%s_experiment` expr ON expr.crystal_id = crystal.id
JOIN `%s_imageset` imgset ON imgset.id = expr.imageset_id
JOIN `%s_imageset_event` ie_e ON ie_e.imageset_id = imgset.id
JOIN `%s_event` evt ON evt.id = ie_e.event_id
WHERE evt.run_id = %d AND evt.trial_id = %d AND evt.rungroup_id = %d
AND cell.trial_id is NULL""" % (
tag, tag, tag, tag, tag, tag, tag, self.run.id, self.trial.id, self.rungroup.id)
cursor = self.app.execute_query(query)
item_ids = ["%d"%i[0] for i in cursor.fetchall()]
print("(%d)"%len(item_ids))
bin_ids = ",".join(item_ids)
print("Listing cell entries", end=' ')
query = """SELECT cell.id FROM `%s_cell` cell
JOIN `%s_crystal` crystal ON crystal.cell_id = cell.id
JOIN `%s_experiment` expr ON expr.crystal_id = crystal.id
JOIN `%s_imageset` imgset ON imgset.id = expr.imageset_id
JOIN `%s_imageset_event` ie_e ON ie_e.imageset_id = imgset.id
JOIN `%s_event` evt ON evt.id = ie_e.event_id
WHERE evt.run_id = %d AND evt.trial_id = %d AND evt.rungroup_id = %d
AND cell.trial_id IS NULL""" % (
tag, tag, tag, tag, tag, tag, self.run.id, self.trial.id, self.rungroup.id)
cursor = self.app.execute_query(query)
item_ids = ["%d"%i[0] for i in cursor.fetchall()]
print("(%d)"%len(item_ids))
cell_ids = ",".join(item_ids)
print("Deleting experiment entries", end=' ')
query = """DELETE expr FROM `%s_experiment` expr
JOIN `%s_imageset` imgset ON imgset.id = expr.imageset_id
JOIN `%s_imageset_event` ie_e ON ie_e.imageset_id = imgset.id
JOIN `%s_event` evt ON evt.id = ie_e.event_id
WHERE evt.run_id = %d AND evt.trial_id = %d AND evt.rungroup_id = %d""" % (
tag, tag, tag, tag, self.run.id, self.trial.id, self.rungroup.id)
delete_and_commit(query)
for item in "crystal", "beam", "detector":
if len(ids[item]) > 0:
print("Deleting %s entries"%item, end=' ')
query = """DELETE %s FROM `%s_%s` %s
WHERE %s.id IN (%s)""" % (
item, tag, item, item, item, ids[item])
delete_and_commit(query)
if len(self.trial.isoforms) == 0 and len(bin_ids) > 0:
print("Deleting bin entries", end=' ')
query = """DELETE bin FROM `%s_bin` bin
WHERE bin.id IN (%s)""" % (
tag, bin_ids)
delete_and_commit(query)
if len(self.trial.isoforms) == 0 and len(cell_ids) > 0:
print("Deleting cell entries", end=' ')
query = """DELETE cell FROM `%s_cell` cell
WHERE cell.id IN (%s)""" % (
tag, cell_ids)
delete_and_commit(query)
print("Listing imageset entries", end=' ')
query = """SELECT imgset.id FROM `%s_imageset` imgset
JOIN `%s_imageset_event` ie_e ON ie_e.imageset_id = imgset.id
JOIN `%s_event` evt ON evt.id = ie_e.event_id
WHERE evt.run_id = %d AND evt.trial_id = %d AND evt.rungroup_id = %d""" % (
tag, tag, tag, self.run.id, self.trial.id, self.rungroup.id)
cursor = self.app.execute_query(query)
item_ids = ["%d"%i[0] for i in cursor.fetchall()]
print("(%d)"%len(item_ids))
imageset_ids = ",".join(item_ids)
print("Deleting imageset_event entries", end=' ')
query = """DELETE is_e FROM `%s_imageset_event` is_e
JOIN `%s_event` evt ON evt.id = is_e.event_id
WHERE evt.run_id = %d AND evt.trial_id = %d AND evt.rungroup_id = %d""" % (
tag, tag, self.run.id, self.trial.id, self.rungroup.id)
delete_and_commit(query)
if len(imageset_ids) > 0:
print("Deleting imageset entries", end=' ')
query = """DELETE imgset FROM `%s_imageset` imgset
WHERE imgset.id IN (%s)""" % (
tag, imageset_ids)
delete_and_commit(query)
print("Deleting event entries", end=' ')
query = """DELETE evt FROM `%s_event` evt
WHERE evt.run_id = %d AND evt.trial_id = %d AND evt.rungroup_id = %d""" % (
tag, self.run.id, self.trial.id, self.rungroup.id)
delete_and_commit(query)
self.status = "DELETED"
class EnsembleRefinementJob(Job):
def delete(self, output_only=False):
job_folder = get_run_path(self.app.params.output_folder, self.trial, self.rungroup, self.run, self.task)
if os.path.exists(job_folder):
print("Deleting job folder for job", self.id)
shutil.rmtree(job_folder)
else:
print("Cannot find job folder (%s)"%job_folder)
self.status = "DELETED"
def get_output_files(self):
run_path = get_run_path(self.app.params.output_folder, self.trial, self.rungroup, self.run, self.task)
return os.path.join(run_path, 'combine_experiments_t%03d'%self.trial.trial, 'intermediates'), '_reintegrated.expt', '_reintegrated.refl'
def submit(self, previous_job = None):
from xfel.command_line.striping import Script
from xfel.command_line.cxi_mpi_submit import get_submission_id
from libtbx import easy_run
configs_dir = os.path.join(settings_dir, "cfgs")
identifier_string = self.get_identifier_string()
target_phil_path = os.path.join(configs_dir, identifier_string + "_params.phil")
with open(target_phil_path, 'w') as f:
if self.task.parameters:
f.write(self.task.parameters)
path = get_run_path(self.app.params.output_folder, self.trial, self.rungroup, self.run, self.task)
os.mkdir(path)
arguments = """
mp.queue={}
mp.nproc={}
mp.nproc_per_node={}
mp.method={}
{}
mp.use_mpi=False
striping.results_dir={}
striping.trial={}
striping.rungroup={}
striping.run={}
{}
striping.chunk_size=3000
striping.stripe=False
striping.dry_run=True
striping.output_folder={}
reintegration.integration.lookup.mask={}
mp.local.include_mp_in_command=False
""".format(self.app.params.mp.queue if len(self.app.params.mp.queue) > 0 else None,
self.app.params.mp.nproc,
self.app.params.mp.nproc_per_node,
self.app.params.mp.method,
'\n'.join(['mp.env_script={}'.format(p) for p in self.app.params.mp.env_script if p]),
self.app.params.output_folder,
self.trial.trial,
self.rungroup.id,
self.run.run,
target_phil_path,
path,
self.rungroup.untrusted_pixel_mask_path,
).split()
commands = Script(arguments).run()
submission_ids = []
if self.app.params.mp.method == 'local':
self.status = "RUNNING"
for command in commands:
try:
result = easy_run.fully_buffered(command=command)
result.raise_if_errors()
except Exception as e:
if not "Warning: job being submitted without an AFS token." in str(e):
raise e
submission_ids.append(get_submission_id(result, self.app.params.mp.method))
if self.app.params.mp.method == 'local':
self.status = "DONE"
else:
return ",".join(submission_ids)
class ScalingJob(Job):
def delete(self, output_only=False):
job_folder = get_run_path(self.app.params.output_folder, self.trial, self.rungroup, self.run, self.task)
if os.path.exists(job_folder):
print("Deleting job folder for job", self.id)
shutil.rmtree(job_folder)
else:
print("Cannot find job folder (%s)"%job_folder)
self.status = "DELETED"
def get_output_files(self):
run_path = get_run_path(self.app.params.output_folder, self.trial, self.rungroup, self.run, self.task)
return os.path.join(run_path, 'out'), ".expt", ".refl"
def write_submit_phil(self, submit_phil_path, target_phil_path):
import libtbx.load_env
from xfel.ui.db.task import task_types, task_dispatchers
submit_root = libtbx.env.find_in_repositories("xfel/ui/db/cfgs")
d = dict(
dry_run = self.app.params.dry_run,
dispatcher = task_dispatchers[task_types.index(self.task.type)],
run_num = self.run.run,
output_dir = self.app.params.output_folder,
trial = self.trial.trial,
rungroup = self.rungroup.rungroup_id,
task = self.task.id,
nproc = self.app.params.mp.nproc,
nproc_per_node = self.app.params.mp.nproc_per_node,
queue = self.app.params.mp.queue or None,
env_script = self.app.params.mp.env_script[0] if len(self.app.params.mp.env_script) > 0 and len(self.app.params.mp.env_script[0]) > 0 else None,
method = self.app.params.mp.method,
htcondor_executable_path = self.app.params.mp.htcondor.executable_path,
target = target_phil_path,
# always use mpi for 'lcls'
use_mpi = self.app.params.mp.method != 'local' or (self.app.params.mp.method == 'local' and self.app.params.facility.name == 'lcls')
)
with open(submit_phil_path, "w") as phil:
for line in open(os.path.join(submit_root, "submit_xfel_merge.phil")).readlines():
phil.write(line.format(**d))
def submit(self, previous_job = None):
from xfel.command_line.cxi_mpi_submit import Script as submit_script
output_path = os.path.join(get_run_path(self.app.params.output_folder, self.trial, self.rungroup, self.run, self.task), 'out')
configs_dir = os.path.join(settings_dir, "cfgs")
if not os.path.exists(configs_dir):
os.makedirs(configs_dir)
identifier_string = self.get_identifier_string()
submit_phil_path = os.path.join(configs_dir, identifier_string + "_submit.phil")
target_phil_path = os.path.join(configs_dir, identifier_string + "_params.phil")
input_folder, expt_suffix, refl_suffix = previous_job.get_output_files()
with open(target_phil_path, 'w') as f:
f.write("input.path=%s\n"%input_folder)
f.write("input.experiments_suffix=%s\n"%expt_suffix)
f.write("input.reflections_suffix=%s\n"%refl_suffix)
f.write("output.output_dir=%s\n"%output_path)
f.write("output.prefix=%s_%d\n"%(self.task.type, self.task.id))
f.write(self.task.parameters)
self.write_submit_phil(submit_phil_path, target_phil_path)
args = [submit_phil_path]
if self.app.params.facility.name not in ['lcls']:
args.append(self.run.path)
return submit_script().run(args)
class MergingJob(Job):
def get_global_path(self):
return self.dataset_version.output_path()
def get_log_path(self):
return self.get_global_path()
def get_identifier_string(self):
return "%s_%s%03d_v%03d"%(self.dataset.name, self.task.type, self.task.id, self.dataset_version.version)
def delete(self, output_only=False):
job_folder = self.get_global_path()
if os.path.exists(job_folder):
print("Deleting job folder for job", self.id)
shutil.rmtree(job_folder)
else:
print("Cannot find job folder (%s)"%job_folder)
self.status = "DELETED"
def get_output_files(self):
path = self.get_global_path()
return path, ".expt", ".refl"
def submit(self, previous_job = None):
from xfel.command_line.cxi_mpi_submit import do_submit
output_path = self.get_global_path()
if not os.path.exists(output_path):
os.makedirs(output_path)
identifier_string = self.get_identifier_string()
target_phil_path = os.path.join(output_path, identifier_string + "_params.phil")
with open(target_phil_path, 'w') as f:
expt_suffix = refl_suffix = None
for job in self.dataset_version.jobs:
input_folder, _expt_suffix, _refl_suffix = job.get_output_files()
if expt_suffix is None: expt_suffix = _expt_suffix
else: assert expt_suffix == _expt_suffix
if refl_suffix is None: refl_suffix = _refl_suffix
else: assert refl_suffix == _refl_suffix
f.write("input.path=%s\n"%input_folder)
f.write("input.experiments_suffix=%s\n"%expt_suffix)
f.write("input.reflections_suffix=%s\n"%refl_suffix)
f.write("output.output_dir=%s\n"%output_path)
f.write("output.prefix=%s_v%03d\n"%(self.dataset.name, self.dataset_version.version))
f.write(self.task.parameters)
command = "cctbx.xfel.merge %s"%target_phil_path
submit_path = os.path.join(output_path, "submit.sh")
return do_submit(command, submit_path, output_path, self.app.params.mp, identifier_string)
# Support classes and functions for job submission
class _job(object):
"""Used to represent a job that may not have been submitted into the cluster or database yet"""
def __init__(self, trial, rungroup, run, task=None, dataset=None):
self.trial = trial
self.rungroup = rungroup
self.run = run
self.task = task
self.dataset = dataset
def __eq__(self, other):
ret = True
check = ['trial', 'rungroup', 'run', 'task']
if getattr(self, 'task') and self.task.scope == 'global':
check.append('dataset')
for subitem_name in check:
subitem = getattr(self, subitem_name)
other_subitem_id = getattr(other, subitem_name + '_id')
if subitem is None:
ret = ret and other_subitem_id is None
else:
ret = ret and subitem.id == other_subitem_id
return ret
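# _job instances are lightweight "desired job" descriptors: submit_all_jobs below diffs
# them against the jobs already recorded in the database (via _job.__eq__, which compares
# trial/rungroup/run/task ids, plus the dataset for global-scope tasks) and submits
# whatever is missing.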
def submit_all_jobs(app):
submitted_jobs = app.get_all_jobs()
if app.params.mp.method == 'local': # only run one job at a time
for job in submitted_jobs:
if job.status in ['RUN', 'UNKWN', 'SUBMITTED']: return
runs = app.get_all_runs()
trials = app.get_all_trials(only_active = True)
needed_jobs = []
for trial in trials:
for rungroup in trial.rungroups:
assert rungroup.active
for run in rungroup.runs:
needed_jobs.append(_job(trial, rungroup, run))
for job in needed_jobs:
if job in submitted_jobs:
continue
print("Submitting job: trial %d, rungroup %d, run %s"%(job.trial.trial, job.rungroup.id, job.run.run))
j = JobFactory.from_args(app,
trial_id = job.trial.id,
rungroup_id = job.rungroup.id,
run_id = job.run.id,
status = "SUBMITTED")
j.trial = job.trial; j.rungroup = job.rungroup; j.run = job.run
try:
j.submission_id = j.submit()
except Exception as e:
print("Couldn't submit job:", str(e))
j.status = "SUBMIT_FAIL"
raise
if app.params.mp.method == 'local': # only run one job at a time
return
datasets = app.get_all_datasets()
for dataset_idx, dataset in enumerate(datasets):
if not dataset.active: continue
# one of the tasks will have a trial, otherwise we don't know where to save the data
trial = None
for task in dataset.tasks:
if task.trial is not None:
if trial is None:
trial = task.trial
else:
assert trial.id == task.trial.id, "Found multiple trials, don't know where to save the results"
assert trial, "No trial found in task list, don't know where to save the results"
trial_tags_ids = [t.id for t in trial.tags]
dataset_tags = [t for t in dataset.tags if t.id in trial_tags_ids]
runs_rungroups = []
for rungroup in trial.rungroups:
for run in rungroup.runs:
run_tags_ids = [t.id for t in run.tags]
if dataset.tag_operator == "union":
if any([t.id in run_tags_ids for t in dataset_tags]):
runs_rungroups.append((run, rungroup))
elif dataset.tag_operator == "intersection":
if all([t.id in run_tags_ids for t in dataset_tags]):
runs_rungroups.append((run, rungroup))
else:
assert False
# Datasets always start with indexing
global_tasks = {}
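    # global_tasks maps (dataset index, task index) -> finished per-run jobs whose
    # output will feed a dataset-wide task such as merging.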
for run, rungroup in runs_rungroups:
submit_next_task = False
last_task_status = ""
tasks = dataset.tasks
previous_job = None
for task_idx, task in enumerate(tasks):
if task.scope == 'global':
if previous_job.status in ["DONE", "EXIT"]:
key = (dataset_idx, task_idx)
if key not in global_tasks:
global_tasks[key] = []
global_tasks[key].append(previous_job)
continue
if task.type == 'indexing':
job = _job(trial, rungroup, run)
else:
job = _job(trial, rungroup, run, task)
try:
submitted_job = submitted_jobs[submitted_jobs.index(job)]
except ValueError:
if not submit_next_task:
print("Warning, expected to find submitted %s job: trial %d, rungroup %d, run %s, task %d"% \
(task.type, trial.trial, rungroup.id, run.run, task.id))
break
else:
if not task_idx+1 < len(tasks): break # no more tasks to do after this one
next_task = tasks[task_idx+1]
if submitted_job.status not in finished_job_statuses or submitted_job.status == "UNKWN":
print ("Task %s waiting on job %d (%s) for trial %d, rungroup %d, run %s, task %d" % \
(next_task.type, submitted_job.id, submitted_job.status, trial.trial, rungroup.id, run.run, next_task.id))
break
if submitted_job.status not in ["DONE", "EXIT"]:
print ("Task %s cannot start due to unexpected status for job %d (%s) for trial %d, rungroup %d, run %s, task %d" % \
(next_task.type, submitted_job.id, submitted_job.status, trial.trial, rungroup.id, run.run, next_task.id))
break
submit_next_task = True
previous_job = submitted_job
continue
print("Submitting %s job: trial %d, rungroup %d, run %s, task %d"% \
(task.type, trial.trial, rungroup.id, run.run, task.id))
j = JobFactory.from_args(app,
trial_id = trial.id,
rungroup_id = rungroup.id,
run_id = run.id,
task_id = task.id,
status = "SUBMITTED")
j.trial = job.trial; j.rungroup = job.rungroup; j.run = job.run; j.task = job.task
try:
j.submission_id = j.submit(previous_job)
except Exception as e:
print("Couldn't submit job:", str(e))
j.status = "SUBMIT_FAIL"
raise
previous_job = j
if app.params.mp.method == 'local': # only run one job at a time
return
break # job submitted so don't look for more in this run for this dataset
for global_task in global_tasks:
dataset = datasets[global_task[0]]
task = dataset.tasks[global_task[1]]
latest_version = dataset.latest_version
if latest_version is None:
next_version = 0
else:
latest_version_jobs = latest_version.jobs
      latest_version_job_ids = [j.id for j in latest_version_jobs if j.task_id != task.id]
      new_jobs = [j for j in global_tasks[global_task] if j.id not in latest_version_job_ids]
if not new_jobs: continue
next_version = latest_version.version + 1
latest_version = app.create_dataset_version(dataset_id = dataset.id, version=next_version)
for job in global_tasks[global_task]:
latest_version.add_job(job)
j = JobFactory.from_args(app,
task_id = task.id,
dataset_id = dataset.id,
status = "SUBMITTED")
j.task = task; j.dataset = dataset; j.dataset_version = latest_version
try:
j.submission_id = j.submit()
except Exception as e:
print("Couldn't submit job:", str(e))
j.status = "SUBMIT_FAIL"
raise
latest_version.add_job(j)
if app.params.mp.method == 'local': # only run one job at a time
return
|
the-stack_0_15550 | """
Defines models
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Function
from torch.autograd import Variable
from torch.nn.utils.rnn import pack_padded_sequence
from torch.nn.utils.rnn import pad_packed_sequence
def init_weights(m):
if type(m) == nn.Linear or type(m) == nn.Conv2d:
torch.nn.init.xavier_uniform_(m.weight)
if m.bias is not None:
m.bias.data.fill_(0.01)
class GradientReversalFunction(Function):
"""
Gradient Reversal Layer from:
Unsupervised Domain Adaptation by Backpropagation (Ganin & Lempitsky, 2015)
Forward pass is the identity function. In the backward pass,
the upstream gradients are multiplied by -lambda (i.e. gradient is reversed)
"""
@staticmethod
def forward(ctx, x, lambda_):
ctx.lambda_ = lambda_
return x.clone()
@staticmethod
def backward(ctx, grads):
lambda_ = ctx.lambda_
lambda_ = grads.new_tensor(lambda_)
dx = -lambda_ * grads
return dx, None
class GradientReversal(torch.nn.Module):
def __init__(self, lambda_=1):
super(GradientReversal, self).__init__()
self.lambda_ = lambda_
def forward(self, x):
return GradientReversalFunction.apply(x, self.lambda_)
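# Minimal usage sketch for GradientReversal (hypothetical tensor shapes): the layer
# is the identity in the forward pass and multiplies gradients by -lambda_ in the
# backward pass, e.g.
#   grl = GradientReversal(lambda_=0.5)
#   out = grl(torch.randn(4, 16, requires_grad=True))  # same values as the input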
class Flatten(nn.Module):
def forward(self, input):
return input.view(input.size(0), -1)
class Model(nn.Module):
def __init__(self, opt):
super(Model, self).__init__()
self.acoustic_modality = opt.acoustic_modality
self.visual_modality = opt.visual_modality
self.lexical_modality = opt.lexical_modality
self.acoustic_feature_dim = opt.acoustic_feature_dim
self.visual_feature_dim = opt.visual_feature_dim
self.lexical_feature_dim = opt.lexical_feature_dim
self.conv_width_v = opt.conv_width_v
self.conv_width_a = opt.conv_width_a
self.kernel_size_v = opt.kernel_size_v
self.kernel_size_a = opt.kernel_size_a
self.max_pool_width = opt.max_pool_width
self.rnn_layer_num_v = opt.rnn_layer_num_v
self.rnn_layer_num_a = opt.rnn_layer_num_a
self.rnn_width = opt.rnn_width
self.linear_width_l = opt.linear_width_l
self.linear_width = opt.linear_width
self.dropout_rate = opt.dropout_rate
self.conv1d_v1 = nn.Conv1d( in_channels=opt.visual_feature_dim,
out_channels=self.conv_width_v,
kernel_size=self.kernel_size_v,
padding=self.kernel_size_v-1)
self.conv1d_v2 = nn.Conv1d( in_channels=self.conv_width_v,
out_channels=self.conv_width_v,
kernel_size=self.kernel_size_v,
padding=self.kernel_size_v-1)
self.conv1d_v3 = nn.Conv1d( in_channels=self.conv_width_v,
out_channels=self.conv_width_v,
kernel_size=self.kernel_size_v,
padding=self.kernel_size_v-1)
self.conv1d_a1 = nn.Conv1d( in_channels=opt.acoustic_feature_dim,
out_channels=self.conv_width_a,
kernel_size=self.kernel_size_a,
padding=self.kernel_size_a-1)
self.conv1d_a2 = nn.Conv1d( in_channels=self.conv_width_a,
out_channels=self.conv_width_a,
kernel_size=self.kernel_size_a,
padding=self.kernel_size_a-1)
self.conv1d_a3 = nn.Conv1d( in_channels=self.conv_width_a,
out_channels=self.conv_width_a,
kernel_size=self.kernel_size_a,
padding=self.kernel_size_a-1)
self.maxpool = nn.MaxPool1d(self.max_pool_width)
self.gru_v = nn.GRU(input_size=self.conv_width_v,
num_layers=self.rnn_layer_num_v,
hidden_size=self.rnn_width,
batch_first=True)
self.gru_a = nn.GRU(input_size=self.conv_width_a,
num_layers=self.rnn_layer_num_a,
hidden_size=self.rnn_width,
batch_first=True)
self.linear_l = nn.Linear(self.lexical_feature_dim, self.linear_width_l)
self.batchnorm_v = nn.BatchNorm1d(self.rnn_width)
self.batchnorm_a = nn.BatchNorm1d(self.rnn_width)
self.batchnorm_l = nn.BatchNorm1d(self.linear_width_l)
self.dropout = nn.Dropout(self.dropout_rate)
width = 0
if self.acoustic_modality:
width += self.rnn_width
if self.visual_modality:
width += self.rnn_width
if self.lexical_modality:
width += self.linear_width_l
self.linear_1 = nn.Linear(width, self.linear_width)
self.linear_2 = nn.Linear(self.linear_width, 3)
self.softmax = nn.Softmax(dim=1)
self.relu = nn.ReLU()
def forward_v(self, x_v):
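        # Visual stream: transpose to (batch, channels, time), apply three conv +
        # max-pool blocks, run a GRU over time, average-pool the time axis, then
        # dropout and batch normalization.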
x = x_v
x = torch.transpose(x, 1, 2)
x = self.relu(self.maxpool(self.conv1d_v1(x)))
x = self.relu(self.maxpool(self.conv1d_v2(x)))
x = self.relu(self.maxpool(self.conv1d_v3(x)))
x = torch.transpose(x, 1, 2)
x, _ = self.gru_v(x)
x = torch.transpose(x, 1, 2)
x = F.adaptive_avg_pool1d(x,1)[:, :, -1]
x = self.batchnorm_v(self.dropout(x))
return x
def forward_a(self, x_a):
x = x_a
x = torch.transpose(x, 1, 2)
x = self.relu(self.maxpool(self.conv1d_a1(x)))
x = self.relu(self.maxpool(self.conv1d_a2(x)))
x = self.relu(self.maxpool(self.conv1d_a3(x)))
x = torch.transpose(x, 1, 2)
x, _ = self.gru_a(x)
x = torch.transpose(x, 1, 2)
x = F.adaptive_avg_pool1d(x,1)[:, :, -1]
x = self.batchnorm_a(self.dropout(x))
return x
def forward_l(self, x_l):
x = x_l
x = self.relu(self.linear_l(x))
x = self.batchnorm_l(self.dropout(x))
return x
def encoder(self, x_v, x_a, x_l):
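        # Encode each enabled modality and concatenate the embeddings into a
        # single feature vector.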
if self.visual_modality:
x_v = self.forward_v(x_v)
if self.acoustic_modality:
x_a = self.forward_a(x_a)
if self.lexical_modality:
x_l = self.forward_l(x_l)
if self.visual_modality:
if self.acoustic_modality:
if self.lexical_modality:
x = torch.cat((x_v, x_a, x_l), 1)
else:
x = torch.cat((x_v, x_a), 1)
else:
if self.lexical_modality:
x = torch.cat((x_v, x_l), 1)
else:
x = x_v
else:
if self.acoustic_modality:
if self.lexical_modality:
x = torch.cat((x_a, x_l), 1)
else:
x = x_a
else:
x = x_l
return x
def recognizer(self, x):
x = self.relu(self.linear_1(x))
x = self.softmax(self.linear_2(x))
return x
def forward(self, x_v, x_a, x_l):
x = self.encoder(x_v, x_a, x_l)
x = self.recognizer(x)
return x
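# Adversarial domain head: gradient reversal followed by a two-class softmax
# classifier, used to push the shared encoder towards domain-invariant features.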
class DomainDiscriminator(nn.Module):
def __init__(self, opt):
super(DomainDiscriminator, self).__init__()
self.acoustic_modality = opt.acoustic_modality
self.visual_modality = opt.visual_modality
self.lexical_modality = opt.lexical_modality
self.rnn_width = opt.rnn_width
self.linear_width_l = opt.linear_width_l
self.linear_width = opt.linear_width
self.grl = GradientReversal(opt.domain_weight)
width = 0
if self.acoustic_modality:
width += self.rnn_width
if self.visual_modality:
width += self.rnn_width
if self.lexical_modality:
width += self.linear_width_l
self.linear_1 = nn.Linear(width, self.linear_width)
self.linear_2 = nn.Linear(self.linear_width, 2)
self.softmax = nn.Softmax(dim=1)
self.relu = nn.ReLU()
def forward(self, x):
x = self.grl(x)
x = self.relu(self.linear_1(x))
x = self.softmax(self.linear_2(x))
return x
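# Speaker adversarial head: gradient reversal followed by a 22-way softmax over
# speaker identities.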
class SpeakerDiscriminator(nn.Module):
def __init__(self, opt):
super(SpeakerDiscriminator, self).__init__()
self.acoustic_modality = opt.acoustic_modality
self.visual_modality = opt.visual_modality
self.lexical_modality = opt.lexical_modality
self.rnn_width = opt.rnn_width
self.linear_width_l = opt.linear_width_l
self.linear_width = opt.linear_width
self.grl = GradientReversal(opt.subject_weight)
width = 0
if self.acoustic_modality:
width += self.rnn_width
if self.visual_modality:
width += self.rnn_width
if self.lexical_modality:
width += self.linear_width_l
self.linear_1 = nn.Linear(width, self.linear_width)
self.linear_2 = nn.Linear(self.linear_width, 22)
self.softmax = nn.Softmax(dim=1)
self.relu = nn.ReLU()
def forward(self, x):
x = self.grl(x)
x = self.relu(self.linear_1(x))
x = self.softmax(self.linear_2(x))
return x
|
the-stack_0_15552 | import os
from pathlib import Path
from typing import Any, Dict, Union
from unittest.mock import Mock
import pytest
import torch
from pytorch_lightning import Trainer
from pytorch_lightning.accelerators import CPUAccelerator
from pytorch_lightning.plugins import SingleDevicePlugin
from pytorch_lightning.plugins.precision import MixedPrecisionPlugin
from pytorch_lightning.plugins.precision.precision_plugin import PrecisionPlugin
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from tests.helpers.boring_model import BoringModel
def test_unsupported_precision_plugins():
"""Test error messages are raised for unsupported precision plugins with CPU."""
trainer = Mock()
accelerator = CPUAccelerator(
training_type_plugin=SingleDevicePlugin(torch.device("cpu")), precision_plugin=MixedPrecisionPlugin()
)
with pytest.raises(MisconfigurationException, match=r"AMP \+ CPU is not supported"):
accelerator.setup(trainer=trainer)
@pytest.mark.parametrize("delay_dispatch", [True, False])
def test_plugin_setup_optimizers_in_pre_dispatch(tmpdir, delay_dispatch):
"""
Test when using a custom training type plugin that delays setup optimizers,
we do not call setup optimizers till ``pre_dispatch``.
"""
class TestModel(BoringModel):
def on_fit_start(self):
if delay_dispatch:
# Ensure we haven't setup optimizers if we've delayed dispatch
assert len(self.trainer.optimizers) == 0
else:
assert len(self.trainer.optimizers) > 0
def on_fit_end(self):
assert len(self.trainer.optimizers) > 0
class CustomPlugin(SingleDevicePlugin):
@property
def setup_optimizers_in_pre_dispatch(self) -> bool:
return delay_dispatch
model = TestModel()
trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True, plugins=CustomPlugin(device=torch.device("cpu")))
trainer.fit(model)
def test_accelerator_on_reset_dataloader_hooks(tmpdir):
"""
Ensure data-loader hooks are called using an Accelerator.
"""
class CustomAccelerator(CPUAccelerator):
train_count: int = 0
val_count: int = 0
test_count: int = 0
predict_count: int = 0
def on_reset_train_dataloader(self, dataloader):
self.train_count += 1
assert self.lightning_module.trainer.training
return super().on_reset_train_dataloader(dataloader)
def on_reset_val_dataloader(self, dataloader):
self.val_count += 1
assert self.lightning_module.trainer.training or self.lightning_module.trainer.validating
return super().on_reset_val_dataloader(dataloader)
def on_reset_test_dataloader(self, dataloader):
self.test_count += 1
assert self.lightning_module.trainer.testing
return super().on_reset_test_dataloader(dataloader)
def on_reset_predict_dataloader(self, dataloader):
self.predict_count += 1
assert self.lightning_module.trainer.predicting
return super().on_reset_predict_dataloader(dataloader)
model = BoringModel()
accelerator = CustomAccelerator(PrecisionPlugin(), SingleDevicePlugin(device=torch.device("cpu")))
trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True, accelerator=accelerator)
trainer.fit(model)
trainer.validate(model)
trainer.test(model)
trainer.predict(model, dataloaders=model.test_dataloader())
# assert that all loader hooks were called
assert accelerator.train_count == 1
assert accelerator.val_count == 1 # only called once during the entire session
assert accelerator.test_count == 1
assert accelerator.predict_count == 1
accelerator = CustomAccelerator(PrecisionPlugin(), SingleDevicePlugin(device=torch.device("cpu")))
trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True, accelerator=accelerator)
trainer.validate(model)
trainer.test(model)
trainer.predict(model)
# assert val/test/predict loader hooks were called
assert accelerator.val_count == 1
assert accelerator.test_count == 1
assert accelerator.predict_count == 1
def test_plugin_on_reset_dataloader_hooks(tmpdir):
"""
Ensure data-loader hooks are called using a Plugin.
"""
class CustomPlugin(SingleDevicePlugin):
train_count: int = 0
val_count: int = 0
test_count: int = 0
predict_count: int = 0
def on_reset_train_dataloader(self, dataloader):
self.train_count += 1
assert self.lightning_module.trainer.training
return super().on_reset_train_dataloader(dataloader)
def on_reset_val_dataloader(self, dataloader):
self.val_count += 1
assert self.lightning_module.trainer.training or self.lightning_module.trainer.validating
return super().on_reset_val_dataloader(dataloader)
def on_reset_test_dataloader(self, dataloader):
self.test_count += 1
assert self.lightning_module.trainer.testing
return super().on_reset_test_dataloader(dataloader)
def on_reset_predict_dataloader(self, dataloader):
self.predict_count += 1
assert self.lightning_module.trainer.predicting
return super().on_reset_predict_dataloader(dataloader)
plugin = CustomPlugin(device=torch.device("cpu"))
model = BoringModel()
trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True, plugins=plugin)
trainer.fit(model)
trainer.validate(model)
trainer.test(model)
trainer.predict(model, dataloaders=model.test_dataloader())
# assert that all loader hooks were called
assert plugin.train_count == 1
assert plugin.val_count == 1 # only called once during the entire session
assert plugin.test_count == 1
assert plugin.predict_count == 1
plugin = CustomPlugin(device=torch.device("cpu"))
trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True, plugins=plugin)
trainer.validate(model)
trainer.test(model)
trainer.predict(model)
# assert val/test/predict loader hooks were called
assert plugin.val_count == 1
assert plugin.test_count == 1
assert plugin.predict_count == 1
def test_restore_checkpoint_after_pre_dispatch_default():
"""
Assert default for restore_checkpoint_after_pre_dispatch is False.
"""
plugin = SingleDevicePlugin(torch.device("cpu"))
accelerator = CPUAccelerator(training_type_plugin=plugin, precision_plugin=PrecisionPlugin())
assert not accelerator.restore_checkpoint_after_pre_dispatch
assert not plugin.restore_checkpoint_after_pre_dispatch
@pytest.mark.parametrize("restore_after_pre_dispatch", [True, False])
def test_restore_checkpoint_after_pre_dispatch(tmpdir, restore_after_pre_dispatch):
"""
Test to ensure that if restore_checkpoint_after_pre_dispatch is True, then we only load the state after
pre-dispatch is called.
"""
class TestPlugin(SingleDevicePlugin):
predispatched_called = False
def pre_dispatch(self) -> None:
super().pre_dispatch()
self.predispatched_called = True
@property
def restore_checkpoint_after_pre_dispatch(self) -> bool:
return restore_after_pre_dispatch
def load_checkpoint_file(self, checkpoint_path: Union[str, Path]) -> Dict[str, Any]:
assert self.predispatched_called == restore_after_pre_dispatch
return super().load_checkpoint_file(checkpoint_path)
model = BoringModel()
trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True)
trainer.fit(model)
checkpoint_path = os.path.join(tmpdir, "model.pt")
trainer.save_checkpoint(checkpoint_path)
plugin = TestPlugin(torch.device("cpu"))
accelerator = CPUAccelerator(training_type_plugin=plugin, precision_plugin=PrecisionPlugin())
assert accelerator.restore_checkpoint_after_pre_dispatch == restore_after_pre_dispatch
assert plugin.restore_checkpoint_after_pre_dispatch == restore_after_pre_dispatch
trainer = Trainer(
default_root_dir=tmpdir, accelerator=accelerator, fast_dev_run=True, resume_from_checkpoint=checkpoint_path
)
trainer.fit(model)
for func in (trainer.test, trainer.validate, trainer.predict):
accelerator.training_type_plugin.predispatched_called = False
func(model, ckpt_path=checkpoint_path)
|
the-stack_0_15554 | # Copyright (c) 2015 Huawei Technologies Co., Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Copyright (c) 2017 Wind River Systems, Inc.
#
import pecan
from pecan.configuration import set_config
from pecan.testing import load_test_app
from oslo_config import cfg
from oslo_config import fixture as fixture_config
from oslo_serialization import jsonutils
from oslo_utils import uuidutils
from dcmanager.api import api_config
from dcmanager.common import config
from dcmanager.tests import base
config.register_options()
OPT_GROUP_NAME = 'keystone_authtoken'
cfg.CONF.import_group(OPT_GROUP_NAME, "keystonemiddleware.auth_token")
def fake_delete_response(self, context):
resp = jsonutils.dumps(context.to_dict())
return resp
class DCManagerApiTest(base.DCManagerTestCase):
def setUp(self):
super(DCManagerApiTest, self).setUp()
self.addCleanup(set_config, {}, overwrite=True)
api_config.test_init()
self.CONF = self.useFixture(fixture_config.Config()).conf
# self.setup_messaging(self.CONF)
self.CONF.set_override('auth_strategy', 'noauth')
self.app = self._make_app()
def _make_app(self, enable_acl=False):
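        # Build a pecan test application rooted at the dcmanager RootController;
        # enable_acl toggles the app's access-control setting.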
self.config = {
'app': {
'root': 'dcmanager.api.controllers.root.RootController',
'modules': ['dcmanager.api'],
'enable_acl': enable_acl,
'errors': {
400: '/error',
'__force_dict__': True
}
},
}
return load_test_app(self.config)
def tearDown(self):
super(DCManagerApiTest, self).tearDown()
pecan.set_config({}, overwrite=True)
class TestRootController(DCManagerApiTest):
"""Test version listing on root URI."""
def test_get(self):
response = self.app.get('/')
self.assertEqual(response.status_int, 200)
json_body = jsonutils.loads(response.body)
versions = json_body.get('versions')
self.assertEqual(1, len(versions))
def _test_method_returns_405(self, method):
api_method = getattr(self.app, method)
response = api_method('/', expect_errors=True)
self.assertEqual(response.status_int, 405)
def test_post(self):
self._test_method_returns_405('post')
def test_put(self):
self._test_method_returns_405('put')
def test_patch(self):
self._test_method_returns_405('patch')
def test_delete(self):
self._test_method_returns_405('delete')
def test_head(self):
self._test_method_returns_405('head')
class TestErrors(DCManagerApiTest):
def setUp(self):
super(TestErrors, self).setUp()
cfg.CONF.set_override('admin_tenant', 'fake_tenant_id',
group='cache')
def test_404(self):
response = self.app.get('/assert_called_once', expect_errors=True)
self.assertEqual(response.status_int, 404)
def test_bad_method(self):
fake_tenant = uuidutils.generate_uuid()
fake_url = '/v1.0/%s/bad_method' % fake_tenant
response = self.app.patch(fake_url,
expect_errors=True)
self.assertEqual(response.status_int, 404)
class TestRequestID(DCManagerApiTest):
def test_request_id(self):
response = self.app.get('/')
self.assertIn('x-openstack-request-id', response.headers)
self.assertTrue(
response.headers['x-openstack-request-id'].startswith('req-'))
id_part = response.headers['x-openstack-request-id'].split('req-')[1]
self.assertTrue(uuidutils.is_uuid_like(id_part))
class TestKeystoneAuth(DCManagerApiTest):
def setUp(self):
super(DCManagerApiTest, self).setUp()
self.addCleanup(set_config, {}, overwrite=True)
api_config.test_init()
self.CONF = self.useFixture(fixture_config.Config()).conf
cfg.CONF.set_override('auth_strategy', 'keystone')
self.app = self._make_app()
def test_auth_not_enforced_for_root(self):
response = self.app.get('/')
self.assertEqual(response.status_int, 200)
|
the-stack_0_15555 | """ Hyperparameters for Large Scale Data Collection (LSDC) """
import os.path
from visual_mpc.policy.cem_controllers.variants.ensemble_vidpred import CEM_Controller_Ensemble_Vidpred
from visual_mpc.agent.benchmarking_agent import BenchmarkAgent
from visual_mpc.envs.mujoco_env.cartgripper_env.autograsp_env import AutograspCartgripperEnv
import numpy as np
BASE_DIR = '/'.join(str.split(__file__, '/')[:-1])
current_dir = os.path.dirname(os.path.realpath(__file__))
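# Simulated cartgripper environment with a single cube object and autograsp enabled.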
env_params = {
'num_objects': 1,
'object_mass': 0.5,
'friction': 1.0,
'finger_sensors': True,
'minlen': 0.03,
'maxlen': 0.06,
'object_object_mindist': 0.15,
'cube_objects': True,
'autograsp': {'zthresh': -0.06, 'touchthresh': 0.0, 'reopen': True}
}
agent = {
'type': BenchmarkAgent,
'env': (AutograspCartgripperEnv, env_params),
'T': 30,
'image_height' : 48,
'image_width' : 64,
'data_save_dir': BASE_DIR,
'make_final_gif_pointoverlay': True,
'record': BASE_DIR + '/record/',
'num_load_steps': 16,
'start_goal_confs': os.environ['VMPC_DATA_DIR'] + '/ensemble_lifting_tasks',
'current_dir': current_dir
}
policy = {
'verbose':True,
'initial_std': 0.04, # std dev. in xy
'initial_std_lift': 0.6, # std dev. in xy
'initial_std_rot': np.pi / 32,
'type': CEM_Controller_Ensemble_Vidpred,
'rejection_sampling': False,
'replan_interval': 10,
'num_samples': [800, 400],
}
config = {
'current_dir': current_dir,
'save_data': True,
'save_raw_images': True,
'start_index':0,
'end_index': 88,
'agent': agent,
'policy': policy,
}
|
the-stack_0_15556 | import json, os
import math, copy, time
import numpy as np
from collections import defaultdict
import pandas as pd
from utils import *
import torch  # needed by to_torch() below
from tqdm import tqdm
import seaborn as sb
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import dill
from functools import partial
import multiprocessing as mp
class Graph():
def __init__(self):
super(Graph, self).__init__()
'''
            node_forward and node_bacward are only used when building the data.
            Afterwards they will be transformed into node_feature by a DataFrame.
node_forward: name -> node_id
node_bacward: node_id -> feature_dict
node_feature: a DataFrame containing all features
'''
self.node_forward = defaultdict(lambda: {})
self.node_bacward = defaultdict(lambda: [])
self.node_feature = defaultdict(lambda: [])
'''
            edge_list: index the adjacency matrix (time) by
<target_type, source_type, relation_type, target_id, source_id>
'''
self.edge_list = defaultdict( #target_type
lambda: defaultdict( #source_type
lambda: defaultdict( #relation_type
lambda: defaultdict( #target_id
lambda: defaultdict( #source_id(
lambda: int # time
)))))
self.times = {}
def add_node(self, node):
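        # Register the node the first time it is seen (forward map + feature dict
        # list) and return its serial index within its node type.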
nfl = self.node_forward[node['type']]
if node['id'] not in nfl:
self.node_bacward[node['type']] += [node]
ser = len(nfl)
nfl[node['id']] = ser
return ser
return nfl[node['id']]
def add_edge(self, source_node, target_node, time = None, relation_type = None, directed = True):
edge = [self.add_node(source_node), self.add_node(target_node)]
'''
Add bi-directional edges with different relation type
'''
self.edge_list[target_node['type']][source_node['type']][relation_type][edge[1]][edge[0]] = time
if directed:
self.edge_list[source_node['type']][target_node['type']]['rev_' + relation_type][edge[0]][edge[1]] = time
else:
self.edge_list[source_node['type']][target_node['type']][relation_type][edge[0]][edge[1]] = time
self.times[time] = True
def update_node(self, node):
nbl = self.node_bacward[node['type']]
ser = self.add_node(node)
for k in node:
if k not in nbl[ser]:
nbl[ser][k] = node[k]
def get_meta_graph(self):
types = self.get_types()
metas = []
for target_type in self.edge_list:
for source_type in self.edge_list[target_type]:
for r_type in self.edge_list[target_type][source_type]:
metas += [(target_type, source_type, r_type)]
return metas
def get_types(self):
return list(self.node_feature.keys())
def sample_subgraph(graph, time_range, sampled_depth = 2, sampled_number = 8, inp = None, feature_extractor = feature_OAG):
'''
Sample Sub-Graph based on the connection of other nodes with currently sampled nodes
We maintain budgets for each node type, indexed by <node_id, time>.
Currently sampled nodes are stored in layer_data.
        After nodes are sampled, we construct the sampled adjacency matrix.
'''
layer_data = defaultdict( #target_type
lambda: {} # {target_id: [ser, time]}
)
budget = defaultdict( #source_type
lambda: defaultdict( #source_id
lambda: [0., 0] #[sampled_score, time]
))
new_layer_adj = defaultdict( #target_type
lambda: defaultdict( #source_type
lambda: defaultdict( #relation_type
lambda: [] #[target_id, source_id]
)))
'''
For each node being sampled, we find out all its neighborhood,
adding the degree count of these nodes in the budget.
        Note that some nodes have many neighbors (such as fields and venues);
        for those cases we only sample up to sampled_number of them at random.
'''
def add_budget(te, target_id, target_time, layer_data, budget):
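        # For every relation of target_id, pick at most sampled_number neighbors and
        # add 1/len(sampled_ids) to each neighbor's score in the budget, skipping
        # nodes that are already sampled or fall outside time_range.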
for source_type in te:
tes = te[source_type]
for relation_type in tes:
if relation_type == 'self' or target_id not in tes[relation_type]:
continue
adl = tes[relation_type][target_id]
if len(adl) < sampled_number:
sampled_ids = list(adl.keys())
else:
sampled_ids = np.random.choice(list(adl.keys()), sampled_number, replace = False)
for source_id in sampled_ids:
source_time = adl[source_id]
if source_time == None:
source_time = target_time
if source_time > np.max(list(time_range.keys())) or source_id in layer_data[source_type]:
continue
budget[source_type][source_id][0] += 1. / len(sampled_ids)
budget[source_type][source_id][1] = source_time
'''
First adding the sampled nodes then updating budget.
'''
for _type in inp:
for _id, _time in inp[_type]:
layer_data[_type][_id] = [len(layer_data[_type]), _time]
for _type in inp:
te = graph.edge_list[_type]
for _id, _time in inp[_type]:
add_budget(te, _id, _time, layer_data, budget)
'''
We recursively expand the sampled graph by sampled_depth.
Each time we sample a fixed number of nodes for each budget,
based on the accumulated degree.
'''
for layer in range(sampled_depth):
sts = list(budget.keys())
for source_type in sts:
te = graph.edge_list[source_type]
keys = np.array(list(budget[source_type].keys()))
if sampled_number > len(keys):
'''
Directly sample all the nodes
'''
sampled_ids = np.arange(len(keys))
else:
'''
Sample based on accumulated degree
'''
score = np.array(list(budget[source_type].values()))[:,0] ** 2
score = score / np.sum(score)
sampled_ids = np.random.choice(len(score), sampled_number, p = score, replace = False)
sampled_keys = keys[sampled_ids]
'''
First adding the sampled nodes then updating budget.
'''
for k in sampled_keys:
layer_data[source_type][k] = [len(layer_data[source_type]), budget[source_type][k][1]]
for k in sampled_keys:
add_budget(te, k, budget[source_type][k][1], layer_data, budget)
budget[source_type].pop(k)
'''
Prepare feature, time and adjacency matrix for the sampled graph
'''
feature, times, indxs, texts = feature_extractor(layer_data, graph)
edge_list = defaultdict( #target_type
lambda: defaultdict( #source_type
lambda: defaultdict( #relation_type
lambda: [] # [target_id, source_id]
)))
for _type in layer_data:
for _key in layer_data[_type]:
_ser = layer_data[_type][_key][0]
edge_list[_type][_type]['self'] += [[_ser, _ser]]
'''
        Reconstruct the sampled adjacency matrix by checking whether each
        link exists in the original graph
'''
for target_type in graph.edge_list:
te = graph.edge_list[target_type]
for source_type in te:
tes = te[source_type]
for relation_type in tes:
tesr = tes[relation_type]
for target_key in layer_data[target_type]:
target_ser = layer_data[target_type][target_key][0]
if target_key not in tesr:
continue
tesrt = tesr[target_key]
for source_key in layer_data[source_type]:
source_ser = layer_data[source_type][source_key][0]
'''
                            Check whether each link (target_id, source_id) exists in the original adjacency matrix
'''
if source_key in tesrt:
edge_list[target_type][source_type][relation_type] += [[target_ser, source_ser]]
return feature, times, edge_list, indxs, texts
def to_torch(feature, time, edge_list, graph):
'''
Transform a sampled sub-graph into pytorch Tensor
node_dict: {node_type: <node_number, node_type_ID>} node_number is used to trace back the nodes in original graph.
edge_dict: {edge_type: edge_type_ID}
'''
node_dict = {}
node_feature = []
node_type = []
node_time = []
edge_index = []
edge_type = []
edge_time = []
node_num = 0
types = graph.get_types()
for t in types:
node_dict[t] = [node_num, len(node_dict)]
node_num += len(feature[t])
for t in types:
node_feature += list(feature[t])
node_time += list(time[t])
node_type += [node_dict[t][1] for _ in range(len(feature[t]))]
edge_dict = {e[2]: i for i, e in enumerate(graph.get_meta_graph())}
edge_dict['self'] = len(edge_dict)
for target_type in edge_list:
for source_type in edge_list[target_type]:
for relation_type in edge_list[target_type][source_type]:
for ii, (ti, si) in enumerate(edge_list[target_type][source_type][relation_type]):
tid, sid = ti + node_dict[target_type][0], si + node_dict[source_type][0]
edge_index += [[sid, tid]]
edge_type += [edge_dict[relation_type]]
'''
Our time ranges from 1900 - 2020, largest span is 120.
'''
edge_time += [node_time[tid] - node_time[sid] + 120]
node_feature = torch.FloatTensor(node_feature)
node_type = torch.LongTensor(node_type)
edge_time = torch.LongTensor(edge_time)
edge_index = torch.LongTensor(edge_index).t()
edge_type = torch.LongTensor(edge_type)
return node_feature, node_type, edge_time, edge_index, edge_type, node_dict, edge_dict
|
the-stack_0_15559 | #!/usr/bin/env python
import sys
import os
import platform
import subprocess
def check_for_executable(exe_name, args=['--version']):
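    # Return True if exe_name can be invoked with the given arguments; used below
    # to probe for ninja.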
try:
cmd = [exe_name]
cmd.extend(args)
subprocess.check_output(cmd)
return True
except Exception:
return False
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
'--clean',
help='remove build directory before build',
action='store_true',
dest='clean')
parser.add_argument(
'-t', '--tests', help='run tests', action='store_true', dest='run_tests')
parser.add_argument(
'-v', help='verbose', action='store_true', dest='verbose')
parser.add_argument(
'-o', '--output',
help='output dir (relative to source dir)',
default='build',
dest='out_dir')
parser.add_argument(
'-c', '--config',
help='config (Debug or Release)',
default='Debug',
dest='config')
parser.add_argument(
'--sanitizers',
help='Run tests with address and undefined behaviour sanitizer if available',
default=False,
dest='sanitizers')
if platform.system() == "Windows":
parser.add_argument(
'--win32',
help='Build 32-bit libraries',
action='store_true',
dest='win32')
args = parser.parse_args()
args.platform = platform.system()
src_dir = os.path.realpath(os.path.dirname(os.path.dirname(__file__)))
if args.clean:
subprocess.check_call('rm -rf {}'.format(args.out_dir).split())
cmake_invocation = ['cmake', '.', '-B{}'.format(args.out_dir)]
if args.platform == 'Windows':
if args.win32:
cmake_invocation.extend(['-G', 'Visual Studio 15 2017'])
else:
cmake_invocation.extend(['-G', 'Visual Studio 15 2017 Win64'])
else:
if check_for_executable('ninja'):
cmake_invocation.extend(['-GNinja'])
    cmake_invocation.append('-DCMAKE_BUILD_TYPE={}'.format(args.config))
if args.verbose:
cmake_invocation.append('-DCMAKE_VERBOSE_MAKEFILE:BOOL=ON')
if args.sanitizers:
cmake_invocation.append('-DENABLE_SANITIZERS:BOOL=ON')
subprocess.check_call(cmake_invocation, cwd=src_dir)
subprocess.check_call(
'cmake --build ./{}'.format(args.out_dir).split(), cwd=src_dir)
if args.run_tests:
rc = subprocess.call(
'ctest . --output-on-failure -C {}'.format(args.config).split(),
cwd=os.path.join(src_dir, args.out_dir))
if rc != 0:
sys.exit(1)
if __name__ == '__main__':
main()
|
the-stack_0_15560 | import os
import textwrap
import warnings
from xml.dom import minidom
from conans.client.tools import msvs_toolset
from conans.errors import ConanException
from conans.util.files import save, load
class MSBuildToolchain(object):
filename = "conantoolchain.props"
def __init__(self, conanfile):
self._conanfile = conanfile
self.preprocessor_definitions = {}
self.configuration = conanfile.settings.build_type
def _name_condition(self, settings):
props = [("Configuration", self.configuration),
# FIXME: This probably requires mapping ARM architectures
("Platform", {'x86': 'Win32',
'x86_64': 'x64'}.get(settings.get_safe("arch")))]
name = "".join("_%s" % v for _, v in props if v is not None)
condition = " And ".join("'$(%s)' == '%s'" % (k, v) for k, v in props if v is not None)
return name.lower(), condition
def write_toolchain_files(self):
# Warning
msg = ("\n*****************************************************************\n"
"******************************************************************\n"
"'write_toolchain_files()' has been deprecated and moved.\n"
"It will be removed in next Conan release.\n"
"Use 'generate()' method instead.\n"
"********************************************************************\n"
"********************************************************************\n")
from conans.client.output import Color, ConanOutput
ConanOutput(self._conanfile.output._stream,
color=self._conanfile.output._color).writeln(msg, front=Color.BRIGHT_RED)
warnings.warn(msg)
self.generate()
def generate(self):
name, condition = self._name_condition(self._conanfile.settings)
config_filename = "conantoolchain{}.props".format(name)
self._write_config_toolchain(config_filename)
self._write_main_toolchain(config_filename, condition)
def _write_config_toolchain(self, config_filename):
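        # Write the per-configuration property sheet: preprocessor definitions,
        # MSVC runtime library, C++ standard and platform toolset.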
def format_macro(k, value):
return '%s="%s"' % (k, value) if value is not None else k
runtime = self._conanfile.settings.get_safe("compiler.runtime")
cppstd = self._conanfile.settings.get_safe("compiler.cppstd")
toolset = msvs_toolset(self._conanfile.settings)
runtime_library = {"MT": "MultiThreaded",
"MTd": "MultiThreadedDebug",
"MD": "MultiThreadedDLL",
"MDd": "MultiThreadedDebugDLL"}.get(runtime, "")
content = textwrap.dedent("""\
<?xml version="1.0" encoding="utf-8"?>
<Project xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ItemDefinitionGroup>
<ClCompile>
<PreprocessorDefinitions>
{};%(PreprocessorDefinitions)
</PreprocessorDefinitions>
<RuntimeLibrary>{}</RuntimeLibrary>
<LanguageStandard>{}</LanguageStandard>
</ClCompile>
</ItemDefinitionGroup>
<PropertyGroup Label="Configuration">
<PlatformToolset>{}</PlatformToolset>
</PropertyGroup>
</Project>
""")
preprocessor_definitions = ";".join([format_macro(k, v)
for k, v in self.preprocessor_definitions.items()])
# It is useless to set PlatformToolset in the config file, because the conditional checks it
cppstd = "stdcpp%s" % cppstd if cppstd else ""
toolset = toolset or ""
config_props = content.format(preprocessor_definitions, runtime_library, cppstd, toolset)
config_filepath = os.path.abspath(config_filename)
self._conanfile.output.info("MSBuildToolchain created %s" % config_filename)
save(config_filepath, config_props)
def _write_main_toolchain(self, config_filename, condition):
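        # Create or update the aggregating conantoolchain.props, appending a
        # conditional <Import> of the per-configuration sheet unless an identical
        # import is already present.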
main_toolchain_path = os.path.abspath(self.filename)
if os.path.isfile(main_toolchain_path):
content = load(main_toolchain_path)
else:
content = textwrap.dedent("""\
<?xml version="1.0" encoding="utf-8"?>
<Project ToolsVersion="4.0"
xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ImportGroup Label="PropertySheets" >
</ImportGroup>
<PropertyGroup Label="ConanPackageInfo">
<ConanPackageName>{}</ConanPackageName>
<ConanPackageVersion>{}</ConanPackageVersion>
</PropertyGroup>
</Project>
""")
conan_package_name = self._conanfile.name if self._conanfile.name else ""
conan_package_version = self._conanfile.version if self._conanfile.version else ""
content = content.format(conan_package_name, conan_package_version)
dom = minidom.parseString(content)
try:
import_group = dom.getElementsByTagName('ImportGroup')[0]
except Exception:
raise ConanException("Broken {}. Remove the file and try again".format(self.filename))
children = import_group.getElementsByTagName("Import")
for node in children:
if (config_filename == node.getAttribute("Project") and
condition == node.getAttribute("Condition")):
break # the import statement already exists
else: # create a new import statement
import_node = dom.createElement('Import')
import_node.setAttribute('Condition', condition)
import_node.setAttribute('Project', config_filename)
import_group.appendChild(import_node)
conan_toolchain = dom.toprettyxml()
conan_toolchain = "\n".join(line for line in conan_toolchain.splitlines() if line.strip())
self._conanfile.output.info("MSBuildToolchain writing {}".format(self.filename))
save(main_toolchain_path, conan_toolchain)
|
the-stack_0_15561 | from __future__ import division, absolute_import, print_function
import sys
from numpy.testing import (TestCase, run_module_suite, assert_,
assert_array_equal)
from numpy import random
from numpy.compat import long
import numpy as np
class TestRegression(TestCase):
def test_VonMises_range(self):
# Make sure generated random variables are in [-pi, pi].
# Regression test for ticket #986.
for mu in np.linspace(-7., 7., 5):
r = random.mtrand.vonmises(mu, 1, 50)
assert_(np.all(r > -np.pi) and np.all(r <= np.pi))
def test_hypergeometric_range(self):
# Test for ticket #921
assert_(np.all(np.random.hypergeometric(3, 18, 11, size=10) < 4))
assert_(np.all(np.random.hypergeometric(18, 3, 11, size=10) > 0))
# Test for ticket #5623
args = [
(2**20 - 2, 2**20 - 2, 2**20 - 2), # Check for 32-bit systems
]
is_64bits = sys.maxsize > 2**32
if is_64bits:
args.append((2**40 - 2, 2**40 - 2, 2**40 - 2)) # Check for 64-bit systems
for arg in args:
assert_(np.random.hypergeometric(*arg) > 0)
def test_logseries_convergence(self):
# Test for ticket #923
N = 1000
np.random.seed(0)
rvsn = np.random.logseries(0.8, size=N)
# these two frequency counts should be close to theoretical
# numbers with this large sample
# theoretical large N result is 0.49706795
freq = np.sum(rvsn == 1) / float(N)
msg = "Frequency was %f, should be > 0.45" % freq
assert_(freq > 0.45, msg)
# theoretical large N result is 0.19882718
freq = np.sum(rvsn == 2) / float(N)
msg = "Frequency was %f, should be < 0.23" % freq
assert_(freq < 0.23, msg)
def test_permutation_longs(self):
np.random.seed(1234)
a = np.random.permutation(12)
np.random.seed(1234)
b = np.random.permutation(long(12))
assert_array_equal(a, b)
def test_randint_range(self):
# Test for ticket #1690
lmax = np.iinfo('l').max
lmin = np.iinfo('l').min
        try:
            random.randint(lmin, lmax)
        except Exception:
            raise AssertionError
def test_shuffle_mixed_dimension(self):
# Test for trac ticket #2074
for t in [[1, 2, 3, None],
[(1, 1), (2, 2), (3, 3), None],
[1, (2, 2), (3, 3), None],
[(1, 1), 2, 3, None]]:
np.random.seed(12345)
shuffled = list(t)
random.shuffle(shuffled)
assert_array_equal(shuffled, [t[0], t[3], t[1], t[2]])
def test_call_within_randomstate(self):
# Check that custom RandomState does not call into global state
m = np.random.RandomState()
res = np.array([0, 8, 7, 2, 1, 9, 4, 7, 0, 3])
for i in range(3):
np.random.seed(i)
m.seed(4321)
# If m.state is not honored, the result will change
assert_array_equal(m.choice(10, size=10, p=np.ones(10)/10.), res)
def test_multivariate_normal_size_types(self):
# Test for multivariate_normal issue with 'size' argument.
# Check that the multivariate_normal size argument can be a
# numpy integer.
np.random.multivariate_normal([0], [[0]], size=1)
np.random.multivariate_normal([0], [[0]], size=np.int_(1))
np.random.multivariate_normal([0], [[0]], size=np.int64(1))
if __name__ == "__main__":
run_module_suite()
|
the-stack_0_15562 | from __future__ import print_function
import ROOT,itertools,math #
from array import array #
from DataFormats.FWLite import Events, Handle
ROOT.FWLiteEnabler.enable()
#
tag='output'
##A class to keep BMTF data
###Common methods############
def fetchStubsOLD(event,ontime=False,isData=True):
phiSeg = Handle ('L1MuDTChambPhContainer')
if not isData:
event.getByLabel('simTwinMuxDigis',phiSeg)
else:
event.getByLabel('bmtfDigis',phiSeg)
if ontime:
filtered=filter(lambda x: x.bxNum()==0, phiSeg.product().getContainer())
return filtered
else:
return phiSeg.product().getContainer()
def fetchStubs(event,ontime=True):
phiSeg2 = Handle ('std::vector<L1MuKBMTCombinedStub>')
event.getByLabel('simKBmtfStubs',phiSeg2)
if ontime:
filtered=filter(lambda x: x.bxNum()==0, phiSeg2.product())
return filtered
else:
return phiSeg2.product()
def globalBMTFPhi(muon):
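    # Convert the processor-local hardware phi to a global phi in radians and apply
    # a curvature-dependent correction (K is the signed inverse hardware pT).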
temp=muon.processor()*48+muon.hwPhi()
temp=temp*2*math.pi/576.0-math.pi*15.0/180.0;
if temp>math.pi:
temp=temp-2*math.pi;
K=1.0/muon.hwPt()
if muon.hwSign()>0:
K=-1.0/muon.hwPt()
return temp+5.740*K
def fetchKMTF(event,etaMax,collection):
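    # Read the BXVector of regional muon candidates from the given collection and
    # group them by bunch crossing.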
kbmtfH = Handle ('BXVector<l1t::RegionalMuonCand>')
event.getByLabel(collection,kbmtfH)
kbmtf=kbmtfH.product()
kbmtfMuons={}
for bx in [-3,-2,-1,0,1,2,3]:
kbmtfMuons[bx]=[]
for bx in range(kbmtf.getFirstBX(),kbmtf.getLastBX()+1):
for j in range(0,kbmtf.size(bx)):
mu = kbmtf.at(bx,j)
kbmtfMuons[bx].append(mu)
# kbmtfMuons[bx]=sorted(kbmtfMuons[bx],key=lambda x: x.hwPt(),reverse=True)
return kbmtfMuons
def curvResidual(a,b):
return (a.charge()/a.pt()-b.charge()/b.pt())*b.pt()/b.charge()
def ptResidual(a,b):
return (a.pt()-b.pt())/b.pt()
def curvResidualSTA(a,b):
return (a.charge()/a.ptUnconstrained()-b.charge()/b.pt())*b.pt()/b.charge()
def deltaPhi( p1, p2):
'''Computes delta phi, handling periodic limit conditions.'''
res = p1 - p2
while res > math.pi:
res -= 2*math.pi
while res < -math.pi:
res += 2*math.pi
return res
def deltaR( *args ):
return math.sqrt( deltaR2(*args) )
def deltaR2( e1, p1, e2, p2):
de = e1 - e2
dp = deltaPhi(p1, p2)
return de*de + dp*dp
def log(event,counter,mystubs,kmtf,bmtf):
print("--------EVENT"+str(counter)+"------------")
print('RUN={run} LUMI={lumi} EVENT={event}'.format(run=event.eventAuxiliary().id().run(),lumi=event.eventAuxiliary().id().luminosityBlock(),event=event.eventAuxiliary().id().event()))
print("-----------------------------")
print("-----------------------------")
print('Stubs:')
for stub in mystubs:
print('wheel={w} sector={sc} station={st} high/low={ts} phi={phi} phiB={phiB} qual={qual} BX={BX}'.format(w=stub.whNum(),sc=stub.scNum(),st=stub.stNum(),ts=stub.Ts2Tag(),phi=stub.phi(),phiB=stub.phiB(),qual=stub.code(),BX=stub.bxNum()))
print('EMU:')
for g in bmtf :
print("EMU sector={sector} pt={pt} eta={eta} phi={phi} qual={qual} dxy={dxy} pt2={pt2} hasFineEta={HF}".format(sector=g.processor(), pt=g.hwPt(),eta=g.hwEta(),phi=g.hwPhi(),qual=g.hwQual(),dxy=g.hwDXY(),pt2=g.hwPt2(),HF=g.hwHF()))
print('DATA:')
for g in kmtf :
print("DATA sector={sector} pt={pt} eta={eta} phi={phi} qual={qual} dxy={dxy} pt2={pt2} hasFineEta={HF}".format(sector=g.processor(),pt=g.hwPt(),eta=g.hwEta(),phi=g.hwPhi(),qual=g.hwQual(),dxy=g.hwDXY(),pt2=g.hwPt2(),HF=g.hwHF()))
print("-----------------------------")
print("-----------------------------")
print("c + enter to continue")
import pdb;pdb.set_trace()
###############################
#########Histograms#############
histos={}
histos['fw']={}
histos['fw']['pt1']=ROOT.TH1D("fw_pt1","HW p_{T}",512,0,511)
histos['fw']['eta1']=ROOT.TH1D("fw_eta1","HW #eta",256,-127,128)
histos['fw']['phi1']=ROOT.TH1D("fw_phi1","HW #phi",256,-127,128)
histos['fw']['HF1']=ROOT.TH1D("fw_HF1","HW HF",256,-127,128)
histos['fw']['qual1']=ROOT.TH1D("fw_qual1","HW qual",16,0,16)
histos['fw']['dxy1']=ROOT.TH1D("fw_dxy1","HW DXY",4,0,4)
histos['fw']['ptSTA1']=ROOT.TH1D("fw_ptSTA1","HW STA PT",256,0,255)
histos['fw']['pt2']=ROOT.TH1D("fw_pt2","HW p_{T}",512,0,511)
histos['fw']['eta2']=ROOT.TH1D("fw_eta2","HW #eta",256,-127,128)
histos['fw']['phi2']=ROOT.TH1D("fw_phi2","HW #phi",256,-127,128)
histos['fw']['HF2']=ROOT.TH1D("fw_HF2","HW HF",256,-127,128)
histos['fw']['qual2']=ROOT.TH1D("fw_qual2","HW qual",16,0,16)
histos['fw']['dxy2']=ROOT.TH1D("fw_dxy2","HW DXY",4,0,4)
histos['fw']['ptSTA2']=ROOT.TH1D("fw_ptSTA2","HW STA PT",256,0,255)
histos['fw']['pt3']=ROOT.TH1D("fw_pt3","HW p_{T}",512,0,511)
histos['fw']['eta3']=ROOT.TH1D("fw_eta3","HW #eta",256,-127,128)
histos['fw']['phi3']=ROOT.TH1D("fw_phi3","HW #phi",256,-127,128)
histos['fw']['HF3']=ROOT.TH1D("fw_HF3","HW HF",256,-127,128)
histos['fw']['qual3']=ROOT.TH1D("fw_qual3","HW qual",16,0,16)
histos['fw']['dxy3']=ROOT.TH1D("fw_dxy3","HW DXY",4,0,4)
histos['fw']['ptSTA3']=ROOT.TH1D("fw_ptSTA3","HW STA PT",256,0,255)
histos['emu']={}
histos['emu']['pt1']=ROOT.TH1D("emu_pt1","HW p_{T}",512,0,511)
histos['emu']['eta1']=ROOT.TH1D("emu_eta1","HW #eta",256,-127,128)
histos['emu']['phi1']=ROOT.TH1D("emu_phi1","HW #phi",256,-127,128)
histos['emu']['HF1']=ROOT.TH1D("emu_HF1","HW HF",256,-127,128)
histos['emu']['qual1']=ROOT.TH1D("emu_qual1","HW qual",16,0,16)
histos['emu']['dxy1']=ROOT.TH1D("emu_dxy1","HW DXY",4,0,4)
histos['emu']['ptSTA1']=ROOT.TH1D("emu_ptSTA1","HW STA PT",256,0,255)
histos['emu']['pt2']=ROOT.TH1D("emu_pt2","HW p_{T}",512,0,511)
histos['emu']['eta2']=ROOT.TH1D("emu_eta2","HW #eta",256,-127,128)
histos['emu']['phi2']=ROOT.TH1D("emu_phi2","HW #phi",256,-127,128)
histos['emu']['HF2']=ROOT.TH1D("emu_HF2","HW HF",256,-127,128)
histos['emu']['qual2']=ROOT.TH1D("emu_qual2","HW qual",16,0,16)
histos['emu']['dxy2']=ROOT.TH1D("emu_dxy2","HW DXY",4,0,4)
histos['emu']['ptSTA2']=ROOT.TH1D("emu_ptSTA2","HW STA PT",256,0,255)
histos['emu']['pt3']=ROOT.TH1D("emu_pt3","HW p_{T}",512,0,511)
histos['emu']['eta3']=ROOT.TH1D("emu_eta3","HW #eta",256,-127,128)
histos['emu']['phi3']=ROOT.TH1D("emu_phi3","HW #phi",256,-127,128)
histos['emu']['HF3']=ROOT.TH1D("emu_HF3","HW HF",256,-127,128)
histos['emu']['qual3']=ROOT.TH1D("emu_qual3","HW qual",16,0,16)
histos['emu']['dxy3']=ROOT.TH1D("emu_dxy3","HW DXY",4,0,4)
histos['emu']['ptSTA3']=ROOT.TH1D("emu_ptSTA3","HW STA PT",256,0,255)
for key,histo in histos['fw'].items():
histo.Sumw2()
def fill(info,mu):
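    # Fill the histograms for up to three candidates; empty slots are filled with
    # zeros so that data and emulator distributions stay aligned.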
if len(mu)>0:
info['pt1'].Fill(mu[0].hwPt())
info['eta1'].Fill(mu[0].hwEta())
info['phi1'].Fill(mu[0].hwPhi())
info['HF1'].Fill(mu[0].hwHF())
info['qual1'].Fill(mu[0].hwQual())
info['dxy1'].Fill(mu[0].hwDXY())
info['ptSTA1'].Fill(mu[0].hwPt2())
else:
info['pt1'].Fill(0)
info['eta1'].Fill(0)
info['phi1'].Fill(0)
info['HF1'].Fill(0)
info['qual1'].Fill(0)
info['dxy1'].Fill(0)
info['ptSTA1'].Fill(0)
if len(mu)>1:
info['pt2'].Fill(mu[1].hwPt())
info['eta2'].Fill(mu[1].hwEta())
info['phi2'].Fill(mu[1].hwPhi())
info['HF2'].Fill(mu[1].hwHF())
info['qual2'].Fill(mu[1].hwQual())
info['dxy2'].Fill(mu[1].hwDXY())
info['ptSTA2'].Fill(mu[1].hwPt2())
else:
info['pt2'].Fill(0)
info['eta2'].Fill(0)
info['phi2'].Fill(0)
info['HF2'].Fill(0)
info['qual2'].Fill(0)
info['dxy2'].Fill(0)
info['ptSTA2'].Fill(0)
if len(mu)>2:
info['pt3'].Fill(mu[2].hwPt())
info['eta3'].Fill(mu[2].hwEta())
info['phi3'].Fill(mu[2].hwPhi())
info['HF3'].Fill(mu[2].hwHF())
info['qual3'].Fill(mu[2].hwQual())
info['dxy3'].Fill(mu[2].hwDXY())
info['ptSTA3'].Fill(mu[2].hwPt2())
else:
info['pt3'].Fill(0)
info['eta3'].Fill(0)
info['phi3'].Fill(0)
info['HF3'].Fill(0)
info['qual3'].Fill(0)
info['dxy3'].Fill(0)
info['ptSTA3'].Fill(0)
##############################
BUNCHES=[0]
events=Events([tag+'.root'])
counter=-1
for event in events:
counter=counter+1
#fetch stubs
stubs=fetchStubsOLD(event,True)
unpacker=fetchKMTF(event,100.0,'bmtfDigis:kBMTF')
emulator=fetchKMTF(event,100.0,'simKBmtfDigis:BMTF')
for processor in range(0,12):
for bx in BUNCHES:
            emu=list(filter(lambda x: x.processor()==processor,emulator[bx]))
            data=list(filter(lambda x: x.processor()==processor,unpacker[bx]))
            if (len(emu)+len(data))>0:
                fill(histos['emu'],emu)
                fill(histos['fw'],data)
# if len(emu)!=0 and len(data)==0:
# log(event,counter,stubs,data,emu)
# import pdb;pdb.set_trace()
f=ROOT.TFile("validationResults.root","RECREATE")
for key,histo in histos['fw'].items():
histo.SetMarkerStyle(7)
histo.Write()
for key,histo in histos['emu'].items():
histo.SetLineColor(ROOT.kRed)
histo.Write()
#make fancy plots
histonames=['pt1','eta1','phi1','HF1','qual1','dxy1','ptSTA1']
for h in histonames:
c=ROOT.TCanvas(h)
c.cd()
histos['emu'][h].Draw("HIST")
histos['emu'][h].GetXaxis().SetTitle(histos['emu'][h].GetTitle())
histos['emu'][h].GetYaxis().SetTitle("events")
histos['fw'][h].Draw("SAME")
c.SetLogy()
l=ROOT.TLegend(0.6,0.6,0.9,0.8)
l.AddEntry(histos['emu'][h],"emulator","l")
l.AddEntry(histos['fw'][h],"data","p")
l.Draw()
c.Write("plot_"+h)
f.Close()
|
the-stack_0_15563 | #!/usr/bin/env python3
#
# Copyright (c) 2013-2019, Intel Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Intel Corporation nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
# OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# // Author: Filippov Ilia
import common
import sys
import os
from pkg_resources import parse_version
print_debug = common.print_debug
error = common.error
take_lines = common.take_lines
exists = [False, False, False, False, False, False, False, False, False]
names = ["m4", "bison", "flex", "sde", "ispc", "clang", "gcc", "icc", "cmake"]
PATH_dir = os.environ["PATH"].split(os.pathsep)
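# Scan every directory on PATH and record which of the required tools are present.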
for counter in PATH_dir:
for i in range(0,len(exists)):
if os.path.exists(counter + os.sep + names[i]):
exists[i] = True
print_debug("=== in PATH: ===\n", False, "")
print_debug("Tools:\n", False, "")
for i in range(0,3):
if exists[i]:
print_debug(take_lines(names[i] + " --version", "first"), False, "")
else:
error("you don't have " + names[i], 0)
if exists[0] and exists[1] and exists[2]:
if common.check_tools(2):
print_debug("Tools' versions are ok\n", False, "")
print_debug("\nSDE:\n", False, "")
if exists[3]:
print_debug(take_lines(names[3] + " --version", "first"), False, "")
else:
error("you don't have " + names[3], 2)
print_debug("\nISPC:\n", False, "")
if exists[4]:
print_debug(take_lines(names[4] + " --version", "first"), False, "")
else:
error("you don't have " + names[4], 2)
print_debug("\nC/C++ compilers:\n", False, "")
for i in range(5,8):
if exists[i]:
print_debug(take_lines(names[i] + " --version", "first"), False, "")
else:
error("you don't have " + names[i], 2)
print_debug("\nCMake:\n", False, "")
if exists[8]:
cmake_version = take_lines(names[8] + " --version", "first")[3]
if (parse_version(cmake_version) >= parse_version("3.8.0")):
print_debug(take_lines(names[8] + " --version", "first"), False, "")
else:
error("CMake version is older than needed. Please install version 3.8 or newer", 2)
else:
error("you don't have " + names[8], 2)
print_debug("\n=== in ISPC specific environment variables: ===\n", False, "")
if os.environ.get("LLVM_HOME") == None:
error("you have no LLVM_HOME", 2)
else:
print_debug("Your LLVM_HOME:" + os.environ.get("LLVM_HOME") + "\n", False, "")
if os.environ.get("ISPC_HOME") == None:
error("you have no ISPC_HOME", 2)
else:
print_debug("Your ISPC_HOME:" + os.environ.get("ISPC_HOME") + "\n", False, "")
if os.path.exists(os.environ.get("ISPC_HOME") + os.sep + "ispc"):
print_debug("You have ISPC in your ISPC_HOME: " +
take_lines(os.environ.get("ISPC_HOME") + os.sep + "ispc" + " --version", "first"), False, "")
else:
error("you don't have ISPC in your ISPC_HOME", 2)
if os.environ.get("SDE_HOME") == None:
error("You have no SDE_HOME", 2)
else:
print_debug("Your SDE_HOME:" + os.environ.get("SDE_HOME") + "\n", False, "")
if os.path.exists(os.environ.get("SDE_HOME") + os.sep + "sde"):
print_debug("You have sde in your SDE_HOME: " +
take_lines(os.environ.get("SDE_HOME") + os.sep + "sde" + " --version", "first"), False, "")
else:
error("you don't have any SDE in your ISPC_HOME", 2)
|
the-stack_0_15565 | #####################################################################
#
# Predictive Failure Analysis (PFA)
# Graph JES2 Resource Data for Jobs
#
#This python script is for use with data that is collected, created,
#and written by the PFA_JES2_RESOURCE_EXHAUSTION check only. Its
#use with data from any other source will result in errors.
#
#Copyright 2021 IBM Corp.
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
#http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing,
#software distributed under the License is distributed on an
#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
#either express or implied. See the License for the specific
#language governing permissions and limitations under the License.
#####################################################################
import sys
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import numpy as np
import platform
import os
#Make sure we have plenty of potential data points to plot.
plt.rcParams['agg.path.chunksize']=10000
#Disable false positive warning
pd.options.mode.chained_assignment = None # default='warn'
#Which system are we running on?
system = platform.system()
keys = {"JQE":"Q","SPOOL":"S","BERT":"B","JOE":"J"}
user_keys = ["JQE","SPOOL","BERT","JOE"]
asid_header_data = ["Key","JobName","TaskId","Start_Time","STCK_Time","Current_Usage","Date_Time"]
capacity_header_data = ["Resource","Capacity"]
data_types_dict={'Key':str,'JobName':str,'TaskId':str,'Start_Time':str,'STCK_Time':int,'Current_Usage':int,'Date_Time':str}
capacity_types_dict={"Resource":str,"Capacity":int}
check_name = "PFA_JES2_Resource_Exhaustion"
COLUMN_CHAR_LEN = 8
#Parse our command line arguments.
if(len(sys.argv) == 5):
data_filepath = sys.argv[1]
capacity_filepath = sys.argv[2]
jobName = sys.argv[3]
jobName = jobName.upper()
key = sys.argv[4]
key = key.upper()
verbose = False
elif(len(sys.argv) == 6 and (sys.argv[5] == '-v' or sys.argv[5] == '-verbose')):
data_filepath = sys.argv[1]
capacity_filepath = sys.argv[2]
jobName = sys.argv[3]
jobName = jobName.upper()
key = sys.argv[4]
key = key.upper()
verbose = True
elif(len(sys.argv) == 2 and (sys.argv[1] == '-h' or sys.argv[1] == '-help')):
print("The proper syntax for this script is the following:\n")
print("'python Graph_JRE_Job.py data_file capacity_file job_name jes2_resource'.\n")
print("Valid JES2 Resources are: " + str([key for key in user_keys]) + "\n")
print("The file path value is case sensitive, but the JES2 resource and job_name values are not.\n")
print("For example, if this script and the required files are in the same directory, and you want to graph the JES2 Spool data for Job3, you would type the following:\n")
print("'python Graph_JRE_Job.py SY1.5day.All.data Capacity.data Job3 SPOOL'\n")
print("You can also add -v to the end of the command for verbose mode. This option will print additional data ")
print("that could help debug errors or verify the results. An example using verbose mode looks like the following:\n")
print("'python Graph_JRE_Job.py SY1.5day.All.data Capacity.data Job3 BERT -v'\n")
print("When this script is executed on z/OS, it saves the graph in a .pdf file that can be downloaded from the directory where this script was executed and displayed anywhere that supports displaying a .pdf file.")
print("The file name is in the format of jobName_JESResource_graph.pdf.")
print("For example, if you entered 'python Graph_JRE_Job.py SY1.5day.All.data Capacity.data Job3 SPOOL' on z/OS the saved file would be:")
print("JOB3_SPOOL_graph.pdf and it would be located in the current working directory.")
sys.exit()
else:
raise Exception("The supplied arguments are not correct. Specify the data_file_path, capacity_filepath, job_name, and JES2 resource in that order. For help enter 'python Graph_JRE_Job.py -h'")
#Make sure we have proper input from the user.
if(not os.path.exists(data_filepath)):
raise Exception("The specified file or filepath for the data file does not exist. Verify the file and filepath then try again.")
if(not os.path.exists(capacity_filepath)):
raise Exception("The specified file or filepath for the capacity file does not exist. Verify the file and filepath then try again.")
if key not in user_keys:
raise Exception("The specified resource does not exist. Specify a resource that exists.")
#Load up our data and assign correct header values so we can narrow it down to the pieces we want.
data_file = pd.read_csv(data_filepath,
sep="/|,",
names=asid_header_data,
header=None,
engine="python",
converters=data_types_dict)
capacity_file = pd.read_csv(capacity_filepath,
sep="/|,",
names=capacity_header_data,
header=None,
engine="python",
converters=capacity_types_dict)
#We need to make sure our jobName is left justified and the proper length.
#Otherwise we will not be able to find the correct data to graph.
if(len(jobName) < COLUMN_CHAR_LEN):
jobName = jobName.ljust(COLUMN_CHAR_LEN)
#Make sure we have proper input from the user.
if jobName not in data_file.values:
raise Exception("The specified job name does not exist. Verify the job name and try again.")
user_key = key
key = keys[user_key]
user_key = user_key.ljust(COLUMN_CHAR_LEN)
data_file['Capacity'] = np.nan
NUM_TO_PRINT = 10
PDF_FILENAME = jobName.strip()+'_'+user_key.strip()+"_graph.pdf" #This is the name of the .pdf file that gets saved when this script is run on z/OS
def process_data(data_file, capacity_file):
the_capacity = capacity_file.loc[capacity_file['Resource'] == user_key,'Capacity'].values[0]
the_data = data_file.loc[(data_file['Key'] == key) & (data_file['JobName'] == jobName)]
the_data['Capacity'].fillna(the_capacity, inplace=True)
the_data['Capacity'] = the_data['Capacity'].astype(int)
the_data.loc[:,('Date_Time')] = pd.to_datetime(the_data['Date_Time'].astype(str), format='%Y%m%d%H%M%S')
the_data = get_latest_time(the_data)
if(verbose):
print_details(the_data,NUM_TO_PRINT)
return the_data
def graph_data(the_data):
y_values = [0,(the_data['Capacity'].max())*.25,(the_data['Capacity'].max())*.50,(the_data['Capacity'].max())*.75,(the_data['Capacity'].max())]
y_ticks = [str(int(y)) + user_key for y in y_values]
fig, ax = plt.subplots()
ax.xaxis.set_major_formatter(mdates.DateFormatter('%m-%d %H:%M'))
ax.plot(the_data['Date_Time'],the_data['Capacity'],'--r', label='Capacity')
ax.plot(the_data['Date_Time'],the_data['Current_Usage']/1024,'-b', label='Current Usage')
plt.xlabel('Month-Day Time')
fig.suptitle(check_name + "\n" + jobName + '/' + user_key, fontsize=16)
fig.autofmt_xdate()
plt.yticks(y_values, y_ticks)
ax.set_ylim(0,the_data['Capacity'].max()*1.10)
ax.legend(bbox_to_anchor=(1.41, 1),loc="upper right")
fig.subplots_adjust(right=0.75)
if system != 'z/OS':
plt.show();
else:
fig.savefig(PDF_FILENAME)
def print_details(data_frame,num_to_print):
print("Now graphing " + check_name + " data on a " + system + " system.")
print("The job_name is: " + jobName)
print("The JES2 resource is: " + user_key)
print("The data_filepath entered: " + data_filepath)
print("The capacity_filepath entered was: " + capacity_filepath)
print("\nPreview of the data being graphed:")
print(data_frame.head(num_to_print).to_string(index=False))
def get_latest_time(our_data):
#Need to verify that we are using the latest start time if multiple exist for the same ASID.
list_data = our_data['Start_Time'].to_dict()
#Here we make sure we get the latest start time.
times_dict = {}
for i in list_data:
if list_data[i] in times_dict:
times_dict[list_data[i]] += 1
else:
times_dict[list_data[i]] = 1
if(len(times_dict) > 1):
latest_time = max(times_dict.keys())
our_data = our_data.loc[(our_data['Start_Time'] == latest_time)]
return our_data
#Process and graph our data.
the_data = process_data(data_file, capacity_file)
jobName = jobName.strip()
user_key = user_key.strip()
graph_data(the_data)
if system == 'z/OS':
print(PDF_FILENAME + ' has been created and is ready to be downloaded and viewed.')
|
the-stack_0_15568 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
.. currentmodule:: olmos.version
.. moduleauthor:: NarekA <my_email>
This module contains project version information.
"""
__version__ = '0.0.1' #: the working version
__release__ = '0.0.1' #: the release version
|
the-stack_0_15569 | from glfw import *
from OpenGL.GL import *
import numpy as np
from ctypes import *
from learnopengl import *
from PIL import Image
import glm
def resize(window, width, height):
glViewport(0, 0, width, height)
def main():
# Initialize the library
if not init():
return
# Create a windowed mode window and its OpenGL context
window_hint(CONTEXT_VERSION_MAJOR, 3)
window_hint(CONTEXT_VERSION_MINOR, 3)
window_hint(OPENGL_PROFILE, OPENGL_CORE_PROFILE)
screen_width, screen_height = 800, 600
window = create_window(screen_width, screen_height, "LearnOpenGL", None, None)
if not window:
terminate()
make_context_current(window)
set_framebuffer_size_callback(window, resize)
glViewport(0, 0, 800, 600)
# Make the window's context current
make_context_current(window)
# shaders
shader = Shader('vertex.glsl', 'fragment.glsl')
vertices = np.array([
-0.5, -0.5, -0.5, 0.0, 0.0,
0.5, -0.5, -0.5, 1.0, 0.0,
0.5, 0.5, -0.5, 1.0, 1.0,
0.5, 0.5, -0.5, 1.0, 1.0,
-0.5, 0.5, -0.5, 0.0, 1.0,
-0.5, -0.5, -0.5, 0.0, 0.0,
-0.5, -0.5, 0.5, 0.0, 0.0,
0.5, -0.5, 0.5, 1.0, 0.0,
0.5, 0.5, 0.5, 1.0, 1.0,
0.5, 0.5, 0.5, 1.0, 1.0,
-0.5, 0.5, 0.5, 0.0, 1.0,
-0.5, -0.5, 0.5, 0.0, 0.0,
-0.5, 0.5, 0.5, 1.0, 0.0,
-0.5, 0.5, -0.5, 1.0, 1.0,
-0.5, -0.5, -0.5, 0.0, 1.0,
-0.5, -0.5, -0.5, 0.0, 1.0,
-0.5, -0.5, 0.5, 0.0, 0.0,
-0.5, 0.5, 0.5, 1.0, 0.0,
0.5, 0.5, 0.5, 1.0, 0.0,
0.5, 0.5, -0.5, 1.0, 1.0,
0.5, -0.5, -0.5, 0.0, 1.0,
0.5, -0.5, -0.5, 0.0, 1.0,
0.5, -0.5, 0.5, 0.0, 0.0,
0.5, 0.5, 0.5, 1.0, 0.0,
-0.5, -0.5, -0.5, 0.0, 1.0,
0.5, -0.5, -0.5, 1.0, 1.0,
0.5, -0.5, 0.5, 1.0, 0.0,
0.5, -0.5, 0.5, 1.0, 0.0,
-0.5, -0.5, 0.5, 0.0, 0.0,
-0.5, -0.5, -0.5, 0.0, 1.0,
-0.5, 0.5, -0.5, 0.0, 1.0,
0.5, 0.5, -0.5, 1.0, 1.0,
0.5, 0.5, 0.5, 1.0, 0.0,
0.5, 0.5, 0.5, 1.0, 0.0,
-0.5, 0.5, 0.5, 0.0, 0.0,
-0.5, 0.5, -0.5, 0.0, 1.0
], dtype=np.float32)
indices = np.array([
0, 1, 3, # first triangle
1, 2, 3 # second triangle
], dtype=np.uint32)
image1 = Image.open('container.jpg')
image2 = Image.open('awesomeface.png')
# generate buffers
VAO = glGenVertexArrays(1)
VBO = glGenBuffers(1)
EBO = glGenBuffers(1)
texture1 = glGenTextures(1)
texture2 = glGenTextures(1)
# vertex array buffer
glBindVertexArray(VAO)
# vertex buffer
glBindBuffer(GL_ARRAY_BUFFER, VBO)
glBufferData(GL_ARRAY_BUFFER, vertices.nbytes, vertices, GL_STATIC_DRAW)
# element buffer
#glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, EBO)
#glBufferData(GL_ELEMENT_ARRAY_BUFFER, indices.nbytes, indices, GL_STATIC_DRAW)
# texture1
glActiveTexture(GL_TEXTURE0)
glBindTexture(GL_TEXTURE_2D, texture1)
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, image1.width, image1.height, 0, GL_RGB, GL_UNSIGNED_BYTE, np.array(image1))
glGenerateMipmap(GL_TEXTURE_2D)
    # texture1 wrap
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT)
# texture1 filter
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
# texture2
glActiveTexture(GL_TEXTURE1)
glBindTexture(GL_TEXTURE_2D, texture2)
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, image2.width, image2.height, 0, GL_RGBA, GL_UNSIGNED_BYTE, np.flipud(np.array(image2)))
glGenerateMipmap(GL_TEXTURE_2D)
    # texture2 wrap
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT)
# texture2 filter
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
# position attribute
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 5 * sizeof(c_float), c_void_p(0))
glEnableVertexAttribArray(0)
# texture
glVertexAttribPointer(2, 2, GL_FLOAT, GL_FALSE, 5 * sizeof(c_float), c_void_p(3 * sizeof(c_float)))
glEnableVertexAttribArray(2)
# unbind buffer and vertex array objects
glBindVertexArray(0)
shader.use()
shader.set_int("texture2", 1)
# model
# model = glm.mat4(1.0)
# model = glm.rotate(model, glm.radians(-55.), glm.vec3(1.0, 0, 0))
# view
view = glm.mat4(1.0)
view = glm.translate(view, glm.vec3(0, 0, -3.))
# projection
projection = glm.perspective(glm.radians(45.), screen_width/float(screen_height), 0.1, 100.)
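    # Transform chain (assuming vertex.glsl follows the usual LearnOpenGL layout):
    #   clip = projection * view * model * position
    # view and projection stay fixed and are uploaded once per frame below;
    # the model matrix is rebuilt per cube inside the render loop.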
# cube translations
np.random.seed(13)
positions = np.random.rand(10, 3) * 2 - 1
#print(positions)
# Loop until the user closes the window
while not window_should_close(window):
glEnable(GL_DEPTH_TEST)
# Render here, e.g. using pyOpenGL
glClearColor(0.2, 0.3, 0.3, 1.0)
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
# bind textures
glActiveTexture(GL_TEXTURE0)
glBindTexture(GL_TEXTURE_2D, texture1)
glActiveTexture(GL_TEXTURE1)
glBindTexture(GL_TEXTURE_2D, texture2)
glBindVertexArray(VAO)
shader.set_mat4('view', view)
shader.set_mat4('projection', projection)
for i in range(positions.shape[0]):
x, y, z = positions[i]
# set transformations
model = glm.mat4(1.0)
model = glm.translate(model, glm.vec3(x, y, z))
model = glm.rotate(model, (i % 3) * get_time() * glm.radians(i * 20.), glm.vec3(1., 0.3, 0.5))
model = glm.scale(model, glm.vec3(0.3, 0.3, 0.3))
# update transformations
shader.set_mat4('model', model)
#glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_INT, c_void_p(0))
glDrawArrays(GL_TRIANGLES, 0, 36)
# Swap front and back buffers
swap_buffers(window)
# Poll for and process events
poll_events()
    glDeleteVertexArrays(1, [VAO])
    glDeleteBuffers(1, [VBO])
    glDeleteBuffers(1, [EBO])
terminate()
if __name__ == "__main__":
    main()
|
the-stack_0_15573 | # Inspired from OpenAI Baselines. This uses the same design of having an easily
# substitutable generic policy that can be trained. This allows to easily
# substitute in the I2A policy as opposed to the basic CNN one.
import os
os.environ["CUDA_VISIBLE_DEVICES"]="1"
import numpy as np
import tensorflow as tf
from common.multiprocessing_env import SubprocVecEnv
import gym
import gym_minigrid
from tqdm import tqdm
import argparse
from i2a import I2aPolicy
from a2c import CnnPolicy, get_actor_critic
#N_ENVS = 16
N_STEPS = 5
N_ENVS = 8
#N_STEPS = 1
# Total number of iterations (taking into account number of environments and
# number of steps). You wish to train for.
TOTAL_TIMESTEPS=int(1e6)
GAMMA=0.99
LOG_INTERVAL=100
SAVE_INTERVAL = 1e5
# Where you want to save the weights
SAVE_PATH = 'weights'
# This can be anything from "regular" "avoid" "hunt" "ambush" "rush" each
# resulting in a different reward function giving the agent different behavior.
REWARD_MODE = 'regular'
def discount_with_dones(rewards, dones, GAMMA):
discounted = []
r = 0
for reward, done in zip(rewards[::-1], dones[::-1]):
r = reward + GAMMA*r*(1.-done)
discounted.append(r)
return discounted[::-1]
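# Worked example (hypothetical numbers) of the discounting above: with
# rewards=[1, 0, 1], dones=[0, 0, 1] and GAMMA=0.99 the function returns
# approximately [1.9801, 0.99, 1.0] -- the terminal done flag keeps the
# bootstrapped return from leaking across the episode boundary.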
def train(policy, save_name, load_count = 0, summarize=True, load_path=None, log_path = './logs'):
#Minigrid maze env
env_name = "MiniGrid-BlockMaze-v0"
def make_env(env_name):
return lambda: gym_minigrid.wrappers.PadImgObsWrapper(gym.make(env_name))
envs = [make_env(env_name) for i in range(N_ENVS)]
envs = SubprocVecEnv(envs)
ob_space = envs.observation_space.shape
nw, nh, nc = ob_space
ac_space = envs.action_space
obs = envs.reset()
with tf.Session() as sess:
actor_critic = get_actor_critic(sess, N_ENVS, N_STEPS, ob_space,
ac_space, policy, summarize)
if load_path is not None:
actor_critic.load(load_path)
print('Loaded a2c')
summary_op = tf.summary.merge_all()
writer = tf.summary.FileWriter(log_path, graph=sess.graph)
sess.run(tf.global_variables_initializer())
batch_ob_shape = (N_ENVS*N_STEPS, nw, nh, nc)
dones = [False for _ in range(N_ENVS)]
nbatch = N_ENVS * N_STEPS
episode_rewards = np.zeros((N_ENVS, ))
final_rewards = np.zeros((N_ENVS, ))
for update in tqdm(range(load_count + 1, TOTAL_TIMESTEPS + 1)):
# mb stands for mini batch
mb_obs, mb_rewards, mb_actions, mb_values, mb_dones = [],[],[],[],[]
for n in range(N_STEPS):
actions, values, _ = actor_critic.act(obs)
mb_obs.append(np.copy(obs))
mb_actions.append(actions)
mb_values.append(values)
mb_dones.append(dones)
obs, rewards, dones, _ = envs.step(actions)
#print(obs[0:3, :,:,0])
episode_rewards += rewards
masks = 1 - np.array(dones)
final_rewards *= masks
final_rewards += (1 - masks) * episode_rewards
episode_rewards *= masks
mb_rewards.append(rewards)
mb_dones.append(dones)
#batch of steps to batch of rollouts
mb_obs = np.asarray(mb_obs, dtype=np.float32).swapaxes(1, 0).reshape(batch_ob_shape)
mb_rewards = np.asarray(mb_rewards, dtype=np.float32).swapaxes(1, 0)
mb_actions = np.asarray(mb_actions, dtype=np.int32).swapaxes(1, 0)
mb_values = np.asarray(mb_values, dtype=np.float32).swapaxes(1, 0)
mb_dones = np.asarray(mb_dones, dtype=np.bool).swapaxes(1, 0)
mb_masks = mb_dones[:, :-1]
mb_dones = mb_dones[:, 1:]
last_values = actor_critic.critique(obs).tolist()
#discount/bootstrap off value fn
for n, (rewards, d, value) in enumerate(zip(mb_rewards, mb_dones, last_values)):
rewards = rewards.tolist()
d = d.tolist()
if d[-1] == 0:
rewards = discount_with_dones(rewards+[value], d+[0], GAMMA)[:-1]
else:
rewards = discount_with_dones(rewards, d, GAMMA)
mb_rewards[n] = rewards
mb_rewards = mb_rewards.flatten()
mb_actions = mb_actions.flatten()
mb_values = mb_values.flatten()
mb_masks = mb_masks.flatten()
if summarize:
loss, policy_loss, value_loss, policy_entropy, _, summary = actor_critic.train(mb_obs,
mb_rewards, mb_masks, mb_actions, mb_values, update,
summary_op)
writer.add_summary(summary, update)
else:
loss, policy_loss, value_loss, policy_entropy, _ = actor_critic.train(mb_obs,
mb_rewards, mb_masks, mb_actions, mb_values, update)
if update % LOG_INTERVAL == 0 or update == 1:
print('%i): %.4f, %.4f, %.4f' % (update, policy_loss, value_loss, policy_entropy))
print(final_rewards.mean())
if update % SAVE_INTERVAL == 0:
print('Saving model')
actor_critic.save(SAVE_PATH, save_name + '_' + str(update) + '.ckpt')
actor_critic.save(SAVE_PATH, save_name + '_done.ckpt')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('algo', help='Algorithm to train either i2a or a2c')
args = parser.parse_args()
if args.algo == 'a2c':
policy = CnnPolicy
elif args.algo == 'i2a':
policy = I2aPolicy
else:
raise ValueError('Must specify the algo name as either a2c or i2a')
train(policy, args.algo, summarize=True, log_path=args.algo + '_logs')
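# Typical invocation (assuming this file is saved as train.py):
#   python train.py a2c    # train the plain CNN actor-critic policy
#   python train.py i2a    # train the I2A policy instead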
|
the-stack_0_15575 | """
Code to extract some key info from the zresults*fits file that gets produced
after running zspec on calibrated DEIMOS data
"""
import sys
from astropy.io import fits as pf
from astropy.table import Table
maskname = sys.argv[1]
hdu = pf.open(maskname)
tdat = hdu[1].data
nobj = len(tdat)
for i in range(nobj):
    row = tdat[i]
    print('%-15s %-3s %-7s %1d %7.4f %g %s' %
          (row['objname'], row['slitname'], row['maskname'], row['zquality'],
           row['z'], row['z_err'], row['comment']))
|
the-stack_0_15576 | """
Test fiberassign target operations.
"""
import os
import subprocess
import re
import shutil
import unittest
from datetime import datetime
import json
import glob
import numpy as np
import fitsio
import desimodel
import fiberassign
from fiberassign.utils import option_list, GlobalTimers
from fiberassign.hardware import load_hardware
from fiberassign.tiles import load_tiles, Tiles
from fiberassign.targets import (TARGET_TYPE_SCIENCE, TARGET_TYPE_SKY,
TARGET_TYPE_SUPPSKY,
TARGET_TYPE_STANDARD, TARGET_TYPE_SAFE,
Targets, TargetsAvailable, TargetTree,
LocationsAvailable, load_target_file)
from fiberassign.assign import (Assignment, write_assignment_fits,
write_assignment_ascii, merge_results,
read_assignment_fits_tile)
from fiberassign.qa import qa_tiles, qa_targets
from fiberassign.vis import plot_tiles, plot_qa, set_matplotlib_pdf_backend
from fiberassign.scripts.assign import parse_assign, run_assign_full
from fiberassign.scripts.plot import parse_plot, run_plot
from fiberassign.scripts.qa import parse_qa, run_qa
from fiberassign.scripts.qa_plot import parse_plot_qa, run_plot_qa
from .simulate import (test_subdir_create, sim_tiles, sim_targets,
sim_focalplane, petal_rotation, test_assign_date)
class TestQA(unittest.TestCase):
@classmethod
def setUpClass(cls):
# Find the location of scripts. First try the case where we are running
# tests from the top level of the source tree.
cls.topDir = os.path.dirname( # top-level
os.path.dirname( # build/
os.path.dirname( # lib.arch/
os.path.dirname( # fiberassign/
os.path.dirname(os.path.abspath(__file__)) # test/
)
)
)
)
cls.binDir = os.path.join(cls.topDir, "bin")
if not os.path.isdir(cls.binDir):
# We are running from some other directory from an installed package
cls.topDir = os.path.dirname( # top-level
os.path.dirname( # lib/
os.path.dirname( # python3.x/
os.path.dirname( # site-packages/
os.path.dirname( # egg/
os.path.dirname( # fiberassign/
os.path.dirname(os.path.abspath(__file__)) # test/
)
)
)
)
)
)
cls.binDir = os.path.join(cls.topDir, "bin")
def setUp(self):
self.density_science = 5000
self.density_standards = 5000
self.density_sky = 10
self.density_suppsky = 5000
pass
def tearDown(self):
pass
def test_science(self):
set_matplotlib_pdf_backend()
import matplotlib.pyplot as plt
test_dir = test_subdir_create("qa_test_science")
log_file = os.path.join(test_dir, "log.txt")
np.random.seed(123456789)
input_mtl = os.path.join(test_dir, "mtl.fits")
# For this test, we will use just 2 science target classes, in order to verify
# we get approximately the correct distribution
sdist = [
(3000, 1, 0.25, "QSO"),
(2000, 1, 0.75, "ELG")
]
nscience = sim_targets(
input_mtl,
TARGET_TYPE_SCIENCE,
0,
density=self.density_science,
science_frac=sdist
)
log_msg = "Simulated {} science targets\n".format(nscience)
tgs = Targets()
load_target_file(tgs, input_mtl)
# Create a hierarchical triangle mesh lookup of the targets positions
tree = TargetTree(tgs, 0.01)
# Read hardware properties
fp, exclude, state = sim_focalplane(rundate=test_assign_date)
hw = load_hardware(focalplane=(fp, exclude, state))
tfile = os.path.join(test_dir, "footprint.fits")
sim_tiles(tfile)
tiles = load_tiles(tiles_file=tfile)
# Compute the targets available to each fiber for each tile.
tgsavail = TargetsAvailable(hw, tgs, tiles, tree)
# Free the tree
del tree
# Compute the fibers on all tiles available for each target
favail = LocationsAvailable(tgsavail)
# Pass empty map of STUCK positioners that land on good sky
stucksky = {}
# Create assignment object
asgn = Assignment(tgs, tgsavail, favail, stucksky)
# First-pass assignment of science targets
asgn.assign_unused(TARGET_TYPE_SCIENCE)
# Redistribute
asgn.redistribute_science()
write_assignment_fits(tiles, asgn, out_dir=test_dir, all_targets=True)
tile_ids = list(tiles.id)
merge_results(
[input_mtl], list(), tile_ids, result_dir=test_dir, copy_fba=False
)
# FIXME: In order to use the qa_targets function, we need to know the
# starting requested number of observations (NUMOBS_INIT). Then we can use
# that value for each target and compare to the number actually assigned.
# However, the NUMOBS_INIT column was removed from the merged TARGET table.
# If we are ever able to reach consensus on restoring that column, then we
# can re-enable these tests below.
#
# qa_targets(
# hw,
# tiles,
# result_dir=test_dir,
# result_prefix="fiberassign-"
# )
#
# # Load the target catalog so that we have access to the target properties
#
# fd = fitsio.FITS(input_mtl, "r")
# scidata = np.array(np.sort(fd[1].read(), order="TARGETID"))
# fd.close()
# del fd
#
# # How many possible positioner assignments did we have?
# nassign = 5000 * len(tile_ids)
#
# possible = dict()
# achieved = dict()
#
# namepat = re.compile(r".*/qa_target_count_(.*)_init-(.*)\.fits")
# for qafile in glob.glob("{}/qa_target_count_*.fits".format(test_dir)):
# namemat = namepat.match(qafile)
# name = namemat.group(1)
# obs = int(namemat.group(2))
# if obs == 0:
# continue
# fd = fitsio.FITS(qafile, "r")
# fdata = fd["COUNTS"].read()
# # Sort by target ID so we can select easily
# fdata = np.sort(fdata, order="TARGETID")
# tgid = np.array(fdata["TARGETID"])
# counts = np.array(fdata["NUMOBS_DONE"])
# avail = np.array(fdata["NUMOBS_AVAIL"])
# del fdata
# fd.close()
#
# # Select target properties. BOTH TARGET LISTS MUST BE SORTED.
# rows = np.where(np.isin(scidata["TARGETID"], tgid, assume_unique=True))[0]
#
# ra = np.array(scidata["RA"][rows])
# dec = np.array(scidata["DEC"][rows])
# dtarget = np.array(scidata["DESI_TARGET"][rows])
# init = np.array(scidata["NUMOBS_INIT"][rows])
#
# requested = obs * np.ones_like(avail)
#
# under = np.where(avail < requested)[0]
# over = np.where(avail > requested)[0]
#
# limavail = np.array(avail)
# limavail[over] = obs
#
# deficit = np.zeros(len(limavail), dtype=np.int)
#
# deficit[:] = limavail - counts
# deficit[avail == 0] = 0
#
# possible[name] = np.sum(limavail)
# achieved[name] = np.sum(counts)
#
# log_msg += "{}-{}:\n".format(name, obs)
#
# pindx = np.where(deficit > 0)[0]
# poor_tgid = tgid[pindx]
# poor_dtarget = dtarget[pindx]
# log_msg += " Deficit > 0: {}\n".format(len(poor_tgid))
# poor_ra = ra[pindx]
# poor_dec = dec[pindx]
# poor_deficit = deficit[pindx]
#
# # Plot Target availability
# # Commented out by default, since in the case of high target density
# # needed for maximizing assignments, there are far more targets than
# # the number of available fiber placements.
#
# # marksize = 4 * np.ones_like(deficit)
# #
# # fig = plt.figure(figsize=(12, 12))
# # ax = fig.add_subplot(1, 1, 1)
# # ax.scatter(ra, dec, s=2, c="black", marker="o")
# # for pt, pr, pd, pdef in zip(poor_tgid, poor_ra, poor_dec, poor_deficit):
# # ploc = plt.Circle(
# # (pr, pd), radius=(0.05*pdef), fc="none", ec="red"
# # )
# # ax.add_artist(ploc)
# # ax.set_xlabel("RA", fontsize="large")
# # ax.set_ylabel("DEC", fontsize="large")
# # ax.set_title(
# # "Target \"{}\": (min(avail, requested) - counts) > 0".format(
# # name, obs
# # )
# # )
# # #ax.legend(handles=lg, framealpha=1.0, loc="upper right")
# # plt.savefig(os.path.join(test_dir, "{}-{}_deficit.pdf".format(name, obs)), dpi=300, format="pdf")
#
# log_msg += \
# "Assigned {} tiles for total of {} possible target observations\n".format(
# len(tile_ids), nassign
# )
# ach = 0
# for nm in possible.keys():
# ach += achieved[nm]
# log_msg += \
# " type {} had {} possible target obs and achieved {}\n".format(
# nm, possible[nm], achieved[nm]
# )
# frac = 100.0 * ach / nassign
# log_msg += \
# " {} / {} = {:0.2f}% of fibers were assigned\n".format(
# ach, nassign, frac
# )
# for nm in possible.keys():
# log_msg += \
# " type {} had {:0.2f}% of achieved observations\n".format(
# nm, achieved[nm] / ach
# )
# with open(log_file, "w") as f:
# f.write(log_msg)
#
# self.assertGreaterEqual(frac, 99.0)
# Test if qa-fiberassign script runs without crashing
script = os.path.join(self.binDir, "qa-fiberassign")
if os.path.exists(script):
fafiles = glob.glob(f"{test_dir}/fiberassign-*.fits")
cmd = "{} --targets {}".format(script, " ".join(fafiles))
err = subprocess.call(cmd.split())
self.assertEqual(err, 0, f"FAILED ({err}): {cmd}")
else:
print(f"ERROR: didn't find {script}")
def test_suite():
"""Allows testing of only this module with the command::
python setup.py test -m <modulename>
"""
return unittest.defaultTestLoader.loadTestsFromName(__name__)
|
the-stack_0_15577 | # -*- coding: utf-8 -*-
# MIT License
#
# Copyright 2018-2021 New York University Abu Dhabi
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
""" Fine-tuning pre-trained models for token classification tasks.
Heavily adapted from: https://github.com/huggingface/transformers/blob/
v3.0.1/examples/token-classification/run_ner.py"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import (
accuracy_score as seq_accuracy_score,
f1_score as seq_f1_score,
precision_score as seq_precision_score,
recall_score as seq_recall_score,
classification_report as seq_classification_report
)
from sklearn.metrics import (
accuracy_score,
f1_score,
precision_score,
recall_score
)
from torch import nn
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from utils import TokenClassificationDataSet, Split, get_labels
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are
going to fine-tune from.
"""
model_name_or_path: str = field(
metadata={"help": "Path to pretrained model or model identifier from "
"huggingface.co/models"}
)
config_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained config name or path if "
"not the same as model_name"}
)
# If you want to tweak more attributes on your tokenizer, you should do it
# in a distinct script, or just modify its tokenizer_config.json.
tokenizer_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained tokenizer name or path if "
"not the same as model_name"}
)
use_fast: bool = field(default=False, metadata={"help": "Set this flag to "
"use fast "
"tokenization."})
task_type: Optional[str] = field(
default="ner", metadata={"help": "the name of the task (ner or pos)"}
)
cache_dir: Optional[str] = field(
default=None, metadata={"help": "Where do you want to store the "
"pretrained models downloaded from s3"}
)
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for
training and eval.
"""
data_dir: str = field(
metadata={"help": "The input data dir. Should contain the .txt files "
"for a CoNLL-2003-formatted task."}
)
labels: Optional[str] = field(
default=None,
metadata={"help": "Path to a file containing all labels."},
)
max_seq_length: int = field(
default=128,
metadata={
"help": "The maximum total input sequence length after "
"tokenization. Sequences longer than this will be truncated, "
"sequences shorter will be padded."
},
)
overwrite_cache: bool = field(
default=False, metadata={"help": "Overwrite the cached training and "
"evaluation sets"}
)
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments,
DataTrainingArguments,
TrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a
# json file, let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(
json_file=os.path.abspath(
sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir)
and os.listdir(training_args.output_dir)
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists "
"and is not empty. Use --overwrite_output_dir to overcome."
)
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=(logging.INFO if training_args.local_rank in [-1, 0]
else logging.WARN),
)
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, "
"16-bits training: %s",
training_args.local_rank,
training_args.device,
training_args.n_gpu,
bool(training_args.local_rank != -1),
training_args.fp16,
)
logger.info("Training/evaluation parameters %s", training_args)
# Set seed
set_seed(training_args.seed)
# Prepare task
labels = get_labels(data_args.labels)
label_map: Dict[int, str] = {i: label for i, label in enumerate(labels)}
num_labels = len(labels)
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can
# concurrently download model & vocab.
config = AutoConfig.from_pretrained(
(model_args.config_name if model_args.config_name
else model_args.model_name_or_path),
num_labels=num_labels,
id2label=label_map,
label2id={label: i for i, label in enumerate(labels)},
cache_dir=model_args.cache_dir,
)
tokenizer = AutoTokenizer.from_pretrained(
(model_args.tokenizer_name if model_args.tokenizer_name
else model_args.model_name_or_path),
cache_dir=model_args.cache_dir,
use_fast=model_args.use_fast,
)
model = AutoModelForTokenClassification.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
)
# Get datasets
train_dataset = (
TokenClassificationDataSet(
data_dir=data_args.data_dir,
tokenizer=tokenizer,
labels=labels,
model_type=config.model_type,
max_seq_length=data_args.max_seq_length,
overwrite_cache=data_args.overwrite_cache,
mode=Split.train,
)
if training_args.do_train
else None
)
eval_dataset = (
TokenClassificationDataSet(
data_dir=data_args.data_dir,
tokenizer=tokenizer,
labels=labels,
model_type=config.model_type,
max_seq_length=data_args.max_seq_length,
overwrite_cache=data_args.overwrite_cache,
mode=Split.dev,
)
if training_args.do_eval
else None
)
def align_predictions(predictions: np.ndarray,
label_ids: np.ndarray) -> Tuple[List[int], List[int]]:
preds = np.argmax(predictions, axis=2)
batch_size, seq_len = preds.shape
out_label_list = [[] for _ in range(batch_size)]
preds_list = [[] for _ in range(batch_size)]
for i in range(batch_size):
for j in range(seq_len):
if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
out_label_list[i].append(label_map[label_ids[i][j]])
preds_list[i].append(label_map[preds[i][j]])
return preds_list, out_label_list
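    # align_predictions argmaxes the (batch, seq_len, num_labels) logits and drops
    # every position whose gold label id equals the CrossEntropyLoss ignore_index
    # (typically padding and sub-word continuation tokens), so preds_list and
    # out_label_list hold equal-length lists of label strings for each example.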
def compute_metrics(p: EvalPrediction) -> Dict:
preds_list, out_label_list = align_predictions(p.predictions,
p.label_ids)
# If task type is NER, use seqeval metrics.
# Otherwise, use scikit learn
if model_args.task_type == "ner":
return {
"accuracy": seq_accuracy_score(out_label_list, preds_list),
"precision": seq_precision_score(out_label_list, preds_list),
"recall": seq_recall_score(out_label_list, preds_list),
"f1": seq_f1_score(out_label_list, preds_list),
"matrix": seq_classification_report(out_label_list, preds_list)
}
else:
# Flatten the preds_list and out_label_list
preds_list = [p for sublist in preds_list for p in sublist]
out_label_list = [p for sublist in out_label_list for p in sublist]
return {
"accuracy": accuracy_score(out_label_list, preds_list),
"precision_micro": precision_score(out_label_list, preds_list,
average="micro"),
"recall_micro": recall_score(out_label_list, preds_list,
average="micro"),
"f1_micro": f1_score(out_label_list, preds_list,
average="micro"),
"precision_macro": precision_score(out_label_list, preds_list,
average="macro"),
"recall_macro": recall_score(out_label_list, preds_list,
average="macro"),
"f1_macro": f1_score(out_label_list, preds_list,
average="macro"),
}
# Initialize our Trainer
trainer = Trainer(
model=model,
args=training_args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
compute_metrics=compute_metrics,
)
# Training
if training_args.do_train:
trainer.train(
model_path=(model_args.model_name_or_path
if os.path.isdir(model_args.model_name_or_path)
else None)
)
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir)
# Evaluation
results = {}
if training_args.do_eval:
logger.info("*** Evaluate ***")
result = trainer.evaluate()
output_eval_file = os.path.join(training_args.output_dir,
"eval_results.txt")
if trainer.is_world_process_zero():
with open(output_eval_file, "w") as writer:
logger.info("***** Eval results *****")
for key, value in result.items():
logger.info(" %s = %s", key, value)
writer.write("%s = %s\n" % (key, value))
results.update(result)
# Predict
if training_args.do_predict:
test_dataset = TokenClassificationDataSet(
data_dir=data_args.data_dir,
tokenizer=tokenizer,
labels=labels,
model_type=config.model_type,
max_seq_length=data_args.max_seq_length,
overwrite_cache=data_args.overwrite_cache,
mode=Split.test,
)
predictions, label_ids, metrics = trainer.predict(test_dataset)
preds_list, _ = align_predictions(predictions, label_ids)
output_test_results_file = os.path.join(training_args.output_dir,
"test_results.txt")
if trainer.is_world_process_zero():
with open(output_test_results_file, "w") as writer:
for key, value in metrics.items():
logger.info(" %s = %s", key, value)
writer.write("%s = %s\n" % (key, value))
# Save predictions
output_test_predictions_file = os.path.join(training_args.output_dir,
"test_predictions.txt")
if trainer.is_world_process_zero():
with open(output_test_predictions_file, "w") as writer:
with open(os.path.join(data_args.data_dir, "test.txt"), "r") as f:
example_id = 0
for line in f:
if (line.startswith("-DOCSTART-") or line == ""
or line == "\n"):
writer.write(line)
if not preds_list[example_id]:
example_id += 1
elif preds_list[example_id]:
output_line = (line.split()[0] + " " +
preds_list[example_id].pop(0) + "\n")
writer.write(output_line)
else:
logger.warning(
"Maximum sequence length exceeded: "
"No prediction for '%s'.", line.split()[0])
return results
if __name__ == "__main__":
main()
|
the-stack_0_15579 | # Copyright (c) 2015 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from neutron.tests import base
from neutron_vpnaas.services.vpn.common import netns_wrapper as nswrap
class TestNetnsWrapper(base.BaseTestCase):
def setUp(self):
super(TestNetnsWrapper, self).setUp()
patch_methods = ['filter_command',
'execute',
'setup_conf']
for method in patch_methods:
self.patch_obj(nswrap, method)
patch_classes = ['neutron.common.config.setup_logging',
'os.path.isdir',
'os.path.samefile',
'sys.exit']
for cls in patch_classes:
self.patch_cls(cls)
self.filter_command.return_value = False
self.execute.return_value = 0
self.conf = mock.Mock()
self.conf.cmd = 'ls,-al'
self.conf.mount_paths = {'/foo': '/dir/foo',
'/var': '/dir/var'}
self.setup_conf.return_value = self.conf
self.conf.rootwrap_config = 'conf'
self.isdir.return_value = True
self.samefile.return_value = False
def patch_obj(self, obj, method):
_m = mock.patch.object(obj, method)
_mock = _m.start()
setattr(self, method, _mock)
def patch_cls(self, patch_class):
_m = mock.patch(patch_class)
mock_name = patch_class.split('.')[-1]
_mock = _m.start()
setattr(self, mock_name, _mock)
def test_netns_wrap_fail_without_netns(self):
self.samefile.return_value = True
return_val = nswrap.execute_with_mount()
self.assertTrue(return_val)
def test_netns_wrap(self):
self.conf.cmd = 'ls,-al'
return_val = nswrap.execute_with_mount()
exp_calls = [mock.call(['mount', '--bind', '/dir/foo', '/foo']),
mock.call(['mount', '--bind', '/dir/var', '/var']),
mock.call('ls,-al')]
self.execute.assert_has_calls(exp_calls, any_order=True)
self.assertFalse(return_val)
def test_netns_wrap_fail_without_cmd(self):
self.conf.cmd = None
return_val = nswrap.execute_with_mount()
self.assertFalse(self.execute.called)
self.assertTrue(return_val)
def test_netns_wrap_fail_without_mount_paths(self):
self.conf.mount_paths = None
return_val = nswrap.execute_with_mount()
self.assertFalse(self.execute.called)
self.assertTrue(return_val)
|
the-stack_0_15580 | import argparse
import os
import random
import sys
import time
import struct
from collections import Counter
from collections import deque
from operator import itemgetter
from tempfile import NamedTemporaryFile as NTF
import SharedArray as sa
import numpy as np
from numba import jit
from text_embedding.documents import *
FLOAT = np.float32
INT = np.uint32
CHUNK = 1000000
STORE = 10*CHUNK
FMT = 'iif'
NBYTES = 12
def vocab_count(corpusfile, vocabfile=None, min_count=1, verbose=True, comm=None):
'''counts word occurrences to determine vocabulary
Args:
corpusfile: corpus .txt file
vocabfile: output .txt file
min_count: minimum word count
verbose: display progress
comm: MPI Communicator
Returns:
[(word, count)] list if vocabfile is None ; else None
'''
rank, size = ranksize(comm)
if verbose:
write('Counting Words with Minimum Count '+str(min_count)+'\n', comm)
t = time.time()
with open(corpusfile, 'r') as f:
documents = (line for i, line in enumerate(f) if i%size == rank)
counts = Counter(w for doc in documents for w in doc.split())
if size > 1:
counts = comm.reduce(counts, root=0)
if not rank:
vocab = sorted((item for item in counts.items() if item[1] >= min_count), key=itemgetter(1), reverse=True)
if verbose:
write('Counted '+str(len(vocab))+' Words, Time='+str(round(time.time()-t))+' sec\n')
if vocabfile is None:
checkpoint(comm)
return vocab
with open(vocabfile, 'w') as f:
for word, count in vocab:
f.write(word+' '+str(count)+'\n')
checkpoint(comm)
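# Example usage (hypothetical file names), single process:
#   vocab_count('corpus.txt', vocabfile='vocab.txt', min_count=5)
# or, to get the sorted (word, count) list back instead of writing a file:
#   vocab = vocab_count('corpus.txt', min_count=5)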
@jit
def doc2cooc(indices, weights, window_size, V):
row, col, val = [], [], []
start = 0
for i, index in enumerate(indices):
if index != V:
for w, other in zip(weights[start-i:], indices[start:i]):
if other != V:
if index < other:
row.append(index)
col.append(other)
else:
row.append(other)
col.append(index)
val.append(w)
start += i >= window_size
return row, col, val
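# Worked example (hypothetical indices): for indices [0, 1, 2] with window_size=2
# and all tokens in-vocabulary, doc2cooc emits (0,1) and (1,2) with weight 1.0
# (distance 1) and (0,2) with weight 0.5 (distance 2); any index equal to V
# (out-of-vocabulary) is skipped.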
@jit
def doc2cooc_unweighted(indices, window_size, V):
row, col = [], []
start = 0
for i, index in enumerate(indices):
if index != V:
for other in indices[start:i]:
if other != V:
if index < other:
row.append(index)
col.append(other)
else:
row.append(other)
col.append(index)
start += i >= window_size
return row, col
def counts2bin(counts, f):
for (i, j), v in counts.items():
f.write(struct.pack(FMT, i, j, v))
def bin2counts(f, counts, subset):
position = f.tell()
ncooc = int((f.seek(0, 2)-position)/NBYTES)
f.seek(position)
for cooc in range(ncooc):
i, j, v = struct.unpack(FMT, f.read(NBYTES))
if i in subset:
counts[(i, j)] += v
# NOTE: Result is highly non-random and contains only upper triangular entries
def cooc_count(corpusfile, vocabfile, coocfile, window_size=10, unweighted=False, verbose=True, comm=None):
'''counts word cooccurrence in a corpus
Args:
corpusfile: corpus .txt file
vocabfile: vocab .txt file
coocfile: cooccurrence .bin file
window_size: length of cooccurrence window
unweighted: do not weight cooccurrence by distance
verbose: display progress
comm: MPI Communicator
Returns:
None
'''
rank, size = ranksize(comm)
with open(vocabfile, 'r') as f:
word2index = {line.split()[0]: INT(i) for i, line in enumerate(f)}
if unweighted:
one = FLOAT(1)
else:
weights = np.fromiter((1.0/d for d in reversed(range(1, window_size+1))), FLOAT, window_size)
V = INT(len(word2index))
counts = Counter()
if verbose:
write('\rCounting Cooccurrences with Window Size '+str(window_size)+'\n', comm)
lines = 0
t = time.time()
if size > 1:
random.seed(0)
idx = list(range(V))
random.shuffle(idx)
start, stop = int(rank/size*V), int((rank+1)/size*V)
subset = set(idx[start:stop])
positions = [0]*size
with open(corpusfile, 'r') as f:
n = 0
while True:
v = None
with NTF() as tmp:
dump = Counter()
files = comm.allgather(tmp.name)
for k, line in enumerate(f):
if k%size == rank:
doc = line.split()
if unweighted:
for i, j in zip(*doc2cooc_unweighted(np.fromiter((word2index.get(word, V) for word in doc), INT, len(doc)), window_size, V)):
if i in subset:
counts[(i, j)] += one
else:
dump[(i, j)] += one
else:
for i, j, v in zip(*doc2cooc(np.fromiter((word2index.get(word, V) for word in doc), INT, len(doc)), weights, window_size, V)):
if i in subset:
counts[(i, j)] += v
else:
dump[(i, j)] += v
if not (k+1)%CHUNK:
counts2bin(dump, tmp)
dump = Counter()
if verbose:
write('\rProcessed '+str(n+k+1)+' Lines, Time='+str(round(time.time()-t))+' sec', comm)
if not (k+1)%STORE:
n += k+1
break
counts2bin(dump, tmp)
tmp.flush()
for k in range(2):
for i, name in enumerate(files):
if i != rank:
with open(name, 'rb') as g:
g.seek(positions[i])
bin2counts(g, counts, subset)
positions[i] = g.tell() * (k == 0)
checkpoint(comm)
if verbose:
write('\rProcessed '+str(n)+' Lines, Time='+str(round(time.time()-t))+' sec', comm)
if not comm.allreduce(int(not v is None)):
break
if verbose:
write('\rCounted '+str(comm.allreduce(len(counts.items())))+' Cooccurrences, Time='+str(round(time.time()-t))+' sec\n', comm)
for k in range(size):
if k == rank:
mode = 'ab' if rank else 'wb'
with open(coocfile, mode) as f:
counts2bin(counts, f)
checkpoint(comm)
else:
with open(corpusfile, 'r') as f:
for k, line in enumerate(f):
doc = line.split()
if unweighted:
for i, j in zip(*doc2cooc_unweighted(np.fromiter((word2index.get(word, V) for word in doc), INT, len(doc)), window_size, V)):
counts[(i, j)] += one
else:
for i, j, v in zip(*doc2cooc(np.fromiter((word2index.get(word, V) for word in doc), INT, len(doc)), weights, window_size, V)):
counts[(i, j)] += v
if verbose and not (k+1)%CHUNK:
write('\rProcessed '+str(k+1)+' Lines, Time='+str(round(time.time()-t))+' sec')
if verbose:
write('\rCounted '+str(len(counts.items()))+' Cooccurrences, Time='+str(round(time.time()-t))+' sec\n')
with open(coocfile, 'wb') as f:
counts2bin(counts, f)
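# Example pipeline (hypothetical file names): build the vocabulary first, then
# count cooccurrences over a symmetric window of 10 tokens:
#   vocab_count('corpus.txt', vocabfile='vocab.txt', min_count=5)
#   cooc_count('corpus.txt', 'vocab.txt', 'cooc.bin', window_size=10)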
def reformat_coocfile(inputfile, outputfile):
    '''converts a full-matrix cooccurrence file to an upper-triangular cooccurrence file
Args:
inputfile: full-matrix binary cooccurrence file with index starting at 1 in format "int,int,double" (as created by original GloVe code)
outputfile: ouput binary file
Returns:
None
'''
with open(inputfile, 'rb') as f:
with open(outputfile, 'wb') as g:
while True:
try:
i, j, d = struct.unpack('iid', f.read(16))
except struct.error:
break
if i <= j:
g.write(struct.pack(FMT, INT(i-1), INT(j-1), FLOAT(d)))
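# Example (hypothetical file names): convert a binary file written by the original
# GloVe C code into the upper-triangular format used by the classes below:
#   reformat_coocfile('cooccurrence.bin', 'cooccurrence_upper.bin')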
# NOTE: Open using 'with ... as' to prevent too many open POSIX files
class SharedArrayManager:
_shared = []
def __init__(self, comm=None):
self._comm = comm
self._rank, self._size = ranksize(comm)
def __enter__(self):
return self
def __exit__(self, *args):
for array in self._shared:
try:
sa.delete(array)
except FileNotFoundError:
pass
def create(self, array=None, dtype=None):
comm, rank = self._comm, self._rank
if rank:
shared = sa.attach(comm.bcast(None, root=0))
else:
dtype = array.dtype if dtype is None else dtype
if self._size == 1:
return array.astype(dtype)
filename = str(time.time())
shared = sa.create(filename, array.shape, dtype=dtype)
shared += array.astype(dtype)
self._shared.append(comm.bcast(filename, root=0))
checkpoint(comm)
return shared
def splitcooc(f, ncooc=None):
row = deque()
col = deque()
if ncooc is None:
position = f.tell()
ncooc = int((f.seek(0, 2)-position)/NBYTES)
f.seek(position)
for cooc in range(ncooc):
i, j, xij = struct.unpack(FMT, f.read(NBYTES))
row.append(INT(i))
col.append(INT(j))
yield FLOAT(xij)
for idx in [row, col]:
for cooc in range(ncooc):
yield idx.popleft()
def symcooc(coocfile, comm=None):
rank, size = ranksize(comm)
with open(coocfile, 'rb') as f:
flength = f.seek(0, 2)
offset = int(flength*rank/size / NBYTES)
ncooc = int(flength*(rank+1)/size / NBYTES) - offset
f.seek(NBYTES*offset)
coocs = splitcooc(f, ncooc)
val = np.fromiter(coocs, FLOAT, ncooc)
row = np.fromiter(coocs, INT, ncooc)
col = np.fromiter(coocs, INT, ncooc)
sym = row < col
symcooc = ncooc + sum(sym)
values, rowdata, coldata = [np.empty(symcooc, dtype=dtype) for dtype in [FLOAT, INT, INT]]
values[:ncooc], rowdata[:ncooc], coldata[:ncooc] = val, row, col
values[ncooc:], rowdata[ncooc:], coldata[ncooc:] = val[sym], col[sym], row[sym]
return values, rowdata, coldata
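# symcooc reads this rank's slice of the upper-triangular cooccurrence file and
# mirrors every strictly upper-triangular entry (row < col) so that downstream
# solvers see a symmetric list of (value, row, col) triples.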
# NOTE: Open using 'with ... as' to prevent too many open POSIX files
class GloVe(SharedArrayManager):
def _load_cooc_data(self, coocfile, alpha, xmax):
data, self.row, self.col = symcooc(coocfile, self._comm)
self.logcooc = np.log(data)
data /= FLOAT(xmax)
mask = data<1.0
data[mask] **= FLOAT(alpha)
data[~mask] = FLOAT(1.0)
self.weights = data
self.ncooc = data.shape[0]
self._cooc_data = [self.row, self.col, self.weights, self.logcooc]
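    # _load_cooc_data applies the standard GloVe weighting in place:
    # self.weights holds f(X_ij) = min((X_ij / xmax)**alpha, 1) and
    # self.logcooc holds log(X_ij).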
def _shuffle_cooc_data(self, seed):
for data in self._cooc_data:
np.random.seed(seed)
np.random.shuffle(data)
@staticmethod
def _shapes(V, d):
return [(V, d)]*2 + [(V,)]*2
def _init_vecs(self, shapes, d, seed, init):
create = self.create
if self._rank:
self._params = [create() for shape in shapes]
elif init is None:
np.random.seed(seed)
self._params = [create((np.random.rand(*shape)-0.5)/d, dtype=FLOAT) for shape in shapes]
else:
self._params = [create(param, dtype=FLOAT) for param in init]
def __init__(self, coocfile, V=None, d=None, seed=None, init=None, alpha=0.75, xmax=100.0, comm=None):
'''
Args:
coocfile: binary cooccurrence file (assumed to have only upper triangular entries)
V: vocab size
d: vector dimension
seed: random seed for initializing vectors
init: tuple of numpy arrays to initialize parameters
alpha: GloVe weighting parameter
xmax: GloVe max cooccurrence parameter
comm: MPI Communicator
'''
super().__init__(comm=comm)
self._load_cooc_data(coocfile, alpha, xmax)
assert not (init is None and (V is None or d is None)), "'V' and 'd' must be defined if 'init' not given"
self._init_vecs(self._shapes(V, d), d, seed, init)
def embeddings(self):
'''returns GloVe embeddings using current parameters
Returns:
numpy array of size V x d
'''
return sum(self._params[:2]) / FLOAT(2.0)
def dump(self, fid):
'''dumps GloVe embeddings to binary file
Args:
fid: open file object or filename string
Returns:
None
'''
if not self._rank:
self.embeddings().tofile(fid)
_pnames = ['wv', 'cv', 'wb', 'cb']
_numpar = 4
def save(self, fid):
'''saves parameters to HDF5 file
Args:
fid: filename string
Returns:
None
'''
import h5py
if not self._rank:
f = h5py.File(fid)
for name, param in zip(self._pnames, self._params[:self._numpar]):
f.create_dataset(name, data=param)
f.close()
@staticmethod
@jit
def predict(i, j, wv, cv, wb, cb):
return np.dot(wv[i].T, cv[j])+wb[i]+cb[j]
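    # loss() evaluates the normalized GloVe objective,
    #   sum_ij f(X_ij) * (w_i . c_j + b_i + b_j - log X_ij)^2 / ncooc,
    # summing over the locally held cooccurrence entries and reducing across MPI ranks.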
def loss(self):
row, col = self.row, self.col
ncooc = self.ncooc
checkpoint(self._comm)
params = self._params[:self._numpar]
predict = self.predict
errors = np.fromiter((predict(i, j, *params) for i, j in zip(row, col)), FLOAT, ncooc) - self.logcooc
loss = np.inner(self.weights*errors, errors)
if self._size > 1:
ncooc = self._comm.allreduce(ncooc)
return self._comm.allreduce(loss/ncooc)
return loss/ncooc
@staticmethod
@jit
def sgd_epoch(row, col, weights, logcoocs, wv, cv, wb, cb, ncooc, eta):
etax2 = FLOAT(2.0*eta)
loss = FLOAT(0.0)
for i, j, weight, logcooc in zip(row, col, weights, logcoocs):
wvi, cvj = wv[i], cv[j]
error = np.dot(wvi.T, cvj) + wb[i] + cb[j] - logcooc
werror = weight*error
coef = werror*etax2
upd = coef*cvj
cvj -= coef*wvi
wvi -= upd
wb[i] -= coef
cb[j] -= coef
loss += werror*error
return loss / ncooc
def sgd(self, epochs=25, eta=0.01, seed=None, verbose=True, cumulative=True):
'''runs SGD on GloVe objective
Args:
epochs: number of epochs
eta: learning rate
seed: random seed for cooccurrence shuffling
verbose: write loss and time information
cumulative: compute cumulative loss instead of true loss; ignored if not verbose
Returns:
None
'''
comm, rank, size = self._comm, self._rank, self._size
random.seed(seed)
if verbose:
write('\rRunning '+str(epochs)+' Epochs of SGD with Learning Rate '+str(eta)+'\n', comm)
if not cumulative:
write('\rInitial Loss='+str(self.loss())+'\n', comm)
ncooc = comm.allreduce(self.ncooc)
t = time.time()
for ep in range(epochs):
if verbose:
write('Epoch '+str(ep+1), comm)
self._shuffle_cooc_data(random.randint(0, 2**32-1))
loss = self.sgd_epoch(*self._cooc_data, *self._params, ncooc, eta)
if verbose:
loss = comm.allreduce(loss) if cumulative else self.loss()
checkpoint(comm)
if verbose:
write(': Loss='+str(loss)+', Time='+str(round(time.time()-t))+' sec\n', comm)
t = time.time()
@staticmethod
@jit
def adagrad_epoch(row, col, weights, logcoocs, wv, cv, wb, cb, ssg_wv, ssg_cv, ssg_wb, ssg_cb, ncooc, eta):
eta = FLOAT(eta)
two = FLOAT(2.0)
loss = FLOAT(0.0)
for i, j, weight, logcooc in zip(row, col, weights, logcoocs):
wvi, cvj = wv[i], cv[j]
ssg_wvi, ssg_cvj = ssg_wv[i], ssg_cv[j]
error = np.dot(wvi.T, cvj) + wb[i] + cb[j] - logcooc
werror = weight*error
coef = two*werror
updi = coef*cvj
updj = coef*wvi
reg_wvi = np.sqrt(ssg_wvi)
reg_cvj = np.sqrt(ssg_cvj)
ssg_wvi += updi ** 2
ssg_cvj += updj ** 2
wvi -= eta * updi / reg_wvi
cvj -= eta * updj / reg_cvj
reg_wbi = np.sqrt(ssg_wb[i])
reg_cbj = np.sqrt(ssg_cb[j])
coefsq = coef ** 2
ssg_wb[i] += coefsq
ssg_cb[j] += coefsq
coef *= eta
wb[i] -= coef / reg_wbi
cb[j] -= coef / reg_cbj
loss += werror*error
return loss / ncooc
def adagrad(self, epochs=25, eta=0.05, seed=None, verbose=True, cumulative=True):
'''runs AdaGrad on GloVe objective
Args:
epochs: number of epochs
eta: learning rate
seed: random seed for cooccurrence shuffling
verbose: write loss and time information
cumulative: compute cumulative loss instead of true loss; ignored if not verbose
Returns:
None
'''
comm, rank, size = self._comm, self._rank, self._size
random.seed(seed)
if not hasattr(self, '_ssg'):
self._ssg = [self.create(np.ones(param.shape, dtype=FLOAT)) for param in self._params[:self._numpar]]
if verbose:
write('\rRunning '+str(epochs)+' Epochs of AdaGrad with Learning Rate '+str(eta)+'\n', comm)
if not cumulative:
write('\rInitial Loss='+str(self.loss())+'\n', comm)
ncooc = comm.allreduce(self.ncooc)
t = time.time()
for ep in range(epochs):
if verbose:
write('Epoch '+str(ep+1), comm)
self._shuffle_cooc_data(random.randint(0, 2**32-1))
loss = self.adagrad_epoch(*self._cooc_data, *self._params, *self._ssg, ncooc, eta)
if verbose:
loss = comm.allreduce(loss) if cumulative else self.loss()
checkpoint(comm)
if verbose:
write(': Loss='+str(loss)+', Time='+str(round(time.time()-t))+' sec\n', comm)
t = time.time()
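# Minimal end-to-end sketch (hypothetical file names; single process, no MPI):
#   with GloVe('cooc.bin', V=vocab_size, d=300, seed=0) as solver:
#       solver.adagrad(epochs=25, eta=0.05)
#       solver.dump('vectors.bin')  # writes the V x d float32 embedding matrix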
# NOTE: Open using 'with ... as' to prevent too many open POSIX files
class SN(GloVe):
@staticmethod
def _shapes(V, d):
return [(V, d), (1,)]
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def embeddings(self):
return self._params[0]
_pnames = ['wv', 'b']
_numpar = 2
@staticmethod
@jit
def predict(i, j, wv, b):
sumij = wv[i] + wv[j]
return np.dot(sumij.T, sumij) + b[0]
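    # SN models log X_ij as ||w_i + w_j||^2 + b: a single set of word vectors with
    # one shared scalar bias, in place of GloVe's separate word/context parameters.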
@staticmethod
@jit
def sgd_epoch(row, col, weights, logcoocs, wv, b, ncooc, eta):
etax2 = FLOAT(2.0*eta)
two = FLOAT(2.0)
loss = FLOAT(0.0)
for i, j, weight, logcooc in zip(row, col, weights, logcoocs):
wvi, wvj = wv[i], wv[j]
sumij = wvi + wvj
error = np.dot(sumij.T, sumij) + b[0] - logcooc
werror = weight*error
coef = werror*etax2
b -= coef
upd = (two*coef)*sumij
wvi -= upd
wvj -= upd
loss += werror * error
return loss / ncooc
@staticmethod
@jit
def adagrad_epoch(row, col, weights, logcoocs, wv, b, ssg_wv, ssg_b, ncooc, eta):
eta = FLOAT(eta)
two = FLOAT(2.0)
loss = FLOAT(0.0)
for i, j, weight, logcooc in zip(row, col, weights, logcoocs):
wvi, wvj = wv[i], wv[j]
ssg_wvi, ssg_wvj = ssg_wv[i], ssg_wv[j]
sumij = wvi + wvj
error = np.dot(sumij.T, sumij) + b[0] - logcooc
werror = weight*error
coef = two*werror
reg_b = np.sqrt(ssg_b)
ssg_b += coef ** 2
            b -= eta*coef / reg_b
upd = (two*coef)*sumij
updsq = upd ** 2
reg_wvi = np.sqrt(ssg_wvi)
ssg_wvi += updsq
reg_wvj = np.sqrt(ssg_wvj)
ssg_wvj += updsq
upd *= eta
wvi -= upd / reg_wvi
wvj -= upd / reg_wvj
loss += werror * error
return loss / ncooc
# NOTE: Open using 'with ... as' to prevent too many open POSIX files
class RegularizedGloVe(GloVe):
def _word_cooc_counts(self, V):
counts = Counter(self.row)+Counter(self.col)
array = np.fromiter((counts[i] for i in range(V)), INT, V)
if self._size > 1:
output = None if self._rank else np.empty(V, dtype=INT)
self._comm.Reduce(array, output, root=0)
return output
return array
def __init__(self, src, *args, reg=1.0, **kwargs):
super().__init__(*args, **kwargs)
create = self.create
params = self._params
params.append(create(src, dtype=FLOAT))
params.append(FLOAT(reg))
params.append(create(self._word_cooc_counts(src.shape[0]), dtype=FLOAT))
oloss = self.loss
if self._rank:
self.loss = lambda: oloss() + self._comm.bcast(None, root=0)
else:
rloss = lambda: reg/src.shape[0]*norm(self.embeddings()-src)**2
if self._size > 1:
self.loss = lambda: oloss() + self._comm.bcast(rloss(), root=0)
else:
self.loss = lambda: oloss() + rloss()
@staticmethod
@jit
def sgd_epoch(row, col, weights, logcoocs, wv, cv, wb, cb, src, reg, wcc, ncooc, eta):
etax2 = FLOAT(2.0*eta)
two = FLOAT(2.0)
regoV = FLOAT(reg / wcc.shape[0])
regcoef = FLOAT(eta * ncooc * regoV)
oloss = FLOAT(0.0)
rloss = FLOAT(0.0)
for i, j, weight, logcooc in zip(row, col, weights, logcoocs):
wvi, cvj, wcci, wccj = wv[i], cv[j], wcc[i], wcc[j]
error = np.dot(wvi.T, cvj) + wb[i] + cb[j] - logcooc
werror = weight*error
coef = werror*etax2
diffi = (wvi+cv[i])/two - src[i]
diffj = (wv[j]+cvj)/two - src[j]
upd = coef*cvj + (regcoef/wcci)*diffi
cvj -= coef*wvi + (regcoef/wccj)*diffj
wvi -= upd
wb[i] -= coef
cb[j] -= coef
oloss += werror*error
rloss += np.dot(diffi.T, diffi)/wcci + np.dot(diffj.T, diffj)/wccj
return (oloss + regoV*rloss) / ncooc
@staticmethod
@jit
def adagrad_epoch(row, col, weights, logcoocs, wv, cv, wb, cb, src, reg, wcc, ssg_wv, ssg_cv, ssg_wb, ssg_cb, ncooc, eta):
eta = FLOAT(eta)
two = FLOAT(2.0)
regoV = FLOAT(reg / wcc.shape[0])
regcoef = FLOAT(ncooc * regoV)
oloss = FLOAT(0.0)
rloss = FLOAT(0.0)
for i, j, weight, logcooc in zip(row, col, weights, logcoocs):
wvi, cvj, wcci, wccj = wv[i], cv[j], wcc[i], wcc[j]
ssg_wvi, ssg_cvj = ssg_wv[i], ssg_cv[j]
error = np.dot(wvi.T, cvj) + wb[i] + cb[j] - logcooc
werror = weight*error
coef = two*werror
diffi = (wvi+cv[i])/two - src[i]
diffj = (wv[j]+cvj)/two - src[j]
updi = coef*cvj + (regcoef/wcci)*diffi
updj = coef*wvi + (regcoef/wccj)*diffj
reg_wvi = np.sqrt(ssg_wvi)
reg_cvj = np.sqrt(ssg_cvj)
ssg_wvi += updi ** 2
ssg_cvj += updj ** 2
wvi -= eta * updi / reg_wvi
cvj -= eta * updj / reg_cvj
reg_wbi = np.sqrt(ssg_wb[i])
reg_cbj = np.sqrt(ssg_cb[j])
coefsq = coef ** 2
ssg_wb[i] += coefsq
ssg_cb[j] += coefsq
coef *= eta
wb[i] -= coef / reg_wbi
cb[j] -= coef / reg_cbj
oloss += werror*error
rloss += np.dot(diffi.T, diffi)/wcci + np.dot(diffj.T, diffj)/wccj
return (oloss + regoV*rloss) / ncooc
# NOTE: Open using 'with ... as' to prevent too many open POSIX files
class RegularizedSN(SN, RegularizedGloVe):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@staticmethod
@jit
def sgd_epoch(row, col, weights, logcoocs, wv, b, src, reg, wcc, ncooc, eta):
etax2 = FLOAT(2.0*eta)
two = FLOAT(2.0)
regoV = FLOAT(reg / wcc.shape[0])
regcoef = FLOAT(etax2 * ncooc * regoV)
oloss = FLOAT(0.0)
rloss = FLOAT(0.0)
for i, j, weight, logcooc in zip(row, col, weights, logcoocs):
wvi, wvj, wcci, wccj = wv[i], wv[j], wcc[i], wcc[j]
sumij = wvi + wvj
error = np.dot(sumij.T, sumij) + b[0] - logcooc
werror = weight*error
coef = werror*etax2
b -= coef
diffi = wvi - src[i]
diffj = wvj - src[j]
upd = (two*coef)*sumij
wvi -= upd + (regcoef/wcci)*diffi
wvj -= upd + (regcoef/wccj)*diffj
oloss += werror*error
rloss += np.dot(diffi.T, diffi)/wcci + np.dot(diffj.T, diffj)/wccj
return (oloss + regoV*rloss) / ncooc
@staticmethod
@jit
def adagrad_epoch(row, col, weights, logcoocs, wv, b, src, reg, wcc, ssg_wv, ssg_b, ncooc, eta):
eta = FLOAT(eta)
two = FLOAT(2.0)
regoV = FLOAT(reg / wcc.shape[0])
regcoef = FLOAT(ncooc * regoV)
oloss = FLOAT(0.0)
rloss = FLOAT(0.0)
for i, j, weight, logcooc in zip(row, col, weights, logcoocs):
wvi, wvj, wcci, wccj = wv[i], wv[j], wcc[i], wcc[j]
ssg_wvi, ssg_wvj = ssg_wv[i], ssg_wv[j]
sumij = wvi + wvj
error = np.dot(sumij.T, sumij) + b[0] - logcooc
werror = weight*error
coef = two*werror
reg_b = np.sqrt(ssg_b)
ssg_b += coef ** 2
            b -= eta*coef / reg_b
diffi = wvi - src[i]
diffj = wvj - src[j]
upd = (two*coef)*sumij
updi = upd + (regcoef/wcci)*diffi
updj = upd + (regcoef/wccj)*diffj
regi = np.sqrt(ssg_wvi)
regj = np.sqrt(ssg_wvj)
ssg_wvi += updi ** 2
ssg_wvj += updj ** 2
            wvi -= eta * updi / regi
            wvj -= eta * updj / regj
oloss += werror*error
rloss += np.dot(diffi.T, diffi)/wcci + np.dot(diffj.T, diffj)/wccj
return (oloss + regoV*rloss) / ncooc
def align_params(params, srcvocab, tgtvocab, mean_fill=True):
output = []
for param in params:
if len(param.shape) == 1:
if param.shape[0] == 1:
output.append(param)
continue
shape = (len(tgtvocab),)
default = np.mean(param)
else:
shape = (len(tgtvocab), param.shape[1])
default = np.mean(param, axis=0)
array = np.empty(shape, dtype=FLOAT)
if not mean_fill:
default *= FLOAT(0.0)
w2e = dict(zip(srcvocab, param))
for i, w in enumerate(tgtvocab):
array[i] = w2e.get(w, default)
output.append(array)
return output
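# Hedged usage sketch for align_params (vocabularies and parameter names below
# are illustrative, not taken from any caller in this module):
#
#     src_vocab = ['the', 'cat', 'sat']
#     tgt_vocab = ['the', 'dog', 'sat', 'cat']
#     wv_aligned, b_aligned = align_params([wv, b], src_vocab, tgt_vocab)
#
# Rows of wv_aligned for words absent from src_vocab are filled with the mean
# embedding (or zeros when mean_fill=False); length-1 parameters such as a
# shared bias are passed through untouched.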
def induce_embeddings(srcvocab, srccooc, srcvecs, tgtvocab, tgtcooc, comm=None):
from scipy import sparse as sp
from sklearn.linear_model import LinearRegression as LR
rank, size = ranksize(comm)
Vsrc, d = srcvecs.shape
Vtgt = len(tgtvocab)
with SharedArrayManager(comm=comm) as sam:
write('Loading Source Cooccurrences\n', comm)
data, row, col = symcooc(srccooc, comm)
srcvecs = sam.create(srcvecs, dtype=FLOAT)
X = sp.csr_matrix((data, (row, col)), shape=(Vsrc, Vsrc), dtype=FLOAT)
write('Computing Source Counts\n', comm)
if size > 1:
C = None if rank else np.empty(Vsrc, dtype=FLOAT)
comm.Reduce(np.array(X.sum(1))[:,0], C, root=0)
C = sam.create(C)
else:
C = np.array(X.sum(1))[:,0]
write('Building Source Context Vectors\n', comm)
if size > 1:
U = None if rank else np.empty((Vsrc, d), dtype=FLOAT)
comm.Reduce(X.dot(srcvecs), U, root=0)
U = sam.create(U)
else:
U = X.dot(srcvecs)
U = U[C>0]
C = C[C>0]
start, stop = int(rank/size*Vsrc), int((rank+1)/size*Vsrc)
U[start:stop] /= C[start:stop, None]
checkpoint(comm)
write('Learning Induction Matrix\n', comm)
M = sam.create(np.zeros((d, d), dtype=FLOAT))
start, stop = int(rank/size*d), int((rank+1)/size*d)
M[:,start:stop] = LR(fit_intercept=False).fit(X[:,start:stop], srcvecs).coef_
checkpoint(comm)
write('Loading Target Cooccurrences\n', comm)
data, row, col = symcooc(tgtcooc, comm)
tgt2idx = {w: i for i, w in enumerate(tgtvocab)}
tgt2src = {tgt2idx.get(w): i for i, w in enumerate(srcvocab)}
zero = FLOAT(0.0)
for i, j in enumerate(col):
try:
col[i] = tgt2src[j]
except KeyError:
data[i] = zero
X = sp.csr_matrix((data, (row, col)), shape=(Vtgt, Vsrc), dtype=FLOAT)
X.eliminate_zeros()
write('Computing Target Counts\n', comm)
if size > 1:
C = None if rank else np.empty(Vtgt, dtype=FLOAT)
comm.Reduce(np.array(X.sum(1))[:,0], C, root=0)
C = sam.create(C)
else:
C = np.array(X.sum(1))[:,0]
write('Building Target Context Vectors\n', comm)
rank, size = ranksize(comm)
if size > 1:
U = None if rank else np.empty((Vtgt, d), dtype=FLOAT)
comm.Reduce(X.dot(srcvecs), U, root=0)
U = sam.create(U)
else:
U = X.dot(srcvecs)
        nz = int(np.sum(C > 0))
        start, stop = int(rank/size*nz), int((rank+1)/size*nz)
        # Chained boolean-then-slice indexing works on a copy, so pick the
        # nonzero rows explicitly for the in-place normalisation to stick.
        nzrows = np.flatnonzero(C > 0)[start:stop]
        U[nzrows] /= C[nzrows, None]
write('Computing Induced Embeddings\n', comm)
tgtvecs = sam.create(np.zeros((Vtgt, d), dtype=FLOAT))
tgtvecs[start:stop] = U[start:stop].dot(M.T)
checkpoint(comm)
if not rank:
return tgtvecs
def main(args, comm=None):
    if args.mode == 'vocab' or args.mode.startswith('thru'):
        vocab_count(args.input, args.vocab, args.min_count, args.verbose, comm)
    if args.mode == 'cooc' or args.mode.startswith('thru'):
        cooc_count(args.input, args.vocab, args.cooc, args.window_size, args.unweighted, args.verbose, comm)
Embedding = GloVe if args.mode[-5:] == 'glove' else SN if args.mode[-2:] == 'sn' else None
if Embedding is None:
        if args.mode not in {'vocab', 'cooc', 'thru-cooc'}:
            raise NotImplementedError(args.mode)
return
with open(args.vocab, 'r') as f:
V = len(f.readlines())
with Embedding(args.cooc, V, args.dimension, alpha=args.alpha, xmax=args.xmax, comm=comm) as E:
if args.sgd:
E.sgd(args.niter, args.eta, verbose=args.verbose)
else:
E.adagrad(args.niter, args.eta, verbose=args.verbose)
E.dump(args.output)
def parse():
parser = argparse.ArgumentParser(prog='python text_embeddings/solvers.py')
parser.add_argument('mode', help="'vocab', 'cooc', 'glove', 'sn', 'thru-cooc', 'thru-glove', or 'thru-sn'")
parser.add_argument('vocab', help='vocabulary .txt file')
parser.add_argument('-i', '--input', help='corpus .txt file')
parser.add_argument('-c', '--cooc', help='cooccurrence .bin file')
parser.add_argument('-o', '--output', help='embedding .bin file')
parser.add_argument('-m', '--min_count', default=1, help='minimum word count in corpus', type=int)
parser.add_argument('-w', '--window_size', default=10, help='size of cooccurrence window', type=int)
parser.add_argument('-u', '--unweighted', action='store_true', help='no distance weighting')
parser.add_argument('-d', '--dimension', default=300, help='embedding dimension', type=int)
parser.add_argument('-x', '--xmax', default=100.0, help='maximum cooccurrence', type=float)
parser.add_argument('-a', '--alpha', default=0.75, help='weighting exponent', type=float)
parser.add_argument('-s', '--sgd', action='store_true', help='use SGD')
parser.add_argument('-n', '--niter', default=25, help='number of training epochs', type=int)
parser.add_argument('-e', '--eta', default=0.05, help='learning rate', type=float)
parser.add_argument('-v', '--verbose', action='store_true', help='display output')
return parser.parse_args()
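# Illustrative invocations (file names and sizes are placeholders, not taken
# from any project documentation):
#
#     python text_embeddings/solvers.py thru-sn vocab.txt -i corpus.txt \
#         -c cooc.bin -o vectors.bin -d 100 -n 25 -v
#
# runs the full vocab -> cooccurrence -> SN pipeline, while
#
#     python text_embeddings/solvers.py glove vocab.txt -c cooc.bin -o out.bin
#
# only fits GloVe embeddings from an existing cooccurrence file.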
if __name__ == '__main__':
try:
from mpi4py import MPI
comm = MPI.COMM_WORLD
except ImportError:
comm = None
main(parse(), comm=comm)
|
the-stack_0_15581 | # Copyright (c) 2019-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import functools
import logging
import multiprocessing
import os
import signal
import sys
from multiprocessing import Event
from pathlib import Path
from typing import Any, Dict, List, NamedTuple
from .filesystem import acquire_lock, remove_if_exists
from .process import register_unique_process
LOG: logging.Logger = logging.getLogger(__name__)
Subscription = NamedTuple(
"Subscription", [("root", str), ("name", str), ("subscription", Dict[str, Any])]
)
class WatchmanSubscriber(object):
def __init__(self, base_path: str) -> None:
self._base_path: str = base_path
self._alive: bool = True
self._ready: multiprocessing.synchronize.Event = Event()
@property
def _name(self) -> str:
"""
A name to identify the subscriber. Used as the directory and file names
for the log, lock, and pid files.
"""
raise NotImplementedError
@property
def _subscriptions(self) -> List[Subscription]:
"""
List of subscriptions
"""
raise NotImplementedError
def _handle_response(self, response: Dict[str, Any]) -> None:
"""
Callback invoked when a message is received from watchman
"""
raise NotImplementedError
@staticmethod
def _compute_pid_path(base_path: str, name: str) -> str:
return str(Path(base_path, f"{name}.pid"))
@property
@functools.lru_cache(1)
def _watchman_client(self) -> "pywatchman.client": # noqa
try:
import pywatchman # noqa
# The client will block indefinitely when timeout is None.
return pywatchman.client(timeout=None)
except ImportError as exception:
LOG.info("Not starting %s due to %s", self._name, str(exception))
sys.exit(1)
def _subscribe_to_watchman(self, subscription: Subscription) -> None:
self._watchman_client.query("watch", subscription.root)
self._watchman_client.query(
"subscribe", subscription.root, subscription.name, subscription.subscription
)
def _run(self) -> None:
try:
os.makedirs(self._base_path)
except OSError:
pass
lock_path: str = os.path.join(self._base_path, "{}.lock".format(self._name))
LOG.debug(f"WatchmanSubscriber: Trying to acquire lock file {lock_path}.")
def cleanup() -> None:
LOG.info("Cleaning up lock and pid files before exiting.")
remove_if_exists(lock_path)
def interrupt_handler(_signal_number=None, _frame=None) -> None:
LOG.info("Interrupt signal received.")
cleanup()
sys.exit(0)
signal.signal(signal.SIGINT, interrupt_handler)
# Die silently if unable to acquire the lock.
with acquire_lock(lock_path, blocking=False), (
register_unique_process(
os.getpid(), self._compute_pid_path(self._base_path, self._name)
)
):
LOG.debug("Acquired lock on %s", lock_path)
file_handler = logging.FileHandler(
os.path.join(self._base_path, "%s.log" % self._name), mode="w"
)
file_handler.setFormatter(
logging.Formatter("%(asctime)s %(levelname)s %(message)s")
)
LOG.addHandler(file_handler)
subscriptions = self._subscriptions
for subscription in subscriptions:
self._subscribe_to_watchman(subscription)
if not subscriptions:
LOG.info("No watchman roots to subscribe to.")
connection = self._watchman_client.recvConn
if not connection:
LOG.error("Connection to Watchman for %s not found", self._name)
sys.exit(1)
while self._alive:
# This call is blocking, which prevents this loop from burning CPU.
response = connection.receive()
if response.get("is_fresh_instance", False):
LOG.info(
"Ignoring initial watchman message for %s",
response.get("root", "<no-root-found>"),
)
else:
self._handle_response(response)
self._ready.set() # At least one message has been received.
cleanup()
def daemonize(self) -> None:
"""We double-fork here to detach the daemon process from the parent.
If we were to just fork the child as a daemon, we'd have to worry about the
        parent process exiting and zombifying the daemon."""
LOG.debug("Daemonizing the %s.", self._name)
if os.fork() == 0:
pid = os.fork()
if pid == 0:
try:
LOG.propagate = False
# Closing the sys.stdout and stderr file descriptors here causes
# the program to crash when attempting to log.
os.close(sys.stdout.fileno())
os.close(sys.stderr.fileno())
self._run()
sys.exit(0)
except Exception as exception:
LOG.info("Not running %s due to %s", self._name, str(exception))
sys.exit(1)
else:
sys.exit(0)
@staticmethod
def stop_subscriber(base_path: str, subscriber_name: str) -> None:
try:
pid_path = Path(
WatchmanSubscriber._compute_pid_path(base_path, subscriber_name)
)
pid = int(pid_path.read_text())
os.kill(pid, signal.SIGINT)
LOG.debug("Stopped the %s with pid %d.", subscriber_name, pid)
except FileNotFoundError:
LOG.debug(f"Could not stop the {subscriber_name} because it was not found.")
except (OSError, ValueError) as exception:
LOG.debug(
f"Could not stop the {subscriber_name} "
f"because of exception `{exception}`."
)
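# Hedged sketch, not part of the original module: the minimal surface a
# concrete subscriber must provide for _run() to work.  The watch root, the
# subscription dictionary contents and the class itself are illustrative
# assumptions; an instance would be started with
# _ExampleSubscriber(base_path="/tmp/example_subscriber").daemonize().
class _ExampleSubscriber(WatchmanSubscriber):
    @property
    def _name(self) -> str:
        return "example_subscriber"

    @property
    def _subscriptions(self) -> List[Subscription]:
        subscription = {"expression": ["suffix", "py"], "fields": ["name"]}
        return [
            Subscription(
                root="/repo/root", name="py_files", subscription=subscription
            )
        ]

    def _handle_response(self, response: Dict[str, Any]) -> None:
        LOG.info("Files changed: %s", response.get("files", []))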
|
the-stack_0_15582 | def compare(before, after):
def extract(f):
for i in open(f):
if i.startswith(' '):
yield i.strip()
bwords = set(extract(before))
awords = set(extract(after))
print(len(bwords), len(awords))
print('Removed:')
    for w in sorted(bwords - awords):
print(' ', w)
print('Added:')
    for w in sorted(awords - bwords):
print(' ', w)
if __name__ == '__main__':
import sys
compare(*sys.argv[1:])
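# Illustrative invocation (the script name and file names are placeholders):
#
#     python compare.py before.txt after.txt
#
# prints the two set sizes followed by the indented entries removed from and
# added to the second file relative to the first.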
|
the-stack_0_15583 | from devito.ir.iet import IterationTree, FindSections, FindSymbols
from devito.symbolics import Keyword, Macro
from devito.tools import as_tuple, filter_ordered, split
from devito.types import Array, Global, LocalObject
__all__ = ['filter_iterations', 'retrieve_iteration_tree', 'derive_parameters',
'diff_parameters']
def retrieve_iteration_tree(node, mode='normal'):
"""
A list with all Iteration sub-trees within an IET.
Examples
--------
Given the Iteration tree:
.. code-block:: c
Iteration i
expr0
Iteration j
Iteration k
expr1
Iteration p
expr2
Return the list: ::
[(Iteration i, Iteration j, Iteration k), (Iteration i, Iteration p)]
Parameters
----------
    node : Node
The searched Iteration/Expression tree.
mode : str, optional
- ``normal``
- ``superset``: Iteration trees that are subset of larger iteration trees
are dropped.
"""
assert mode in ('normal', 'superset')
trees = [IterationTree(i) for i in FindSections().visit(node) if i]
if mode == 'normal':
return trees
else:
found = []
for i in trees:
if any(set(i).issubset(set(j)) for j in trees if i != j):
continue
found.append(i)
return found
def filter_iterations(tree, key=lambda i: i):
"""
Return the first sub-sequence of consecutive Iterations such that
``key(iteration)`` is True.
"""
filtered = []
for i in tree:
if key(i):
filtered.append(i)
elif len(filtered) > 0:
break
return filtered
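# Hedged illustration (not used elsewhere in this module): chain the two
# helpers above to pull out, for each Iteration tree, its leading run of
# Iterations satisfying a predicate.  The `is_Parallel` property is an assumed
# example predicate; any callable over Iterations works.
def _example_parallel_prefixes(iet):
    return [filter_iterations(tree, key=lambda i: i.is_Parallel)
            for tree in retrieve_iteration_tree(iet)]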
def derive_parameters(iet, drop_locals=False):
"""
Derive all input parameters (function call arguments) from an IET
by collecting all symbols not defined in the tree itself.
"""
# Extract all candidate parameters
candidates = FindSymbols().visit(iet)
# Symbols, Objects, etc, become input parameters as well
basics = FindSymbols('basics').visit(iet)
candidates.extend(i.function for i in basics)
# Filter off duplicates (e.g., `x_size` is extracted by both calls to FindSymbols)
candidates = filter_ordered(candidates)
# Filter off symbols which are defined somewhere within `iet`
defines = [s.name for s in FindSymbols('defines').visit(iet)]
parameters = [s for s in candidates if s.name not in defines]
# Drop globally-visible objects
parameters = [p for p in parameters if not isinstance(p, (Global, Keyword, Macro))]
# Maybe filter out all other compiler-generated objects
if drop_locals:
parameters = [p for p in parameters if not isinstance(p, (Array, LocalObject))]
return parameters
def diff_parameters(iet, root, indirectly_provided=None):
"""
Derive the parameters of a sub-IET, `iet`, within a Callable, `root`, and
split them into two groups:
* the "read-only" parameters, and
* the "dynamic" parameters, whose value changes at some point in `root`.
The `indirectly_provided` are the parameters that are provided indirectly to
`iet`, for example via a composite type (e.g., a C struct).
"""
required = derive_parameters(iet)
required = [i for i in required if i not in as_tuple(indirectly_provided)]
known = set(root.parameters) | set(i for i in required if i.is_Array)
parameters, dynamic_parameters = split(required, lambda i: i in known)
return required, parameters, dynamic_parameters
|
the-stack_0_15584 | # -*- coding: utf-8 -*-
# This program is free software; you can redistribute it and/or modify it under
# the terms of the (LGPL) GNU Lesser General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Library Lesser General Public License
# for more details at ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jurko Gospodnetić ( [email protected] )
"""
Suds library transport related unit tests.
Implemented using the 'pytest' testing framework.
"""
if __name__ == "__main__":
from . import __init__
__init__.runUsingPyTest(globals())
import suds
from suds.transport import Reply, Request
import pytest
import sys
@pytest.mark.parametrize("message", (
"",
"for a bitch it's haaaard...",
"I'm here to kick ass,\nand chew bubble gum...\nand I'm all out of gum.",
"šuć-muć pa ožeži.. za 100 €\n\nwith multiple\nlines...",
"\n\n\n\n\n\n",
"中原千军逐蒋"))
def test_reply_as_string(message):
code = 17
reply = Reply(code, {"aaa":1}, message)
expected = """\
CODE: %s
HEADERS: %s
MESSAGE:
%s""" % (code, reply.headers, message)
assert str(reply) == expected
if sys.version_info < (3, 0):
assert str(reply) == expected.encode("utf-8")
@pytest.mark.parametrize(("code", "headers", "message"), (
(1, {}, "ola"),
(2, {"semper":"fi"}, "中原千军逐蒋\n城楼万众检阅")))
def test_reply_constructor(code, headers, message):
reply = Reply(code, headers, message)
assert reply.code == code
assert reply.headers == headers
assert reply.message == message
@pytest.mark.parametrize("message", (
"",
"for a bitch it's haaaard...",
"I'm here to kick ass,\nand chew bubble gum...\nand I'm all out of gum.",
"šuć-muć pa ožeži.. za 100 €\n\nwith multiple\nlines...",
"\n\n\n\n\n\n",
"中原千军逐蒋"))
def test_request_as_string(message):
request = Request("my url", message)
request.headers["aaa"] = 1
expected = """\
URL: my url
HEADERS: %s
MESSAGE:
%s""" % (request.headers, message)
assert str(request) == expected
if sys.version_info < (3, 0):
assert str(request) == expected.encode("utf-8")
@pytest.mark.parametrize(("url", "message"), (
("for a bitch it's haaaard...", "it's hard out here..."),
("中原千军逐蒋", "城楼万众检阅")))
def test_request_constructor(url, message):
request = Request(url, message)
assert request.url == url
assert request.message == message
assert request.headers == {}
def test_request_without_message():
request = Request("for a bitch it's haaaard...")
assert request.url == "for a bitch it's haaaard..."
assert request.message is None
assert request.headers == {}
|
the-stack_0_15588 | from __future__ import absolute_import, print_function, unicode_literals
import datetime
import random
from django.db.models import Max, Min, Sum
from django.db.models.query import F
from kolibri.auth.filters import HierarchyRelationsFilter
from kolibri.auth.models import Classroom, Facility, FacilityUser
from kolibri.content.models import ContentNode
from kolibri.logger.models import AttemptLog, ContentSessionLog, ContentSummaryLog, MasteryLog
from le_utils.constants import content_kinds
def get_or_create_facilities(**options):
n_facilities = options['n_facilities']
n_on_device = Facility.objects.all().count()
n_to_create = n_facilities - n_on_device
if n_to_create > 0:
print('Generating {n} facility object(s)'.format(n=n_to_create))
for i in range(0, n_to_create):
Facility.objects.create(name='Test Facility {i}'.format(i=i + 1))
return Facility.objects.all()[0:n_facilities]
def get_or_create_classrooms(**options):
n_classes = options['n_classes']
facility = options['facility']
n_on_device = Classroom.objects.filter(parent=facility).count()
n_to_create = n_classes - n_on_device
if n_to_create > 0:
print('Generating {n} classroom object(s) for facility: {name}'.format(
n=n_to_create,
name=facility.name,
))
for i in range(0, n_to_create):
Classroom.objects.create(
parent=facility,
name='Classroom {i}{a}'.format(i=i + 1, a=random.choice('ABCD'))
)
return Classroom.objects.filter(parent=facility)[0:n_classes]
def get_or_create_classroom_users(**options):
classroom = options['classroom']
n_users = options['n_users']
user_data = options['user_data']
facility = options['facility']
# The headers in the user_data.csv file that we use to generate user Full Names
# Note, we randomly pick from these to give deliberately varied (and sometimes idiosyncratic)
# Full names - because we should never assume that users have names like us
user_data_name_fields = ["GivenName", "MiddleInitial", "Surname"]
n_in_classroom = HierarchyRelationsFilter(FacilityUser.objects.all()).filter_by_hierarchy(
ancestor_collection=classroom,
target_user=F("id"),
).count()
# Only generate new users if there are fewer users than requested.
n_to_create = n_users - n_in_classroom
if n_to_create > 0:
print('Generating {n} user object(s) for class: {classroom} in facility: {facility}'.format(
n=n_to_create,
classroom=classroom,
facility=facility,
))
for i in range(0, n_to_create):
# Get the first base data that does not have a matching user already
base_data = user_data[n_in_classroom + i]
# Randomly create the name from 1 to 3 of the three user name fields
name = " ".join([base_data[key] for key in random.sample(user_data_name_fields, random.randint(1, 3))])
user = FacilityUser.objects.create(
facility=facility,
full_name=name,
username=base_data['Username']
)
# Set a dummy password so that if we want to login as this learner later, we can.
user.set_password('password')
user.save()
# Add the user to the current classroom
classroom.add_member(user)
return HierarchyRelationsFilter(FacilityUser.objects.all()).filter_by_hierarchy(
target_user=F("id"),
ancestor_collection=classroom,
)[0:n_users]
def add_channel_activity_for_user(**options): # noqa: max-complexity=16
n_content_items = options['n_content_items']
channel = options['channel']
user = options['user']
now = options['now']
channel_id = channel.id
default_channel_content = ContentNode.objects.exclude(kind=content_kinds.TOPIC).filter(channel_id=channel_id)
print('Generating {i} user interaction(s) for user: {user} for channel: {channel}'.format(
i=n_content_items,
user=user,
channel=channel.name
))
# Generate a content interaction history for this many content items
for i in range(0, n_content_items):
# Use this to randomly select a content node to generate the interaction for
index = random.randint(0, default_channel_content.count() - 1)
random_node = default_channel_content[index]
# We will generate between 1 and 5 content session logs for this content item
session_logs = []
for j in range(0, random.randint(1, 5)):
# How many minutes did they spend in this session? Up to 15
duration = random.random() * 15
# Assume they spent some of this session time not doing anything - the lazy...
idle_time = random.random() * duration
session_logs.append(ContentSessionLog(
user=user,
channel_id=channel_id,
content_id=random_node.content_id,
start_timestamp=now - datetime.timedelta(i + j, 0, duration),
end_timestamp=now - datetime.timedelta(i + j),
# How many seconds did they actually spend doing something?
time_spent=(duration - idle_time) * 60,
progress=random.random(),
kind=random_node.kind,
))
# Assume they have not completed
completion_timestamp = None
cumulative_progress = 0
# Go through all the session logs and add up the progress in each
for session_log in session_logs:
cumulative_progress = min(cumulative_progress + session_log.progress, 1.0)
# If the progress is 1 or more, they have completed! Set the completion timestamp
# For the end of this session, for the sake of argument.
if cumulative_progress >= 1.0:
completion_timestamp = session_log.end_timestamp
session_log.save()
# Now that we have created all the Session Logs, infer the summary log from them
summary_log, created = ContentSummaryLog.objects.get_or_create(
user=user,
kind=random_node.kind,
content_id=random_node.content_id,
# Use defaults here so that we don't try to create a new Summary Log with the same
# kind/content_id/user combo, as this would violate uniqueness constraints
defaults={
'channel_id': channel_id,
# Start timestamp is the earliest start timestamp of the session logs
'start_timestamp': min(session_logs, key=lambda x: x.start_timestamp).start_timestamp,
# End timestamp is the latest of all the end timestamps
'end_timestamp': max(session_logs, key=lambda x: x.end_timestamp).end_timestamp,
'completion_timestamp': completion_timestamp,
'time_spent': sum(session_log.time_spent for session_log in session_logs),
'progress': min(sum(session_log.progress for session_log in session_logs), 1.0),
}
)
if not created:
# We didn't create the summary log this time, so it probably means it has other session logs
# Aggregate the information from there to update the relevant fields on the summary log
updates = ContentSessionLog.objects.filter(
user=user,
kind=random_node.kind,
content_id=random_node.content_id
).aggregate(
start_timestamp=Min('start_timestamp'),
end_timestamp=Max('end_timestamp'),
progress=Sum('progress')
)
summary_log.start_timestamp = updates['start_timestamp']
summary_log.end_timestamp = updates['end_timestamp']
if summary_log.progress < 1.0 and updates['progress'] >= 1.0:
# If it was not previously completed, and is now, set the completion timestamp to the
# final end timestamp of the session logs.
summary_log.completion_timestamp = updates['end_timestamp']
summary_log.progress = min(1.0, updates['progress'])
summary_log.save()
# If we are dealing with anything but an assessment (currently only exercises)
# we are done - if not, create additional data here
if random_node.kind == content_kinds.EXERCISE:
# Generate a mastery log if needed
mastery_log, created = MasteryLog.objects.get_or_create(
user=user,
mastery_level=1,
summarylog=summary_log,
defaults={
'start_timestamp': summary_log.start_timestamp,
'end_timestamp': summary_log.end_timestamp,
'complete': summary_log.progress >= 1.0,
'completion_timestamp': completion_timestamp,
'mastery_criterion': {
'm': 5,
'n': 5,
'type': 'm_of_n',
},
}
)
if not created:
# Not created, so update relevant fields on it based on new interactions
if not mastery_log.complete and summary_log.progress >= 1.0:
mastery_log.complete = True
mastery_log.completion_timestamp = summary_log.completion_timestamp
mastery_log.end_timestamp = summary_log.end_timestamp
# Get the list of assessment item ids from the assessment meta data
assessment_item_ids = random_node.assessmentmetadata.first().assessment_item_ids
for i, session_log in enumerate(reversed(session_logs)):
# Always make students get 5 attempts correct in the most recent session
# if the exercise is complete
complete = (i == 0 and mastery_log.complete)
if complete:
n = 5
else:
# Otherwise, let them have answered between 1 and 5 questions per session
n = random.randint(1, 5)
# How long did they spend on these n questions?
timespan = session_log.end_timestamp - session_log.start_timestamp
# Index through each individual question
for j in range(0, n):
if complete:
# If this is the session where they completed the exercise, always
# make them get it right
correct = True
else:
# Otherwise only let students get odd indexed questions right,
# ensuring they will always have a mastery breaking sequence
# as zero based indexing means their first attempt will always be wrong!
correct = j % 2 == 1
start_timestamp = session_log.end_timestamp - (timespan / n) * (j + 1)
end_timestamp = session_log.end_timestamp - (timespan / n) * j
# If incorrect, must have made at least two attempts at the question
question_attempts = 1 if correct else random.randint(2, 5)
question_interval = (end_timestamp - start_timestamp) / question_attempts
# If they got it wrong, give 20/80 chance that they took a hint to do so
hinted = random.choice((False, False, False, False, not correct))
if hinted:
first_interaction = {
'correct': False,
'type': 'hint',
}
else:
first_interaction = {
'correct': correct,
'type': 'answer',
}
first_interaction.update({
'answer': {},
'timestamp': start_timestamp + question_interval
})
interaction_history = [first_interaction]
# If it is correct, this can be our only response, otherwise, add more.
if not correct:
for att in range(1, question_attempts - 1):
# Add on additional attempts for intervening incorrect responses
interaction_history += [{
'correct': False,
'type': 'answer',
'answer': {},
'timestamp': start_timestamp + question_interval * (att + 1),
}]
# Finally, add a correct response that allows the user to move onto the next question
interaction_history += [{
'correct': True,
'type': 'answer',
'answer': {},
'timestamp': end_timestamp,
}]
AttemptLog.objects.create(
# Choose a random assessment item id from the exercise
item=random.choice(assessment_item_ids),
# Just let each attempt be a fixed proportion of the total time spent on the exercise
start_timestamp=start_timestamp,
end_timestamp=end_timestamp,
time_spent=timespan.total_seconds(),
# Mark all attempts as complete, as assume that student gave correct answer eventually
complete=True,
# Mark as correct or incorrect
correct=correct,
hinted=hinted,
# We can't meaningfully generate fake answer data for Perseus exercises
# (which are currently our only exercise type) - so don't bother.
answer={},
simple_answer='',
interaction_history=interaction_history,
user=user,
masterylog=mastery_log,
sessionlog=session_log,
)
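# Hedged sketch of how the helpers above are typically chained together (never
# called from this module; `channel`, `user_data` and `now` are assumed to be
# supplied by the management command that imports it, and the sizes are
# arbitrary).
def _example_generate_fake_activity(channel, user_data, now):
    facilities = get_or_create_facilities(n_facilities=1)
    classrooms = get_or_create_classrooms(n_classes=2, facility=facilities[0])
    for classroom in classrooms:
        users = get_or_create_classroom_users(
            classroom=classroom, n_users=20,
            user_data=user_data, facility=facilities[0])
        for user in users:
            add_channel_activity_for_user(
                n_content_items=3, channel=channel, user=user, now=now)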
|
the-stack_0_15590 | #!/usr/bin/env python
# encoding: utf-8
# Sample-based Monte Carlo Denoising using a Kernel-Splatting Network
# Michaël Gharbi Tzu-Mao Li Miika Aittala Jaakko Lehtinen Frédo Durand
# Siggraph 2019
#
# Copyright (c) 2019 Michaël Gharbi
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Denoise an image using a previously trained model."""
import os
import argparse
import shutil
import tempfile
import time
import pyexr
import torch as th
import numpy as np
from torch.utils.data import DataLoader
import skimage.io as skio
from sbmc import losses
from denoise import _pad
import ttools
from ttools.modules.image_operators import crop_like
import sbmc
LOG = ttools.get_logger(__name__)
def main(args):
if not os.path.exists(args.data):
raise ValueError("input {} does not exist".format(args.data))
# Load the data
data_params = dict(spp=args.spp)
kpcn_data_params = dict(spp=args.spp, kpcn_mode=True)
data = sbmc.FullImagesDataset(args.data, **data_params)
data_kpcn = sbmc.FullImagesDataset(args.data, **data_params, mode="kpcn")
dataloader = DataLoader(data, batch_size=1, shuffle=False, num_workers=0)
dataloader_kpcn = DataLoader(data_kpcn, batch_size=1, shuffle=False, num_workers=0)
# Load the model
temp = th.load(f"{args.model1}", map_location=th.device('cpu'))
model_one = sbmc.RecurrentMultisteps(data.num_features, data.num_global_features)
model_one.load_state_dict(temp['model'])
model_one.train(False)
temp = th.load("/home/emil/Documents/Temporal-SBMC-extension/data/pretrained_models/gharbi2019_sbmc/final.pth" , map_location=th.device("cpu"))
sbmc_model = sbmc.Multisteps(data.num_features, data.num_global_features)
sbmc_model.load_state_dict(temp["model"])
sbmc_model.train(False)
temp = th.load("/home/emil/Documents/Temporal-SBMC-extension/data/pretrained_models/bako2017_finetuned/final.pth", map_location=th.device("cpu"))
kpcn_model = sbmc.KPCN(27)
kpcn_model.load_state_dict(temp["model"])
kpcn_model.train(False)
device = "cuda" if th.cuda.is_available() else "cpu"
if (device == "cuda"):
LOG.info("Using CUDA")
model_one.cuda()
sbmc_model.cuda()
kpcn_model.cuda()
rmse_checker = losses.RelativeMSE()
rmse_checker.to(device)
radiances = []
batch = next(iter(dataloader))
kpcn_batch = next(iter(dataloader_kpcn))
for k in batch.keys():
if not batch[k].__class__ == th.Tensor:
continue
batch[k] = batch[k].to(device) # Sets the tensors to the correct device type
for k in kpcn_batch.keys():
print(k)
if not kpcn_batch[k].__class__ == th.Tensor:
continue
kpcn_batch[k] = kpcn_batch[k].to(device) # Sets the tensors to the correct device type
# Compute the output with RSBMC
with th.no_grad():
output = model_one(batch)["radiance"]
output_sbmc = sbmc_model(batch)["radiance"]
output_kpcn = kpcn_model(kpcn_batch)["radiance"]
# tgt = crop_like(batch["target_image"], output)
radiances.append(batch["low_spp"])
radiances.append(_pad(batch, output, False)) # Add RSBMC to the output
radiances.append(_pad(batch, output_sbmc, False))
radiances.append(_pad(kpcn_batch, output_kpcn, True))
radiances.append(batch["target_image"]) # Add target to the output
save_img(radiances, args.save_dir)
def save_img(radiances, checkpoint_dir):
tmp_empty = th.zeros_like(radiances[0]) # Empty filler tensor
# Difference between models and ground thruth
# diff_model1 = (radiance1 - tgt).abs()
# diff_model2 = (radiance2 - tgt).abs()
# Create output data in the form:
# low spp input --
# ouput model1 -- Diff with tgt
# ouput model2 -- Diff with tgt
# tgt --
# first_row = th.cat([tmp_empty, low_radiance, tmp_empty], -1)
# second_row = th.cat([tmp_empty, radiance1, diff_model1], -1)
# third_row = th.cat([tmp_empty, radiance2, diff_model2], -1)
# fourth_row = th.cat([tmp_empty, tgt, tmp_empty], -1)
# Concate the data in a vertical stack
# data = th.cat([first_row, second_row, third_row, fourth_row], -2)
data = th.cat(radiances, -1)
data = th.clamp(data, 0)
data /= 1 + data
data = th.pow(data, 1.0/2.2)
data = th.clamp(data, 0, 1)
data = data[0, ...].cpu().detach().numpy().transpose([1, 2, 0])
data = np.ascontiguousarray(data)
    # Write the comparison strip to disk as EXR plus a tonemapped PNG
    os.makedirs(checkpoint_dir, exist_ok=True)
    outputfile = os.path.join(checkpoint_dir, 'spp.exr')
pyexr.write(outputfile, data)
png = outputfile.replace(".exr", ".png")
skio.imsave(png, (np.clip(data, 0, 1)*255).astype(np.uint8))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
'--model1', required=True, help="path to the first model")
parser.add_argument(
'--save_dir', required=True, help="path to the dir where everything has to be saved")
parser.add_argument(
'--data', required=True, help="path to the training data.")
parser.add_argument(
'--amount', required=False, type=int,default=1, help="Amount of frames to denoise and compare")
parser.add_argument('--spp', type=int,
help="number of samples to use as input.")
args = parser.parse_args()
ttools.set_logger(True)
main(args)
|
the-stack_0_15592 | #!/usr/bin/env python3
# Copyright 2019 Stanford University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import csv
import os
import sys
import chart_util as util
class Parser(util.Parser):
def __init__(self, csv_dialect):
self.csv_dialect = csv_dialect
self.header = ['name', 'ngraphs', 'type', 'nodes', 'metg']
self.table = []
def process(self, row, data):
self.table.append({'metg': data, **row})
def error_value(self):
return 'error'
def complete(self):
out = csv.DictWriter(sys.stdout, self.header, dialect=self.csv_dialect)
out.writeheader()
for row in self.table:
out.writerow(row)
def driver(machine, threshold, csv_dialect, verbose):
parser = Parser(csv_dialect)
parser.parse(machine, threshold, True, verbose)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-m', '--machine', required=True)
parser.add_argument('-t', '--threshold', type=float, default=0.5)
parser.add_argument('--csv-dialect', default='excel-tab')
parser.add_argument('-v', '--verbose', action='store_true')
args = parser.parse_args()
driver(**vars(args))
|
the-stack_0_15593 | # MIT License
#
# Copyright (c) 2018 Evgeny Medvedev, [email protected]
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import click
from blockchainetl import misc_utils
@click.command(context_settings=dict(help_option_names=['-h', '--help']))
@click.option('-i', '--input', default='-', type=str, help='The input file. If not specified stdin is used.')
@click.option('-o', '--output', default='-', type=str, help='The output file. If not specified stdout is used.')
@click.option('-p', '--predicate', required=True, type=str,
help='Predicate in Python code e.g. "item[\'is_erc20\']".')
def filter_items(input, output, predicate):
def evaluated_predicate(item):
eval_environment = globals()
if 'datetime' in predicate:
import datetime
eval_environment['datetime'] = datetime
return eval(predicate, eval_environment, {'item': item})
misc_utils.filter_items(input, output, evaluated_predicate)
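# Hedged usage note: this click command filters a newline-delimited JSON
# stream, keeping items whose predicate evaluates truthy.  However the
# surrounding CLI exposes it, an invocation would look roughly like
#
#     ... filter_items -i token_transfers.json -o erc20.json \
#         -p "item['is_erc20']"
#
# (file names are placeholders).  `datetime` is injected into the predicate's
# evaluation environment only when the expression mentions it.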
|
the-stack_0_15595 | r"""Inspect MEG and EEG raw data, and interactively mark channels as bad.
example usage:
$ mne_bids inspect --subject_id=01 --task=experiment --session=test \
--datatype=meg --suffix=meg --bids_root=bids_root
"""
# Authors: Richard Höchenberger <[email protected]>
#
# License: BSD (3-clause)
from mne.utils import logger
import mne_bids
from mne_bids import BIDSPath, inspect_dataset
def run():
"""Run the mark_bad_channels command."""
from mne.commands.utils import get_optparser
parser = get_optparser(__file__, usage="usage: %prog options args",
prog_prefix='mne_bids',
version=mne_bids.__version__)
parser.add_option('--bids_root', dest='bids_root',
help='The path of the folder containing the BIDS '
'dataset')
parser.add_option('--subject_id', dest='subject',
help=('Subject name'))
parser.add_option('--session_id', dest='session',
help='Session name')
parser.add_option('--task', dest='task',
help='Task name')
parser.add_option('--acq', dest='acquisition',
help='Acquisition parameter')
parser.add_option('--run', dest='run',
help='Run number')
parser.add_option('--proc', dest='processing',
help='Processing label.')
parser.add_option('--rec', dest='recording',
help='Recording name')
parser.add_option('--type', dest='datatype',
help='Recording data type, e.g. meg, ieeg or eeg')
parser.add_option('--suffix', dest='suffix',
help='The filename suffix, i.e. the last part before '
'the extension')
parser.add_option('--ext', dest='extension',
help='The filename extension, including the leading '
'period, e.g. .fif')
parser.add_option('--find_flat', dest='find_flat',
help='Whether to auto-detect flat channels and time '
'segments')
parser.add_option('--l_freq', dest='l_freq',
help='The high-pass filter cutoff frequency')
parser.add_option('--h_freq', dest='h_freq',
help='The low-pass filter cutoff frequency')
parser.add_option('--verbose', dest='verbose', action='store_true',
                      help='Whether to generate additional diagnostic output')
opt, args = parser.parse_args()
if args:
parser.print_help()
parser.error(f'Please do not specify arguments without flags. '
f'Got: {args}.\n')
if opt.bids_root is None:
parser.print_help()
parser.error('You must specify bids_root')
bids_path = BIDSPath(subject=opt.subject, session=opt.session,
task=opt.task, acquisition=opt.acquisition,
run=opt.run, processing=opt.processing,
recording=opt.recording, datatype=opt.datatype,
suffix=opt.suffix, extension=opt.extension,
root=opt.bids_root)
find_flat = True if opt.find_flat is None else bool(opt.find_flat)
l_freq = None if opt.l_freq is None else float(opt.l_freq)
h_freq = None if opt.h_freq is None else float(opt.h_freq)
logger.info(f'Inspecting {bids_path.basename} …')
inspect_dataset(bids_path=bids_path, find_flat=find_flat,
l_freq=l_freq, h_freq=h_freq,
verbose=opt.verbose)
if __name__ == '__main__':
run()
|
the-stack_0_15597 | """This file and its contents are licensed under the Apache License 2.0. Please see the included NOTICE for copyright information and LICENSE for a copy of the license.
"""
import logging
import drf_yasg.openapi as openapi
from drf_yasg.utils import swagger_auto_schema
from rest_framework import generics, status
from rest_framework.parsers import FormParser, JSONParser, MultiPartParser
from rest_framework.views import APIView
from rest_framework.response import Response
from core.permissions import all_permissions
from core.utils.common import get_object_with_check_and_log
from projects.models import Project
from ml.serializers import MLBackendSerializer
from ml.models import MLBackend
from core.utils.common import bool_from_request
logger = logging.getLogger(__name__)
class MLBackendListAPI(generics.ListCreateAPIView):
parser_classes = (JSONParser, FormParser, MultiPartParser)
permission_required = all_permissions.projects_change
serializer_class = MLBackendSerializer
swagger_schema = None
def get_queryset(self):
project_pk = self.request.query_params.get("project")
project = get_object_with_check_and_log(self.request, Project, pk=project_pk)
self.check_object_permissions(self.request, project)
ml_backends = MLBackend.objects.filter(project_id=project.id)
for mlb in ml_backends:
mlb.update_state()
return ml_backends
def perform_create(self, serializer):
ml_backend = serializer.save()
ml_backend.update_state()
class MLBackendDetailAPI(generics.RetrieveUpdateDestroyAPIView):
"""RUD storage by pk specified in URL"""
parser_classes = (JSONParser, FormParser, MultiPartParser)
serializer_class = MLBackendSerializer
permission_required = all_permissions.projects_change
queryset = MLBackend.objects.all()
swagger_schema = None
def get_object(self):
ml_backend = super(MLBackendDetailAPI, self).get_object()
ml_backend.update_state()
return ml_backend
def perform_update(self, serializer):
ml_backend = serializer.save()
ml_backend.update_state()
class MLBackendTrainAPI(APIView):
"""Train
After you've activated an ML backend, call this API to start training with the already-labeled tasks.
"""
permission_required = all_permissions.projects_change
@swagger_auto_schema(
request_body=openapi.Schema(
type=openapi.TYPE_OBJECT,
properties={
"use_ground_truth": openapi.Schema(
type=openapi.TYPE_BOOLEAN,
description="Whether to include ground truth annotations in training",
)
},
),
responses={
200: openapi.Response(
title="Training OK", description="Training has successfully started."
),
500: openapi.Response(
description="Training error",
schema=openapi.Schema(
title="Error message",
desciption="Error message",
type=openapi.TYPE_STRING,
example="Server responded with an error.",
),
),
},
tags=["Machine Learning"],
)
def post(self, request, *args, **kwargs):
ml_backend = get_object_with_check_and_log(request, MLBackend, pk=self.kwargs["pk"])
self.check_object_permissions(self.request, ml_backend)
ml_backend.train()
return Response(status=status.HTTP_200_OK)
|
the-stack_0_15598 | import csv
from decimal import Decimal
from io import BytesIO, StringIO
import os
from collections import OrderedDict
from tempfile import TemporaryDirectory
from unittest import skipIf
from zipfile import ZipFile
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.contrib.gis.geos import Point, LineString, GeometryCollection
from django.contrib.gis import gdal
from django.test import TestCase
from django.test.utils import override_settings
from django.utils.translation import activate, deactivate_all
from geotrek.common.tests import CommonTest
from mapentity.tests.factories import SuperUserFactory
from mapentity.serializers.shapefile import ZipShapeSerializer
from geotrek.authent.tests.factories import PathManagerFactory, StructureFactory
from geotrek.core.tests.factories import StakeFactory
from geotrek.core.models import PathAggregation
from geotrek.common.tests.factories import OrganismFactory
from geotrek.common.tests import TranslationResetMixin
from geotrek.maintenance.models import Intervention, InterventionStatus, Project
from geotrek.maintenance.views import InterventionFormatList, ProjectFormatList
from geotrek.core.tests.factories import PathFactory, TopologyFactory
from geotrek.infrastructure.models import Infrastructure
from geotrek.infrastructure.tests.factories import InfrastructureFactory
from geotrek.outdoor.tests.factories import CourseFactory
from geotrek.signage.tests.factories import BladeFactory, SignageFactory
from geotrek.signage.models import Signage
from geotrek.maintenance.tests.factories import (InterventionFactory, InfrastructureInterventionFactory,
InterventionDisorderFactory, InterventionStatusFactory, ManDayFactory,
ProjectFactory, ContractorFactory, InterventionJobFactory,
SignageInterventionFactory, ProjectWithInterventionFactory)
from geotrek.trekking.tests.factories import POIFactory, TrekFactory, ServiceFactory
class InterventionViewsTest(CommonTest):
model = Intervention
modelfactory = InterventionFactory
userfactory = PathManagerFactory
get_expected_json_attrs = None # Disable API tests
extra_column_list = ['heliport_cost', 'subcontract_cost', 'disorders', 'jobs']
expected_column_list_extra = ['id', 'name', 'heliport_cost', 'subcontract_cost', 'disorders', 'jobs']
expected_column_formatlist_extra = ['id', 'heliport_cost', 'subcontract_cost', 'disorders', 'jobs']
def get_bad_data(self):
return OrderedDict([
('name', ''),
('manday_set-TOTAL_FORMS', '0'),
('manday_set-INITIAL_FORMS', '1'),
('manday_set-MAX_NUM_FORMS', '0'),
]), 'This field is required.'
def get_good_data(self):
InterventionStatusFactory.create()
good_data = {
'name': 'test',
'date': '2012-08-23',
'disorders': InterventionDisorderFactory.create().pk,
'comments': '',
'slope': 0,
'area': 0,
'subcontract_cost': 0.0,
'stake': StakeFactory.create().pk,
'height': 0.0,
'project': '',
'width': 0.0,
'length': 0.0,
'status': InterventionStatus.objects.all()[0].pk,
'heliport_cost': 0.0,
'material_cost': 0.0,
'manday_set-TOTAL_FORMS': '2',
'manday_set-INITIAL_FORMS': '0',
'manday_set-MAX_NUM_FORMS': '',
'manday_set-0-nb_days': '48.75',
'manday_set-0-job': InterventionJobFactory.create().pk,
'manday_set-0-id': '',
'manday_set-0-DELETE': '',
'manday_set-1-nb_days': '12',
'manday_set-1-job': InterventionJobFactory.create().pk,
'manday_set-1-id': '',
'manday_set-1-DELETE': '',
}
if settings.TREKKING_TOPOLOGY_ENABLED:
path = PathFactory.create()
good_data['topology'] = '{"paths": [%s]}' % path.pk,
else:
good_data['topology'] = 'SRID=4326;POINT (5.1 6.6)'
return good_data
def test_creation_form_on_signage(self):
if settings.TREKKING_TOPOLOGY_ENABLED:
signa = SignageFactory.create()
else:
signa = SignageFactory.create(geom='SRID=2154;POINT (700000 6600000)')
signage = "%s" % signa
response = self.client.get('%s?target_id=%s&target_type=%s' % (Intervention.get_add_url(),
signa.pk,
ContentType.objects.get_for_model(Signage).pk
))
self.assertEqual(response.status_code, 200)
self.assertContains(response, signage)
# Should be able to save form successfully
data = self.get_good_data()
response = self.client.post('%s?target_id=%s&target_type=%s' % (Intervention.get_add_url(),
signa.pk,
ContentType.objects.get_for_model(Signage).pk
),
data)
self.assertEqual(response.status_code, 302)
self.assertEqual(signa, Intervention.objects.get().target)
def test_detail_target_objects(self):
if settings.TREKKING_TOPOLOGY_ENABLED:
path = PathFactory.create(geom=LineString((200, 200), (300, 300)))
signa = SignageFactory.create(paths=[(path, .5, .5)])
signa.save()
infrastructure = InfrastructureFactory.create(paths=[(path, .5, .5)])
infrastructure.save()
poi = POIFactory.create(paths=[(path, .5, .5)])
trek = TrekFactory.create(paths=[(path, .5, .5)])
service = ServiceFactory.create(paths=[(path, .5, .5)])
topo = TopologyFactory.create(paths=[(path, .5, .5)])
topo.save()
path_other = PathFactory.create(geom=LineString((10000, 0), (10010, 0)))
signa_other = SignageFactory.create(paths=[(path_other, .5, .5)])
signa_other.save()
else:
signa = SignageFactory.create(geom='SRID=2154;POINT (250 250)')
infrastructure = InfrastructureFactory.create(geom='SRID=2154;POINT (250 250)')
poi = POIFactory.create(geom='SRID=2154;POINT (250 250)')
trek = TrekFactory.create(geom='SRID=2154;POINT (250 250)')
service = ServiceFactory.create(geom='SRID=2154;POINT (250 250)')
topo = TopologyFactory.create(geom='SRID=2154;POINT (250 250)')
signa_other = SignageFactory.create(geom='SRID=2154;POINT (10005 0)')
intervention_signa = InterventionFactory.create(target=signa)
intervention_infra = InterventionFactory.create(target=infrastructure)
intervention_poi = InterventionFactory.create(target=poi)
intervention_trek = InterventionFactory.create(target=trek)
intervention_service = InterventionFactory.create(target=service)
intervention_topo = InterventionFactory.create(target=topo)
blade = BladeFactory(signage=signa, number="1")
intervention_blade = InterventionFactory.create(target=blade)
intervention_other = InterventionFactory.create(target=signa_other)
response = self.client.get(signa.get_detail_url())
self.assertEqual(response.status_code, 200)
self.assertContains(response, intervention_signa.target_display)
self.assertContains(response, intervention_infra.target_display)
self.assertContains(response, intervention_poi.target_display)
self.assertContains(response, intervention_trek.target_display)
self.assertContains(response, intervention_service.target_display)
self.assertContains(response, intervention_blade.target_display)
self.assertContains(response, intervention_topo.target_display)
self.assertNotContains(response, intervention_other.target_display)
def test_creation_form_on_signage_with_errors(self):
if settings.TREKKING_TOPOLOGY_ENABLED:
signa = SignageFactory.create()
else:
signa = SignageFactory.create(geom='SRID=2154;POINT (700000 6600000)')
signage = "%s" % signa
response = self.client.get('%s?target_id=%s&target_type=%s' % (Intervention.get_add_url(),
signa.pk,
ContentType.objects.get_for_model(Signage).pk
))
self.assertEqual(response.status_code, 200)
self.assertContains(response, signage)
data = self.get_good_data()
# If form invalid, it should not fail
data.pop('status')
response = self.client.post('%s?target_id=%s&target_type=%s' % (Intervention.get_add_url(),
signa.pk,
ContentType.objects.get_for_model(Signage).pk
),
data)
self.assertEqual(response.status_code, 200)
self.assertFalse(Intervention.objects.exists())
def test_update_form_on_signage(self):
if settings.TREKKING_TOPOLOGY_ENABLED:
signa = SignageFactory.create()
else:
signa = SignageFactory.create(geom='SRID=2154;POINT (700000 6600000)')
signage = "%s" % signa
intervention = InterventionFactory.create(target=signa)
response = self.client.get(intervention.get_update_url())
self.assertEqual(response.status_code, 200)
self.assertContains(response, signage)
# Should be able to save form successfully
form = response.context['form']
data = form.initial
data['disorders'] = data['disorders'][0].pk
data['project'] = ''
data.update(**{
'manday_set-TOTAL_FORMS': '0',
'manday_set-INITIAL_FORMS': '0',
'manday_set-MAX_NUM_FORMS': '',
})
# Form URL is modified in form init
formurl = '%s?target_id=%s&target_type=%s' % (intervention.get_update_url(), signa.pk, ContentType.objects.get_for_model(Signage).pk)
response = self.client.post(formurl, data)
self.assertEqual(response.status_code, 302)
def test_update_signage(self):
target_year = 2017
if settings.TREKKING_TOPOLOGY_ENABLED:
intervention = SignageInterventionFactory.create()
else:
intervention = SignageInterventionFactory.create(geom='SRID=2154;POINT (700000 6600000)')
signa = intervention.target
# Save infrastructure form
response = self.client.get(signa.get_update_url())
form = response.context['form']
data = form.initial
data['name_en'] = 'modified'
data['implantation_year'] = target_year
if settings.TREKKING_TOPOLOGY_ENABLED:
data['topology'] = '{"paths": [%s]}' % PathFactory.create().pk
else:
data['geom'] = 'SRID=4326;POINT (2.0 6.6)'
data['manager'] = OrganismFactory.create().pk
response = self.client.post(signa.get_update_url(), data)
self.assertEqual(response.status_code, 302)
# Check that intervention was not deleted (bug #783)
intervention = Intervention.objects.first()
self.assertFalse(intervention.deleted)
self.assertEqual(intervention.target.name, 'modified')
self.assertEqual(intervention.target.implantation_year, target_year)
def test_creation_form_on_infrastructure(self):
if settings.TREKKING_TOPOLOGY_ENABLED:
infra = InfrastructureFactory.create()
else:
infra = InfrastructureFactory.create(geom='SRID=2154;POINT (700000 6600000)')
response = self.client.get('%s?target_id=%s&target_type=%s' % (Intervention.get_add_url(),
infra.pk,
ContentType.objects.get_for_model(Infrastructure).pk))
self.assertEqual(response.status_code, 200)
# Should be able to save form successfully
data = self.get_good_data()
response = self.client.post('%s?target_id=%s&target_type=%s' % (Intervention.get_add_url(),
infra.pk,
ContentType.objects.get_for_model(Infrastructure).pk),
data)
self.assertEqual(response.status_code, 302)
def test_creation_form_on_infrastructure_with_errors(self):
if settings.TREKKING_TOPOLOGY_ENABLED:
infra = InfrastructureFactory.create()
else:
infra = InfrastructureFactory.create(geom='SRID=2154;POINT (700000 6600000)')
response = self.client.get('%s?target_id=%s&target_type=%s' % (Intervention.get_add_url(),
infra.pk,
ContentType.objects.get_for_model(Infrastructure).pk))
self.assertEqual(response.status_code, 200)
data = self.get_good_data()
# If form invalid, it should not fail
data.pop('status')
response = self.client.post('%s?target_id=%s&target_type=%s' % (Intervention.get_add_url(),
infra.pk,
ContentType.objects.get_for_model(Infrastructure).pk), data)
self.assertEqual(response.status_code, 200)
def test_update_form_on_infrastructure(self):
if settings.TREKKING_TOPOLOGY_ENABLED:
infra = InfrastructureFactory.create()
else:
infra = InfrastructureFactory.create(geom='SRID=2154;POINT (700000 6600000)')
intervention = InterventionFactory.create(target=infra)
response = self.client.get(intervention.get_update_url())
self.assertEqual(response.status_code, 200)
# Should be able to save form successfully
form = response.context['form']
data = form.initial
data['disorders'] = data['disorders'][0].pk
data['project'] = ''
data.update(**{
'manday_set-TOTAL_FORMS': '0',
'manday_set-INITIAL_FORMS': '0',
'manday_set-MAX_NUM_FORMS': '',
})
# Form URL is modified in form init
formurl = '%s?target_id=%s&target_type=%s' % (Intervention.get_add_url(),
infra.pk,
ContentType.objects.get_for_model(Infrastructure).pk)
response = self.client.post(formurl, data)
self.assertEqual(response.status_code, 302)
def test_disorders_not_mandatory(self):
data = self.get_good_data()
data.pop('disorders')
response = self.client.post(Intervention.get_add_url(), data)
self.assertEqual(response.status_code, 302)
def test_update_infrastructure(self):
target_year = 2017
if settings.TREKKING_TOPOLOGY_ENABLED:
intervention = InfrastructureInterventionFactory.create()
else:
intervention = InfrastructureInterventionFactory.create(geom='SRID=2154;POINT (700000 6600000)')
infra = intervention.target
# Save infrastructure form
response = self.client.get(infra.get_update_url())
form = response.context['form']
data = form.initial
data['name_en'] = 'modified'
data['implantation_year'] = target_year
data['accessibility'] = ''
if settings.TREKKING_TOPOLOGY_ENABLED:
data['topology'] = '{"paths": [%s]}' % PathFactory.create().pk
else:
data['geom'] = 'SRID=4326;POINT (2.0 6.6)'
response = self.client.post(infra.get_update_url(), data)
self.assertEqual(response.status_code, 302)
intervention = Intervention.objects.first()
self.assertFalse(intervention.deleted)
self.assertEqual(intervention.target.name, 'modified')
self.assertEqual(intervention.target.implantation_year, target_year)
@skipIf(not settings.TREKKING_TOPOLOGY_ENABLED, 'Test with dynamic segmentation only')
def test_form_default_stake(self):
"""
Without dynamic segmentation there are no paths, so no default stake can be derived from them
"""
good_data = self.get_good_data()
good_data['stake'] = ''
good_data['topology'] = """
{"offset":0,"positions":{"0":[0.8298653170816073,1],"2":[0,0.04593024777973237]},"paths":[%s,%s,%s]}
""" % (PathFactory.create().pk, PathFactory.create().pk, PathFactory.create().pk)
response = self.client.post(Intervention.get_add_url(), good_data)
self.assertEqual(response.status_code, 302)
response = self.client.get(response._headers['location'][1])
self.assertTrue('object' in response.context)
intervention = response.context['object']
self.assertFalse(intervention.stake is None)
def test_form_deleted_projects(self):
p1 = ProjectFactory.create()
p2 = ProjectFactory.create()
i = InterventionFactory.create(project=p1)
response = self.client.get(i.get_update_url())
self.assertEqual(response.status_code, 200)
form = self.get_form(response)
projects = form.fields['project'].queryset.all()
self.assertCountEqual(projects, [p1, p2])
p2.delete()
projects = form.fields['project'].queryset.all()
self.assertCountEqual(projects, [p1])
@skipIf(not settings.TREKKING_TOPOLOGY_ENABLED, 'Test with dynamic segmentation only')
def test_csv_on_topology_multiple_paths(self):
# Create an intervention on multiple paths and check that the CSV "On" field lists all of them
path_AB = PathFactory.create(name="PATH_AB", geom=LineString((0, 0), (4, 0)))
path_CD = PathFactory.create(name="PATH_CD", geom=LineString((4, 0), (8, 0)))
InterventionFactory.create(target=TopologyFactory.create(paths=[(path_AB, 0.2, 1),
(path_CD, 0, 1)]))
response = self.client.get(self.model.get_format_list_url() + '?format=csv')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.get('Content-Type'), 'text/csv')
# Read the csv
lines = list(csv.reader(StringIO(response.content.decode("utf-8")), delimiter=','))
index_line = lines[0].index('On')
self.assertEqual(lines[1][index_line],
f'Path: {path_AB.name} ({path_AB.pk}), Path: {path_CD.name} ({path_CD.pk})')
def test_no_html_in_csv_infrastructure(self):
if settings.TREKKING_TOPOLOGY_ENABLED:
InfrastructureInterventionFactory.create()
else:
InfrastructureInterventionFactory.create(geom='SRID=2154;POINT (700000 6600000)')
super().test_no_html_in_csv()
def test_no_html_in_csv_signage(self):
if settings.TREKKING_TOPOLOGY_ENABLED:
SignageInterventionFactory.create()
else:
SignageInterventionFactory.create(geom='SRID=2154;POINT (700000 6600000)')
super().test_no_html_in_csv()
def test_structurerelated_not_loggedin(self):
# Test that it does not fail on update if not logged in
self.client.logout()
response = self.client.get(Intervention.get_add_url())
self.assertEqual(response.status_code, 302)
i = InterventionFactory.create()
response = self.client.get(i.get_update_url())
self.assertEqual(response.status_code, 302)
@skipIf(not settings.TREKKING_TOPOLOGY_ENABLED, 'Test with dynamic segmentation only')
def test_creation_form_line(self):
path = PathFactory.create(geom=LineString(Point(700000, 6600000), Point(700300, 6600300), srid=settings.SRID))
self.super_user = SuperUserFactory.create(username='admin', password='super')
self.client.login(username='admin', password='super')
data = self.get_good_data()
data['structure'] = StructureFactory.create().pk
data['topology'] = '{"paths": [%s], "positions":{"0":[0,1]}}' % path.pk,
response = self.client.post('%s' % (Intervention.get_add_url()),
data)
self.assertEqual(PathAggregation.objects.count(), 1)
self.assertEqual(response.status_code, 302)
self.assertEqual(Intervention.objects.first().geom, path.geom)
self.assertEqual(Intervention.objects.first().target.kind, 'INTERVENTION')
class ProjectViewsTest(CommonTest):
model = Project
modelfactory = ProjectWithInterventionFactory
userfactory = PathManagerFactory
get_expected_json_attrs = None # Disable API tests
extra_column_list = ['domain', 'contractors']
expected_column_list_extra = ['id', 'name', 'domain', 'contractors']
expected_column_formatlist_extra = ['id', 'domain', 'contractors']
def get_bad_data(self):
return OrderedDict([
('begin_year', ''),
('funding_set-TOTAL_FORMS', '0'),
('funding_set-INITIAL_FORMS', '1'),
('funding_set-MAX_NUM_FORMS', '0'),
]), 'This field is required.'
def get_good_data(self):
return {
'name': 'test',
'stake': '',
'type': '',
'domain': '',
'begin_year': '2010',
'end_year': '2012',
'constraints': '',
'global_cost': '12',
'comments': '',
'contractors': ContractorFactory.create().pk,
'project_owner': OrganismFactory.create().pk,
'project_manager': OrganismFactory.create().pk,
'funding_set-TOTAL_FORMS': '2',
'funding_set-INITIAL_FORMS': '0',
'funding_set-MAX_NUM_FORMS': '',
'funding_set-0-amount': '468.0',
'funding_set-0-organism': OrganismFactory.create().pk,
'funding_set-0-project': '',
'funding_set-0-id': '',
'funding_set-0-DELETE': '',
'funding_set-1-amount': '789',
'funding_set-1-organism': OrganismFactory.create().pk,
'funding_set-1-project': '',
'funding_set-1-id': '',
'funding_set-1-DELETE': ''
}
def _check_update_geom_permission(self, response):
pass
def test_project_layer(self):
p1 = ProjectFactory.create()
ProjectFactory.create()
if settings.TREKKING_TOPOLOGY_ENABLED:
InterventionFactory.create(project=p1)
else:
InterventionFactory.create(project=p1, geom='SRID=2154;POINT (700000 6600000)')
# Check that only p1 is in geojson
response = self.client.get(self.model.get_layer_url())
self.assertEqual(response.status_code, 200)
geojson = response.json()
features = geojson['features']
self.assertEqual(len(Project.objects.all()), 2)
self.assertEqual(len(features), 1)
self.assertEqual(features[0]['properties']['pk'], p1.pk)
def test_project_bbox_filter(self):
p1 = ProjectFactory.create()
ProjectFactory.create()
ProjectFactory.create()
if settings.TREKKING_TOPOLOGY_ENABLED:
t = TopologyFactory.create()
else:
t = TopologyFactory.create(geom='SRID=2154;POINT (700000 6600000)')
InterventionFactory.create(project=p1, target=t)
def jsonlist(bbox):
url = self.model.get_jsonlist_url() + bbox
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
jsondict = response.json()
return jsondict['aaData']
# Check that projects without interventions are always present
self.assertEqual(len(Project.objects.all()), 3)
self.assertEqual(len(jsonlist('')), 3)
self.assertEqual(len(jsonlist('?bbox=POLYGON((1%202%200%2C1%202%200%2C1%202%200%2C1%202%200%2C1%202%200))')), 2)
# Give a bbox that matches the intervention, and check that all 3 projects are back
bbox = '?bbox=POLYGON((2.9%2046.4%2C%203.1%2046.4%2C%203.1%2046.6%2C%202.9%2046.6%2C%202.9%2046.4))'
self.assertEqual(len(jsonlist(bbox)), 3)
def test_deleted_interventions(self):
project = ProjectFactory.create()
if settings.TREKKING_TOPOLOGY_ENABLED:
intervention = InterventionFactory.create()
else:
intervention = InterventionFactory.create(geom='SRID=2154;POINT (700000 6600000)')
project.interventions.add(intervention)
response = self.client.get(project.get_detail_url())
self.assertEqual(response.status_code, 200)
self.assertContains(response, intervention.name)
intervention.delete()
response = self.client.get(project.get_detail_url())
self.assertEqual(response.status_code, 200)
self.assertNotContains(response, intervention.name)
@skipIf(not settings.TREKKING_TOPOLOGY_ENABLED, 'Test with dynamic segmentation only')
class ExportTest(TranslationResetMixin, TestCase):
def test_shape_mixed(self):
"""
Test that a project made of interventions with different geometries creates multiple files.
Check that each file has a different geometry type (Point/LineString) and that
the project and the interventions are correctly referenced in it.
"""
# Create topology line
line = PathFactory.create(geom=LineString(Point(10, 10), Point(11, 10)))
topo_line = TopologyFactory.create(paths=[line])
closest_path = PathFactory(geom=LineString(Point(0, 0), Point(1, 1), srid=settings.SRID))
topo_point = TopologyFactory.create(paths=[(closest_path, 0.5, 0.5)])
self.assertEqual(topo_point.paths.get(), closest_path)
# Create one intervention by geometry (point/linestring/geometrycollection)
it_point = InterventionFactory.create(target=topo_point)
it_line = InterventionFactory.create(target=topo_line)
course_point_a = Point(0, 0, srid=2154)
course_point_b = Point(5, 5, srid=2154)
course_line = LineString((0, 0), (1, 1), srid=2154)
course_geometry_collection = GeometryCollection(course_point_a, course_point_b, course_line, srid=2154)
course = CourseFactory.create(geom=course_geometry_collection)
it_geometrycollection = InterventionFactory.create(target=course)
# reload
it_point = type(it_point).objects.get(pk=it_point.pk)
it_line = type(it_line).objects.get(pk=it_line.pk)
proj = ProjectFactory.create()
proj.interventions.add(it_point)
proj.interventions.add(it_line)
proj.interventions.add(it_geometrycollection)
# instantiate the class-based view 'abnormally' to use create_shape directly,
# avoiding an http request, authentication and reading from a zip
pfl = ZipShapeSerializer()
devnull = open(os.devnull, "wb")
pfl.serialize(Project.objects.all(), stream=devnull, delete=False,
fields=ProjectFormatList().columns)
shapefiles = pfl.path_directory
shapefiles = [shapefile for shapefile in os.listdir(shapefiles) if shapefile[-3:] == "shp"]
layers = {
s: gdal.DataSource(os.path.join(pfl.path_directory, s))[0] for s in shapefiles
}
self.assertEqual(len(layers), 2)
geom_type_layer = {layer.name: layer for layer in layers.values()}
geom_types = geom_type_layer.keys()
self.assertIn('MultiPoint', geom_types)
self.assertIn('MultiLineString', geom_types)
for layer in layers.values():
self.assertEqual(layer.srs.name, 'RGF93_Lambert_93')
self.assertCountEqual(layer.fields, [
'id', 'name', 'period', 'type', 'domain', 'constraint',
'global_cos', 'interventi', 'comments',
'contractor', 'project_ow', 'project_ma', 'founders',
'related_st', 'insertion_', 'update_dat',
'cities', 'districts', 'restricted'
])
self.assertEqual(len(layer), 1)
for feature in geom_type_layer['MultiPoint']:
self.assertEqual(str(feature['id']), str(proj.pk))
self.assertEqual(len(feature.geom.geos), 3)
geoms = {geos.wkt for geos in feature.geom.geos}
self.assertSetEqual(geoms, {it_point.geom.wkt, course_point_a.wkt, course_point_b.wkt})
for feature in geom_type_layer['MultiLineString']:
self.assertEqual(str(feature['id']), str(proj.pk))
self.assertEqual(len(feature.geom.geos), 2)
geoms = {geos.wkt for geos in feature.geom.geos}
self.assertSetEqual(geoms, {it_line.geom.wkt, course_line.wkt})
@override_settings(ENABLE_JOBS_COSTS_DETAILED_EXPORT=True)
class TestDetailedJobCostsExports(TestCase):
@classmethod
def setUpTestData(cls):
cls.user = SuperUserFactory.create()
cls.job1 = InterventionJobFactory(job="Worker", cost=12)
cls.job2 = InterventionJobFactory(job="Streamer", cost=60)
cls.job1_column_name = "Cost Worker"
cls.job2_column_name = "Cost Streamer"
cls.interv = InterventionFactory()
cls.manday1 = ManDayFactory(nb_days=3, job=cls.job1, intervention=cls.interv)
cls.manday2 = ManDayFactory(nb_days=2, job=cls.job2, intervention=cls.interv)
cls.job3 = InterventionJobFactory(job="Banker", cost=5000)
cls.job3_column_name = "Cost Banker"
def setUp(self):
self.client.force_login(self.user)
def test_detailed_mandays_export(self):
'''Test detailed intervention job costs are exported properly, and follow data changes'''
# Assert each job used in intervention has a column in export view
columns = InterventionFormatList().columns
self.assertIn(self.job1_column_name, columns)
self.assertIn(self.job2_column_name, columns)
# Assert no duplicate in column exports
self.assertEqual(len(columns), len(set(columns)))
# Assert job not used in intervention is not exported
self.assertNotIn(self.job3_column_name, columns)
# Assert queryset contains right amount for each cost
qs = InterventionFormatList().get_queryset()
interv_in_query_set = qs.get(id=self.interv.id)
cost1_in_query_set = getattr(interv_in_query_set, self.job1_column_name)
self.assertEqual(cost1_in_query_set, self.job1.cost * self.manday1.nb_days)
cost2_in_query_set = getattr(interv_in_query_set, self.job2_column_name)
self.assertEqual(cost2_in_query_set, self.job2.cost * self.manday2.nb_days)
# Assert cost is calculated properly when we add and remove mandays on the same job
# Add manday and refresh
manday1bis = ManDayFactory(nb_days=1, job=self.job1, intervention=self.interv)
qs = InterventionFormatList().get_queryset()
interv_in_query_set = qs.get(id=self.interv.id)
cost1_in_query_set = getattr(interv_in_query_set, self.job1_column_name)
self.assertEqual(cost1_in_query_set, self.job1.cost * (self.manday1.nb_days + manday1bis.nb_days))
# Remove manday and refresh
manday1bis.delete()
qs = InterventionFormatList().get_queryset()
interv_in_query_set = qs.get(id=self.interv.id)
cost1_in_query_set = getattr(interv_in_query_set, self.job1_column_name)
self.assertEqual(cost1_in_query_set, self.job1.cost * self.manday1.nb_days)
# Assert deleted manday does not create an entry
self.manday1.delete()
columns = InterventionFormatList().columns
self.assertNotIn(self.job1_column_name, columns)
# Test column translations don't mess it up
activate('fr')
columns = InterventionFormatList().columns
self.assertIn(f"Coût {self.job2}", columns)
qs = InterventionFormatList().get_queryset()
interv_in_query_set = qs.get(id=self.interv.id)
cost2_in_query_set = getattr(interv_in_query_set, f"Coût {self.job2}")
self.assertEqual(cost2_in_query_set, self.job2.cost * self.manday2.nb_days)
deactivate_all()
def test_csv_detailed_cost_content(self):
'''Test CSV job costs exports contain accurate total price'''
response = self.client.get('/intervention/list/export/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.get('Content-Type'), 'text/csv')
# Assert right costs in CSV
reader = csv.DictReader(StringIO(response.content.decode("utf-8")), delimiter=',')
for row in reader:
self.assertEqual(Decimal(row[self.job1_column_name]), self.job1.cost * self.manday1.nb_days)
self.assertEqual(Decimal(row[self.job2_column_name]), self.job2.cost * self.manday2.nb_days)
def test_shp_detailed_cost_content(self):
'''Test SHP job costs exports contain accurate total price'''
signage = SignageFactory.create()
InterventionFactory.create(target=signage)
i_course = InterventionFactory.create(target=CourseFactory.create())
ManDayFactory.create(intervention=i_course, nb_days=2)
response = self.client.get('/intervention/list/export/?format=shp')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.get('Content-Type'), 'application/zip')
# Assert right costs in SHP
with ZipFile(BytesIO(response.content)) as mzip:
temp_directory = TemporaryDirectory()
mzip.extractall(path=temp_directory.name)
shapefiles = [shapefile for shapefile in os.listdir(temp_directory.name) if shapefile[-3:] == "shp"]
layers = {
s: gdal.DataSource(os.path.join(temp_directory.name, s))[0] for s in shapefiles
}
l_linestring = layers['LineString.shp']
l_point = layers['Point.shp']
feature_linestring = l_linestring[0]
feature_point = l_point[0]
self.assertEqual(Decimal(str(feature_linestring['cost_worke'])), self.job1.cost * self.manday1.nb_days)
self.assertEqual(Decimal(str(feature_linestring['cost_strea'])), self.job2.cost * self.manday2.nb_days)
self.assertIsNone(feature_point.get('cost_worke'))
self.assertIsNone(feature_point.get('cost_strea'))
@override_settings(ENABLE_JOBS_COSTS_DETAILED_EXPORT=True)
class TestInterventionTargetExports(TestCase):
@classmethod
def setUpTestData(cls):
cls.user = SuperUserFactory.create()
cls.path = PathFactory(name="mypath")
cls.interv = InterventionFactory(target=cls.path)
def setUp(self):
self.client.force_login(self.user)
def test_csv_target_content(self):
response = self.client.get('/intervention/list/export/', params={'format': 'csv'})
self.assertEqual(response.status_code, 200)
self.assertEqual(response.get('Content-Type'), 'text/csv')
# Assert right format in CSV
reader = csv.DictReader(StringIO(response.content.decode("utf-8")), delimiter=',')
for row in reader:
self.assertEqual(row["On"], f"Path: {self.path.name} ({self.path.pk})")
|
the-stack_0_15599 | # copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import yaml
import os.path as osp
import numpy as np
from sklearn.metrics import confusion_matrix, roc_curve, auc
class Evaluator(object):
def __init__(self, model_path, topk=5):
with open(osp.join(model_path, "model.yml")) as f:
model_info = yaml.load(f.read(), Loader=yaml.Loader)
with open(osp.join(model_path, 'eval_details.json'), 'r') as f:
eval_details = json.load(f)
self.topk = topk
self.labels = model_info['_Attributes']['labels']
self.true_labels = np.array(eval_details['true_labels'])
self.pred_scores = np.array(eval_details['pred_scores'])
label_ids_list = list(range(len(self.labels)))
self.no_appear_label_ids = set(label_ids_list) - set(
self.true_labels.tolist())
def cal_confusion_matrix(self):
'''Compute the confusion matrix.'''
pred_labels = np.argsort(self.pred_scores)[:, -1:].flatten()
cm = confusion_matrix(
self.true_labels.tolist(),
pred_labels.tolist(),
labels=list(range(len(self.labels))))
return cm
def cal_precision_recall_F1(self):
'''Compute precision, recall and F1.'''
out = {}
out_avg = {}
out_avg['precision'] = 0.0
out_avg['recall'] = 0.0
out_avg['F1'] = 0.0
pred_labels = np.argsort(self.pred_scores)[:, -1:].flatten()
for label_id in range(len(self.labels)):
out[self.labels[label_id]] = {}
if label_id in self.no_appear_label_ids:
out[self.labels[label_id]]['precision'] = -1.0
out[self.labels[label_id]]['recall'] = -1.0
out[self.labels[label_id]]['F1'] = -1.0
continue
pred_index = np.where(pred_labels == label_id)[0].tolist()
tp = np.sum(
self.true_labels[pred_index] == pred_labels[pred_index])
tp_fp = len(pred_index)
tp_fn = len(np.where(self.true_labels == label_id)[0].tolist())
out[self.labels[label_id]]['precision'] = tp * 1.0 / tp_fp
out[self.labels[label_id]]['recall'] = tp * 1.0 / tp_fn
out[self.labels[label_id]]['F1'] = 2 * tp * 1.0 / (tp_fp + tp_fn)
ratio = tp_fn * 1.0 / self.true_labels.shape[0]
out_avg['precision'] += out[self.labels[label_id]][
'precision'] * ratio
out_avg['recall'] += out[self.labels[label_id]]['recall'] * ratio
out_avg['F1'] += out[self.labels[label_id]]['F1'] * ratio
return out, out_avg
def cal_auc(self):
'''Compute AUC.'''
out = {}
for label_id in range(len(self.labels)):
part_pred_scores = self.pred_scores[:, label_id:label_id + 1]
part_pred_scores = part_pred_scores.flatten()
fpr, tpr, thresholds = roc_curve(
self.true_labels, part_pred_scores, pos_label=label_id)
label_auc = auc(fpr, tpr)
if label_id in self.no_appear_label_ids:
out[self.labels[label_id]] = -1.0
continue
out[self.labels[label_id]] = label_auc
return out
def cal_accuracy(self):
'''Compute accuracy (top-1 and top-k).'''
out = {}
k = min(self.topk, len(self.labels))
pred_top1_label = np.argsort(self.pred_scores)[:, -1]
pred_topk_label = np.argsort(self.pred_scores)[:, -k:]
acc1 = sum(pred_top1_label == self.true_labels) / len(self.true_labels)
acck = sum([
np.isin(x, y) for x, y in zip(self.true_labels, pred_topk_label)
]) / len(self.true_labels)
out['acc1'] = acc1
out['acck'] = acck
out['k'] = k
return out
def generate_report(self):
'''Generate the evaluation report.'''
report = dict()
report['Confusion_Matrix'] = self.cal_confusion_matrix().tolist()
report['PRF1_average'] = {}
report['PRF1'], report['PRF1_average'][
'over_all'] = self.cal_precision_recall_F1()
auc = self.cal_auc()
for k, v in auc.items():
report['PRF1'][k]['auc'] = v
acc = self.cal_accuracy()
report["Acc1"] = acc["acc1"]
report["Acck"] = acc["acck"]
report["topk"] = acc["k"]
report['label_list'] = self.labels
return report
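
# --- Usage sketch (added for illustration; not part of the original module) ---
# 'output/classifier' below is a hypothetical export directory: it is only
# assumed to contain the model.yml and eval_details.json files read in
# __init__. The report keys come from generate_report() above.
if __name__ == '__main__':
    evaluator = Evaluator('output/classifier', topk=5)
    report = evaluator.generate_report()
    print('Top-1 accuracy: {:.4f}'.format(report['Acc1']))
    print('Top-{} accuracy: {:.4f}'.format(report['topk'], report['Acck']))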
|
the-stack_0_15601 | dsg = dict(
zip(
[ord(c) for c in "\x60\x61\x66\x67\x6a\x6b\x6c\x6d\x6e\x6f\x70\x71\x72\x73\x74\x75\x76\x77\x78\x7b\x7e"],
u"\u25c6\u2592\u25cb\u00b1\u2518\u2510\u250c\u2514\u253c\u2500\u2500\u2500\u2500\u2500\u251c\u2524\u2534\u252c\u2502\u03c0\xb7"
)
)
text = {
0: "reset",
24: "underline-off",
25: "blink-off",
27: "reverse-off",
1: "bold" ,
2: "dim" ,
4: "underline",
5: "blink",
7: "reverse",
}
colors = {
"foreground": {
39: "default",
# This is technically "default with underscore", but I don't understand
# the utility of mixing the text styling with the colors. Instead I'm
# going to just leave it as "default" until I see something buggy or
# someone complains.
38: "default",
30: "black",
31: "red",
32: "green",
33: "brown",
34: "blue",
35: "magenta",
36: "cyan",
37: "white",
},
"background": {
49: "default",
40: "black",
41: "red",
42: "green",
43: "brown",
44: "blue",
45: "magenta",
46: "cyan",
47: "white",
}
}
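
# --- Usage sketch (added for illustration) ---
# decode_sgr is a hypothetical helper, not part of the original tables: it
# shows how the `text` and `colors` mappings above could be combined to turn
# a numeric SGR attribute into a human-readable description.
def decode_sgr(code):
    # Text style attributes (bold, underline, reset, ...)
    if code in text:
        return text[code]
    # Foreground/background color attributes
    for plane, table in colors.items():
        if code in table:
            return "%s %s" % (plane, table[code])
    return None

# Examples: decode_sgr(31) -> "foreground red", decode_sgr(1) -> "bold"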
|
the-stack_0_15602 | from __future__ import print_function
import os, shutil
"""
A little module to wrap the params enum for use in Cython code
Ian Bell, May 2014
"""
def params_constants(enum_key):
fName = os.path.join('..', '..', 'include', 'DataStructures.h')
contents = open(fName, 'r').read()
left = contents.find('{', contents.find('enum ' + enum_key));
right = contents.find('}', left)
entries = contents[left + 1:right]
if entries.find('/*') > -1: raise ValueError('/* */ style comments are not allowed, replace them with // style comments')
if not entries: raise ValueError('Unable to find ' + enum_key)
lines = entries.split('\n')
lines = [line for line in lines if not line.strip().startswith('//')]
for i, line in enumerate(lines):
if line.find('/'):
lines[i] = line.split('/')[0]
if '=' in lines[i]:
lines[i] = lines[i].split('=')[0].strip() + ','
# Chomp all the whitespace, split at commas
keys = ''.join(lines).replace(' ', '').split(',')
keys = [k for k in keys if k]
return keys
def config_constants():
fName = os.path.join('..', '..', 'include', 'Configuration.h')
contents = open(fName, 'r').readlines()
matching_lines = [i for i, line in enumerate(contents) if "#define CONFIGURATION_KEYS_ENUM" in line]
assert(len(matching_lines) == 1)
iline = matching_lines[0] + 1
keys = []
while iline < 1000 and contents[iline].strip().startswith('X('):
line = contents[iline].strip()[2::]
key = line.split(',')[0]
keys.append(key)
iline += 1
return ('configuration_keys', keys)
def generate_cython(data, config_data):
print('****** Writing the constants module ******')
# Write the PXD definition file
pxd_output_file = open('CoolProp/constants_header.pxd', 'w')
pxd_output_file.write('# This file is automatically generated by the generate_constants_module.py script in wrappers/Python.\n# DO NOT MODIFY THE CONTENTS OF THIS FILE!\n\ncdef extern from "DataStructures.h" namespace "CoolProp":\n')
for enum_key, entries in data:
pxd_output_file.write('\tctypedef enum ' + enum_key + ':\n')
for param in entries:
param = param.strip()
pxd_output_file.write('\t\t' + param + '\n')
pxd_output_file.write('\n\ncdef extern from "Configuration.h":\n')
enum_key, entries = config_data
pxd_output_file.write('\tctypedef enum ' + enum_key + ':\n')
for param in entries:
param = param.strip()
pxd_output_file.write('\t\t' + param + '\n')
pxd_output_file.close()
# Write the PYX implementation file
pyx_output_file = open('CoolProp/_constants.pyx', 'w')
pyx_output_file.write('# This file is automatically generated by the generate_constants_module.py script in wrappers/Python.\n')
pyx_output_file.write('# DO NOT MODIFY THE CONTENTS OF THIS FILE!\n')
pyx_output_file.write('cimport constants_header\n\n')
for enum_key, entries in data:
for param in entries:
param = param.strip()
pyx_output_file.write(param + ' = ' + 'constants_header.' + param + '\n')
enum_key, entries = config_data
for param in entries:
param = param.strip()
pyx_output_file.write(param + ' = ' + 'constants_header.' + param + '\n')
pyx_output_file.close()
# Write the PY implementation file
py_output_file = open('CoolProp/constants.py', 'w')
py_output_file.write('# This file is automatically generated by the generate_constants_module.py script in wrappers/Python.\n# DO NOT MODIFY THE CONTENTS OF THIS FILE!\nfrom __future__ import absolute_import\n\nfrom . import _constants\n\n')
for enum_key, entries in data:
for param in entries:
param = param.strip()
py_output_file.write(param + ' = ' + '_constants.' + param + '\n')
enum_key, entries = config_data
for param in entries:
param = param.strip()
py_output_file.write(param + ' = ' + '_constants.' + param + '\n')
py_output_file.close()
def generate():
data = [(enum, params_constants(enum)) for enum in ['parameters', 'input_pairs', 'fluid_types', 'phases']]
generate_cython(data, config_constants())
if __name__ == '__main__':
generate()
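
# --- Illustrative output (assumption; actual names depend on DataStructures.h) ---
# For a `parameters` enum that contains, say, iT and iP, the generated
# CoolProp/constants.py would end up with lines of the form:
#
#     iT = _constants.iT
#     iP = _constants.iP
#
# mirroring the `param + ' = ' + '_constants.' + param` writes above.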
|
the-stack_0_15603 | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""ReLU op"""
from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType
relu_op_info = TBERegOp("ReLU") \
.fusion_type("ELEMWISE") \
.async_flag(False) \
.binfile_name("relu.so") \
.compute_cost(10) \
.kernel_name("relu") \
.partial_flag(True) \
.input(0, "x", False, "required", "all") \
.output(0, "y", False, "required", "all") \
.op_pattern("formatAgnostic") \
.dtype_format(DataType.I8_None, DataType.I8_None) \
.dtype_format(DataType.I32_None, DataType.I32_None) \
.dtype_format(DataType.F16_None, DataType.F16_None) \
.dtype_format(DataType.F32_None, DataType.F32_None) \
.get_op_info()
@op_info_register(relu_op_info)
def _relu_tbe():
"""Relu TBE register"""
return
|
the-stack_0_15604 | from OpenGL.GL import *
from OpenGL.GLUT import *
from OpenGL.GLU import *
import sys
import random
from mega import MutableNamedTuple
from line import *
from gl_skel import *
# TODO layers, lighting, rects blinks, symbols
def draw_line(line):
glLoadIdentity()
glTranslatef(0.1, 0.1, -2.0)
glLineWidth(8)
state = line[0]
glColor3f(state.cr, state.cg, state.cb)
glBegin(GL_LINE_STRIP)
start = len(line) - state.display
if start < 1 or not state.endable:
start = 1
for x, y in line[start:-1]:
glVertex2f(x, y)
glColor3f(1.0, 1.0, 1.0)
x, y = line[-1]
glVertex2f(x, y)
glEnd()
data = MutableNamedTuple()
data.lines = list()
data.lines.append(create_line(w=100,h=100, g=1, b=0))
data.lines.append(create_line(w=100,h=100, x=100))
data.lines.append(create_line(w=100,h=100, y=100, r=1, b=0))
data.lines.append(create_line(w=100,h=100, y=100, x=100, r=0.4, b=0.4, g=0.4))
data.rotx = 0
data.roty = 0
data.rotz = 0
def step2(d):
data.rotx += 10
data.roty += 10
data.rotz += 10
for l in data.lines:
gen_line(l, 5)
glutTimerFunc(1, step2, 1)
glutPostRedisplay()
def glLine(x, y, e, d):
glBegin(GL_LINE_STRIP)
glVertex2f(x, y)
glVertex2f(e, d)
glEnd()
def draw_axis():
glPushMatrix()
#glLoadIdentity()
glColor3f(1, 0, 0)
glLine(-1000, 0, 1000, 0)
glColor3f(0, 0, 1)
glLine(0, -1000, 0, 1000)
glColor3f(0, 1, 0)
glBegin(GL_LINES)
glVertex3f(0, 0, -1000)
glVertex3f(0, 0, 1000)
glEnd()
glPopMatrix()
def draw_grid():
glColor3f(0.5, 0.5, 0.5)
glBegin(GL_LINES)
for x in range(20):
for y in range(20):
glVertex2f(x * 5, y * 5)
glEnd()
def DrawGLScene():
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
glLoadIdentity()
glShadeModel(GL_SMOOTH)
glTranslatef(10, 10, -10)
glRotatef(50, 1, 1, 0)
#glRotatef(data.rotx, 1, 0, 0)
#glRotatef(data.roty, 0, 1, 0)
#glRotatef(data.rotz, 0, 0, 1)
draw_axis()
draw_grid()
for l in data.lines:
draw_line(l)
glutSwapBuffers()
def keyPressed(*args):
print(args, ESCAPE)
if args[0] == ESCAPE:
sys.exit()
def main():
init_window()
glutDisplayFunc(DrawGLScene)
glutIdleFunc(DrawGLScene)
glutKeyboardFunc(keyPressed)
glutTimerFunc(3, step2, 1)
glutReshapeFunc(ReSizeGLScene)
InitGL(640, 480)
glutMainLoop()
if __name__=='__main__':
main()
|
the-stack_0_15605 | #!/usr/bin/env python
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example illustrates how to get campaign criteria, or negative keywords.
"""
import argparse
import sys
from google.ads.googleads.client import GoogleAdsClient
from google.ads.googleads.errors import GoogleAdsException
def main(client, customer_id, campaign_id):
ga_service = client.get_service("GoogleAdsService")
query = f"""
SELECT
campaign.id,
campaign_criterion.campaign,
campaign_criterion.criterion_id,
campaign_criterion.negative,
campaign_criterion.type,
campaign_criterion.keyword.text,
campaign_criterion.keyword.match_type
FROM campaign_criterion
WHERE campaign.id = {campaign_id}"""
search_request = client.get_type("SearchGoogleAdsStreamRequest")
search_request.customer_id = customer_id
search_request.query = query
response = ga_service.search_stream(request=search_request)
for batch in response:
for row in batch.results:
criterion = row.campaign_criterion
print(
f'Campaign criterion with ID "{criterion.criterion_id}" '
"was retrieved:"
)
if criterion.type_.name == "KEYWORD":
print(
f'\t{" " if criterion.negative else "Negative "} '
f'Keyword with text "{criterion.keyword.text}" and '
f"match type {criterion.keyword.match_type}."
)
else:
print(f"Not a keyword: {criterion.type_.name}")
if __name__ == "__main__":
# GoogleAdsClient will read the google-ads.yaml configuration file in the
# home directory if none is specified.
googleads_client = GoogleAdsClient.load_from_storage(version="v6")
parser = argparse.ArgumentParser(
description=(
"List campaign criteria, or negative keywords, for a "
"given campaign."
)
)
# The following argument(s) should be provided to run the example.
parser.add_argument(
"-c",
"--customer_id",
type=str,
required=True,
help="The Google Ads customer ID.",
)
parser.add_argument(
"-i", "--campaign_id", type=str, required=True, help="The campaign ID."
)
args = parser.parse_args()
try:
main(googleads_client, args.customer_id, args.campaign_id)
except GoogleAdsException as ex:
print(
f'Request with ID "{ex.request_id}" failed with status '
f'"{ex.error.code().name}" and includes the following errors:'
)
for error in ex.failure.errors:
print(f' Error with message "{error.message}".')
if error.location:
for field_path_element in error.location.field_path_elements:
print(f"\t\tOn field: {field_path_element.field_name}")
sys.exit(1)
|
the-stack_0_15607 | # -*- coding: utf-8 -*-
"""
Created on Thu Oct 11 00:06:05 2018
__Kaggle DIGIT RECOGNATION BY XGBOOST___________
@author: MD SAIF UDDIN
"""
import pandas as pd
train = pd.read_csv("train.csv").to_numpy()  # .as_matrix() was removed in recent pandas
X = train[:, 1:]
Y = train[:, 0]
test = pd.read_csv("test.csv").to_numpy()
from xgboost import XGBClassifier
forest = XGBClassifier()
forest = forest.fit(X, Y)
print(forest.score(X, Y))
from sklearn.model_selection import cross_val_score
scores = cross_val_score(forest, X, Y, scoring = 'accuracy', cv = 10)
print(scores)
print(scores.mean())
prid_forest = forest.predict(test)
import numpy as np
sam = pd.read_csv("sample_submission.csv")
def write_prediction(prediction, name):
ImageId = np.array(sam['ImageId']).astype(int)
solution = pd.DataFrame(prediction, ImageId, columns = ['Label'])
solution.to_csv(name, index_label = ['ImageId'])
write_prediction(prid_forest, "samdigit_xgboost.csv") |
the-stack_0_15608 | import os
import subprocess
from nmigen.build import *
from nmigen.vendor.lattice_ecp5 import *
from .resources import *
__all__ = ["VersaECP5Platform"]
class VersaECP5Platform(LatticeECP5Platform):
device = "LFE5UM-45F"
package = "BG381"
speed = "8"
default_clk = "clk100"
default_rst = "rst"
resources = [
Resource("rst", 0, PinsN("T1", dir="i"), Attrs(IO_TYPE="LVCMOS33")),
Resource("clk100", 0, DiffPairs("P3", "P4", dir="i"),
Clock(100e6), Attrs(IO_TYPE="LVDS")),
Resource("pclk", 0, DiffPairs("A4", "A5", dir="i"),
Attrs(IO_TYPE="LVDS")),
*LEDResources(pins="E16 D17 D18 E18 F17 F18 E17 F16",
attrs=Attrs(IO_TYPE="LVCMOS25")),
Resource("alnum_led", 0,
Subsignal("a", PinsN("M20", dir="o")),
Subsignal("b", PinsN("L18", dir="o")),
Subsignal("c", PinsN("M19", dir="o")),
Subsignal("d", PinsN("L16", dir="o")),
Subsignal("e", PinsN("L17", dir="o")),
Subsignal("f", PinsN("M18", dir="o")),
Subsignal("g", PinsN("N16", dir="o")),
Subsignal("h", PinsN("M17", dir="o")),
Subsignal("j", PinsN("N18", dir="o")),
Subsignal("k", PinsN("P17", dir="o")),
Subsignal("l", PinsN("N17", dir="o")),
Subsignal("m", PinsN("P16", dir="o")),
Subsignal("n", PinsN("R16", dir="o")),
Subsignal("p", PinsN("R17", dir="o")),
Subsignal("dp", PinsN("U1", dir="o")),
Attrs(IO_TYPE="LVCMOS25")
),
*SwitchResources(pins={0: "H2", 1: "K3", 2: "G3", 3: "F2" },
attrs=Attrs(IO_TYPE="LVCMOS15")),
*SwitchResources(pins={4: "J18", 5: "K18", 6: "K19", 7: "K20"},
attrs=Attrs(IO_TYPE="LVCMOS25")),
UARTResource(0,
rx="C11", tx="A11",
attrs=Attrs(IO_TYPE="LVCMOS33", PULLMODE="UP")
),
*SPIFlashResources(0,
cs_n="R2", clk="U3", cipo="W2", copi="V2", wp_n="Y2", hold_n="W1",
attrs=Attrs(IO_TYPE="LVCMOS33")
),
Resource("eth_clk125", 0, Pins("L19", dir="i"),
Clock(125e6), Attrs(IO_TYPE="LVCMOS25")),
Resource("eth_clk125_pll", 0, Pins("U16", dir="i"),
Clock(125e6), Attrs(IO_TYPE="LVCMOS25")), # NC by default
Resource("eth_rgmii", 0,
Subsignal("rst", PinsN("U17", dir="o")),
Subsignal("mdc", Pins("T18", dir="o")),
Subsignal("mdio", Pins("U18", dir="io")),
Subsignal("tx_clk", Pins("P19", dir="o")),
Subsignal("tx_ctl", Pins("R20", dir="o")),
Subsignal("tx_data", Pins("N19 N20 P18 P20", dir="o")),
Subsignal("rx_clk", Pins("L20", dir="i")),
Subsignal("rx_ctl", Pins("U19", dir="i")),
Subsignal("rx_data", Pins("T20 U20 T19 R18", dir="i")),
Attrs(IO_TYPE="LVCMOS25")
),
Resource("eth_sgmii", 0,
Subsignal("rst", PinsN("U17", dir="o"), Attrs(IO_TYPE="LVCMOS25")),
Subsignal("mdc", Pins("T18", dir="o"), Attrs(IO_TYPE="LVCMOS25")),
Subsignal("mdio", Pins("U18", dir="io"), Attrs(IO_TYPE="LVCMOS25")),
Subsignal("tx", DiffPairs("W13", "W14", dir="o")),
Subsignal("rx", DiffPairs("Y14", "Y15", dir="i")),
),
Resource("eth_clk125", 1, Pins("J20", dir="i"),
Clock(125e6), Attrs(IO_TYPE="LVCMOS25")),
Resource("eth_clk125_pll", 1, Pins("C18", dir="i"),
Clock(125e6), Attrs(IO_TYPE="LVCMOS25")), # NC by default
Resource("eth_rgmii", 1,
Subsignal("rst", PinsN("F20", dir="o")),
Subsignal("mdc", Pins("G19", dir="o")),
Subsignal("mdio", Pins("H20", dir="io")),
Subsignal("tx_clk", Pins("C20", dir="o")),
Subsignal("tx_ctrl", Pins("E19", dir="o")),
Subsignal("tx_data", Pins("J17 J16 D19 D20", dir="o")),
Subsignal("rx_clk", Pins("J19", dir="i")),
Subsignal("rx_ctrl", Pins("F19", dir="i")),
Subsignal("rx_data", Pins("G18 G16 H18 H17", dir="i")),
Attrs(IO_TYPE="LVCMOS25")
),
Resource("eth_sgmii", 1,
Subsignal("rst", PinsN("F20", dir="o"), Attrs(IO_TYPE="LVCMOS25")),
Subsignal("mdc", Pins("G19", dir="o"), Attrs(IO_TYPE="LVCMOS25")),
Subsignal("mdio", Pins("H20", dir="io"), Attrs(IO_TYPE="LVCMOS25")),
Subsignal("tx", DiffPairs("W17", "W18", dir="o")),
Subsignal("rx", DiffPairs("Y16", "Y17", dir="i")),
),
Resource("ddr3", 0,
Subsignal("rst", PinsN("N4", dir="o")),
Subsignal("clk", DiffPairs("M4", "N5", dir="o"), Attrs(IO_TYPE="SSTL135D_I")),
Subsignal("clk_en", Pins("N2", dir="o")),
Subsignal("cs", PinsN("K1", dir="o")),
Subsignal("we", PinsN("M1", dir="o")),
Subsignal("ras", PinsN("P1", dir="o")),
Subsignal("cas", PinsN("L1", dir="o")),
Subsignal("a", Pins("P2 C4 E5 F5 B3 F4 B5 E4 C5 E3 D5 B4 C3", dir="o")),
Subsignal("ba", Pins("P5 N3 M3", dir="o")),
Subsignal("dqs", DiffPairs("K2 H4", "J1 G5", dir="io"), Attrs(IO_TYPE="SSTL135D_I", DIFFRESISTOR="100", TERMINATION="OFF")),
Subsignal("dq", Pins("L5 F1 K4 G1 L4 H1 G2 J3 D1 C1 E2 C2 F3 A2 E1 B1",
dir="io")), Attrs(TERMINATION="75"),
Subsignal("dm", Pins("J4 H5", dir="o")),
Subsignal("odt", Pins("L2", dir="o")),
Attrs(IO_TYPE="SSTL135_I", SLEWRATE="FAST")
)
]
connectors = [
Connector("expcon", 1, """
- - - B19 B12 B9 E6 D6 E7 D7 B11 B6 E9 D9 B8 C8 D8 E8 C7 C6
- - - - - - - - - - - - - - - - - - - -
"""), # X3
Connector("expcon", 2, """
A8 - A12 A13 B13 C13 D13 E13 A14 C14 D14 E14 D11 C10 A9 B10 D12 E12 - -
B15 - C15 - D15 - E15 A16 B16 - C16 D16 B17 - C17 A17 B18 A7 A18 -
"""), # X4
]
@property
def file_templates(self):
return {
**super().file_templates,
"{{name}}-openocd.cfg": r"""
interface ftdi
{# FTDI descriptors is identical between non-5G and 5G recent Versa boards #}
ftdi_device_desc "Lattice ECP5_5G VERSA Board"
ftdi_vid_pid 0x0403 0x6010
ftdi_channel 0
ftdi_layout_init 0xfff8 0xfffb
reset_config none
adapter_khz 25000
# ispCLOCK device (unusable with openocd and must be bypassed)
#jtag newtap ispclock tap -irlen 8 -expected-id 0x00191043
# ECP5 device
{% if "5G" in platform.device -%}
jtag newtap ecp5 tap -irlen 8 -expected-id 0x81112043 ; # LFE5UM5G-45F
{% else -%}
jtag newtap ecp5 tap -irlen 8 -expected-id 0x01112043 ; # LFE5UM-45F
{% endif %}
"""
}
def toolchain_program(self, products, name):
openocd = os.environ.get("OPENOCD", "openocd")
with products.extract("{}-openocd.cfg".format(name), "{}.svf".format(name)) \
as (config_filename, vector_filename):
subprocess.check_call([openocd,
"-f", config_filename,
"-c", "transport select jtag; init; svf -quiet {}; exit".format(vector_filename)
])
if __name__ == "__main__":
from .test.blinky import *
VersaECP5Platform().build(Blinky(), do_program=True)
|
the-stack_0_15611 | # Copyright 2020 QuantumBlack Visual Analytics Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND
# NONINFRINGEMENT. IN NO EVENT WILL THE LICENSOR OR OTHER CONTRIBUTORS
# BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF, OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# The QuantumBlack Visual Analytics Limited ("QuantumBlack") name and logo
# (either separately or in combination, "QuantumBlack Trademarks") are
# trademarks of QuantumBlack. The License does not grant you any right or
# license to the QuantumBlack Trademarks. You may not use the QuantumBlack
# Trademarks or any confusingly similar mark as a trademark for your product,
# or use the QuantumBlack Trademarks in any other manner that might cause
# confusion in the marketplace, including but not limited to in advertising,
# on websites, or on software.
#
# See the License for the specific language governing permissions and
# limitations under the License.
"""``AbstractRunner`` is the base class for all ``Pipeline`` runner
implementations.
"""
import logging
from abc import ABC, abstractmethod
from concurrent.futures import ALL_COMPLETED, ThreadPoolExecutor, as_completed, wait
from typing import Any, Dict, Iterable
from kedro.framework.hooks import get_hook_manager
from kedro.io import AbstractDataSet, DataCatalog
from kedro.pipeline import Pipeline
from kedro.pipeline.node import Node
class AbstractRunner(ABC):
"""``AbstractRunner`` is the base class for all ``Pipeline`` runner
implementations.
"""
def __init__(self, is_async: bool = False):
"""Instantiates the runner classs.
Args:
is_async: If True, the node inputs and outputs are loaded and saved
asynchronously with threads. Defaults to False.
"""
self._is_async = is_async
@property
def _logger(self):
return logging.getLogger(self.__module__)
def run(
self, pipeline: Pipeline, catalog: DataCatalog, run_id: str = None
) -> Dict[str, Any]:
"""Run the ``Pipeline`` using the ``DataSet``s provided by ``catalog``
and save results back to the same objects.
Args:
pipeline: The ``Pipeline`` to run.
catalog: The ``DataCatalog`` from which to fetch data.
run_id: The id of the run.
Raises:
ValueError: Raised when ``Pipeline`` inputs cannot be satisfied.
Returns:
Any node outputs that cannot be processed by the ``DataCatalog``.
These are returned in a dictionary, where the keys are defined
by the node outputs.
"""
catalog = catalog.shallow_copy()
unsatisfied = pipeline.inputs() - set(catalog.list())
if unsatisfied:
raise ValueError(
"Pipeline input(s) {} not found in the "
"DataCatalog".format(unsatisfied)
)
free_outputs = pipeline.outputs() - set(catalog.list())
unregistered_ds = pipeline.data_sets() - set(catalog.list())
for ds_name in unregistered_ds:
catalog.add(ds_name, self.create_default_data_set(ds_name))
if self._is_async:
self._logger.info(
"Asynchronous mode is enabled for loading and saving data"
)
self._run(pipeline, catalog, run_id)
self._logger.info("Pipeline execution completed successfully.")
return {ds_name: catalog.load(ds_name) for ds_name in free_outputs}
def run_only_missing(
self, pipeline: Pipeline, catalog: DataCatalog
) -> Dict[str, Any]:
"""Run only the missing outputs from the ``Pipeline`` using the
``DataSet``s provided by ``catalog`` and save results back to the same
objects.
Args:
pipeline: The ``Pipeline`` to run.
catalog: The ``DataCatalog`` from which to fetch data.
Raises:
ValueError: Raised when ``Pipeline`` inputs cannot be satisfied.
Returns:
Any node outputs that cannot be processed by the ``DataCatalog``.
These are returned in a dictionary, where the keys are defined
by the node outputs.
"""
free_outputs = pipeline.outputs() - set(catalog.list())
missing = {ds for ds in catalog.list() if not catalog.exists(ds)}
to_build = free_outputs | missing
to_rerun = pipeline.only_nodes_with_outputs(*to_build) + pipeline.from_inputs(
*to_build
)
# we also need any memory data sets that feed into that
# including chains of memory data sets
memory_sets = pipeline.data_sets() - set(catalog.list())
output_to_memory = pipeline.only_nodes_with_outputs(*memory_sets)
input_from_memory = to_rerun.inputs() & memory_sets
to_rerun += output_to_memory.to_outputs(*input_from_memory)
return self.run(to_rerun, catalog)
@abstractmethod # pragma: no cover
def _run(
self, pipeline: Pipeline, catalog: DataCatalog, run_id: str = None
) -> None:
"""The abstract interface for running pipelines, assuming that the
inputs have already been checked and normalized by run().
Args:
pipeline: The ``Pipeline`` to run.
catalog: The ``DataCatalog`` from which to fetch data.
run_id: The id of the run.
"""
pass
@abstractmethod # pragma: no cover
def create_default_data_set(self, ds_name: str) -> AbstractDataSet:
"""Factory method for creating the default data set for the runner.
Args:
ds_name: Name of the missing data set
Returns:
An instance of an implementation of AbstractDataSet to be
used for all unregistered data sets.
"""
pass
def _suggest_resume_scenario(
self, pipeline: Pipeline, done_nodes: Iterable[Node]
) -> None:
remaining_nodes = set(pipeline.nodes) - set(done_nodes)
postfix = ""
if done_nodes:
node_names = (n.name for n in remaining_nodes)
resume_p = pipeline.only_nodes(*node_names)
start_p = resume_p.only_nodes_with_inputs(*resume_p.inputs())
start_node_names = (n.name for n in start_p.nodes)
postfix += ' --from-nodes "{}"'.format(",".join(start_node_names))
self._logger.warning(
"There are %d nodes that have not run.\n"
"You can resume the pipeline run by adding the following "
"argument to your previous command:\n%s",
len(remaining_nodes),
postfix,
)
def run_node(
node: Node, catalog: DataCatalog, is_async: bool = False, run_id: str = None
) -> Node:
"""Run a single `Node` with inputs from and outputs to the `catalog`.
Args:
node: The ``Node`` to run.
catalog: A ``DataCatalog`` containing the node's inputs and outputs.
is_async: If True, the node inputs and outputs are loaded and saved
asynchronously with threads. Defaults to False.
run_id: The id of the pipeline run
Returns:
The node argument.
"""
if is_async:
node = _run_node_async(node, catalog, run_id)
else:
node = _run_node_sequential(node, catalog, run_id)
for name in node.confirms:
catalog.confirm(name)
return node
def _run_node_sequential(node: Node, catalog: DataCatalog, run_id: str = None) -> Node:
inputs = {name: catalog.load(name) for name in node.inputs}
hook_manager = get_hook_manager()
is_async = False
hook_manager.hook.before_node_run( # pylint: disable=no-member
node=node, catalog=catalog, inputs=inputs, is_async=is_async, run_id=run_id
)
try:
outputs = node.run(inputs)
except Exception as exc:
hook_manager.hook.on_node_error( # pylint: disable=no-member
error=exc,
node=node,
catalog=catalog,
inputs=inputs,
is_async=is_async,
run_id=run_id,
)
raise exc
hook_manager.hook.after_node_run( # pylint: disable=no-member
node=node,
catalog=catalog,
inputs=inputs,
outputs=outputs,
is_async=is_async,
run_id=run_id,
)
for name, data in outputs.items():
catalog.save(name, data)
return node
def _run_node_async(node: Node, catalog: DataCatalog, run_id: str = None) -> Node:
with ThreadPoolExecutor() as pool:
inputs = {
name: pool.submit(catalog.load, name) for name in node.inputs
} # Python dict is thread-safe
wait(inputs.values(), return_when=ALL_COMPLETED)
inputs = {key: value.result() for key, value in inputs.items()}
hook_manager = get_hook_manager()
is_async = True
hook_manager.hook.before_node_run( # pylint: disable=no-member
node=node, catalog=catalog, inputs=inputs, is_async=is_async, run_id=run_id
)
try:
outputs = node.run(inputs)
except Exception as exc:
hook_manager.hook.on_node_error( # pylint: disable=no-member
error=exc,
node=node,
catalog=catalog,
inputs=inputs,
is_async=is_async,
run_id=run_id,
)
raise exc
hook_manager.hook.after_node_run( # pylint: disable=no-member
node=node,
catalog=catalog,
inputs=inputs,
outputs=outputs,
is_async=is_async,
run_id=run_id,
)
save_futures = set()
for name, data in outputs.items():
save_futures.add(pool.submit(catalog.save, name, data))
for future in as_completed(save_futures):
exception = future.exception()
if exception:
raise exception
return node
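
# --- Usage sketch (added for illustration; not part of the original module) ---
# A minimal way to exercise ``run_node`` directly with an in-memory catalog.
# The node and data set names below are made up for the example, and the exact
# import paths may differ between Kedro versions.
if __name__ == "__main__":
    from kedro.io import DataCatalog, MemoryDataSet
    from kedro.pipeline import node as _node

    double = _node(lambda x: x * 2, inputs="x", outputs="x_doubled", name="double")
    catalog = DataCatalog({"x": MemoryDataSet(21), "x_doubled": MemoryDataSet()})
    run_node(double, catalog)
    assert catalog.load("x_doubled") == 42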
|
the-stack_0_15613 | from tkinter import*
import sqlite3
root = Tk()
root.title('Honeycomb Cakes Customer Information System!')
root.geometry('550x420')
root.iconbitmap(r"C:\Users\Oreoluwa Daramola\Documents\Data Science projects\cake.ico")
root.configure(bg='black')
#Create Data base
con = sqlite3.connect('Customer Information.db')
cursor = con.cursor()
#Create Table
#cursor.execute("""CREATE TABLE Customers (id integer PRIMARY KEY,full_name text,home_adress text,phone text,email text,items_ordered text,amount_paid real,delivery_address text))""")
#Create Submit Function
def submit():
con = sqlite3.connect('Customer Information.db')
cursor = con.cursor()
#INSERT INTO TABLES
cursor.execute("INSERT INTO Customers Values(:id,:full_name,:home_address,:phone,:email,:items_ordered,:amount_paid,:delivery_address)",
{
'id':id.get(),
'full_name':full_name.get(),
'home_address':home_address.get(),
'phone':phone.get(),
'email': email.get(),
'items_ordered':items_ordered.get(),
'amount_paid':amount_paid.get(),
'delivery_address':delivery_address.get()
})
con.commit()
#con.close()
def query():
con = sqlite3.connect('Customer Information.db')
cursor = con.cursor()
#query all customer records
cursor.execute("SELECT * FROM Customers")
records = cursor.fetchall()
print(records)
con.commit()
#con.close()
# Clear text boxes
id.delete(0,END)
full_name.delete(0,END)
home_address.delete(0,END)
phone.delete(0,END)
email.delete(0,END)
items_ordered.delete(0,END)
amount_paid.delete(0,END)
delivery_address.delete(0,END)
id=Entry(root,width=50,font=('Arial',14))
id.grid(row=0,column=1,padx=20,columnspan=2)
full_name = Entry(root,width=50,font=('Arial',14))
full_name.grid(row=1,column=1,columnspan=2)
home_address=Entry(root,width=50,font=('Arial',14))
home_address.grid(row=2,column=1,columnspan=2)
phone=Entry(root,width=50,font=('Arial',14))
phone.grid(row=3,column=1,columnspan=2)
email=Entry(root,width=50,font=('Arial',14))
email.grid(row=4,column=1,columnspan=2)
items_ordered=Entry(root,width=50,font=('Arial',14))
items_ordered.grid(row=5,column=1,columnspan=2)
amount_paid=Entry(root,width=50,font=('Arial',14))
amount_paid.grid(row=6,column=1,columnspan=2)
delivery_address=Entry(root,width=50,font=('Arial',14))
delivery_address.grid(row=7,column=1,columnspan=2)
#Create Text Box labels
id_label =Label(root,text="ID")
id_label.grid(row=0,column=0,padx=10,pady=10)
full_name_label =Label(root,text="Full Name")
full_name_label.grid(row=1,column=0,padx=10,pady=10)
address_label =Label(root,text="Address")
address_label.grid(row=2,column=0,padx=10,pady=10)
phone_label =Label(root,text="Phone")
phone_label.grid(row=3,column=0,padx=10,pady=10)
email_label =Label(root,text="Email")
email_label.grid(row=4,column=0,padx=10,pady=10)
items_ordered_label =Label(root,text="Items Ordered")
items_ordered_label.grid(row=5,column=0,padx=10,pady=10)
amount_paid_label =Label(root,text="Amount Paid")
amount_paid_label.grid(row=6,column=0,padx=10,pady=10)
delivery_address_label =Label(root,text="Delivery Address")
delivery_address_label.grid(row=7,column=0,pady=10,padx=10)
#Create Submit Button
submit_btn = Button(root,text="Add Record to Database",command= submit)
submit_btn.grid(row=8,column=0,columnspan=2,pady=10,padx=10,ipadx=200)
#create a query button
query_btn = Button(root,text='Show Records',command=query)
query_btn.grid(row=9,column=0,columnspan=2,pady=10,padx=10,ipadx=137)
con.commit()
#con.close()
root.mainloop() |
the-stack_0_15614 | from dateutil import rrule
from .regexes import ElementPart, element_kind_map, regex_list
from .util import ts_to_datetime
class CronValidator:
@classmethod
def parse(cls, expression):
"""
:param str expression:
:return:
"""
parts = expression.split(" ")
if len(parts) != 5:
raise ValueError("Invalid expression")
elements = []
for i in range(0, 5):
m = regex_list[i].fullmatch(parts[i])
if not m:
raise ValueError(f"Invalid expression part {i}")
kind = None
body = None
for key, value in m.groupdict().items():
if value:
kind = key
body = value
break
element_cls = element_kind_map.get(kind)
elements.append(element_cls(part=ElementPart(i + 1), body=body))
return elements
@classmethod
def match_timestamp(cls, expression, ts, tz_name):
"""
:param expression:
:param ts:
:param tz_name:
:return:
"""
dt = ts_to_datetime(ts, tz_name)
elements = cls.parse(expression)
for element in elements:
if not element.match(dt):
return False
return True
@classmethod
def match_datetime(cls, expression, dt):
"""
:param expression:
:param dt:
:return:
"""
elements = cls.parse(expression)
for element in elements:
if not element.match(dt):
return False
return True
@classmethod
def get_execution_time(cls, expression, from_dt, to_dt):
"""
:param expression:
:param from_dt:
:param to_dt:
:return:
"""
for dt in rrule.rrule(rrule.MINUTELY, dtstart=from_dt, until=to_dt):
if cls.match_datetime(expression, dt):
yield dt.replace(second=0, microsecond=0)
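
# --- Usage sketch (added for illustration; not part of the original module) ---
# The cron expression below is an arbitrary example ("noon every day"); which
# syntax variants (steps, names, ...) are accepted depends on the patterns
# defined in .regexes.
if __name__ == "__main__":
    from datetime import datetime

    expr = "0 12 * * *"
    print(CronValidator.match_datetime(expr, datetime(2021, 6, 7, 12, 0)))  # expected: True
    for run_at in CronValidator.get_execution_time(
            expr,
            from_dt=datetime(2021, 6, 7, 0, 0),
            to_dt=datetime(2021, 6, 8, 0, 0)):
        print(run_at)  # expected: 2021-06-07 12:00 under standard cron semantics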
|
the-stack_0_15616 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v10.errors",
marshal="google.ads.googleads.v10",
manifest={"LabelErrorEnum",},
)
class LabelErrorEnum(proto.Message):
r"""Container for enum describing possible label errors.
"""
class LabelError(proto.Enum):
r"""Enum describing possible label errors."""
UNSPECIFIED = 0
UNKNOWN = 1
CANNOT_APPLY_INACTIVE_LABEL = 2
CANNOT_APPLY_LABEL_TO_DISABLED_AD_GROUP_CRITERION = 3
CANNOT_APPLY_LABEL_TO_NEGATIVE_AD_GROUP_CRITERION = 4
EXCEEDED_LABEL_LIMIT_PER_TYPE = 5
INVALID_RESOURCE_FOR_MANAGER_LABEL = 6
DUPLICATE_NAME = 7
INVALID_LABEL_NAME = 8
CANNOT_ATTACH_LABEL_TO_DRAFT = 9
CANNOT_ATTACH_NON_MANAGER_LABEL_TO_CUSTOMER = 10
__all__ = tuple(sorted(__protobuf__.manifest))
|
the-stack_0_15620 | # coding=utf-8
import time
import multiprocessing
import os
import sys
import signal
try:
from setproctitle import getproctitle, setproctitle
except ImportError:
setproctitle = None
from diamond.utils.signals import signal_to_exception
from diamond.utils.signals import SIGALRMException
from diamond.utils.signals import SIGHUPException
def collector_process(collector, metric_queue, log):
"""
"""
proc = multiprocessing.current_process()
if setproctitle:
setproctitle('%s - %s' % (getproctitle(), proc.name))
signal.signal(signal.SIGALRM, signal_to_exception)
signal.signal(signal.SIGHUP, signal_to_exception)
signal.signal(signal.SIGUSR2, signal_to_exception)
interval = float(collector.config['interval'])
max_time = int(interval * 0.9)
log.debug('Starting')
log.debug('Interval: %s seconds', interval)
log.debug('Max collection time: %s seconds', max_time)
# Validate the interval
if interval <= 0:
log.critical('interval of %s is not valid!', interval)
sys.exit(1)
next_collection = time.time()
reload_config = False
# Set stderr/stdout to /dev/null so random print statements in third-party
# libs do not fail and prevent collectors from running.
# https://github.com/BrightcoveOS/Diamond/issues/722
sys.stdout = open(os.devnull, 'w')
sys.stderr = open(os.devnull, 'w')
while(True):
try:
time_to_sleep = next_collection - time.time()
if time_to_sleep > 0:
time.sleep(time_to_sleep)
next_collection += interval
# Ensure collector run times fit into the collection window
signal.alarm(max_time)
# Collect!
collector._run()
# Success! Disable the alarm
signal.alarm(0)
# Reload the config if requested
# This is outside of the alarm code as we don't want to interrupt
# it and end up with half a loaded config
if reload_config:
log.debug('Reloading config')
collector.load_config()
log.info('Config reloaded')
reload_config = False
except SIGALRMException:
log.error('Took too long to run! Killed!')
continue
except SIGHUPException:
log.info('Scheduling config reload due to HUP')
reload_config = True
pass
except Exception:
log.exception('Collector failed!')
break
def handler_process(handlers, metric_queue, log):
proc = multiprocessing.current_process()
if setproctitle:
setproctitle('%s - %s' % (getproctitle(), proc.name))
log.debug('Starting process %s', proc.name)
while(True):
metrics = metric_queue.get(block=True, timeout=None)
for metric in metrics:
for handler in handlers:
handler._process(metric)
for handler in handlers:
handler._flush()
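# A self-contained sketch of the SIGALRM collection-window pattern used in collector_process
# above (Unix-only; `_raise` stands in for diamond.utils.signals.signal_to_exception, whose
# exact behaviour is assumed to be "turn the signal into an exception"):
def _alarm_timeout_sketch():
    import signal
    import time

    class _Timeout(Exception):
        pass

    def _raise(signum, frame):
        raise _Timeout()

    signal.signal(signal.SIGALRM, _raise)
    signal.alarm(1)              # allow at most 1 second for the simulated collector run
    try:
        time.sleep(2)            # simulated slow collector
    except _Timeout:
        return 'killed: took too long to run'
    finally:
        signal.alarm(0)          # always clear any pending alarm
    return 'finished in time'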
|
the-stack_0_15622 | #!/usr/bin/env python3
import sys, random
assert sys.version_info >= (3,7), "This script requires at least Python 3.7"
def choose_color(last_color):
colors = ['red','orange','yellow','green','blue','violet','purple']
c = random.choice(colors)
while c == last_color:
c = random.choice(colors)
return c
# choose_color (defined above) picks a random color, re-rolling until it differs from the previous one
print('Greetings!')
play_again = ''
best_count = sys.maxsize # the biggest number
last_color = ''
while (play_again != 'n' and play_again != 'no'):
count = 0
color = ''
match_color = choose_color(last_color)
while (color != match_color):
color = input("\nWhat is my favorite color? ") #\n is a special code that adds a new line
color = color.lower().strip()
count += 1
if (color == match_color):
print('Correct!')
else:
print('Sorry, try again. You have guessed {guesses} times.'.format(guesses=count))
print('\nYou guessed it in {} tries!'.format(count))
if (count < best_count):
print('This was your best guess so far!')
best_count = count
play_again = input("\nWould you like to play again (yes or no)? ").lower().strip()
print('Thanks for playing!')
|
the-stack_0_15624 | #!/usr/bin/env python
"""
Real-time rep counter for jumping jacks and squats.
Usage:
run_fitness_rep_counter.py [--camera_id=CAMERA_ID]
[--path_in=FILENAME]
[--path_out=FILENAME]
[--title=TITLE]
[--model_name=NAME]
[--model_version=VERSION]
[--use_gpu]
run_fitness_rep_counter.py (-h | --help)
Options:
  --camera_id=CAMERA_ID      ID of the camera to stream from
  --path_in=FILENAME         Video file to stream from
--path_out=FILENAME Video file to stream to
--title=TITLE This adds a title to the window display
--model_name=NAME Name of the model to be used.
--model_version=VERSION Version of the model to be used.
--use_gpu Whether to run inference on the GPU or not.
"""
from typing import Callable
from typing import Optional
from docopt import docopt
import sense.display
from sense.controller import Controller
from sense.downstream_tasks.fitness_rep_counting import INT2LAB
from sense.downstream_tasks.fitness_rep_counting import LAB2INT
from sense.downstream_tasks.nn_utils import LogisticRegression
from sense.downstream_tasks.nn_utils import Pipe
from sense.downstream_tasks.postprocess import AggregatedPostProcessors
from sense.downstream_tasks.postprocess import PostprocessClassificationOutput
from sense.downstream_tasks.postprocess import TwoPositionsCounter
from sense.loading import build_backbone_network
from sense.loading import get_relevant_weights
from sense.loading import ModelConfig
SUPPORTED_MODEL_CONFIGURATIONS = [
ModelConfig('StridedInflatedEfficientNet', 'pro', ['rep_counter']),
]
def run_fitness_rep_counter(model_name: str,
model_version: str,
title: Optional[str] = None,
display_fn: Optional[Callable] = None,
**kwargs):
"""
:param model_name:
Model from backbone (StridedInflatedEfficientNet or StridedInflatedMobileNetV2).
:param model_version:
Model version (pro or lite)
:param title:
Title of the image frame on display.
:param display_fn:
Optional function to further process displayed image
"""
# Load weights
selected_config, weights = get_relevant_weights(
SUPPORTED_MODEL_CONFIGURATIONS,
model_name,
model_version
)
# Load backbone network
backbone_network = build_backbone_network(selected_config, weights['backbone'])
# Load a logistic regression classifier
rep_counter = LogisticRegression(num_in=backbone_network.feature_dim,
num_out=5)
rep_counter.load_state_dict(weights['rep_counter'])
rep_counter.eval()
# Concatenate backbone network and rep counter
net = Pipe(backbone_network, rep_counter)
postprocessor = [
AggregatedPostProcessors(
post_processors=[
TwoPositionsCounter(
pos0_idx=LAB2INT['counting - jumping_jacks_position=arms_down'],
pos1_idx=LAB2INT['counting - jumping_jacks_position=arms_up'],
threshold0=0.4,
threshold1=0.4,
out_key='Jumping Jacks',
),
TwoPositionsCounter(
pos0_idx=LAB2INT['counting - squat_position=high'],
pos1_idx=LAB2INT['counting - squat_position=low'],
threshold0=0.4,
threshold1=0.4,
out_key='squats',
),
],
out_key='counting',
),
PostprocessClassificationOutput(INT2LAB, smoothing=1)
]
display_ops = [
sense.display.DisplayFPS(expected_camera_fps=net.fps,
expected_inference_fps=net.fps / net.step_size),
sense.display.DisplayTopKClassificationOutputs(top_k=1, threshold=0.5),
sense.display.DisplayExerciseRepCounts()
]
display_results = sense.display.DisplayResults(title=title, display_ops=display_ops,
border_size_top=100, display_fn=display_fn)
# Run live inference
controller = Controller(
neural_network=net,
post_processors=postprocessor,
results_display=display_results,
callbacks=[],
**kwargs
)
controller.run_inference()
if __name__ == "__main__":
# Parse arguments
args = docopt(__doc__)
run_fitness_rep_counter(
camera_id=int(args['--camera_id'] or 0),
path_in=args['--path_in'] or None,
path_out=args['--path_out'] or None,
title=args['--title'] or None,
model_name=args['--model_name'] or None,
model_version=args['--model_version'] or None,
use_gpu=args['--use_gpu'],
)
|
the-stack_0_15625 | from bisect import bisect_left
def gcd(a, b):
while(b):
a %= b
        a, b = b, a  # swap so the smaller value ends up in b
return a
def divisors(n):
d = []
for i in range(1, int(n**0.5)+1):
if (n % i == 0):
d.append(i) if(i*i == n) else d.extend([i, n//i])
return list(sorted(d))
def solve():
n = int(input())
if n == 1:
print(1, 1)
return
for i in divisors(n):
if i*i <= n or gcd(i, n//i) > 1:
continue
else:
print(n//i, i)
return
solve()
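# Quick sanity checks for the helpers above, left as comments so the script's
# stdin-driven solve() call keeps its behaviour:
#   gcd(12, 18)   -> 6
#   divisors(12)  -> [1, 2, 3, 4, 6, 12]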
|
the-stack_0_15627 | from __future__ import division, print_function
import apache_beam as beam
import sqlalchemy
import sqlalchemy.dialects.postgresql  # used below for postgresql.insert() and the UUID type
from sqlalchemy.orm import sessionmaker
class ReadFromDBFn(beam.DoFn):
def __init__(self, url, query, query_params={}, *args, **kwargs):
super(ReadFromDBFn, self).__init__(*args, **kwargs)
self.url = url
self.query = query
self.query_params = query_params
def process(self, data):
data = dict(data)
engine = sqlalchemy.create_engine(self.url, pool_timeout=10)
query_params = self.query_params
if 'db_query_params' in data:
query_params = data['db_query_params']
for record in engine.execute(sqlalchemy.sql.text(self.query), query_params):
yield dict(record)
class WriteIntoDB(beam.PTransform):
def __init__(self, url, table, update_ignores=(), *args, **kwargs):
super(WriteIntoDB, self).__init__(*args, **kwargs)
self.url = url
self.table = table
self.update_ignores = update_ignores
def expand(self, pcoll):
return pcoll | beam.ParDo(WriteIntoDBFn(
url=self.url,
table=self.table,
update_ignores=self.update_ignores,
))
class WriteIntoDBFn(beam.DoFn):
def __init__(self, url, table, update_ignores, *args, **kwargs):
super(WriteIntoDBFn, self).__init__(*args, **kwargs)
self.url = url
self.table = table
self.update_ignores = update_ignores
@staticmethod
def column_reflect_listener(inspector, table, column_info):
if isinstance(column_info['type'], sqlalchemy.dialects.postgresql.base.UUID):
column_info['type'].as_uuid = True
def start_bundle(self):
engine = sqlalchemy.create_engine(self.url, pool_timeout=10)
self.SessionClass = sessionmaker(bind=engine)
self.session = self.SessionClass()
engine = self.session.bind
metadata = sqlalchemy.MetaData(bind=engine)
self.table = sqlalchemy.Table(self.table, metadata, autoload=True, listeners=[
('column_reflect', self.column_reflect_listener)
])
def process(self, data):
try:
insert_stmt = sqlalchemy.dialects.postgresql.insert(self.table) \
.values(data) \
.returning(sqlalchemy.sql.elements.literal_column('*'))
cols = [col for col in insert_stmt.excluded if col.name not in self.update_ignores]
update_columns = {col.name: col for col in cols}
upsert_stmt = insert_stmt.on_conflict_do_update(
index_elements=[col for col in self.table.primary_key],
set_=update_columns
)
for rowproxy in self.session.execute(upsert_stmt).yield_per(1000):
yield {col.name: getattr(rowproxy, col.name) for col in self.table.columns}
self.session.commit()
except:
self.session.rollback()
self.session.close()
self.session.bind.dispose()
raise
def finish_bundle(self):
self.session.close()
self.session.bind.dispose()
self.session = None |
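# A sketch of how these transforms might be wired into a pipeline. The connection URL,
# query and table name are placeholders, not values defined in this module:
def _example_pipeline_sketch():
    with beam.Pipeline() as p:
        _ = (
            p
            | 'seed' >> beam.Create([{}])  # one dummy element to trigger the read
            | 'read' >> beam.ParDo(ReadFromDBFn(
                url='postgresql://user:pass@localhost/source_db',        # placeholder
                query='SELECT * FROM source_table WHERE id > :min_id',   # placeholder
                query_params={'min_id': 0}))
            | 'write' >> WriteIntoDB(
                url='postgresql://user:pass@localhost/target_db',        # placeholder
                table='target_table')                                    # placeholder
        )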
the-stack_0_15628 | class Solution:
def twoSum(self, nums: List[int], target: int) -> List[List[int]]:
complement = {}
out = []
for i,n in enumerate(nums):
complement[target-n] = i
for i,n in enumerate(nums):
idx = complement.get(n, None)
            if idx is not None and idx != i:
out.append([nums[idx], nums[i]])
return out
def threeSum(self, nums: List[int]) -> List[List[int]]:
if len(nums) < 3:
return []
nums.sort()
out = []
if set(nums) == {0}:
return [[0,0,0]]
i = 0
while len(nums) >= 3:
l_twosum = self.twoSum(nums[1:], -nums[0])
            if l_twosum is not None:
for l in l_twosum:
l.append(nums[0])
out.append(l)
nums.pop(0)
for i,l in enumerate(out):
out[i] = sorted(l)
out = list(map(list, set(map(tuple, out))))
return out
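# Example (traced against the implementation above):
#   Solution().threeSum([-1, 0, 1, 2, -1, -4]) returns the unique triplets
#   [-1, -1, 2] and [-1, 0, 1] (outer list order may vary because of the set() dedup).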
|
the-stack_0_15632 | # -*- coding: utf-8 -*-
'''
Run tests of notebooks using nbval -- called from testDocumentation
Created on May 24, 2017
@author: cuthbert
'''
import sys
import subprocess
# noinspection PyPackageRequirements
import pytest # @UnusedImport # pylint: disable=unused-import,import-error
# noinspection PyPackageRequirements
import nbval # @UnusedImport # pylint: disable=unused-import,import-error
from music21 import environment
from music21 import common
# pytest --nbval usersGuide_15_key.ipynb --sanitize-with ../../nbval-sanitize.cfg -q
skip = ['installJupyter.ipynb']
def runAll():
sourcePath = common.getRootFilePath() / 'documentation' / 'source'
goodFiles = []
for innerDir in ('about', 'developerReference', 'installing', 'usersGuide'):
fullDir = sourcePath / innerDir
for f in sorted(fullDir.rglob('*.ipynb')):
if f.name in skip:
continue
if 'checkpoint' in str(f):
continue
goodFiles.append(f)
for f in goodFiles:
print("Running: ", str(f))
try:
retVal = runOne(f)
except KeyboardInterrupt:
break
if retVal == 512:
return None
def runOne(nbFile):
us = environment.UserSettings()
museScore = us['musescoreDirectPNGPath']
us['musescoreDirectPNGPath'] = '/skip' + str(museScore)
# this config file changes 0x39f3a0 to 0xADDRESS.
sanitize_fn = str(common.getRootFilePath()
/ 'documentation'
/ 'docbuild'
/ 'nbval-sanitize.cfg'
)
try:
retVal = subprocess.run(
['pytest',
'--disable-pytest-warnings',
'--nbval', str(nbFile),
'--sanitize-with', sanitize_fn,
'-q'],
check=False,
)
# except (Exception, KeyboardInterrupt): # specifically looking at KeyboardInterrupt.
# raise
finally:
us['musescoreDirectPNGPath'] = museScore
return retVal
if __name__ == '__main__':
if len(sys.argv) > 1:
runOne(sys.argv[1])
else:
runAll()
|
the-stack_0_15634 | import pytest
import pandas as pd
from pyam import IamDataFrame, compare
# when making any updates to this file,
# please also update the `data_table_formats` tutorial notebook!
def test_cast_from_value_col(test_df_year):
df_with_value_cols = pd.DataFrame(
[
["model_a", "scen_a", "World", "EJ/yr", 2005, 1, 0.5],
["model_a", "scen_a", "World", "EJ/yr", 2010, 6.0, 3],
["model_a", "scen_b", "World", "EJ/yr", 2005, 2, None],
["model_a", "scen_b", "World", "EJ/yr", 2010, 7, None],
],
columns=[
"model",
"scenario",
"region",
"unit",
"year",
"Primary Energy",
"Primary Energy|Coal",
],
)
df = IamDataFrame(
df_with_value_cols, value=["Primary Energy", "Primary Energy|Coal"]
)
assert compare(test_df_year, df).empty
pd.testing.assert_frame_equal(df.data, test_df_year.data, check_like=True)
def test_cast_from_value_col_and_args(test_df_year):
# checks for issue [#210](https://github.com/IAMconsortium/pyam/issues/210)
df_with_value_cols = pd.DataFrame(
[
["scen_a", "World", "EJ/yr", 2005, 1, 0.5],
["scen_a", "World", "EJ/yr", 2010, 6.0, 3],
["scen_b", "World", "EJ/yr", 2005, 2, None],
["scen_b", "World", "EJ/yr", 2010, 7, None],
],
columns=[
"scenario",
"iso",
"unit",
"year",
"Primary Energy",
"Primary Energy|Coal",
],
)
df = IamDataFrame(
df_with_value_cols,
model="model_a",
region="iso",
value=["Primary Energy", "Primary Energy|Coal"],
)
assert compare(test_df_year, df).empty
pd.testing.assert_frame_equal(df.data, test_df_year.data, check_like=True)
def test_cast_with_model_arg_raises():
df = pd.DataFrame(
[
["model_a", "scen_a", "World", "EJ/yr", 2005, 1, 0.5],
],
columns=[
"model",
"scenario",
"region",
"unit",
"year",
"Primary Energy",
"Primary Energy|Coal",
],
)
pytest.raises(ValueError, IamDataFrame, df, model="foo")
def test_cast_with_model_arg(test_df):
df = test_df.timeseries().reset_index()
df.rename(columns={"model": "foo"}, inplace=True)
df = IamDataFrame(df, model="foo")
assert compare(test_df, df).empty
pd.testing.assert_frame_equal(df.data, test_df.data)
def test_cast_by_column_concat(test_df_year):
df = pd.DataFrame(
[
["scen_a", "World", "Primary Energy", None, "EJ/yr", 1, 6.0],
["scen_a", "World", "Primary Energy", "Coal", "EJ/yr", 0.5, 3],
["scen_b", "World", "Primary Energy", None, "EJ/yr", 2, 7],
],
columns=["scenario", "region", "var_1", "var_2", "unit", 2005, 2010],
)
df = IamDataFrame(df, model="model_a", variable=["var_1", "var_2"])
assert compare(test_df_year, df).empty
pd.testing.assert_frame_equal(df.data, test_df_year.data, check_like=True)
def test_cast_with_variable_and_value(test_df):
pe_df = test_df.filter(variable="Primary Energy")
df = pe_df.data.rename(columns={"value": "lvl"}).drop("variable", axis=1)
df = IamDataFrame(df, variable="Primary Energy", value="lvl")
assert compare(pe_df, df).empty
pd.testing.assert_frame_equal(df.data, pe_df.data.reset_index(drop=True))
def test_cast_from_r_df(test_pd_df):
df = test_pd_df.copy()
# last two columns are years
df.columns = list(df.columns[:-2]) + ["X{}".format(c) for c in df.columns[-2:]]
obs = IamDataFrame(df)
exp = IamDataFrame(test_pd_df)
assert compare(obs, exp).empty
pd.testing.assert_frame_equal(obs.data, exp.data)
def test_cast_from_r_df_err(test_pd_df):
df = test_pd_df.copy()
# last two columns are years
df.columns = list(df.columns[:-2]) + ["Xfoo", "Xbar"]
pytest.raises(ValueError, IamDataFrame, df)
|
the-stack_0_15635 | # mypy: allow-untyped-defs
import sys
from mozlog.structured import structuredlog, commandline
from .. import wptcommandline
from .update import WPTUpdate
def remove_logging_args(args):
"""Take logging args out of the dictionary of command line arguments so
they are not passed in as kwargs to the update code. This is particularly
necessary here because the arguments are often of type file, which cannot
be serialized.
:param args: Dictionary of command line arguments.
"""
for name in list(args.keys()):
if name.startswith("log_"):
args.pop(name)
def setup_logging(args, defaults):
"""Use the command line arguments to set up the logger.
:param args: Dictionary of command line arguments.
:param defaults: Dictionary of {formatter_name: stream} to use if
no command line logging is specified"""
logger = commandline.setup_logging("web-platform-tests-update", args, defaults)
remove_logging_args(args)
return logger
def run_update(logger, **kwargs):
updater = WPTUpdate(logger, **kwargs)
return updater.run()
def main():
args = wptcommandline.parse_args_update()
logger = setup_logging(args, {"mach": sys.stdout})
assert structuredlog.get_default_logger() is not None
success = run_update(logger, **args)
sys.exit(0 if success else 1)
|
the-stack_0_15636 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
import sys
class Subread(MakefilePackage):
"""The Subread software package is a tool kit for processing next-gen
sequencing data."""
homepage = "http://subread.sourceforge.net/"
url = "https://downloads.sourceforge.net/project/subread/subread-1.5.2/subread-1.5.2-source.tar.gz"
version('2.0.0', sha256='bd7b45f7d8872b0f5db5d23a385059f21d18b49e432bcb6e3e4a879fe51b41a8')
version('1.6.4', sha256='b7bd0ee3b0942d791aecce6454d2f3271c95a010beeeff2daf1ff71162e43969')
version('1.6.2', sha256='77b4896c1c242967c5883a06c0a5576a5ff220008a12aa60af9669d2f9a87d7a')
version('1.6.0', sha256='31251ec4c134e3965d25ca3097890fb37e2c7a4163f6234515534fd325b1002a')
version('1.5.2', sha256='a8c5f0e09ed3a105f01866517a89084c7302ff70c90ef8714aeaa2eab181a0aa')
depends_on('zlib')
def build(self, spec, prefix):
plat = sys.platform
with working_dir('src'):
if plat.startswith('linux'):
filter_file(
'CC_EXEC = gcc',
'CC_EXEC = {0}'.format(spack_cc),
'Makefile.Linux'
)
if spec.target.family == 'aarch64':
filter_file('-mtune=core2', '', 'Makefile.Linux')
if spec.satisfies('@1.6.2:1.6.4'):
filter_file(
'-mtune=core2',
'',
'longread-one/Makefile'
)
elif spec.satisfies('@1.6.0'):
filter_file(
'-mtune=core2',
'',
'longread-mapping/Makefile'
)
make('-f', 'Makefile.Linux')
elif plat.startswith('darwin'):
make('-f', 'Makefile.MacOS')
else:
raise InstallError("The communication mechanism %s is not"
"supported" % plat)
def install(self, spec, prefix):
install_tree('bin', prefix.bin)
|
the-stack_0_15637 | # -*- coding: utf-8 -*-
# Copyright 2018-2019 Streamlit Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pytest fixtures.
Everything in this file applies to all tests. This basically makes the
tests not READ from your local home directory but instead this mock
config.
"""
import os
from mock import patch, mock_open
from streamlit import config
from streamlit import util
os.environ["HOME"] = "/mock/home/folder"
CONFIG_FILE_CONTENTS = """
[global]
sharingMode = "off"
unitTest = true
[browser]
gatherUsageStats = false
"""
config_path = util.get_streamlit_file_path("config.toml")
with patch(
"streamlit.config.open", mock_open(read_data=CONFIG_FILE_CONTENTS), create=True
), patch("streamlit.config.os.path.exists") as path_exists:
path_exists.side_effect = lambda path: path == config_path
config.parse_config_file()
|
the-stack_0_15639 | import argparse
import os
import subprocess
import pandas as pd
# To get the trec_eval script you can follow this link: https://trec.nist.gov/trec_eval/
def run_to_csv_using_trec_eval(run_names, out_file_name,
trec_eval_location='./alter_library_code/anserini/eval/trec_eval.9.0.4/trec_eval',
path_to_qrels='./2019_data/evaluation_topics_mod.qrel'):
"""
Writes to tsv file the results obtained for each run using trec_eval script
run_names - list of strs with path to runs
out_file_name - str, name of the results file
trec_eval_location - str, path to the trec eval script
path_to_qrels - str, path to qrels file
"""
results = {}
for run_name in run_names:
output = subprocess.run([trec_eval_location,
'-m', 'map',
'-m', 'recip_rank',
'-m', 'recall',
'-m', 'all_trec',
'-c', path_to_qrels,
run_name],
stdout=subprocess.PIPE).stdout.decode('utf-8')
# print("output", output)
current_metrics = {}
print(f"{os.path.basename(run_name)} Output Metrics:\n {output}")
print()
lines = output.split("\n")
for line in lines[:-1]: # last is empty line
metric, _, value = line.split("\t")
current_metrics[metric.rstrip()] = value # possible conversion needed to float or int
# concatenate
results[os.path.basename(run_name)] = current_metrics
df = pd.DataFrame.from_dict(results, orient="index")
# get only some columns
df1 = df[['recall_1000', 'map', 'recip_rank', 'ndcg_cut_3', 'P_3']]
# write to file
df1.to_csv("results/" + out_file_name + ".tsv", sep="\t")
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='''Get the official metrics using the Trec eval script.''')
parser.add_argument('--run_name', required=True,
type=str, help='Path to file in the trec run format')
parser.add_argument('--out_file_name', required=True,
type=str, help='Path to output file to write the metrics')
parser.add_argument('--trec_eval_location', required=True,
type=str, help='Path to the trec_eval script')
parser.add_argument('--path_to_qrels', required=True, default='./2019_data/evaluation_topics_mod.qrel',
type=str, help='Path to the CAsT qrels file.')
args = parser.parse_args()
print("Started running...")
run_to_csv_using_trec_eval(run_names=[args.run_name], out_file_name=args.out_file_name,
trec_eval_location=args.trec_eval_location, path_to_qrels=args.path_to_qrels)
print('Done!')
# Command examples
# python3 run_trec_eval_official_metrics.py --run_name <path to trec run file> --out_file_name <path to output file> --trec_eval_location ./alter_library_code/anserini/eval/trec_eval.9.0.4/trec_eval --path_to_qrels ./2019_data/evaluation_topics_mod.qrel
|
the-stack_0_15640 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright (c) 2019. All rights reserved.
Created by C. L. Wang on 2020/2/18
"""
import os
import cv2
import torch
import torchvision.models as models
import torchvision.transforms as transforms
from PIL import Image
from core.img_core.model import IQAModel
from root_dir import DATASET_DIR, MODELS_DIR
# Deprecated: current results are relatively poor.
class ImgQualityAssessment(object):
    """
    Image quality assessment (deprecated); current results are relatively poor.
    """
def __init__(self):
self.model_path = os.path.join(MODELS_DIR, 'epoch-57.pkl')
self.model, self.device = self.init_model()
self.test_transform = self.get_test_transform()
        self.n_frames = 10  # number of frames sampled from a video for assessment
def init_model(self):
base_model = models.vgg16(pretrained=True)
model = IQAModel(base_model)
if not torch.cuda.is_available():
model.load_state_dict(torch.load(self.model_path, map_location=torch.device('cpu')))
else:
model.load_state_dict(torch.load(self.model_path))
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = model.to(device)
model.eval()
return model, device
def get_test_transform(self):
test_transform = transforms.Compose([
transforms.Scale(256),
transforms.CenterCrop(224),
transforms.ToTensor()
])
return test_transform
def predict_img(self, img_pil):
"""
        Predict the quality score distribution of a PIL image and return (mean, std).
"""
imt = self.test_transform(img_pil)
imt = imt.unsqueeze(dim=0)
imt = imt.to(self.device)
with torch.no_grad():
out = self.model(imt)
out = out.view(10, 1)
mean, std = 0.0, 0.0
for j, e in enumerate(out, 1):
mean += j * e
for k, e in enumerate(out, 1):
std += (e * (k - mean) ** 2) ** 0.5
mean, std = float(mean), float(std)
return mean, std
def predict_img_path(self, img_path):
"""
        Predict image quality given a path to the image file.
"""
imt = Image.open(img_path)
mean, std = self.predict_img(imt)
return mean, std
def predict_vid(self, vid_path):
"""
        Estimate video quality from a sample of its frames.
"""
cap = cv2.VideoCapture(vid_path)
n_frame = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
gap = n_frame // self.n_frames
n_list = [k for k in range(0, n_frame, gap)]
        n_list = n_list[1:-2]  # drop the first and last sampled frames
        sum_mean, sum_std = 0.0, 0.0  # running totals of the per-frame quality scores
for i in n_list:
cap.set(cv2.CAP_PROP_POS_FRAMES, i)
ret, frame = cap.read()
img_pil = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
mean, std = self.predict_img(img_pil)
sum_mean += mean
sum_std += std
avg_mean = sum_mean / self.n_frames
avg_std = sum_std / self.n_frames
        print('[Info] video quality mean: {}, std: {}'.format(avg_mean, avg_std))
norm_mean = avg_mean / 10.0
norm_std = avg_std / 10.0
return norm_mean, norm_std
def main():
iqa = ImgQualityAssessment()
img_path = os.path.join(DATASET_DIR, 'imgs', 'landscape.jpg')
    print('[Info] image path: {}'.format(img_path))
    mean, std = iqa.predict_img_path(img_path)
    print('[Info] mean: {}, std: {}'.format(mean, std))
if __name__ == '__main__':
main()
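# The same class also scores videos, e.g. (path is a placeholder and the model
# checkpoint configured in __init__ must exist):
#   norm_mean, norm_std = ImgQualityAssessment().predict_vid('/path/to/video.mp4')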
|
the-stack_0_15641 | import common
import unittest
import os
import datetime
import subprocess
from pathlib import Path
unique_name = common.unique_name
def get_timestamp(y, m, d):
ts = datetime.datetime(y, m, d, 9, 0, 0)
return int(datetime.datetime.timestamp(ts))
class Test(common.TestBase):
def setUp(self):
super().setUpClass()
self.other = common.RepoDir()
self.other.tempdir_new()
def tearDown(self):
self.other.tempdir_clear()
def test_0_basic(self):
self.repo.cmd("export", self.other.path, "--chunk-size", "1b" , "--dry-run")
assert( self.other.n_files() == 0 )
self.repo.cmd("export", self.other.path, "--chunk-size", "1b")
assert( self.other.n_files(verbose=True) > 0 )
# Ok, the file should there, lets extract
os.chdir(self.other.path)
subprocess.run( "cat *.split_tgz.*|tar -vzx ", shell=True )
os.chdir(self.repo.path)
self.other.file_check("root_file_1", exists=True)
if __name__ == "__main__":
unittest.main()
|
the-stack_0_15644 | #!/usr/bin/env python
import sys
import os
import math
# ensure that the kicad-footprint-generator directory is available
#sys.path.append(os.environ.get('KIFOOTPRINTGENERATOR')) # enable package import from parent directory
#sys.path.append("D:\hardware\KiCAD\kicad-footprint-generator") # enable package import from parent directory
sys.path.append(os.path.join(sys.path[0],"..","..","kicad_mod")) # load kicad_mod path
sys.path.append(os.path.join(sys.path[0],"..","..")) # load kicad_mod path
sys.path.append(os.path.join(sys.path[0],"..","tools")) # load kicad_mod path
from KicadModTree import * # NOQA
from footprint_scripts_terminal_blocks import *
if __name__ == '__main__':
script_generated_note="script-generated using https://github.com/pointhi/kicad-footprint-generator/scripts/TerminalBlock_Philmore";
classname="TerminalBlock_Philmore"
pins=range(2,3+1)
rm=5
package_height=10.2
leftbottom_offset=[rm/2, 5.4]
ddrill=1.2
pad=[2.4,2.4]
screw_diameter=2.75
bevel_height=[0.5,1.6]
slit_screw=False
screw_pin_offset=[0,0]
secondHoleDiameter=0
secondHoleOffset=[0,0]
thirdHoleDiameter=0
thirdHoleOffset=[0,-4]
fourthHoleDiameter=0
fourthHoleOffset=[0,0]
fabref_offset=[0,2.5]
nibbleSize=[]
nibblePos=[]
for p in pins:
name="TB13{0}".format(p);
webpage="http://www.philmore-datak.com/mc/Page%20197.pdf";
classname_description="Terminal Block Philmore ".format(name);
footprint_name="TerminalBlock_Philmore_{0}_1x{2:02}_P{1:3.2f}mm_Horizontal".format(name, rm, p)
makeTerminalBlockStd(footprint_name=footprint_name,
pins=p, rm=rm,
package_height=package_height, leftbottom_offset=leftbottom_offset,
ddrill=ddrill, pad=pad, screw_diameter=screw_diameter, bevel_height=bevel_height, slit_screw=slit_screw, screw_pin_offset=screw_pin_offset, secondHoleDiameter=secondHoleDiameter, secondHoleOffset=secondHoleOffset, thirdHoleDiameter=thirdHoleDiameter, thirdHoleOffset=thirdHoleOffset, fourthHoleDiameter=fourthHoleDiameter, fourthHoleOffset=fourthHoleOffset,
nibbleSize=nibbleSize, nibblePos=nibblePos, fabref_offset=fabref_offset,
tags_additional=[], lib_name='${KICAD6_3DMODEL_DIR}/'+classname, classname=classname, classname_description=classname_description,
webpage=webpage, script_generated_note=script_generated_note)
|
the-stack_0_15646 | import logging
import pendulum
from discord.ext import commands
import helpers.BOT_ERROR as BOT_ERROR
from helpers.SQLiteHelper import SQLiteHelper
import CONFIG
class SantaCountdownHelper():
def __init__(self, sqlitehelper: SQLiteHelper):
self.pend_format = "M/D/YY [@] h:m A Z"
self.cd_table_name = "Countdowns"
self.sqlhelp = sqlitehelper
self.logger = logging.getLogger('SantaBot.SantaCountdownHelper')
self.logger.info("Creating SantaCountdownHelper")
def __get_cd_table_name(self, guild_id: str):
return f"{self.cd_table_name}_{guild_id}"
def __countdown_cmd_set(self, ctx: commands.Context, cd_name: str, cd_time: str):
result_str = ""
try:
pend_test_convert = pendulum.from_format(cd_time, self.pend_format) # check that the format is correct
if(self.sqlhelp.insert_records(self.__get_cd_table_name(ctx.guild.id), "(name, time, user_id)", [f"('{cd_name}', '{cd_time}', {ctx.author.id})"])):
diff_str = self.__find_pend_diff_str(pend_test_convert)
result_str = f"{cd_name} countdown set for {cd_time} ({diff_str})"
else:
result_str = BOT_ERROR.COUNTDOWN_NAME_TAKEN
except ValueError as error:
expected = "ERROR: inputted time does not match expected format `month/day/year @ hour:minute AM/PM UTC_offset`\n"
result_str = expected + "ex. `5/17/20 @ 1:00 PM -06:00`"
BOT_ERROR.output_debug(result_str, self.logger)
finally:
return result_str
def __countdown_cmd_change(self, ctx: commands.Context, cd_name: str, cd_time: str):
result_str = ""
try:
pend_test_convert = pendulum.from_format(cd_time, self.pend_format) # check that the format is correct
except ValueError as error:
expected = "ERROR: inputted time does not match expected format `month/day/year @ hour:minute AM/PM UTC_offset`\n"
result_str = expected + " ex. `5/17/20 @ 1:00 PM -06:00`"
BOT_ERROR.output_debug(result_str, self.logger)
return result_str
query_get_timer_by_name = f"SELECT * FROM {self.__get_cd_table_name(ctx.guild.id)} WHERE name=\'{cd_name}\';"
query_result = self.sqlhelp.execute_read_query(query_get_timer_by_name)
if(query_result != None):
if(len(query_result) > 0):
(query_id, query_name, query_time, query_user_id) = query_result[0]
if(ctx.author.id == query_user_id):
if(self.sqlhelp.execute_update_query(self.__get_cd_table_name(ctx.guild.id), f"time=\'{cd_time}\'", f"id={query_id}")):
diff_str = self.__find_pend_diff_str(pend_test_convert)
result_str = f"Updated countdown for {cd_name}. Now set for {diff_str}"
else:
result_str = BOT_ERROR.INVALID_COUNTDOWN_NAME(cd_name)
else:
cd_owner = ctx.guild.get_member(query_user_id)
result_str = BOT_ERROR.CANNOT_CHANGE_COUNTDOWN(cd_owner.name)
else:
result_str = BOT_ERROR.INVALID_COUNTDOWN_NAME(cd_name)
return result_str
def __countdown_cmd_check(self, ctx: commands.Context, cd_name: str):
result_str = ""
query_get_timer_by_name = f"SELECT * FROM {self.__get_cd_table_name(ctx.guild.id)} WHERE name=\'{cd_name}\';"
query_result = self.sqlhelp.execute_read_query(query_get_timer_by_name)
if(query_result != None):
(query_id, query_name, query_time, query_user_id) = query_result[0]
cd_pend = pendulum.from_format(query_time, self.pend_format)
diff_str = self.__find_pend_diff_str(cd_pend)
result_str = f"Time until {cd_name}: {diff_str}"
else:
result_str = BOT_ERROR.INVALID_COUNTDOWN_NAME(cd_name)
return result_str
def __countdown_cmd_remove(self, ctx: commands.Context, cd_name: str):
result_str = ""
query_get_timer_by_name = f"SELECT * FROM {self.__get_cd_table_name(ctx.guild.id)} WHERE name=\'{cd_name}\';"
query_result = self.sqlhelp.execute_read_query(query_get_timer_by_name)
if(query_result != None):
(query_id, query_name, query_time, query_user_id) = query_result[0]
if(query_user_id == ctx.author.id):
if(self.sqlhelp.execute_delete_query(self.__get_cd_table_name(ctx.guild.id), f"id={query_id}")):
result_str = f"Countdown timer `{query_name}` removed."
else:
result_str = BOT_ERROR.INVALID_COUNTDOWN_NAME(cd_name)
else:
cd_owner = ctx.guild.get_member(query_user_id)
result_str = BOT_ERROR.CANNOT_CHANGE_COUNTDOWN(cd_owner.name)
else:
result_str = BOT_ERROR.INVALID_COUNTDOWN_NAME(cd_name)
return result_str
def __countdown_cmd_list(self, ctx: commands.Context, ):
result_str = ""
query_get_all_timers = f"SELECT * FROM {self.__get_cd_table_name(ctx.guild.id)};"
query_results = self.sqlhelp.execute_read_query(query_get_all_timers)
result_str = "Countdown Name | Owner | Time | Time Until\n"
if(query_results != None):
for (query_id, query_name, query_time, query_user_id) in query_results:
cd_pend = pendulum.from_format(query_time, self.pend_format) # convert to pendulum
diff_str = self.__find_pend_diff_str(cd_pend)
time_until_str = f"Time until {query_name}: {diff_str}"
cd_owner = ctx.guild.get_member(query_user_id).name
result_str += f"{query_name} | {cd_owner} | {query_time} | {time_until_str}\n"
return result_str
def __countdown_cmd_clean(self, ctx: commands.Context):
result_str = ""
query_get_all_timers = f"SELECT * FROM {self.__get_cd_table_name(ctx.guild.id)};"
query_results = self.sqlhelp.execute_read_query(query_get_all_timers) # get all the countdowns
if(query_results != None):
for (query_id, query_name, query_time, query_user_id) in query_results:
if(not pendulum.from_format(query_time, self.pend_format).is_future()): # if the countdown has passed, delete
result_str += f"{query_time} has passed. Deleting {query_name} countdown.\n"
self.sqlhelp.execute_delete_query(self.__get_cd_table_name(ctx.guild.id), f"id = {query_id}")
return result_str
def __find_countdown_hints(self, cd_command: str, cd_name: str, cd_time: str):
'''
Get argument hints based on the command input and user input - only called from SantaAdministrative.countdown()
'''
missing_args_str = "Missing argument(s):"
missing_name_str = "<timer name>"
missing_time_str = "<formatted time>"
missing_time_hint = "Formatted time ex. `5/17/20 @ 1:00 PM -06:00`"
complete_command_str = f"Complete command: `{CONFIG.prefix}countdown {cd_command}"
argument_help = ""
if(cd_command == "set"):
if(cd_name == ""):
argument_help = f"{missing_args_str} {missing_name_str} | {missing_time_str}\n{missing_time_hint}\n"
argument_help += f"{complete_command_str} {missing_name_str} | {missing_time_str}`"
elif(cd_time == ""):
argument_help = f"{missing_args_str} {missing_time_str}\n{missing_time_hint}\n"
argument_help += f"{complete_command_str} {cd_name} | {missing_time_str}`"
elif(cd_command == "change"):
if(cd_name == ""):
argument_help = f"{missing_args_str} {missing_name_str} | {missing_time_str}\n{missing_time_hint}\n"
argument_help += f"{complete_command_str} {missing_name_str} | {missing_time_str}`"
elif(cd_time == ""):
argument_help = f"{missing_args_str} {missing_time_str}\n{missing_time_hint}\n"
argument_help += f"{complete_command_str} {cd_name} | {missing_time_str}`"
elif(cd_command == "check"):
if(cd_name == ""):
argument_help = f"{missing_args_str} {missing_name_str}\n"
argument_help += f"{complete_command_str} {missing_name_str}`"
elif(cd_command == "remove"):
if(cd_name == ""):
argument_help = f"{missing_args_str} {missing_name_str}\n"
argument_help += f"{complete_command_str} {missing_name_str}`"
elif(cd_command == "list"):
pass
elif(cd_command == "clean"):
pass
return argument_help
def __find_pend_diff_str(self, pend: pendulum.DateTime):
cd_diff = pend.diff(pendulum.now())
(diff_days, diff_hours, diff_minutes) = (cd_diff.days, cd_diff.hours, cd_diff.minutes)
if(not pend.is_future()):
(diff_days, diff_hours, diff_minutes) = (-diff_days, -diff_hours, -diff_minutes)
diff_str = f"{diff_days} days, {diff_hours} hours, {diff_minutes} minutes from now"
return diff_str
def run_countdown_command(self, ctx: commands.Context, cd_command: str, cd_name: str, cd_time: str):
output = self.__find_countdown_hints(cd_command, cd_name, cd_time)
if(output == ""): # no hints were needed
if(not self.sqlhelp.if_table_exists(self.__get_cd_table_name(ctx.guild.id))): # make sure table exists before executing the CD command
self.sqlhelp.create_table(self.__get_cd_table_name(ctx.guild.id), "(id INTEGER PRIMARY KEY AUTOINCREMENT, name TEXT NOT NULL, time TEXT NOT NULL, user_id INTEGER NOT NULL, UNIQUE(name))")
if(cd_command == "set"):
output = self.__countdown_cmd_set(ctx, cd_name, cd_time)
elif(cd_command == "change"):
output = self.__countdown_cmd_change(ctx, cd_name, cd_time)
elif(cd_command == "check"):
output = self.__countdown_cmd_check(ctx, cd_name)
elif(cd_command == "remove"):
output = self.__countdown_cmd_remove(ctx, cd_name)
elif(cd_command == "list"):
output = self.__countdown_cmd_list(ctx)
elif(cd_command == "clean"):
output = self.__countdown_cmd_clean(ctx)
else:
output = BOT_ERROR.INVALID_COUNTDOWN_COMMAND(cd_command)
output += "\nCountdown options/sub-commands: `set`, `change`, `check` , `remove`, `list`, `clean`."
return output |
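# The time format accepted throughout this helper is self.pend_format ("M/D/YY [@] h:m A Z");
# a standalone check of a user-supplied string mirrors the validation in __countdown_cmd_set:
#   pendulum.from_format("5/17/20 @ 1:00 PM -06:00", "M/D/YY [@] h:m A Z")
# parses without raising, so the same string would be accepted by `countdown set`.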
the-stack_0_15648 | from __future__ import print_function
from tensorboardX import SummaryWriter
import torch
from torch.optim.lr_scheduler import ReduceLROnPlateau
from utils import evaluate, get_lr, load_checkpoint, save_checkpoint, test, train
from config import TrainConfig as C
from loader.MSVD import MSVD
from loader.MSRVTT import MSRVTT
from models.decoder import Decoder
from models.caption_generator import CaptionGenerator
def build_loaders():
if C.corpus == "MSVD":
corpus = MSVD(C)
elif C.corpus == "MSR-VTT":
corpus = MSRVTT(C)
print('#vocabs: {} ({}), #words: {} ({}). Trim words which appear less than {} times.'.format(
corpus.vocab.n_vocabs, corpus.vocab.n_vocabs_untrimmed, corpus.vocab.n_words,
corpus.vocab.n_words_untrimmed, C.loader.min_count))
return corpus.train_data_loader, corpus.val_data_loader, corpus.test_data_loader, corpus.vocab
def build_model(vocab):
decoder = Decoder(
rnn_type=C.decoder.rnn_type,
num_layers=C.decoder.rnn_num_layers,
num_directions=C.decoder.rnn_num_directions,
feat_size=C.feat.size,
feat_len=C.loader.frame_sample_len,
embedding_size=C.vocab.embedding_size,
hidden_size=C.decoder.rnn_hidden_size,
attn_size=C.decoder.rnn_attn_size,
output_size=vocab.n_vocabs,
rnn_dropout=C.decoder.rnn_dropout)
if C.pretrained_decoder_fpath is not None:
decoder.load_state_dict(torch.load(C.pretrained_decoder_fpath)['decoder'])
print("Pretrained decoder is loaded from {}".format(C.pretrained_decoder_fpath))
model = CaptionGenerator(decoder, C.loader.max_caption_len, vocab)
model.cuda()
return model
def log_train(summary_writer, e, loss, lr, scores=None):
summary_writer.add_scalar(C.tx_train_loss, loss['total'], e)
summary_writer.add_scalar(C.tx_train_cross_entropy_loss, loss['cross_entropy'], e)
summary_writer.add_scalar(C.tx_train_entropy_loss, loss['entropy'], e)
summary_writer.add_scalar(C.tx_lr, lr, e)
print("loss: {} (CE {} + E {})".format(loss['total'], loss['cross_entropy'], loss['entropy']))
if scores is not None:
for metric in C.metrics:
summary_writer.add_scalar("TRAIN SCORE/{}".format(metric), scores[metric], e)
print("scores: {}".format(scores))
def log_val(summary_writer, e, loss, scores):
summary_writer.add_scalar(C.tx_val_loss, loss['total'], e)
summary_writer.add_scalar(C.tx_val_cross_entropy_loss, loss['cross_entropy'], e)
summary_writer.add_scalar(C.tx_val_entropy_loss, loss['entropy'], e)
for metric in C.metrics:
summary_writer.add_scalar("VAL SCORE/{}".format(metric), scores[metric], e)
print("loss: {} (CE {} + E {})".format(loss['total'], loss['cross_entropy'], loss['entropy']))
print("scores: {}".format(scores))
def log_test(summary_writer, e, scores):
for metric in C.metrics:
summary_writer.add_scalar("TEST SCORE/{}".format(metric), scores[metric], e)
print("scores: {}".format(scores))
def main():
print("MODEL ID: {}".format(C.model_id))
summary_writer = SummaryWriter(C.log_dpath)
train_iter, val_iter, test_iter, vocab = build_loaders()
model = build_model(vocab)
optimizer = torch.optim.Adam(model.parameters(), lr=C.lr, weight_decay=C.weight_decay, amsgrad=True)
lr_scheduler = ReduceLROnPlateau(optimizer, mode='min', factor=C.lr_decay_gamma,
patience=C.lr_decay_patience, verbose=True)
best_val_CIDEr = 0.
best_epoch = None
best_ckpt_fpath = None
for e in range(1, C.epochs + 1):
ckpt_fpath = C.ckpt_fpath_tpl.format(e)
""" Train """
print("\n")
train_loss = train(e, model, optimizer, train_iter, vocab, C.decoder.rnn_teacher_forcing_ratio,
C.reg_lambda, C.gradient_clip)
log_train(summary_writer, e, train_loss, get_lr(optimizer))
""" Validation """
val_loss = test(model, val_iter, vocab, C.reg_lambda)
val_scores = evaluate(val_iter, model, vocab, beam_width=5, beam_alpha=0.)
log_val(summary_writer, e, val_loss, val_scores)
if e >= C.save_from and e % C.save_every == 0:
print("Saving checkpoint at epoch={} to {}".format(e, ckpt_fpath))
save_checkpoint(e, model, ckpt_fpath, C)
if e >= C.lr_decay_start_from:
lr_scheduler.step(val_loss['total'])
if val_scores['CIDEr'] > best_val_CIDEr:
best_epoch = e
best_val_CIDEr = val_scores['CIDEr']
best_ckpt_fpath = ckpt_fpath
""" Test with Best Model """
print("\n\n\n[BEST]")
best_model = load_checkpoint(model, best_ckpt_fpath)
best_scores = evaluate(test_iter, best_model, vocab, beam_width=5, beam_alpha=0.)
print("scores: {}".format(best_scores))
for metric in C.metrics:
summary_writer.add_scalar("BEST SCORE/{}".format(metric), best_scores[metric], best_epoch)
save_checkpoint(e, best_model, C.ckpt_fpath_tpl.format("best"), C)
if __name__ == "__main__":
main()
|
the-stack_0_15649 | # coding: utf-8
import pprint
import re
import six
class ListCertificatesRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'instance_id': 'str',
'app_id': 'str',
'limit': 'int',
'marker': 'str',
'offset': 'int'
}
attribute_map = {
'instance_id': 'Instance-Id',
'app_id': 'app_id',
'limit': 'limit',
'marker': 'marker',
'offset': 'offset'
}
def __init__(self, instance_id=None, app_id=None, limit=None, marker=None, offset=None):
"""ListCertificatesRequest - a model defined in huaweicloud sdk"""
self._instance_id = None
self._app_id = None
self._limit = None
self._marker = None
self._offset = None
self.discriminator = None
if instance_id is not None:
self.instance_id = instance_id
if app_id is not None:
self.app_id = app_id
if limit is not None:
self.limit = limit
if marker is not None:
self.marker = marker
if offset is not None:
self.offset = offset
@property
def instance_id(self):
"""Gets the instance_id of this ListCertificatesRequest.
        **Parameter description**: Instance ID, the unique identifier of an instance under physical multi-tenancy. Ordinary Huawei Cloud tenants do not need to pass this parameter; it is only required when the API is called from the management plane in a physical multi-tenant scenario.
:return: The instance_id of this ListCertificatesRequest.
:rtype: str
"""
return self._instance_id
@instance_id.setter
def instance_id(self, instance_id):
"""Sets the instance_id of this ListCertificatesRequest.
        **Parameter description**: Instance ID, the unique identifier of an instance under physical multi-tenancy. Ordinary Huawei Cloud tenants do not need to pass this parameter; it is only required when the API is called from the management plane in a physical multi-tenant scenario.
:param instance_id: The instance_id of this ListCertificatesRequest.
:type: str
"""
self._instance_id = instance_id
@property
def app_id(self):
"""Gets the app_id of this ListCertificatesRequest.
        **Parameter description**: Resource space ID. Optional. Users with multiple resource spaces can pass this parameter to list the certificates of a specific resource space; if it is omitted, all certificates under the user are listed. **Value range**: at most 36 characters; only letters, digits, underscores (_) and hyphens (-) are allowed.
:return: The app_id of this ListCertificatesRequest.
:rtype: str
"""
return self._app_id
@app_id.setter
def app_id(self, app_id):
"""Sets the app_id of this ListCertificatesRequest.
        **Parameter description**: Resource space ID. Optional. Users with multiple resource spaces can pass this parameter to list the certificates of a specific resource space; if it is omitted, all certificates under the user are listed. **Value range**: at most 36 characters; only letters, digits, underscores (_) and hyphens (-) are allowed.
:param app_id: The app_id of this ListCertificatesRequest.
:type: str
"""
self._app_id = app_id
@property
def limit(self):
"""Gets the limit of this ListCertificatesRequest.
        **Parameter description**: Number of records returned per page in a paginated query. **Value range**: integer from 1 to 50; default 10.
:return: The limit of this ListCertificatesRequest.
:rtype: int
"""
return self._limit
@limit.setter
def limit(self, limit):
"""Sets the limit of this ListCertificatesRequest.
        **Parameter description**: Number of records returned per page in a paginated query. **Value range**: integer from 1 to 50; default 10.
:param limit: The limit of this ListCertificatesRequest.
:type: int
"""
self._limit = limit
@property
def marker(self):
"""Gets the marker of this ListCertificatesRequest.
        **Parameter description**: ID of the last record in the previous paginated query, as returned by the IoT platform. Pages are sorted by record ID in descending order (newer records have larger IDs). If marker is set, only records with IDs smaller than marker are returned; if it is omitted, the query starts from the newest record. To iterate over all data, pass the marker returned by the previous response on each query. **Value range**: 24-character hexadecimal string; default ffffffffffffffffffffffff.
:return: The marker of this ListCertificatesRequest.
:rtype: str
"""
return self._marker
@marker.setter
def marker(self, marker):
"""Sets the marker of this ListCertificatesRequest.
        **Parameter description**: ID of the last record in the previous paginated query, as returned by the IoT platform. Pages are sorted by record ID in descending order (newer records have larger IDs). If marker is set, only records with IDs smaller than marker are returned; if it is omitted, the query starts from the newest record. To iterate over all data, pass the marker returned by the previous response on each query. **Value range**: 24-character hexadecimal string; default ffffffffffffffffffffffff.
:param marker: The marker of this ListCertificatesRequest.
:type: str
"""
self._marker = marker
@property
def offset(self):
"""Gets the offset of this ListCertificatesRequest.
        **Parameter description**: Start the query offset records after marker. When offset is 0, output starts from the first record after marker. The cap of 500 exists for API performance; combine offset with marker to page through results, e.g. with 50 records per page you can jump directly to pages 1-11 via offset, but beyond that you must use the marker returned by page 11 as the marker of the next query to reach pages 12-22. **Value range**: integer from 0 to 500; default 0.
:return: The offset of this ListCertificatesRequest.
:rtype: int
"""
return self._offset
@offset.setter
def offset(self, offset):
"""Sets the offset of this ListCertificatesRequest.
        **Parameter description**: Start the query offset records after marker. When offset is 0, output starts from the first record after marker. The cap of 500 exists for API performance; combine offset with marker to page through results, e.g. with 50 records per page you can jump directly to pages 1-11 via offset, but beyond that you must use the marker returned by page 11 as the marker of the next query to reach pages 12-22. **Value range**: integer from 0 to 500; default 0.
:param offset: The offset of this ListCertificatesRequest.
:type: int
"""
self._offset = offset
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ListCertificatesRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
the-stack_0_15651 | #! /usr/bin/python3
"""
This module contains implementation of the Observer pattern for State Machine
"""
import copy
import os
from abc import ABC, abstractmethod
from datetime import datetime, timedelta as td
from pathlib import Path
from shutil import copyfile
from time import sleep
from predictor.Statmodel import tsARIMA
from predictor.api import d_models_assembly, fit_models, save_modeles_in_repository, get_list_trained_models, \
predict_model, \
chart_predict, tbl_predict, prepareDataset
from predictor.control import ControlPlane
from predictor.dataset import Dataset
from predictor.demandwidget import DemandWidget
from predictor.utility import exec_time, msg2log, PlotPrintManager, cSFMT, incDateStr, PERIOD_MODEL_RETRAIN
""" State Machine for UpdateChecker"""
SM_CP_CREATE_DATASET = 0
SM_TP_MODEL_TRAINING = 1
SM_CP_UPDATE_DATASET = 2
SM_PP_PREDICTING = 3
SM_TP_MODEL_UPDATING = 4
SM_CP_DATA_WAITING = 5
SM_INVALID_STATE = 6
sm_dict = {SM_CP_CREATE_DATASET: 'CP sends first "GET"-request, receives data and creates a dataset',
SM_TP_MODEL_TRAINING: 'TP trains NN ans STS models and saves them in the repository',
SM_CP_UPDATE_DATASET: 'CP sends "GET"-request to add new data to the dataset',
SM_PP_PREDICTING: 'PP loads model from repository and performs predicting',
SM_TP_MODEL_UPDATING: 'TP updates the models and saves them in the repository',
SM_CP_DATA_WAITING: 'CP waits when data will be available',
SM_INVALID_STATE: 'Invalid State...'}
class ISubject(ABC):
"""
interface to Subject
"""
@abstractmethod
def attach(self, observer) -> None:
"""
attaches the observers to the subject
:param observer:
:return:
"""
pass
@abstractmethod
def detach(self, observer) -> None:
"""
detaches the observers from the subject
:param observer:
:return:
"""
pass
@abstractmethod
def notify(self, dct: object) -> None:
"""
notifies all observers
:type dct: object
:param dct: for parameters passing
:return:
"""
pass
class UpdateProvider(ISubject):
"""
The UpdateProvider ( or Observable in terms of Observer-pattern) implements the simple state-machine with
following states and transfers:
_state = 0 (SM_CP_CREATE_DATASET) - initial state, Control Plane sends the request, receives data and creates
a dataset.
_state = 1 (SM_TP_MODEL_TRAINING) - dataset created. The Train Plane estimates weights of NN (deep learning) and
identifies orders of models and parameters Statistical Models of the Time Series (STS)
_state = 2 (SM_CP_UPDATE_DATASET) - deep learning finished. The Control Plan sends the request to receive new
data and update the dataset.
_state = 3 (SM_PP_PREDICTING) - Predict Plane performs the forecast
_state = 4 (SM_TP_MODEL_UPDATING) - Train Plane updates models. The models are updated no more than once an
hour, that is, after 6 predictions, since the sampling rate is 10 minutes.
_state = 5 (SM_CP_DATA_WAITING) - no new data, wait 10 minutes.
The transfer sequences are
0->1->3->2
|-(no data)---->5
2-(6 times)-------->3
|-(each 7th)--->4
4->3
5->2
"""
_state: int = None
_observers = []
_changed = False
_notification_received: int = 0
_start: int = 1
def __init__(self, f):
_observers = []
self.f = f
self.state = 0 # initial for control plane
return
# getter/setter
def set_state(self, val):
type(self)._state = val
def get_state(self):
return type(self)._state
state = property(get_state, set_state)
def set_changed(self, val):
type(self)._changed = val
def get_changed(self):
return type(self)._changed
changed = property(get_changed, set_changed)
def set_notification_received(self, val):
type(self)._notification_received = val
def get_notification_received(self):
return type(self)._notification_received
notification_received = property(get_notification_received, set_notification_received)
def set_start(self, val):
type(self)._start = val
def get_start(self):
return type(self)._start
start = property(get_start, set_start)
def attach(self, observer) -> None:
msg = '{} UpdateProvider: attached an observer {}'.format(datetime.now().strftime(cSFMT), observer.__str__())
self._observers.append(observer)
msg2log(self.attach.__name__, msg, self.f)
return
def detach(self, observer) -> None:
"""
:type observer: object
"""
msg = '{} UpdateProvider: detached from observer {}'.format(datetime.now().strftime(cSFMT), observer.__str__())
self._observers.remove(observer)
msg2log(self.detach.__name__, msg, self.f)
return
def notify(self, dct: object) -> None:
msg = '{} UpdateProvider: notifying observers..'.format(datetime.now().strftime(cSFMT))
msg2log(self.notify.__name__, msg, self.f)
self.notification_received = 0
for observer in self._observers:
observer.update(self, dct)
msg = '{} UpdateProvider: notifying observers.. The observer {} has notification'.format(
datetime.now().strftime(cSFMT), observer.__str__())
msg2log(self.notify.__name__, msg, self.f)
if self.notification_received > 0:
break
return
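# Typical wiring of this provider with its observers (a sketch; UpdateChecker and
# ControlPlaneObserver are defined further below in this module, `cp`/`ds` are
# ControlPlane and Dataset instances, and `f` is an open log file handle):
#
#   checker = UpdateChecker(f)
#   checker.attach(ControlPlaneObserver(f))
#   checker.dct['ControlPlane'] = cp
#   checker.dct['Dataset'] = ds
#   checker.drive()   # runs one step of the state machine and notifies observers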
class UpdateChecker(UpdateProvider):
def __init__(self, f=None):
super().__init__(f)
self.dct = {'DataFrame': None, 'ControlPlane': None, 'Dataset': None}
def isChanged(self):
return self.changed
def setChanged(self):
self.changed = 1
def clearChanged(self):
self.changed = 0
# state - machine functions
def _sm_CP_CREATE_DATASET(self):
pass
self.notify(self.dct)
PlotPrintManager.isNeedPrintDataset()
def _sm_TP_MODEL_TRAINING(self):
self.notify(self.dct)
def _sm_CP_UPDATE_DATASET(self):
self.notify(self.dct)
def _sm_PP_PREDICTING(self):
ds = self.dct['Dataset']
cp = self.dct['ControlPlane']
if ds.df is None:
message = f"""
            The predicting state was read from the saved descriptor at program start.
            The state is : {cp.drtDescriptor['state']}
            Description : {cp.drtDescriptor['misc']}
            The dataset needs to be re-created from the csv-file : {cp.drtDescriptor['csvDataset']}
"""
msg2log(self._sm_PP_PREDICTING.__name__, message, self.f)
ds.csv_path = cp.drtDescriptor['csvDataset']
prepareDataset(cp, ds, self.f)
cp.ts_analysis(ds)
self.dct['ControlPlane'] = cp
self.dct['Dataset'] = ds
self.notify(self.dct)
def _sm_TP_MODEL_UPDATING(self):
self.notify(self.dct)
def _sm_CP_DATA_WAITING(self):
self.state = SM_CP_UPDATE_DATASET
# def drive(self,cp: ControlPlane, ds: Dataset)->None:
def drive(self) -> None:
cp = self.dct['ControlPlane']
ds = self.dct['Dataset']
# self._state = randrange(0, 3)
message = f"""
State-Machine:
Current time : {datetime.now().strftime(cSFMT)}
Current state: {self.state}
Description : {sm_dict[self.state]}
Transfer to next state...
"""
msg2log(self.drive.__name__, message, cp.fa)
statusDescriptor = self.getDescriptor(cp)
if statusDescriptor != 0:
cp.drtDescriptor = {}
cp.drtDescriptor['state'] = SM_CP_CREATE_DATASET
self.state = cp.drtDescriptor['state']
self.start == 0
else:
if self.start == 1:
self.state = cp.drtDescriptor['state'] # previous state
if self.state == SM_CP_CREATE_DATASET and self.start == 1:
# check file csv
if os.path.exists(cp.drtDescriptor['csvDataset']) and os.path.isfile(cp.drtDescriptor['csvDataset']):
self.state = SM_TP_MODEL_TRAINING
else:
self.state = SM_CP_CREATE_DATASET
message = f"""
Dataset is not found...
State-machine transitions to : {self.state}
Description : {sm_dict[self.state]}
The descriptor will be re-written
"""
elif self.state == SM_CP_UPDATE_DATASET and self.start == 1: # for other state state-machine will transition to state Update dataset
self.state = SM_PP_PREDICTING
elif self.state == SM_TP_MODEL_TRAINING and self.start == 1:
self.state = SM_PP_PREDICTING
elif self.state == SM_CP_DATA_WAITING and self.start == 1:
self.state = SM_CP_UPDATE_DATASET
elif self.state == SM_TP_MODEL_UPDATING and self.start == 1:
self.state = SM_CP_UPDATE_DATASET
elif self.state == SM_PP_PREDICTING and self.start == 1:
self.state = SM_CP_UPDATE_DATASET
message = f"""
Previous state : {self.state}
The following info received from saved descriptor
Description : {sm_dict[self.state]}
Dataset csv-file : {cp.drtDescriptor['csvDataset']}
Dataset typeID : {cp.drtDescriptor['typeID']}
Dataset title : {cp.drtDescriptor['title']}
Last date in dataset : {cp.drtDescriptor['lastTime']}
State : {cp.drtDescriptor['state']}
            Miscellaneous                : {cp.drtDescriptor['misc']}
State-machine transitions to : {self.state}
Description : {sm_dict[self.state]}
"""
msg2log(self.drive.__name__, message, self.f)
#
self.start = 0
sdtn = datetime.now().strftime(cSFMT)
message = f"""
Current time : {sdtn}
Starting : {sm_dict[self.state]}
"""
msg2log(self.drive.__name__, message, self.f)
if self.state != SM_CP_CREATE_DATASET:
if self.dct["Dataset"] is None or self.dct["Dataset"].df is None:
self.state = SM_CP_CREATE_DATASET
message = f"""
The Dataset is empty.
                The state-machine needs to be restarted.
"""
msg2log(self.drive.__name__, message, self.f)
# state machine
if self.state == SM_CP_CREATE_DATASET:
msg2log(self.drive.__name__, '{} :{}\n'.format(self.state, sm_dict[self.state]), cp.fa)
self._sm_CP_CREATE_DATASET()
elif self.state == SM_TP_MODEL_TRAINING:
msg2log(self.drive.__name__, '{} :{}\n'.format(self.state, sm_dict[self.state]), cp.fa)
self._sm_TP_MODEL_TRAINING()
elif self.state == SM_CP_UPDATE_DATASET:
msg2log(self.drive.__name__, '{} :{}\n'.format(self.state, sm_dict[self.state]), cp.fa)
self._sm_CP_UPDATE_DATASET()
elif self.state == SM_PP_PREDICTING:
msg2log(self.drive.__name__, '{} :{}\n'.format(self.state, sm_dict[self.state]), cp.fa)
self._sm_PP_PREDICTING()
elif self.state == SM_TP_MODEL_UPDATING:
msg2log(self.drive.__name__, '{} :{}\n'.format(self.state, sm_dict[self.state]), cp.fa)
self._sm_TP_MODEL_UPDATING()
elif self.state == SM_CP_DATA_WAITING:
msg2log(self.drive.__name__, '{} :{}\n'.format(self.state, sm_dict[self.state]), cp.fa)
self._sm_CP_DATA_WAITING()
else:
msg = "Invalid state of state-machine"
            msg2log(self.drive.__name__, msg, self.f)
return
return
def getDescriptor(self, cp):
'''
This method retrieves a descriptor in supposed path. The folder name and descriptor file received for
configuration ( cp.folder_descriptor and cp.file_descriptor).
:param cp:
:return: 0 - success
1- descriptor folder not found. The empty folder created. The descriptor state sets to STATE_START.
2 - no descriptor into folder. The descriptor state sets to STATE_START
-1- descriptor folder can not be loaded.
'''
supposed_path_to_descriptor_folder = Path(cp.folder_descriptor)
supposed_path_to_descriptor = Path(cp.folder_descriptor) / Path(cp.file_descriptor)
if (not os.path.exists(supposed_path_to_descriptor_folder)) or (
not os.path.isdir(supposed_path_to_descriptor_folder)):
msg = "No descriptors folder {}".format(supposed_path_to_descriptor_folder)
msg2log(self.getDescriptor.__name__, msg, self.f)
Path(supposed_path_to_descriptor_folder).mkdir(parents=True, exist_ok=True)
msg = "The descriptors folder created {}".format(supposed_path_to_descriptor_folder)
msg2log(self.getDescriptor.__name__, msg, self.f)
cp.drtDescriptor['state'] = SM_CP_CREATE_DATASET
cp.drtDescriptor['misc'] = sm_dict[SM_CP_CREATE_DATASET]
return 1
elif (not os.path.exists(supposed_path_to_descriptor)) or (not os.path.isfile(supposed_path_to_descriptor)):
msg = "No descriptor in the folder {}".format(supposed_path_to_descriptor_folder)
msg2log(self.getDescriptor.__name__, msg, self.f)
cp.drtDescriptor['state'] = SM_CP_CREATE_DATASET
cp.drtDescriptor['misc'] = sm_dict[SM_CP_CREATE_DATASET]
return 2
else:
if not cp.load_descriptor():
msg = "The descriptor cannot be loaded from {}".format(supposed_path_to_descriptor)
msg2log(self.getDescriptor.__name__, msg, self.f)
raise NameError(msg)
return -1
return 0
def parseDescriptor(self, cp):
'''
This method returns the state for Observable
:param cp:
:return:
'''
if cp.drtDescriptor['state'] == SM_CP_CREATE_DATASET:
pass
elif cp.drtDescriptor['state'] == SM_TP_MODEL_TRAINING:
pass
elif cp.drtDescriptor['state'] == SM_CP_UPDATE_DATASET:
pass
elif cp.drtDescriptor['state'] == SM_PP_PREDICTING:
pass
elif cp.drtDescriptor['state'] == SM_TP_MODEL_UPDATING:
pass
elif cp.drtDescriptor['state'] == SM_CP_DATA_WAITING:
pass
else:
pass
return
def setDescriptor(self, cp):
cp.save_descriptor()
return
class IObserver(ABC):
"""
observer's interface
"""
@abstractmethod
def update(self, subject: ISubject, dct) -> None:
pass
class ControlPlaneObserver(IObserver):
'''
This concrete Observer class listens for notifications from the Observable UpdateProvider.
A dataset created in real time is saved in the following folder:
<cp.folder_rt_datasets>/<type_id>, where type_id comes from the header of the requested widget
'''
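# Assumed contents of the shared `dct` dictionary (inferred from the code below):
#   dct['ControlPlane'] - the ControlPlane object (configuration, descriptor, csv path),
#   dct['Dataset']      - the Dataset object built from the csv file,
#   dct['DataFrame']    - the DemandWidget with the most recently requested data.
# The observers read and update these entries on every notification.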
def __init__(self, f=None):
self.f = f
self.wait_index = 0
self.wait_max_index = 2
def update(self, subject, dct) -> None:
msg = '{} {} : Received the event.'.format(datetime.now().strftime(cSFMT), self.__str__())
msg2log(self.update.__name__, msg, self.f)
if subject.state == SM_CP_CREATE_DATASET:
subject.notification_received += 1
self.createDataset(dct)
self.updateControlPlane(dct)
cp = dct['ControlPlane']
dmwdg = dct['DataFrame']
cp.drtDescriptor["csvDataset"] = cp.csv_path
cp.drtDescriptor['typeID'] = dmwdg.type_id
cp.drtDescriptor['title'] = dmwdg.title
cp.drtDescriptor['lastTime'] = dmwdg.last_time
cp.drtDescriptor['state'] = SM_CP_CREATE_DATASET
cp.drtDescriptor['misc'] = sm_dict[SM_CP_CREATE_DATASET]
cp.save_descriptor()
cp.state = SM_TP_MODEL_TRAINING
dct['ControlPlane'] = cp
subject.state = SM_TP_MODEL_TRAINING
message = f"""
Finished state : {cp.drtDescriptor['state']}
State descriptor : {sm_dict[cp.drtDescriptor['state']]}
Next state : {subject.state}
Next state descriptor : {sm_dict[subject.state]}
"""
msg2log(self.update.__name__, message, self.f)
return
elif subject.state == SM_CP_UPDATE_DATASET:
subject.notification_received += 1
status = self.updateDataset(dct)
cp = dct['ControlPlane']
cp.drtDescriptor["csvDataset"] = cp.csv_path
cp.drtDescriptor['state'] = SM_CP_UPDATE_DATASET
cp.drtDescriptor['misc'] = sm_dict[SM_CP_UPDATE_DATASET]
dct['ControlPlane'] = cp
if status == 0:
self.wait_index = 0
self.updateControlPlane(dct)
dmwdg = dct['DataFrame']
cp = dct['ControlPlane']
cp.drtDescriptor['lastTime'] = dmwdg.last_time
cp.state = SM_PP_PREDICTING
subject.state = SM_PP_PREDICTING
cp.save_descriptor()
dct['DataFrame'] = dmwdg
dct['ControlPlane'] = cp
else:
cp = dct['ControlPlane']
cp.state = SM_CP_DATA_WAITING
subject.state = SM_CP_DATA_WAITING
cp.save_descriptor()
dct['ControlPlane'] = cp
message = f"""
Finished state : {cp.drtDescriptor['state']}
State descriptor : {sm_dict[cp.drtDescriptor['state']]}
Next state : {subject.state}
Next state descriptor : {sm_dict[subject.state]}
"""
msg2log(self.update.__name__, message, self.f)
return
elif subject.state == SM_CP_DATA_WAITING:
subject.notification_received += 1
cp = dct['ControlPlane']
cp.drtDescriptor['state'] = SM_CP_DATA_WAITING
cp.drtDescriptor['misc'] = sm_dict[SM_CP_DATA_WAITING]
sleep(cp.discret * 30)
self.wait_index += 1
if self.wait_index > self.wait_max_index:
msg = "Cannot get updated data for the time series after {} attempts.\n".format(self.wait_max_index)
msg2log(self.update.__name__, msg, self.f)
cp.drtDescriptor['state'] = SM_INVALID_STATE
cp.drtDescriptor['misc'] = sm_dict[SM_INVALID_STATE]
subject.state = SM_INVALID_STATE
message = f"""
Finished state : {cp.drtDescriptor['state']}
State descriptor : {sm_dict[cp.drtDescriptor['state']]}
Next state : {subject.state}
Next state descriptor : {sm_dict[subject.state]}
State-machine is stopping....
"""
msg2log(self.update.__name__, message, self.f)
cp.save_descriptor()
dct['ControlPlane'] = cp
return
cp.save_descriptor()
subject.state = SM_CP_UPDATE_DATASET
message = f"""
Finished state : {cp.drtDescriptor['state']}
State descriptor : {sm_dict[cp.drtDescriptor['state']]}
Next state : {subject.state}
Next state descriptor : {sm_dict[subject.state]}
"""
msg2log(self.update.__name__, message, self.f)
dct['ControlPlane'] = cp
return
return
def createDataset(self, dct):
scaled_data = False
end_time_t = datetime.now()
end_time = end_time_t.strftime(cSFMT)
start_time_t = end_time_t - td(days=ControlPlane.get_ts_duration_days())
start_time = start_time_t.strftime(cSFMT)
dmwdg = DemandWidget(scaled_data, start_time, end_time, 'hour', None, None, self.f)
dmwdg.set_url()
print(dmwdg.url)
requested_widget = dmwdg.getDemandRT(None)
print("Requested widget has type {}".format(type(requested_widget)))
if ControlPlane.get_modeImbalance():
(imbalance_dset, programmed_dst, demand_dset) = ControlPlane.get_modeImbalanceNames()
cp = dct['ControlPlane']
cp.rcpower_dset = imbalance_dset
dct['ControlPlane'] = cp
dmwdg.plot_ts(os.getcwd(), False)
dmwdg.autocorr_show(os.getcwd(), False)
dct['DataFrame'] = dmwdg  # the other entries are 'ControlPlane': cp and 'Dataset': ds
return
def updateDataset(self, dct) -> int:
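# Sketch of the update step (inferred from the body below): request new demand data starting
# one discretization step (cp.discret minutes) after drtDescriptor['lastTime'], concatenate it
# with the existing csv dataset, back up the old csv, then "age" the dataset by dropping as
# many of the oldest rows as were appended. Returns 0 on success, -1 if the request failed and
# 1 if no new samples were received.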
nRet = 1
cp = dct['ControlPlane']
cp.csv_path = cp.drtDescriptor["csvDataset"]
scaled_data = False
start_time = incDateStr(cp.drtDescriptor['lastTime'], minutes=cp.discret)
end_time = datetime.now().strftime(cSFMT)
dmwdg = DemandWidget(scaled_data, start_time, end_time, 'hour', None, None, self.f)
dmwdg.set_url()
print(dmwdg.url)
requested_widget = dmwdg.getDemandRT(None)
if requested_widget is None:
nRet = -1
if ControlPlane.get_modeImbalance():
(imbalance_dset, programmed_dst, demand_dset) = ControlPlane.get_modeImbalanceNames()
cp.rcpower_dset = imbalance_dset
if dmwdg.ts_size > 0:
df_new = dmwdg.concat_with_df_from_csv(cp.drtDescriptor["csvDataset"])
bak_csv_str = str(cp.drtDescriptor["csvDataset"]).replace('.csv', "_" +
DemandWidget.ISO8601toDateTime(
cp.drtDescriptor['lastTime']).strftime(
"%Y_%m_%d_%H_%M")
+ '.bak.csv')
bak_csv_file = Path(bak_csv_str)
# Path(cp.drtDescriptor["csvDataset"]).copy(bak_csv_file)
copyfile(str(Path(cp.drtDescriptor["csvDataset"])), str(bak_csv_file))
message = f"""
Dataset aging...
Size of updated dataset : {len(df_new)}
"""
msg2log(self.updateDataset.__name__, message, self.f)
df_new.drop(df_new.index[:len(dmwdg.df)],
inplace=True)  # aging mechanism: drop the first len(dmwdg.df) rows of the dataset
message = f"""
Dataset has been aged.
Size of the dataset : {len(df_new)}
"""
msg2log(self.updateDataset.__name__, message, self.f)
df_new.to_csv(Path(cp.drtDescriptor["csvDataset"]), index=False)
dmwdg.df = df_new
dmwdg.ts_size = len(df_new)
dct['DataFrame'] = dmwdg
dct['ControlPlane'] = cp
nRet = 0
return nRet
def updateControlPlane(self, dct):
dmwdg = dct['DataFrame']
cp = dct['ControlPlane']
ds = dct['Dataset']
cp.dt_dset = dmwdg.names[0]
if ControlPlane.get_modeImbalance():
cp.rcpower_dset = dmwdg.names[4]
else:
cp.rcpower_dset = dmwdg.names[1]
suffics = ".csv"
# file_csv = Path(cp.folder_control_log, cp.rcpower_dset + "_" +
# Path(__file__).stem).with_suffix(suffics)
dataset_folder = Path(cp.folder_rt_datasets) / str(dmwdg.one_word_title)
Path(dataset_folder).mkdir(parents=True, exist_ok=True)
file_csv = Path(dataset_folder, cp.rcpower_dset + "_" + Path(__file__).stem).with_suffix(suffics)
cp.csv_path = dmwdg.to_csv(file_csv)
ds = Dataset(cp.csv_path, cp.dt_dset, cp.rcpower_dset, cp.discret, cp.fc) # create dataset
msg = "Dataset (csv-file) created"
msg2log(self.updateControlPlane.__name__, msg, self.f)
prepareDataset(cp, ds, cp.fc)
cp.ts_analysis(ds)
dct["Dataset"] = ds
dct['ControlPlane'] = cp
dct['DataFrame'] = dmwdg
return
class TrainPlaneObserver(IObserver):
'''
This concrete Observer class listens for notifications from the Observable UpdateProvider.
'''
def __init__(self, f=None):
self.f = f
def update(self, subject, dct) -> None:
msg = '{} {} : Received the event. The model training is carried out'.format(
datetime.now().strftime(cSFMT), self.__str__())
if subject.state == SM_TP_MODEL_TRAINING:
subject.notification_received += 1
msg2log(self.update.__name__, msg, self.f)
self.TrainModels(dct)
if subject.state == SM_TP_MODEL_UPDATING:
subject.notification_received += 1
msg2log(self.update.__name__, msg, self.f)
self.UpdateModels(dct)
subject.state = SM_PP_PREDICTING
cp = dct['ControlPlane']
message = f"""
Finished state : {cp.drtDescriptor['state']}
State descriptor : {sm_dict[cp.drtDescriptor['state']]}
Next state : {subject.state}
Next state descriptor : {sm_dict[subject.state]}
"""
msg2log(self.update.__name__, message, self.f)
dct['ControlPlane'] = cp
return
def TrainModels(self, dct) -> None:
# dmwdg = dct['DataFrame']
cp = dct['ControlPlane']
ds = dct['Dataset']
msg = '\nThe deep learning (DL) and statistical time series (STS) estimation started...\n\n'
msg2log(self.TrainModels.__name__, msg, self.f)
drive_train(cp, ds)
cp.drtDescriptor['modelRepository'] = Path(cp.path_repository) / Path(cp.rcpower_dset)
cp.drtDescriptor['state'] = SM_TP_MODEL_TRAINING # current state
cp.drtDescriptor['misc'] = sm_dict[SM_TP_MODEL_TRAINING]
cp.save_descriptor()
msg = "\nThe DL and STS finished.\n\n"
msg2log(self.TrainModels.__name__, msg, self.f)
dct['ControlPlane'] = cp
dct['Dataset'] = ds
return
def UpdateModels(self, dct) -> None:
cp = dct['ControlPlane']
ds = dct['Dataset']
msg = '\nThe DL and STS model re-estimation started...\n\n'
msg2log(self.UpdateModels.__name__, msg, self.f)
tsARIMA.set_ARIMA((-1, -1, -1))
tsARIMA.set_SARIMA((-1, -1, -1, -1, -1, -1))
cp.ts_analysis(ds)
drive_train(cp, ds)
cp.drtDescriptor['modelRepository'] = Path(cp.path_repository) / Path(cp.rcpower_dset)
cp.drtDescriptor['state'] = SM_TP_MODEL_UPDATING # current state
cp.drtDescriptor['misc'] = sm_dict[SM_TP_MODEL_UPDATING]
cp.save_descriptor()
msg = "\nThe DL and STS model re-estimation finished.\n\n"
msg2log(self.UpdateModels.__name__, msg, self.f)
dct['ControlPlane'] = cp
dct['Dataset'] = ds
return
class PredictPlaneObserver(IObserver):
'''
This concrete Observer class listens for notifications from the Observable UpdateProvider.
'''
def __init__(self, f=None):
self.f = f
self.predict_index = 0
self.predict_index_max = PERIOD_MODEL_RETRAIN
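# After every PERIOD_MODEL_RETRAIN successful predictions, update() below switches the
# state-machine to SM_TP_MODEL_UPDATING so the models are re-estimated on fresh data.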
def update(self, subject, dct) -> None:
msg = '{} {} : Received the event. The prediction by the models is carried out'.format(
datetime.now().strftime(cSFMT), self.__str__())
if subject.state == SM_PP_PREDICTING:
subject.notification_received += 1
msg2log(self.update.__name__, msg, self.f)
self.PredictByModels(dct)
else:
msg2log(self.update.__name__, "Inconsistent state while predicting; continuing with the dataset update", self.f)
subject.state = SM_CP_UPDATE_DATASET
cp = dct['ControlPlane']
if self.predict_index % self.predict_index_max == 0:
subject.state = SM_TP_MODEL_UPDATING # next state
message = f"""
Finished state : {cp.drtDescriptor['state']}
State descriptor : {sm_dict[cp.drtDescriptor['state']]}
Next state : {subject.state}
Next state descriptor : {sm_dict[subject.state]}
"""
msg2log(self.update.__name__, message, self.f)
dct['ControlPlane'] = cp
return
def PredictByModels(self, dct) -> None:
cp = dct['ControlPlane']
ds = dct['Dataset']
msg2log(self.PredictByModels.__name__, '\nThe prediction started...\n', self.f)
drive_predict(cp, ds)
self.predict_index += 1
cp.drtDescriptor['state'] = SM_PP_PREDICTING
cp.drtDescriptor['misc'] = sm_dict[SM_PP_PREDICTING]
cp.save_descriptor()
msg2log(self.PredictByModels.__name__, '\nThe prediction finished.\n', self.f)
dct['ControlPlane'] = cp
dct['Dataset'] = ds
return
@exec_time
def drive_auto(cp, ds):
'''
This drive_auto() function manages data processing flow in auto real-time mode.
:param cp: ControlPlane object
:param ds: Dataset object
:return:
'''
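# Rough control flow (a sketch of the loop below): an UpdateChecker subject is created and the
# three observers (control plane, train plane, predict plane) are attached to it. subject.drive()
# is then invoked roughly once per cp.discret minutes; training, predicting and model-updating
# transitions are carried out immediately without waiting for the next time slot.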
subject = UpdateChecker(cp.fa)
subject.setState = 0
observer_a = ControlPlaneObserver(cp.fa)
subject.attach(observer_a)
observer_b = TrainPlaneObserver(cp.fa)
subject.attach(observer_b)
observer_c = PredictPlaneObserver(cp.fa)
subject.attach(observer_c)
start_time = datetime.now()
msg = "\nFirst time to run UpdateChecker {}\n".format(start_time.strftime(cSFMT))
msg2log(drive_auto.__name__, msg, cp.fa)
subject.dct["ControlPlane"] = cp
subject.dct["Dataset"] = ds
nrun = 1
next_run_time = start_time + td(minutes=cp.discret)
while 1:
# subject.drive(cp,ds)
subject.drive()
nrun += 1
curr_time = datetime.now()
while curr_time < next_run_time:
deltat = next_run_time - curr_time
sleep_in_sec = deltat.seconds
# some transitions are immediately carried out
if subject.state == SM_TP_MODEL_TRAINING or subject.state == SM_PP_PREDICTING or subject.state == SM_TP_MODEL_UPDATING:
sleep_in_sec = 0
next_run_time = curr_time
message = f"""
Current time : {curr_time.strftime(cSFMT)}
Code current state : {subject.state}
Current state : {sm_dict[subject.state]}
Next state-machine transition number : {nrun}
Wait till next transition, sec : {sleep_in_sec}
"""
if sleep_in_sec > 0:
msg2log(drive_auto.__name__, message, cp.fa)
sleep(sleep_in_sec)
curr_time = datetime.now()
continue
next_run_time = next_run_time + td(minutes=cp.discret)
message = f"""
Current time : {curr_time.strftime(cSFMT)}
State-machine should be run at : {next_run_time.strftime(cSFMT)}
"""
msg2log(drive_auto.__name__, message, cp.fa)
continue
subject.detach(observer_a)
subject.detach(observer_b)
subject.detach(observer_c)
return
@exec_time
def drive_train(cp, ds):
""" This function performs the following actions when training the model on the given dataset:
-checks the list of used models;
-creates the models from templates;
-updates the model parameters like as n_steps and others;
-compiles the models;
-fits the models on the given training and evaluation datasets;
-saves the trained models in the repository.
:param cp: ControlPlane object
:param ds: dataset object
:return:
"""
d_models = {}
for keyType, valueList in cp.all_models.items():
print('{}->{}'.format(keyType, valueList)) # MLP->[(0,'mlp_1'),(1,'mlp_2)], CNN->[(2, 'univar_cnn')]
# LSTM->[(3, 'vanilla_lstm'), (4, 'stacked_lstm'), (5, 'bidir_lstm'), (6, 'cnn_lstm')]
status = d_models_assembly(d_models, keyType, valueList, cp, ds)
print(d_models)
if cp.fc is not None:
cp.fc.write("\n Actual Neuron Net and Statistical Time Series Models\n")
for k, v in d_models.items():
cp.fc.write("{} - > {}\n".format(k, v))
histories = fit_models(d_models, cp, ds)
save_modeles_in_repository(d_models, cp)
return
@exec_time
def drive_predict(cp, ds):
""" This function performs the out-of-sample one-step forecast:
-checks existing models ( in repository);
-loads models;
-predicts over the given number of forecast periods;
-logs the prediction results.
:param cp: ControlPlane object
:param ds: dataset object
:return:
"""
if ds is None or ds.rcpower is None or len(ds.rcpower) == 0:
msg2log(drive_predict.__name__, " The dataset is empty now. The predict step is skipping", cp.fa)
return
ds.data_for_predict = cp.n_steps
ds.predict_date = None
predict_history = copy.copy(ds.data_for_predict)
dict_model = get_list_trained_models(cp)
n_predict = 4
dict_predict = predict_model(dict_model, cp, ds, n_predict)
# title = '{} (Short Term Predict on {} steps)'.format(cp.rcpower_dset, n_predict)
title = 'Short Term Predict'
chart_predict(dict_predict, n_predict, cp, ds, title, cp.rcpower_dset)
tbl_predict(dict_predict, n_predict, cp, ds, title)
cp.forecast_number_step = cp.forecast_number_step + 1
if cp.predictDF is None:
cp.createPredictDF(dict_predict, cp.getPredictDate(ds))
else:
cp.updatePreductDF(dict_predict, cp.getPredictDate(ds), cp.getlastReceivedData(ds))
cp.logPredictDF()
cp.plotPredictDF()
return
def drive_control(cp, ds):
pass
|
the-stack_0_15652 | # coding: utf-8
"""
Shutterstock API Reference
The Shutterstock API provides access to Shutterstock's library of media, as well as information about customers' accounts and the contributors that provide the media. # noqa: E501
OpenAPI spec version: 1.0.11
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class InstrumentList(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'data': 'list[str]'
}
attribute_map = {
'data': 'data'
}
def __init__(self, data=None): # noqa: E501
"""InstrumentList - a model defined in Swagger""" # noqa: E501
self._data = None
self.discriminator = None
if data is not None:
self.data = data
@property
def data(self):
"""Gets the data of this InstrumentList. # noqa: E501
List of instruments # noqa: E501
:return: The data of this InstrumentList. # noqa: E501
:rtype: list[str]
"""
return self._data
@data.setter
def data(self, data):
"""Sets the data of this InstrumentList.
List of instruments # noqa: E501
:param data: The data of this InstrumentList. # noqa: E501
:type: list[str]
"""
self._data = data
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(InstrumentList, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, InstrumentList):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
the-stack_0_15653 | import gurobipy as gurobi
import numpy as np
from copy import deepcopy as copy
import matplotlib.pyplot as plt
# success!
# needs testing for multi-area, multi-time
# TODO: why don't the ub and lb apply to voltage_square?
# TODO: p2 is always zero, don't know why
# first I changed the generator cost
# second I changed the load_conference
def print_info(info):
print(info.this_voltage_square)
print(info.that_voltage_square)
print(info.power_flow)
print(info.react_flow)
print(info.this_node_pressure)
print(info.that_node_pressure)
print(info.gas_flow_in)
print(info.gas_flow_out)
# TODO: the original code inside get_dual may be wrong
class connection_line_info:
def __init__(self):
self.this_voltage_square = 0.0
self.that_voltage_square = 0.0
self.power_flow = 0.0 # out -> to the connected node
self.react_flow = 0.0 # out -> to the connected node
self.this_node_pressure = 0.0
self.that_node_pressure = 0.0
self.gas_flow_in = 0.0
self.gas_flow_out = 0.0
# branch flow model ! success
T = 4
power_price = [[5] * 5] * 5
g_link = 1
g_connection = [
[1],
[0]
]
player_num = 2
g_tao = 100
PUNISH = 30 * 3
OUTER_LOOP = 700
OUTER_LOOP = [800] + [800, 500, 500] + [500] * 50
PCCP_COUNT = 1
# TODO: one_line_0_with_1 one_line_0_with_2
# [ [ [info_0 info_1 ... info_T], [info_0 info_1 ... info_T], ] <===== area_0
# one_line_1_with_0 one_line_1_with_2 one_line_1_with_3
# [ [info_0 info_1 ... info_T], [info_0 info_1 ... info_T], [info_0 info_1 ... info_T],] <===== area_1
# one_line_2_with_0 one_line_2_with_1
# [ [info_0 info_1 ... info_T], [info_0 info_1 ... info_T],] ] <===== area_2
# g_info[ 0 ] [ 0 ] [ 0 ]
# area_0 0_with_i time_0 ====> connection_line_info
g_info = [
[
[
connection_line_info()
for ____time in range(T)
] for ____line in range(len(g_connection[____area]))
] for ____area in range(len(g_connection))
]
# TODO: g_lam format:
# [ [line1 [<> <> <> <> <> <> <> <>] | [<> <> <> <> <> <> <> <>] |...T...],
# [line2 [<> <> <> <> <> <> <> <>] | [<> <> <> <> <> <> <> <>] |...T...],
# [line3 [<> <> <> <> <> <> <> <>] | [<> <> <> <> <> <> <> <>] |...T...],
# ...
# [linen [<> <> <> <>] | [<> <> <> <>] |...T...]]
# g_lam [0] [0] = [x, x, x, x , x, x, x, x]
# 0-line 0-time this_v that_v power_f_in react_f_in this_pressure that_pressure flow_IN flow_OUT
g_lam = [
[
[1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8]
for _____time in range(T)
] for ______line in range(g_link)
]
# g_lam_index format
# [ area1[line, line, line ...],
# area2[line, line, line ...],
# ...]
# g_lam_index [0]
# all index for 0-area
g_lam_index = [
[0],
[0]
]
g_gas_price_aux = [1, -1, 1]
G_K = 2
abcd = []
class PowerNet:
def __init__(self, system_info, node_info, line_info, gas_node_info, gas_line_info):
self.index = system_info['index']
self.T = system_info['T']
# ------------- generator of non-gas ----------------------
self.gen_num = node_info['gen_num'] # add virtual node at last as the connected node
self.gen_index = node_info['gen_index']
self.gen_power_min = node_info['gen_power_min']
self.gen_power_max = node_info['gen_power_max']
self.gen_react_min = node_info['gen_react_min']
self.gen_react_max = node_info['gen_react_max']
self.gen_cost_a = node_info['gen_cost_a']
self.gen_cost_b = node_info['gen_cost_b']
self.gen_cost_c = node_info['gen_cost_c']
# ---------------- power bus node ---------------------------
self.bus_num = node_info['bus_num'] # add virtual node at last
self.bus_voltage_min = node_info['bus_voltage_min']
self.bus_voltage_max = node_info['bus_voltage_max']
# ----------------- power load -------------------------------
self.load_num = node_info['load_num']
self.load_index = node_info['load_index']
self.load_power_min = node_info['load_power_min']
self.load_power_max = node_info['load_power_max']
self.load_react_min = node_info['load_react_min']
self.load_react_max = node_info['load_react_max']
# --------------- power connection info -----------------------
self.bus_num_outside = node_info['bus_num_outside']
self.connection_area = system_info['connection_area']
self.connection_index = node_info['connection_index']
# ------------------- power line info -------------------------
self.line_num = line_info['line_num']
self.line_current_capacity = line_info['line_current_capacity']
self.line_start_point = line_info['line_start_point']
self.line_end_point = line_info['line_end_point']
self.line_resistance = line_info['line_resistance']
self.line_reactance = line_info['line_reactance']
# ------------------ gas node info -------------------------
self.gas_node_num = gas_node_info['gas_node_num']
self.node_pressure_min = gas_node_info['node_pressure_min']
self.node_pressure_max = gas_node_info['node_pressure_max']
# ------------------ gas well info -------------------------
self.well_num = gas_node_info['gas_well_num']
self.well_index = gas_node_info['well_index'] # [0,0,4,5]
self.well_output_min = gas_node_info['well_output_min']
self.well_output_max = gas_node_info['well_output_max']
# ------------------ gas load info -------------------------
self.gas_load_index = gas_node_info['load_index']
self.gas_load_min = gas_node_info['gas_load_min']
self.gas_load_max = gas_node_info['gas_load_max']
self.gas_load_num = gas_node_info['gas_load_num']
# ----------------- gas generator --------------------------
self.gen_gas_num = gas_node_info['gen_gas_num']
self.gen_gas_index = gas_node_info['gen_gas_index']
self.gen_gas_index_power = gas_node_info['gen_gas_index_power']
self.gen_gas_min = gas_node_info['gen_gas_min']
self.gen_gas_max = gas_node_info['gen_gas_max']
self.gen_gas_efficiency = gas_node_info['gen_gas_efficiency']
# ----------------- gas line info -------------------------
self.weymouth = gas_line_info['weymouth']  # for simplicity, it should contain all lines (including active lines)
self.gas_line_num = gas_line_info['gas_line_num']
self.gas_line_start_point = gas_line_info['gas_line_start_point'] # gas flow out
self.gas_line_end_point = gas_line_info['gas_line_end_point'] # gas flow in
self.gas_line_pack_coefficient = gas_line_info['gas_line_pack_coefficient']
self.gas_line_pack_initial = gas_line_info['gas_line_pack_initial']
self.gas_line_active = gas_line_info['gas_line_active']
self.gas_flow_in_max = gas_line_info['gas_flow_in_max']
self.gas_flow_out_max = gas_line_info['gas_flow_out_max']
# ------------------- gas compressor info ------------------
self.compressor_num = gas_line_info['compressor_num']
self.compressor_start_point = gas_line_info['compressor_start_point']
self.compressor_end_point = gas_line_info['compressor_end_point']
self.compressor_coefficient = gas_line_info['compressor_coefficient']
self.compressor_max_flow = gas_line_info['compressor_max_flow']
self.compressor_energy_consumption = gas_line_info['compressor_energy_consumption']
# ----------------------------------------gas information end
# ------------------model------------------------------------
self.model = gurobi.Model()
self.basic_objective = None
self.addition_objective = None
self.objective = None
self.constrain_update = []
self.objs = []
self.lams = []
self.dual = []
self.dual_addition = 0
self.norm_addition = 0
# -------------------- power system var -------------------
self.power_gen = None
self.react_gen = None
self.voltage_square = None
self.line_current_square = None
self.line_power_flow = None
self.line_react_flow = None
self.power_load = None
self.react_load = None
# -------------------- gas system var ----------------------
self.node_pressure = None
self.well_output = None
self.gas_load = None
self.gen_gas_power = None
self.gas_flow_in = None
self.gas_flow_out = None
self.linepack = None
self.compressor_out = None
self.compressor_in = None
self.gas_source = None
self.pccp = None
# ------------------------ old info -------------------------
self.info = [
[
connection_line_info()
for _ in range(self.T)
] for __ in range(len(self.connection_area))
]
# TODO: self.info [0] [0]
# self.index_with_i at time_0
self.old_value = [
[
connection_line_info()
for _ in range(self.T)
] for __ in range(len(self.connection_area))
]
# TODO: self.old_value [0] [0]
# self.index_with_i at time_0
self.gas_flow_in_old = [
[
0.2 for _ in range(self.T)
] for __ in range(self.gas_line_num)
]
self.gas_flow_out_old = [
[
0.2 for _ in range(self.T)
] for __ in range(self.gas_line_num)
]
self.node_pressure_old = [
[
0.2 for _ in range(self.T)
] for __ in range(self.gas_node_num)
]
# ---------- power system ---------------------------------------
def power_gen_connected_with(self, node):
result = np.where(np.array(self.gen_index) == node)
if result[0].size == 0:
return np.array([[0] * self.T])
result_list = []
for i in result[0]:
per = []
for time in range(self.T):
per.append(self.power_gen[i, time])
result_list.append(per)
return np.array(result_list)
def react_gen_connected_with(self, node):
result = np.where(np.array(self.gen_index) == node)
if result[0].size == 0:
return np.array([[0] * self.T])
result_list = []
for i in result[0]:
per = []
for time in range(self.T):
per.append(self.react_gen[i, time])
result_list.append(per)
return np.array(result_list)
def load_power_connected_with(self, node):
result = np.where(np.array(self.load_index) == node)
if result[0].size == 0:
return np.array([[0] * self.T])
result_list = []
for i in result[0]:
per = []
for time in range(self.T):
per.append(self.power_load[i, time])
result_list.append(per)
return np.array(result_list)
def load_react_connected_with(self, node):
result = np.where(np.array(self.load_index) == node)
if result[0].size == 0:
return np.array([[0] * self.T])
result_list = []
for i in result[0]:
per = []
for time in range(self.T):
per.append(self.react_load[i, time])
result_list.append(per)
return np.array(result_list)
# power flow in/out of the node
def power_flow_in_connected_with(self, node):
result = np.where(np.array(self.line_end_point) == node)
if result[0].size == 0:
return np.array([[0] * self.T])
result_list = []
for i in result[0]:
per = []
for time in range(self.T):
per.append(self.line_power_flow[i, time])
result_list.append(per)
return np.array(result_list)
def power_flow_out_connected_with(self, node):
result = np.where(np.array(self.line_start_point) == node)
if result[0].size == 0:
return np.array([[0] * self.T])
result_list = []
for i in result[0]:
per = []
for time in range(self.T):
per.append(self.line_power_flow[i, time])
result_list.append(per)
return np.array(result_list)
def raect_flow_in_connected_with(self, node):
result = np.where(np.array(self.line_end_point) == node)
if result[0].size == 0:
return np.array([[0] * self.T])
result_list = []
for i in result[0]:
per = []
for time in range(self.T):
per.append(self.line_react_flow[i, time])
result_list.append(per)
return np.array(result_list)
def react_flow_out_connected_with(self, node):
result = np.where(np.array(self.line_start_point) == node)
if result[0].size == 0:
return np.array([[0] * self.T])
result_list = []
for i in result[0]:
per = []
for time in range(self.T):
per.append(self.line_react_flow[i, time])
result_list.append(per)
return np.array(result_list)
def current_in_connected_with(self, node):
result = np.where(np.array(self.line_end_point) == node)
if result[0].size == 0:
return np.array([[0] * self.T])
result_list = []
for i in result[0]:
per = []
for time in range(self.T):
per.append(self.line_current_square[i, time])
result_list.append(per)
return np.array(result_list)
def resistance_in_connected_with(self, node):
result = np.where(np.array(self.line_end_point) == node)
if result[0].size == 0:
return np.array([[0] * self.T])
result_list = []
for i in result[0]:
per = []
for time in range(self.T):
per.append(self.line_resistance[i])
result_list.append(per)
return np.array(result_list)
def reactance_in_connected_with(self, node):
result = np.where(np.array(self.line_end_point) == node)
if result[0].size == 0:
return np.array([[0] * self.T])
result_list = []
for i in result[0]:
per = []
for time in range(self.T):
per.append(self.line_reactance[i])
result_list.append(per)
return np.array(result_list)
# ---------- gas system -----------------------------------------
def well_connected_with(self, node):
result = np.where(np.array(self.well_index) == node)
if result[0].size == 0:
return np.array([[0] * self.T])
result_list = []
for i in result[0]:
per_well = []
for time in range(self.T):
per_well.append(self.well_output[i, time])
result_list.append(per_well)
return np.array(result_list)
def load_connected_with(self, node):
result = np.where(np.array(self.gas_load_index) == node)
if result[0].size == 0:
return np.array([[0] * self.T])
result_list = []
for i in result[0]:
per_load = []
for time in range(self.T):
per_load.append(self.gas_load[i, time])
result_list.append(per_load)
return np.array(result_list)
def p2g_connected_with(self, node):
return np.array([[0] * self.T])
def gas_flow_out_connected_with(self, node):
result = np.where(np.array(self.gas_line_end_point) == node)
if result[0].size == 0:
return np.array([[0] * self.T])
result_list = []
for i in result[0]:
per_out = []
for time in range(self.T):
per_out.append(self.gas_flow_out[i, time])
result_list.append(per_out)
return np.array(result_list)
def gas_flow_in_connected_with(self, node):
result = np.where(np.array(self.gas_line_start_point) == node)
if result[0].size == 0:
return np.array([[0] * self.T])
result_list = []
for i in result[0]:
per_in = []
for time in range(self.T):
per_in.append(self.gas_flow_in[i, time])
result_list.append(per_in)
return np.array(result_list)
def gen_connected_with(self, node): # list of expression
result = np.where(np.array(self.gen_gas_index) == node) # this node is gas node
if result[0].size == 0:
return np.array([[0] * self.T])
result_list = []
for i in result[0]:
per_gen = []
for time in range(self.T):
per_gen.append(self.gen_gas_power[i, time] / self.gen_gas_efficiency[i]) # change to gas
result_list.append(per_gen)
return np.array(result_list)
def gas_to_power_connected_with(self, node):
result = np.where(np.array(self.gen_gas_index_power) == node) # this node is power node
if result[0].size == 0:
return np.array([[0] * self.T])
result_list = []
for i in result[0]:
per_gen = []
for time in range(self.T):
per_gen.append(self.gen_gas_power[i, time]) # just power
result_list.append(per_gen)
return np.array(result_list)
# ----------- auxiliary key function ----------------------------
def get_dual(self, this_info, that_info, start_point):
# this or that of two areas is same!
# Here we assume that_info always follows a global orientation, i.e. the this/that of area 0
# and the this/that of area 1 are always the same,
# but this_info treats the internal node as 'this' and the external node as 'that'.
if start_point != 0:
diff1 = this_info.this_voltage_square - that_info.this_voltage_square
diff2 = -1 * this_info.this_voltage_square + that_info.this_voltage_square
diff3 = this_info.that_voltage_square - that_info.that_voltage_square
diff4 = -1 * this_info.that_voltage_square + that_info.that_voltage_square
else:
diff1 = this_info.that_voltage_square - that_info.this_voltage_square
diff2 = -1 * this_info.that_voltage_square + that_info.this_voltage_square
diff3 = this_info.this_voltage_square - that_info.that_voltage_square
diff4 = -1 * this_info.this_voltage_square + that_info.that_voltage_square
if start_point != 0: # this is start point
diff5 = this_info.power_flow - that_info.power_flow
diff6 = -1 * this_info.power_flow + that_info.power_flow
diff7 = this_info.react_flow - that_info.react_flow
diff8 = -1 * this_info.react_flow + that_info.react_flow
else:
diff5 = -1 * this_info.power_flow - that_info.power_flow
diff6 = 1 * this_info.power_flow + that_info.power_flow
diff7 = -1 * this_info.react_flow - that_info.react_flow
diff8 = 1 * this_info.react_flow + that_info.react_flow
#
if start_point != 0:
diff9 = this_info.this_node_pressure - that_info.this_node_pressure
diff10 = -1 * this_info.this_node_pressure + that_info.this_node_pressure
diff11 = this_info.that_node_pressure - that_info.that_node_pressure
diff12 = -1 * this_info.that_node_pressure + that_info.that_node_pressure
else:
diff9 = this_info.that_node_pressure - that_info.this_node_pressure
diff10 = -1 * this_info.that_node_pressure + that_info.this_node_pressure
diff11 = this_info.this_node_pressure - that_info.that_node_pressure
diff12 = -1 * this_info.this_node_pressure + that_info.that_node_pressure
diff13 = this_info.gas_flow_in - that_info.gas_flow_in
diff14 = -1 * this_info.gas_flow_in + that_info.gas_flow_in
diff15 = this_info.gas_flow_out - that_info.gas_flow_out
diff16 = -1 * this_info.gas_flow_out + that_info.gas_flow_out
return [diff1, diff2, diff3, diff4, diff5, diff6, diff7, diff8,
diff9, diff10, diff11, diff12, diff13, diff14, diff15, diff16]
def get_sub(self, this_info, this_info_old, start_point):
# this_info_old 应该遵守全局 的 顺序
diff = 0
if start_point != 0: # this is start point
diff = diff + \
(this_info.this_voltage_square - this_info_old.this_voltage_square) * \
(this_info.this_voltage_square - this_info_old.this_voltage_square) + \
(this_info.that_voltage_square - this_info_old.that_voltage_square) * \
(this_info.that_voltage_square - this_info_old.that_voltage_square) + \
(this_info.power_flow - this_info_old.power_flow) * \
(this_info.power_flow - this_info_old.power_flow) + \
(this_info.react_flow - this_info_old.react_flow) * \
(this_info.react_flow - this_info_old.react_flow) + \
(this_info.this_node_pressure - this_info_old.this_node_pressure) * \
(this_info.this_node_pressure - this_info_old.this_node_pressure) + \
(this_info.that_node_pressure - this_info_old.that_node_pressure) * \
(this_info.that_node_pressure - this_info_old.that_node_pressure) + \
(this_info.gas_flow_in - this_info_old.gas_flow_in) * \
(this_info.gas_flow_in - this_info_old.gas_flow_in) + \
(this_info.gas_flow_out - this_info_old.gas_flow_out) * \
(this_info.gas_flow_out - this_info_old.gas_flow_out)
else:
diff = diff + \
(this_info.that_voltage_square - this_info_old.this_voltage_square) * \
(this_info.that_voltage_square - this_info_old.this_voltage_square) + \
(this_info.this_voltage_square - this_info_old.that_voltage_square) * \
(this_info.this_voltage_square - this_info_old.that_voltage_square) + \
(-1 * this_info.power_flow - this_info_old.power_flow) * \
(-1 * this_info.power_flow - this_info_old.power_flow) + \
(-1 * this_info.react_flow - this_info_old.react_flow) * \
(-1 * this_info.react_flow - this_info_old.react_flow) + \
(this_info.that_node_pressure - this_info_old.this_node_pressure) * \
(this_info.that_node_pressure - this_info_old.this_node_pressure) + \
(this_info.this_node_pressure - this_info_old.that_node_pressure) * \
(this_info.this_node_pressure - this_info_old.that_node_pressure) + \
(this_info.gas_flow_in - this_info_old.gas_flow_in) * \
(this_info.gas_flow_in - this_info_old.gas_flow_in) + \
(this_info.gas_flow_out - this_info_old.gas_flow_out) * \
(this_info.gas_flow_out - this_info_old.gas_flow_out)
return diff
def get_lam(self, index, start_point):
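# Note (inferred from the swap below): the multipliers in g_lam are stored in the orientation
# of the line's start point. When this area is the end point (start_point == 0), each pair of
# multipliers for the +/- consensus constraints is swapped so that the local this/that ordering
# still matches the globally agreed direction.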
lam = g_lam[index]
lam_T = []
for i in range(self.T):
lam_copy = lam[i].copy()
lam_t = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
if start_point != 0: # this is start_point
lam_t = lam_copy
else:
lam_t[0] = lam_copy[1]
lam_t[1] = lam_copy[0]
lam_t[2] = lam_copy[3]
lam_t[3] = lam_copy[2]
lam_t[4] = lam_copy[5]
lam_t[5] = lam_copy[4]
lam_t[6] = lam_copy[7]
lam_t[7] = lam_copy[6]
lam_t[8] = lam_copy[9]
lam_t[9] = lam_copy[8]
lam_t[10] = lam_copy[11]
lam_t[11] = lam_copy[10]
lam_t[12] = lam_copy[13]
lam_t[13] = lam_copy[12]
lam_t[14] = lam_copy[15]
lam_t[15] = lam_copy[14]
lam_T.extend(lam_t)
return lam_T
def build_model(self):
# add var
self.power_gen = self.model.addVars(
self.gen_num, self.T,
lb=[[self.gen_power_min[i]] * self.T for i in range(self.gen_num)],
ub=[[self.gen_power_max[i]] * self.T for i in range(self.gen_num)],
name='power_gene')
self.react_gen = self.model.addVars(
self.gen_num, self.T,
lb=[[self.gen_react_min[i]] * self.T for i in range(self.gen_num)],
ub=[[self.gen_react_max[i]] * self.T for i in range(self.gen_num)],
name='reactive_gene')
self.power_load = self.model.addVars(
self.load_num, self.T,
lb= self.load_power_min, #[[self.load_power_min[i]] * self.T for i in range(self.load_num)],
ub= self.load_power_max, #[[self.load_power_max[i]] * self.T for i in range(self.load_num)],
name='power_load')
self.react_load = self.model.addVars(
self.load_num, self.T,
lb=self.load_react_min, #[[self.load_react_min[i]] * self.T for i in range(self.load_num)],
ub=self.load_react_max, #[[self.load_react_max[i]] * self.T for i in range(self.load_num)],
name='react_load')
self.voltage_square = self.model.addVars(
self.bus_num, self.T,
lb=[[self.bus_voltage_min[i] * self.bus_voltage_min[i]] * self.T
for i in range(self.bus_num)],
ub=[[self.bus_voltage_max[i] * self.bus_voltage_max[i]] * self.T
for i in range(self.bus_num)],
name='bus_voltage_square')
self.line_current_square = self.model.addVars(
self.line_num, self.T,
ub=[[self.line_current_capacity[i] * self.line_current_capacity[i]] * self.T
for i in range(self.line_num)],
name='line_current_square')
self.line_power_flow = self.model.addVars(
self.line_num, self.T,
lb=-10, ub=10, # TODO: key error, core error
name='line_power_flow')
self.line_react_flow = self.model.addVars(
self.line_num, self.T,
lb=-10, ub=10,
name='line_react_flow')
self.well_output = self.model.addVars(self.well_num, self.T,
lb=[[self.well_output_min[i]] * self.T for i in range(self.well_num)],
ub=[[self.well_output_max[i]] * self.T for i in range(self.well_num)],
name='gas_well_outputs')
self.node_pressure = self.model.addVars(self.gas_node_num, self.T,
lb=[[self.node_pressure_min[i]] * self.T for i in
range(self.gas_node_num)],
ub=[[self.node_pressure_max[i]] * self.T for i in
range(self.gas_node_num)],
name='node_pressure')
self.gas_flow_in = self.model.addVars(self.gas_line_num, self.T,
ub=[[self.gas_flow_in_max[i]] * self.T for i in range(self.gas_line_num)],
lb=[[-1 * self.gas_flow_in_max[i]] * self.T for i in
range(self.gas_line_num)],
name='gas_flow_in')
self.gas_flow_out = self.model.addVars(self.gas_line_num, self.T,
ub=[[self.gas_flow_out_max[i]] * self.T for i in
range(self.gas_line_num)],
lb=[[-1 * self.gas_flow_out_max[i]] * self.T for i in
range(self.gas_line_num)],
name='gas_flow_out')
self.gas_load = self.model.addVars(self.gas_load_num, self.T,
lb=self.gas_load_min, ub=self.gas_load_max,
name='gas_load')
self.gen_gas_power = self.model.addVars(self.gen_gas_num, self.T,
lb=[[self.gen_gas_min[i]] * self.T for i in range(self.gen_gas_num)],
ub=[[self.gen_gas_max[i]] * self.T for i in range(self.gen_gas_num)],
name='gen_gas_power')
self.linepack = self.model.addVars(self.gas_line_num, self.T, name='gas_linepack')
self.pccp = self.model.addVars(self.gas_line_num, self.T,
lb=0, name='pccp')
self.model.update()
# ----------- construct the info structure --------------------------------
for i in range(len(self.connection_area)):
line_T = []
line_start = self.line_num - len(self.connection_area) # 5-2 = 3 3 4
bus_start = self.bus_num - len(self.connection_area)
gas_node_start = self.gas_node_num - len(self.connection_area)
gas_line_start = self.gas_line_num - len(self.connection_area)
this_index = self.connection_index[i]
this_index_gas = self.connection_index[i] # TODO: we assume gas and power have the same connection index
for time in range(self.T):
line_t = connection_line_info()
line_t.power_flow = self.line_power_flow[i + line_start, time]
line_t.react_flow = self.line_react_flow[i + line_start, time]
line_t.this_voltage_square = self.voltage_square[this_index, time]
line_t.that_voltage_square = self.voltage_square[i + bus_start, time]
line_t.this_node_pressure = self.node_pressure[this_index_gas, time]
line_t.that_node_pressure = self.node_pressure[i + gas_node_start, time]
line_t.gas_flow_in = self.gas_flow_in[i + gas_line_start, time]
line_t.gas_flow_out = self.gas_flow_out[i + gas_line_start, time]
line_T.append(line_t)
self.info[i] = line_T
# ----------- node power balance -----------------
for node in range(self.bus_num):
Power = self.power_gen_connected_with(node)
React = self.react_gen_connected_with(node)
Power_Load = self.load_power_connected_with(node)
React_Load = self.load_react_connected_with(node)
Power_In = self.power_flow_in_connected_with(node)
Power_Out = self.power_flow_out_connected_with(node)
React_In = self.raect_flow_in_connected_with(node)
React_Out = self.react_flow_out_connected_with(node)
Current_In = self.current_in_connected_with(node)
resistance = self.resistance_in_connected_with(node)
reactance = self.reactance_in_connected_with(node)
G2P = self.gas_to_power_connected_with(node)
for time in range(self.T):
self.model.addConstr(
lhs=sum(Power[:, time]) + sum(G2P[:, time]) +
sum(Power_In[:, time] - resistance[:, time] * Current_In[:, time]),
rhs=sum(Power_Load[:, time]) + sum(Power_Out[:, time]),
sense=gurobi.GRB.EQUAL,
name='power_balance')
self.model.addConstr(
lhs=sum(React[:, time]) +
sum(React_In[:, time] - reactance[:, time] * Current_In[:, time]),
rhs=sum(React_Load[:, time]) + sum(React_Out[:, time]),
sense=gurobi.GRB.EQUAL,
name='react_balance')
# ----------- line voltage drop ------------------
for i in range(self.line_num):
start_point = self.line_start_point[i]
end_point = self.line_end_point[i]
resistance = self.line_resistance[i]
reactance = self.line_reactance[i]
impedance_square = reactance * reactance + resistance * resistance
for time in range(self.T):
self.model.addConstr(
lhs=self.voltage_square[end_point, time] -
self.voltage_square[start_point, time],
rhs=impedance_square * self.line_current_square[i, time] -
2 * (resistance * self.line_power_flow[i, time] +
reactance * self.line_react_flow[i, time]),
sense=gurobi.GRB.EQUAL,
name='voltage_drop')
self.model.addConstr(
lhs=self.line_power_flow[i, time] * self.line_power_flow[i, time] +
self.line_react_flow[i, time] * self.line_react_flow[i, time],
rhs=self.line_current_square[i, time] * self.voltage_square[start_point, time],
sense=gurobi.GRB.LESS_EQUAL,
# sense=gurobi.GRB.EQUAL,
name='flow_relax')
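# Note: the two constraints above form the branch-flow (DistFlow) relaxation of the AC power
# flow: the voltage-drop equality v_end = v_start + (r^2 + x^2)*l - 2*(r*P + x*Q) and the
# second-order-cone relaxation P^2 + Q^2 <= l * v_start, where v is the squared voltage
# magnitude and l the squared line current.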
# ----------- gas node balance ------------------
for node in range(self.gas_node_num):
# for all passive and active lines; the helpers use numpy and return a [[...]] format
Well = self.well_connected_with(node)  # well variables associated with this node
Load = self.load_connected_with(node)
Gen = self.gen_connected_with(node)  # converts power to gas; efficiency is accounted for
P2G = self.p2g_connected_with(node) # this is just gas
Line_Out = self.gas_flow_out_connected_with(node)
Line_In = self.gas_flow_in_connected_with(node)
for time in range(self.T):
self.model.addConstr(
lhs=sum(Well[:, time]) + sum(P2G[:, time]) + sum(Line_Out[:, time]), # source
rhs=sum(Gen[:, time]) + sum(Load[:, time]) + sum(Line_In[:, time]), # load
sense=gurobi.GRB.EQUAL,
name='gas_nodal_balance_node')
# ----------- line pack passive ------------------
for line in range(self.gas_line_num):
if line not in self.gas_line_active:
start_point = self.gas_line_start_point[line]
end_point = self.gas_line_end_point[line]
linepack_coefficient = self.gas_line_pack_coefficient[line]
for time in range(self.T):
self.model.addConstr(
lhs=self.linepack[line, time],
rhs=linepack_coefficient *
(self.node_pressure[start_point, time] + self.node_pressure[end_point, time]),
sense=gurobi.GRB.EQUAL,
name='linePack')
# ----------- passive Pack-T ---------------------
for line in range(self.gas_line_num):
if line not in self.gas_line_active:
for time in range(self.T):
if time == 0:
self.model.addConstr(
lhs=self.linepack[line, 0] - self.linepack[line, self.T - 1],
rhs=self.gas_flow_in[line, 0] - self.gas_flow_out[line, 0],
sense=gurobi.GRB.EQUAL,
name='linepack_with_time_' + str(time) + '_line' + str(line))
else:
self.model.addConstr(
lhs=self.linepack[line, time] - self.linepack[line, time - 1],
rhs=self.gas_flow_in[line, time] - self.gas_flow_out[line, time],
sense=gurobi.GRB.EQUAL,
name='linepack_with_time_' + str(time) + '_line' + str(line))
# ----------- Pack Less Init ---------------------
linepack_sum = 0 # ? passive or active
for line in range(self.gas_line_num):
if line not in self.gas_line_active:
linepack_sum = linepack_sum + self.linepack[line, self.T - 1]
self.model.addConstr(linepack_sum <= self.gas_line_pack_initial)
# -------- active pressure-increase ---------------------
# ---------active gas-consume ---------------------------
for line in range(self.gas_line_num):
if line in self.gas_line_active:
thisIndex = self.gas_line_active.index(line)
compressor_coeff = self.compressor_coefficient[thisIndex]
start_point = self.gas_line_start_point[line]
end_point = self.gas_line_end_point[line]
max_flow = self.compressor_max_flow[thisIndex]
energy_consumption = 1 - self.compressor_energy_consumption[thisIndex]
for time in range(self.T):
# self.model.addConstr(self.gas_flow_in[line, time] <= max_flow)
self.model.addConstr(self.node_pressure[end_point, time] <=
compressor_coeff * self.node_pressure[start_point, time])
# add flow quantities for gas compressors
self.model.addConstr(self.gas_flow_out[line, time] ==
energy_consumption * self.gas_flow_in[line, time])
# ------------- weymouth passive ------------------------
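# Sketch of the relaxation used below (inferred from the constraints): the Weymouth relation
# for a passive pipeline is ((gas_flow_in + gas_flow_out)/2)^2 = weymouth * (p_start^2 - p_end^2),
# with p the nodal pressures. The first addConstr keeps the convex '<=' half; the second
# linearizes the concave reverse inequality around the previous iterate (gas_flow_*_old,
# node_pressure_old) and relaxes it with the non-negative slack self.pccp, which optimize()
# penalizes in the objective with the weight G_K (the penalty convex-concave procedure, PCCP).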
for line in range(self.gas_line_num):
if line not in self.gas_line_active:
start_point = self.gas_line_start_point[line]
end_point = self.gas_line_end_point[line]
weymouth = self.weymouth[line]
for time in range(self.T):
self.model.addConstr(
lhs=((self.gas_flow_in[line, time] + self.gas_flow_out[line, time]) / 2) *
((self.gas_flow_in[line, time] + self.gas_flow_out[line, time]) / 2),
rhs=weymouth * (self.node_pressure[start_point, time] *
self.node_pressure[start_point, time] -
self.node_pressure[end_point, time] *
self.node_pressure[end_point, time]),
sense=gurobi.GRB.LESS_EQUAL,
name='weymouth')
self.constrain_update.append(
self.model.addConstr(
lhs=weymouth * self.node_pressure[start_point, time] *
self.node_pressure[start_point, time] - (
(self.gas_flow_in_old[line][time] + self.gas_flow_out_old[line][time]) *
(self.gas_flow_in[line, time] + self.gas_flow_out[line, time]) / 2 -
(self.gas_flow_in_old[line][time] + self.gas_flow_out_old[line][time]) *
(self.gas_flow_in_old[line][time] + self.gas_flow_out_old[line][time]) / 4 -
weymouth * self.node_pressure[end_point, time] *
self.node_pressure[end_point, time] +
2 * weymouth * self.node_pressure[end_point, time] *
self.node_pressure_old[end_point][time]
),
rhs=self.pccp[line, time],
sense=gurobi.GRB.LESS_EQUAL,
name='pccp_less'
)
)
# ------------- gas system end --------------------------
# ------------- construct object ------------------------
first_line = self.line_num - len(self.connection_area)
self.objs = []
for gen in range(self.gen_num - len(self.connection_area)):
per = 0
for time in range(self.T):
per = per + \
self.power_gen[gen, time] * self.gen_cost_a[gen] + \
self.power_gen[gen, time] * self.power_gen[gen, time] * self.gen_cost_b[gen] + \
self.gen_cost_c[gen]
self.objs.append(per)
for load in range(self.load_num - len(self.connection_area)):
per = 0
for time in range(self.T):
load_ref = (self.load_power_max[load][time] + self.load_power_min[load][time]) / 2
per = per + \
0.1 * (self.power_load[load, time] - load_ref) * \
(self.power_load[load, time] - load_ref)
self.objs.append(per)
for line in range(len(self.connection_area)):
# for every area
connect_to = self.connection_area[line]
per_area = 0
for time in range(self.T):
per_area = per_area + self.line_power_flow[first_line + line, time] * \
power_price[self.index][connect_to]
self.objs.append(per_area)
line_num = self.gas_line_num - len(self.connection_area)
for conn in range(len(self.connection_area)):
for time in range(T):
self.objs.append(3 *  # gas_buy_price: gas purchase cost
self.gas_flow_in[line_num + conn, time] * g_gas_price_aux[self.index])
objective = sum(self.objs)
self.basic_objective = objective
def update_model(self, tao):
global g_info
self.lams = []
# obtain the lam of this player
for i, index in enumerate(g_lam_index[self.index]):
connect_to = self.connection_area[i]
is_start_point = 0
if connect_to > self.index:
is_start_point = 1
self.lams.extend(self.get_lam(index, is_start_point))
# construct the dual object
self.dual = [] # [ ---time---, ---time--- ]
for i in range(len(self.connection_area)):
for time in range(self.T):
connect_to = self.connection_area[i]
line_index = g_connection[connect_to].index(self.index)
that_info = g_info[connect_to][line_index][time]
this_info = self.info[i][time]
is_start_point = 0
if connect_to > self.index:
is_start_point = 1
self.dual.extend(self.get_dual(this_info, that_info, is_start_point))
self.dual_addition = sum([PUNISH * a * b for a, b in zip(self.dual, self.lams)])
# construct the norm object
self.norm_addition = 0
for i in range(len(self.connection_area)):
connect_to = self.connection_area[i]
is_start_point = 0
if connect_to > self.index:
is_start_point = 1
for time in range(self.T):
self.norm_addition = self.norm_addition + \
self.get_sub(self.info[i][time], self.old_value[i][time], is_start_point)
self.addition_objective = self.dual_addition + tao / 2 * self.norm_addition
self.objective = self.basic_objective + self.addition_objective
def optimize(self):
self.model.Params.OutputFlag = 0
self.model.setObjective(self.objective + gurobi.quicksum(self.pccp) * G_K)
self.model.optimize()
for line in range(self.gas_line_num):
for time in range(self.T):
self.gas_flow_in_old[line][time] = self.gas_flow_in[line, time].getAttr('X')
for line in range(self.gas_line_num):
for time in range(self.T):
self.gas_flow_out_old[line][time] = self.gas_flow_out[line, time].getAttr('X')
for node in range(self.gas_node_num):
for time in range(self.T):
self.node_pressure_old[node][time] = self.node_pressure[node, time].getAttr('X')
for i in range(len(self.connection_area)):
for time in range(self.T):
this_index = self.connection_index[i]
connect_to = self.connection_area[i]
this_index_gas = self.connection_index[i] # we assume gas and power have the same index
is_start_point = 0
if connect_to > self.index:
is_start_point = 1
line = connection_line_info()
line_start = self.line_num - len(self.connection_area)
bus_start = self.bus_num - len(self.connection_area)
gas_node_start = self.gas_node_num - len(self.connection_area)
gas_line_start = self.gas_line_num - len(self.connection_area)
# ---------- update power flow --------------
if is_start_point != 0: # this is start point
line.power_flow = self.line_power_flow[i + line_start, time].getAttr('X')
line.react_flow = self.line_react_flow[i + line_start, time].getAttr('X')
else:
line.power_flow = self.line_power_flow[i + line_start, time].getAttr('X') * (-1)
line.react_flow = self.line_react_flow[i + line_start, time].getAttr('X') * (-1)
# -------- update voltage ------------
if is_start_point != 0: # this is start point
line.this_voltage_square = self.voltage_square[this_index, time].getAttr('X')
line.that_voltage_square = self.voltage_square[i + bus_start, time].getAttr('X')
else:
line.this_voltage_square = self.voltage_square[i + bus_start, time].getAttr('X')
line.that_voltage_square = self.voltage_square[this_index, time].getAttr('X')
# ------- update pressure -----------
if is_start_point != 0: # this is start point
line.this_node_pressure = self.node_pressure[this_index_gas, time].getAttr('X')
line.that_node_pressure = self.node_pressure[i + gas_node_start, time].getAttr('X')
else:
line.this_node_pressure = self.node_pressure[i + gas_node_start, time].getAttr('X')
line.that_node_pressure = self.node_pressure[this_index_gas, time].getAttr('X')
# -------- update gas flow -----------
line.gas_flow_in = self.gas_flow_in[i + gas_line_start, time].getAttr('X')
line.gas_flow_out = self.gas_flow_out[i + gas_line_start, time].getAttr('X')
# update g_info
g_info[self.index][i][time] = line
def set_old_value(self, old): # old_value should be consisted with the g_info
for area in range(len(self.connection_area)):
for time in range(self.T):
self.old_value[area][time].this_voltage_square = old[area][time].this_voltage_square
self.old_value[area][time].that_voltage_square = old[area][time].that_voltage_square
self.old_value[area][time].power_flow = old[area][time].power_flow
self.old_value[area][time].react_flow = old[area][time].react_flow
self.old_value[area][time].this_node_pressure = old[area][time].this_node_pressure
self.old_value[area][time].that_node_pressure = old[area][time].that_node_pressure
self.old_value[area][time].gas_flow_in = old[area][time].gas_flow_in
self.old_value[area][time].gas_flow_out = old[area][time].gas_flow_out
def cal_gap(self):
result = []
for line in range(self.gas_line_num):
for time in range(self.T):
self.gas_flow_in_old[line][time] = self.gas_flow_in[line, time].getAttr('X')
for line in range(self.gas_line_num):
for time in range(self.T):
self.gas_flow_out_old[line][time] = self.gas_flow_out[line, time].getAttr('X')
for node in range(self.gas_node_num):
for time in range(self.T):
self.node_pressure_old[node][time] = self.node_pressure[node, time].getAttr('X')
for line in range(self.gas_line_num):
if line not in self.gas_line_active:
start_point = self.gas_line_start_point[line]
end_point = self.gas_line_end_point[line]
weymouth = self.weymouth[line]
for time in range(self.T):
lhs = ((self.gas_flow_in_old[line][time] + self.gas_flow_out_old[line][time]) / 2) * \
((self.gas_flow_in_old[line][time] + self.gas_flow_out_old[line][time]) / 2)
rhs = weymouth * (self.node_pressure_old[start_point][time] *
self.node_pressure_old[start_point][time] -
self.node_pressure_old[end_point][time] *
self.node_pressure_old[end_point][time])
result.append(abs(lhs - rhs) / abs(lhs))
return max(result)
def update_outer_model(self):
self.model.remove(self.constrain_update)
self.constrain_update = []
# weymouth for passive line
for line in range(self.gas_line_num):
if line not in self.gas_line_active:
start_point = self.gas_line_start_point[line]
end_point = self.gas_line_end_point[line]
weymouth = self.weymouth[line]
for time in range(self.T):
self.constrain_update.append(
self.model.addConstr(
lhs=weymouth * self.node_pressure[start_point, time] *
self.node_pressure[start_point, time] - (
(self.gas_flow_in_old[line][time] + self.gas_flow_out_old[line][time]) *
(self.gas_flow_in[line, time] + self.gas_flow_out[line, time]) / 2 -
(self.gas_flow_in_old[line][time] + self.gas_flow_out_old[line][time]) *
(self.gas_flow_in_old[line][time] + self.gas_flow_out_old[line][time]) / 4 -
weymouth * self.node_pressure[end_point, time] *
self.node_pressure[end_point, time] +
2 * weymouth * self.node_pressure[end_point, time] *
self.node_pressure_old[end_point][time]
),
rhs=self.pccp[line, time],
sense=gurobi.GRB.LESS_EQUAL,
name='weymouth_relax'
))
pccp_value = []
for i in range(self.gas_line_num):
pccp_value.append(self.pccp[i, 0].getAttr('X'))
if abs(max(pccp_value)) < 0.005:
print('PCCP slack below tolerance -- Weymouth relaxation is numerically tight')
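# Note on the relaxation used above: the non-convex Weymouth relation
#     weymouth * (p_start^2 - p_end^2) = ((gas_flow_in + gas_flow_out) / 2)^2
# is handled by linearising the concave part around the previous iterate
# (gas_flow_in_old / gas_flow_out_old / node_pressure_old) and adding the slack
# variable pccp[line, time], which appears to be penalised in the objective with
# weight G_K (calculate_pccp() below multiplies G_K by 1.4 every outer loop, so the
# slack is progressively squeezed towards zero); cal_gap() reports how far the
# current solution is from satisfying the original equation.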
class PlayerN1:
def __init__(self):
self.gx = []
self.old_value = [0] * g_link * T * 16
self.dual_express = 0
self.norm_express = 0
self.objective = 0
def sub(self, lhs, rhs):
gx = []
gx.append(lhs.this_voltage_square - rhs.this_voltage_square)
gx.append(-1 * lhs.this_voltage_square + rhs.this_voltage_square)
gx.append(lhs.that_voltage_square - rhs.that_voltage_square)
gx.append(-1 * lhs.that_voltage_square + rhs.that_voltage_square)
gx.append(lhs.power_flow - rhs.power_flow)
gx.append(-1 * lhs.power_flow + rhs.power_flow)
gx.append(lhs.react_flow - rhs.react_flow)
gx.append(-1 * lhs.react_flow + rhs.react_flow)
gx.append(lhs.this_node_pressure - rhs.this_node_pressure)
gx.append(-1 * lhs.this_node_pressure + rhs.this_node_pressure)
gx.append(lhs.that_node_pressure - rhs.that_node_pressure)
gx.append(-1 * lhs.that_node_pressure + rhs.that_node_pressure)
gx.append(lhs.gas_flow_in - rhs.gas_flow_in)
gx.append(-1 * lhs.gas_flow_in + rhs.gas_flow_in)
gx.append(lhs.gas_flow_out - rhs.gas_flow_out)
gx.append(-1 * lhs.gas_flow_out + rhs.gas_flow_out)
return gx
def optimize(self, tao):
model = gurobi.Model()
self.dual_express = 0
self.norm_express = 0
self.objective = 0
self.gx = []
for i in range(len(g_connection)):
for connect_to in g_connection[i]:
if i < connect_to:
for time in range(T):
lhs = g_info[i][g_connection[i].index(connect_to)][time]
rhs = g_info[connect_to][g_connection[connect_to].index(i)][time]
self.gx.extend(self.sub(lhs, rhs))
duals = model.addVars(len(self.gx))
self.dual_express = gurobi.quicksum(
1 * duals[i] * self.gx[i] for i in range(len(self.gx))
)
self.norm_express = gurobi.quicksum(
(duals[i] - self.old_value[i]) * (duals[i] - self.old_value[i])
for i in range(len(self.gx)))
self.objective = -1 * self.dual_express + tao / 2 * self.norm_express
model.setObjective(self.objective)
model.Params.OutputFlag = 0
model.optimize()
dual_value = []
pos = 0
for line in range(g_link):
lam_T = []
for time in range(T):
lam_t = []
for _ in range(16):
lam_t.append(duals[pos].getAttr('X'))
pos = pos + 1
lam_T.append(lam_t)
dual_value.append(lam_T)
return copy(dual_value)
def set_old_value(self, old_value):
self.old_value = copy(old_value)
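# PlayerN1 acts as the dual (price) player: sub() collects the 16 signed mismatches of the
# shared boundary variables between the two copies held by neighbouring areas, and
# optimize(tao) performs one proximal dual update, minimising
#     -lambda^T g(x) + (tao / 2) * ||lambda - lambda_old||^2
# over the multipliers and returning them reshaped to [line][time][16] for g_lam.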
def getPowerNet():
# -------------- p l a y e r 0 ----------------
player_index = 0
system_info_0 = {
'index': player_index,
'T': T,
'connection_area': g_connection[player_index]
}
node_info_0 = {
'gen_num': 3 + 1, # plus one generator (gen index 3) at the outside node (node 12), which models the neighbouring area and connects via line 12
'gen_index': [0, 0, 5, 12],
'gen_power_min': [0, 0, 0, 0], # 0
'gen_power_max': [0.3, 0.3, 0.4, 10], # 1.2
'gen_react_min': [0, 0, 0, 0], # 0
'gen_react_max': [0.3, 0.3, 0.4, 10], # 1.2
'gen_cost_a': [0.1, 0.0013, 0.09, 0.5],
'gen_cost_b': [0.01, 0.0001, 0.01, 0],
'gen_cost_c': [0.1, 0.1, 0.1, 0],
'bus_num': 12 + 1,
'bus_voltage_min': [0.8 * 1] * (12 + 1),
'bus_voltage_max': [1.2 * 1] * (12 + 1),
'load_num': 8 + 1,
'load_index': [2, 3, 4, 7, 8, 9, 10, 11, 12],
'load_power_min': np.array(
[[0.10, 0.11, 0.12, 0.10, 0.09, 0.12, 0.10, 0.12, 0.12, 0.12],
[0.20, 0.22, 0.23, 0.24, 0.25, 0.26, 0.27, 0.25, 0.22, 0.20],
[0.20, 0.19, 0.18, 0.19, 0.20, 0.20, 0.20, 0.19, 0.22, 0.20],
[0.10, 0.12, 0.10, 0.10, 0.10, 0.13, 0.12, 0.10, 0.09, 0.09],
[0.30, 0.30, 0.30, 0.30, 0.30, 0.30, 0.30, 0.30, 0.30, 0.30],
[0.10, 0.11, 0.12, 0.12, 0.13, 0.14, 0.15, 0.14, 0.13, 0.12],
[0.20, 0.20, 0.18, 0.20, 0.22, 0.22, 0.22, 0.20, 0.20, 0.20],
[0.10, 0.11, 0.10, 0.10, 0.10, 0.10, 0.10, 0.10, 0.10, 0.10],
[0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00]])[:, 0:T].tolist(), # 1.3
'load_power_max': np.array(
[[0.15, 0.16, 0.17, 0.18, 0.17, 0.15, 0.14, 0.14, 0.15, 0.15],
[0.25, 0.26, 0.27, 0.26, 0.27, 0.25, 0.26, 0.24, 0.23, 0.25],
[0.25, 0.23, 0.24, 0.26, 0.27, 0.28, 0.29, 0.27, 0.26, 0.25],
[0.15, 0.15, 0.15, 0.15, 0.15, 0.15, 0.15, 0.15, 0.15, 0.15],
[0.35, 0.40, 0.42, 0.37, 0.37, 0.37, 0.37, 0.37, 0.37, 0.37],
[0.15, 0.15, 0.15, 0.15, 0.15, 0.15, 0.15, 0.15, 0.15, 0.15],
[0.25, 0.26, 0.27, 0.28, 0.29, 0.24, 0.23, 0.20, 0.20, 0.20],
[0.15, 0.16, 0.15, 0.15, 0.15, 0.15, 0.15, 0.15, 0.15, 0.15],
[10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0]])[:, 0:T].tolist(), # 1.7
'load_react_min': np.array(
[[0.10, 0.11, 0.12, 0.10, 0.09, 0.12, 0.10, 0.12, 0.12, 0.12],
[0.20, 0.22, 0.23, 0.24, 0.25, 0.26, 0.27, 0.25, 0.22, 0.20],
[0.20, 0.19, 0.18, 0.19, 0.20, 0.20, 0.20, 0.19, 0.22, 0.20],
[0.10, 0.12, 0.10, 0.10, 0.10, 0.13, 0.12, 0.10, 0.09, 0.09],
[0.30, 0.30, 0.30, 0.30, 0.30, 0.30, 0.30, 0.30, 0.30, 0.30],
[0.10, 0.11, 0.12, 0.12, 0.13, 0.14, 0.15, 0.14, 0.13, 0.12],
[0.20, 0.20, 0.18, 0.20, 0.22, 0.22, 0.22, 0.20, 0.20, 0.20],
[0.10, 0.11, 0.10, 0.10, 0.10, 0.10, 0.10, 0.10, 0.10, 0.10],
[0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00]])[:, 0:T].tolist(),
'load_react_max': np.array(
[[0.15, 0.16, 0.17, 0.18, 0.17, 0.15, 0.14, 0.14, 0.15, 0.15],
[0.25, 0.26, 0.27, 0.26, 0.27, 0.25, 0.26, 0.24, 0.23, 0.25],
[0.25, 0.23, 0.24, 0.26, 0.27, 0.28, 0.29, 0.27, 0.26, 0.25],
[0.15, 0.15, 0.15, 0.15, 0.15, 0.15, 0.15, 0.15, 0.15, 0.15],
[0.35, 0.40, 0.42, 0.37, 0.37, 0.37, 0.37, 0.37, 0.37, 0.37],
[0.15, 0.15, 0.15, 0.15, 0.15, 0.15, 0.15, 0.15, 0.15, 0.15],
[0.25, 0.26, 0.27, 0.28, 0.29, 0.24, 0.23, 0.20, 0.20, 0.20],
[0.15, 0.16, 0.15, 0.15, 0.15, 0.15, 0.15, 0.15, 0.15, 0.15],
[10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0]])[:, 0:T].tolist(), # 1.7
'bus_num_outside': 1,
'connection_index': [1], # the outer-area power/gas tie connects to this bus index
}
line_info_0 = {
'line_num': 11 + 1,
'line_current_capacity': [10] * (11 + 1),
'line_start_point': [0, 1, 0, 3, 0, 5, 8, 5, 6, 5, 6, 12],
'line_end_point': [1, 2, 3, 4, 5, 8, 9, 6, 7, 10, 11, 1],
'line_resistance': (np.array([.1, .1, .1, .1, .1, .1, .1, .1, .1, .1, .1, .1]) / 10).tolist(),
'line_reactance': (np.array([.1, .1, .1, .1, .1, .1, .1, .1, .1, .1, .1, .1]) / 10).tolist()
}
gas_node_info_0 = {
'gas_node_num': 3 + 3 + 1,
'node_pressure_min': [0] * (3 + 3 + 1),
'node_pressure_max': [20] * (3 + 3 + 1),
'gas_well_num': 0 + 1,
'well_index': [3 + 3],
'well_output_min': [0],
'well_output_max': [2],
'gas_load_num': 2 + 2,
'load_index': [0, 2, 3, 5],
'gas_load_min': (np.array([[0.1, 0.11, 0.12, 0.11, 0.12, 0.11, 0.12, 0.11, 0.10, 0.10],
[0.1, 0.09, 0.08, 0.09, 0.10, 0.11, 0.11, 0.11, 0.09, 0.08],
(np.array([0.1, 0.09, 0.09, 0.09, 0.10, 0.10, 0.10, 0.09, 0.08, 0.07]) * 0.1).tolist(),
(np.array([0.1, 0.10, 0.10, 0.10, 0.11, 0.11, 0.11, 0.13, 0.12, 0.10]) * 0.1).tolist()])[:, 0:T]).tolist(),
'gas_load_max': (np.array([[0.2, 0.21, 0.22, 0.21, 0.23, 0.24, 0.21, 0.26, 0.23, 0.24],
[0.2, 0.22, 0.20, 0.20, 0.20, 0.20, 0.20, 0.20, 0.20, 0.20],
(np.array([0.2, 0.19, 0.18, 0.20, 0.22, 0.22, 0.22, 0.22, 0.22, 0.22]) * 0.1).tolist(),
(np.array([0.2, 0.20, 0.20, 0.20, 0.20, 0.20, 0.20, 0.20, 0.20, 0.20]) * 0.1).tolist()])[:, 0:T]).tolist(),
'gen_gas_num': 1,
'gen_gas_index': [2], # the gas generator index in the gas system
'gen_gas_index_power': [3], # the gas generator index in the power system
'gen_gas_min': [0], # this is power
'gen_gas_max': [1.5], # this is power
'gen_gas_efficiency': [10], # 0.05 gas => 0.5 power
}
gas_line_info_0 = {
'weymouth': [15] * (2 + 3 + 1),
'gas_line_num': 2 + 3 + 1,
'gas_line_start_point': [1, 1, 1, 4, 4, 6], # gas flow out
'gas_line_end_point': [0, 2, 4, 3, 5, 1], # gas flow in
'gas_line_pack_coefficient': [1] * (2 + 3 + 1),
'gas_line_pack_initial': 20,
'gas_flow_in_max': [5] * (2 + 3 + 1), # unused
'gas_flow_out_max': [5] * (2 + 3 + 1), # unused
'gas_line_active': [],
'compressor_num': 0,
'compressor_start_point': [],
'compressor_end_point': [],
'compressor_coefficient': [],
'compressor_max_flow': [],
'compressor_energy_consumption': [],
}
player_index = player_index + 1
# -------------- p l a y e r 1 ----------------
system_info_1 = {
'index': player_index,
'T': T,
'connection_area': g_connection[player_index]
}
node_info_1 = {
'gen_num': 3 + 1, # plus one generator (gen index 3) at the outside node (node 12), which models the neighbouring area and connects via line 12
'gen_index': [0, 0, 5, 12],
'gen_power_min': [0.5, 0.4, 0.6, 0], # 0 - 3
'gen_power_max': [1, 0.8, 1.2, 10],
'gen_react_min': [0.5, 0.4, 0.6, 0], # 0 - 3
'gen_react_max': [1, 0.8, 1.2, 10],
'gen_cost_a': [0.1, 0.13, 0.09, 0.5],
'gen_cost_b': [0.01, 0.01, 0.01, 0],
'gen_cost_c': [0.1, 0.1, 0.1, 0],
'bus_num': 12 + 1,
'bus_voltage_min': [0.8 * 1] * (12 + 1),
'bus_voltage_max': [1.2 * 1] * (12 + 1),
'load_num': 8 + 1,
'load_index': [2, 3, 4, 7, 8, 9, 10, 11, 12],
'load_power_min': (np.array(
[[0.10, 0.10, 0.10, 0.10, 0.09, 0.12, 0.10, 0.12, 0.12, 0.12],
[0.20, 0.22, 0.23, 0.24, 0.25, 0.26, 0.27, 0.25, 0.22, 0.20],
[0.20, 0.19, 0.18, 0.19, 0.20, 0.20, 0.20, 0.19, 0.22, 0.20],
[0.10, 0.12, 0.10, 0.10, 0.10, 0.13, 0.12, 0.10, 0.09, 0.09],
[0.30, 0.30, 0.30, 0.30, 0.30, 0.30, 0.30, 0.30, 0.30, 0.30],
[0.10, 0.11, 0.12, 0.12, 0.13, 0.14, 0.15, 0.14, 0.13, 0.12],
[0.20, 0.20, 0.18, 0.20, 0.22, 0.22, 0.22, 0.20, 0.20, 0.20],
[0.10, 0.11, 0.10, 0.10, 0.10, 0.10, 0.10, 0.10, 0.10, 0.10],
[0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00]])[:, 0:T] * 2).tolist(), # 1.3
'load_power_max': (np.array(
[[0.15, 0.16, 0.17, 0.18, 0.17, 0.15, 0.14, 0.14, 0.15, 0.15],
[0.25, 0.26, 0.27, 0.26, 0.27, 0.25, 0.26, 0.24, 0.23, 0.25],
[0.25, 0.23, 0.24, 0.26, 0.27, 0.28, 0.29, 0.27, 0.26, 0.25],
[0.15, 0.15, 0.15, 0.15, 0.15, 0.15, 0.15, 0.15, 0.15, 0.15],
[0.35, 0.40, 0.42, 0.37, 0.37, 0.37, 0.37, 0.37, 0.37, 0.37],
[0.15, 0.15, 0.15, 0.15, 0.15, 0.15, 0.15, 0.15, 0.15, 0.15],
[0.25, 0.26, 0.27, 0.28, 0.29, 0.24, 0.23, 0.20, 0.20, 0.20],
[0.15, 0.16, 0.15, 0.15, 0.15, 0.15, 0.15, 0.15, 0.15, 0.15],
[10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0]])[:, 0:T] * 2).tolist(), # 1.7
'load_react_min': (np.array(
[[0.10, 0.11, 0.12, 0.10, 0.09, 0.12, 0.10, 0.12, 0.12, 0.12],
[0.20, 0.22, 0.23, 0.24, 0.25, 0.26, 0.27, 0.25, 0.22, 0.20],
[0.20, 0.19, 0.18, 0.19, 0.20, 0.20, 0.20, 0.19, 0.22, 0.20],
[0.10, 0.12, 0.10, 0.10, 0.10, 0.13, 0.12, 0.10, 0.09, 0.09],
[0.30, 0.30, 0.30, 0.30, 0.30, 0.30, 0.30, 0.30, 0.30, 0.30],
[0.10, 0.11, 0.12, 0.12, 0.13, 0.14, 0.15, 0.14, 0.13, 0.12],
[0.20, 0.20, 0.18, 0.20, 0.22, 0.22, 0.22, 0.20, 0.20, 0.20],
[0.10, 0.11, 0.10, 0.10, 0.10, 0.10, 0.10, 0.10, 0.10, 0.10],
[0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00]])[:, 0:T] * 2).tolist(),
'load_react_max': (np.array(
[[0.15, 0.16, 0.17, 0.18, 0.17, 0.15, 0.14, 0.14, 0.15, 0.15],
[0.25, 0.26, 0.27, 0.26, 0.27, 0.25, 0.26, 0.24, 0.23, 0.25],
[0.25, 0.23, 0.24, 0.26, 0.27, 0.28, 0.29, 0.27, 0.26, 0.25],
[0.15, 0.15, 0.15, 0.15, 0.15, 0.15, 0.15, 0.15, 0.15, 0.15],
[0.35, 0.40, 0.42, 0.37, 0.37, 0.37, 0.37, 0.37, 0.37, 0.37],
[0.15, 0.15, 0.15, 0.15, 0.15, 0.15, 0.15, 0.15, 0.15, 0.15],
[0.25, 0.26, 0.27, 0.28, 0.29, 0.24, 0.23, 0.20, 0.20, 0.20],
[0.15, 0.16, 0.15, 0.15, 0.15, 0.15, 0.15, 0.15, 0.15, 0.15],
[10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0]])[:, 0:T] * 2).tolist(),
'bus_num_outside': 1,
'connection_index': [1], # the outer-area power/gas tie connects to this bus index
}
line_info_1 = {
'line_num': 11 + 1,
'line_current_capacity': [10] * (11 + 1),
'line_start_point': [0, 1, 0, 3, 0, 5, 8, 5, 6, 5, 6, 12],
'line_end_point': [1, 2, 3, 4, 5, 8, 9, 6, 7, 10, 11, 1],
'line_resistance': (np.array([.1, .1, .1, .1, .1, .1, .1, .1, .1, .1, .1, .1]) / 10).tolist(),
'line_reactance': (np.array([.1, .1, .1, .1, .1, .1, .1, .1, .1, .1, .1, .1]) / 10).tolist()
}
gas_node_info_1 = {
'gas_node_num': 3 + 1,
'node_pressure_min': [0] * (3 + 1),
'node_pressure_max': [20] * (3 + 1),
'gas_well_num': 1,
'well_index': [0],
'well_output_min': [0],
'well_output_max': [1.5],
'gas_load_num': 1 + 1,
'load_index': [2, 3],
'gas_load_min': (np.array([[0.1, 0.11, 0.12, 0.11, 0.12, 0.11, 0.12, 0.11, 0.10, 0.10],
[0.1, 0.09, 0.08, 0.09, 0.10, 0.11, 0.11, 0.11, 0.09, 0.08]])[:, 0:T]).tolist(),
'gas_load_max': (np.array([[0.2, 0.21, 0.22, 0.21, 0.23, 0.24, 0.21, 0.26, 0.23, 0.24],
[0.2, 0.22, 0.20, 0.20, 0.20, 0.20, 0.20, 0.20, 0.20, 0.20]])[:, 0:T]).tolist(),
'gen_gas_num': 1,
'gen_gas_index': [2], # the gas generator index in the gas system
'gen_gas_index_power': [2], # the gas generator index in the power system
'gen_gas_min': [0], # this is power
'gen_gas_max': [1.5],
'gen_gas_efficiency': [10],
}
gas_line_info_1 = {
'weymouth': [15] * (2 + 1),
'gas_line_num': 2 + 1,
'gas_line_start_point': [0, 1, 1], # gas flow out
'gas_line_end_point': [1, 2, 3], # gas flow in
'gas_line_pack_coefficient': [1] * (2 + 1),
'gas_line_pack_initial': 20,
'gas_flow_in_max': [5] * (2 + 1), # unused
'gas_flow_out_max': [5] * (2 + 1), # unused
'gas_line_active': [],
'compressor_num': 0,
'compressor_start_point': [],
'compressor_end_point': [],
'compressor_coefficient': [],
'compressor_max_flow': [],
'compressor_energy_consumption': [],
}
player_index = player_index + 1
p1 = PowerNet(system_info_0, node_info_0, line_info_0, gas_node_info_0, gas_line_info_0)
p2 = PowerNet(system_info_1, node_info_1, line_info_1, gas_node_info_1, gas_line_info_1)
return [p1, p2, PlayerN1()]
def update_old_value():
for i, player in enumerate(g_players):
player.set_old_value(copy(g_info[i]))
temp_lam = []
for line in range(g_link):
for time in range(T):
for index in range(16):
temp_lam.append(g_lam[line][time][index])
g_playerN1.set_old_value(copy(temp_lam))
def sub_info(a, b):
return (a.this_voltage_square - b.this_voltage_square) * (a.this_voltage_square - b.this_voltage_square) + \
(a.that_voltage_square - b.that_voltage_square) * (a.that_voltage_square - b.that_voltage_square) + \
(a.power_flow - b.power_flow) * (a.power_flow - b.power_flow) + \
(a.react_flow - b.react_flow) * (a.react_flow - b.react_flow) + \
(a.this_node_pressure - b.this_node_pressure) * (a.this_node_pressure - b.this_node_pressure) + \
(a.that_node_pressure - b.that_node_pressure) * (a.that_node_pressure - b.that_node_pressure) + \
(a.gas_flow_in - b.gas_flow_in) * (a.gas_flow_in - b.gas_flow_in) + \
(a.gas_flow_out - b.gas_flow_out) * (a.gas_flow_out - b.gas_flow_out)
def sub_norm(a, b):
sum = 0
for i in range(len(g_connection)):
for j in range(len(g_connection[i])):
for k in range(T):
sum += sub_info(a[i][j][k], b[i][j][k])
return sum
def calculate_NE():
global g_lam
count_best_response = 0
old_info = 0
while count_best_response < 30:
old_info = copy(g_info)
for i, player in enumerate(g_players):
# get the data for the player i
player.update_model(g_tao) # fill in x_i and lam_i
player.optimize()
# update the lam_dual variable
g_lam = copy(g_playerN1.optimize(g_tao))
# update the response
if sub_norm(old_info, copy(g_info)) < 0.0001:
print('NE reached after {} best-response iterations'.format(count_best_response + 1))
break
count_best_response = count_best_response + 1
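# calculate_NE() is a Gauss-Seidel best-response loop: each area player re-optimises against
# the boundary information currently stored in g_info, the dual player then refreshes g_lam,
# and the loop stops when the change in g_info (measured by sub_norm) drops below 1e-4 or
# after 30 rounds.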
def calculate_GNE(iijj):
outer_loop_count = 0
global result_plt
global result_plt1
global result_plt2
global result_plt3
global result_plt4
global result_plt5
global result_plt6
global result_plt7
result_plt = []
result_plt1 = []
result_plt2 = []
result_plt3 = []
result_plt4 = []
result_plt5 = []
result_plt6 = []
result_plt7 = []
global gap1
global gap2
gap1 = []
gap2 = []
while outer_loop_count < OUTER_LOOP[iijj]:
print(outer_loop_count)
# give xn, lam_n, calculate the equilibrium
calculate_NE()
# a new NE has been obtained; use it as the new reference value
update_old_value()
outer_loop_count = outer_loop_count + 1
result_plt.append(g_info[0][0][0].this_voltage_square - g_info[1][0][0].this_voltage_square)
result_plt1.append(g_info[0][0][0].that_voltage_square - g_info[1][0][0].that_voltage_square)
result_plt2.append(g_info[0][0][0].power_flow - g_info[1][0][0].power_flow)
result_plt3.append(g_info[0][0][0].react_flow - g_info[1][0][0].react_flow)
result_plt4.append(g_info[0][0][0].this_node_pressure - g_info[1][0][0].this_node_pressure)
result_plt5.append(g_info[0][0][0].that_node_pressure - g_info[1][0][0].that_node_pressure)
result_plt6.append(g_info[0][0][0].gas_flow_in - g_info[1][0][0].gas_flow_in)
result_plt7.append(g_info[0][0][0].gas_flow_out - g_info[1][0][0].gas_flow_out)
if p1.cal_gap() > 1000:
gap1.append(-10)
else:
gap1.append(p1.cal_gap())
if p2.cal_gap() > 1000:
gap2.append(-10)
else:
gap2.append(p2.cal_gap())
plt.plot(result_plt, label='diff0')
plt.plot(result_plt1, label='diff1')
plt.plot(result_plt2, label='diff2')
plt.plot(result_plt3, label='diff3')
plt.plot(result_plt4, label='diff4')
plt.plot(result_plt5, label='diff5')
plt.plot(result_plt6, label='diff6')
plt.plot(result_plt7, label='diff7')
plt.legend(loc='best')
plt.savefig('diff' + str(iijj) + '.svg')
plt.cla()
plt.plot(gap1)
plt.plot(gap2)
plt.savefig('gap' + str(iijj) + '.svg')
plt.cla()
def start():
global g_info
global result_plt
global result_plt1
global result_plt2
global gap1
global gap2
result_plt = []
result_plt1 = []
result_plt2 = []
gap1 = []
gap2 = []
outer_loop_count = 2
for player in g_players:
player.build_model()
while outer_loop_count < OUTER_LOOP:
print(outer_loop_count)
calculate_NE()
update_old_value()
outer_loop_count = outer_loop_count + 1
result_plt.append(g_info[0][0][0].power_flow)
result_plt1.append(g_info[1][0][0].power_flow)
result_plt2.append(g_info[0][0][0].power_flow - g_info[1][0][0].power_flow)
g_info = [[[connection_line_info() for ____time in range(T)]
for ____line in range(len(g_connection[____area]))]
for ____area in range(len(g_connection))]
plt.plot(result_plt, label='0->1')
plt.plot(result_plt1, '-r', label='1->0')
plt.plot(result_plt2, '-g', label='diff')
plt.legend(loc='best')
plt.show()
def calculate_pccp():
global abcd
abcd = []
global G_K
G_K = 1.4
pccp_loop = 0
while pccp_loop < PCCP_COUNT:
pccp_loop = pccp_loop + 1
G_K = G_K * 1.4
calculate_GNE(pccp_loop)
for player in g_players:
player.update_outer_model()
print('player gap : ' + str(player.cal_gap()))
abcd.append([p1.cal_gap(), p2.cal_gap()])
plt.plot(abcd)
plt.show()
if __name__ == '__main__':
result_plt = []
result_plt1 = []
result_plt2 = []
result_plt3 = []
result_plt4 = []
result_plt5 = []
result_plt6 = []
result_plt7 = []
gap1 = []
gap2 = []
all_players = getPowerNet()
g_players = all_players[:player_num]
g_playerN1 = all_players[player_num]
[p1, p2] = g_players
pn = g_playerN1
for player_g in g_players:
player_g.build_model()
calculate_pccp()
pycharm_debug = 2
|
the-stack_0_15657 | from deepdab.ai import *
from deepdab import *
import tensorflow as tf
import numpy as np
class TDOneGradientPolicyCNNV2c(Policy):
"""
Adds padding to the initial convolutional layer, followed by max-pooling.
"""
def __init__(self, board_size):
self._sess = tf.Session()
self._board_size = board_size
edge_matrix = init_edge_matrix(board_size)
self._n_input_rows = edge_matrix.shape[0]
self._n_input_cols = edge_matrix.shape[1]
self._n_hidden = 300
self._n_output = 1
self._input = tf.placeholder("float", [self._n_input_rows, self._n_input_cols], name="input")
self._target = tf.placeholder("float", [1, self._n_output], name="target")
self._error = tf.placeholder("float", shape=[], name="error")
self._lr = tf.placeholder("float", shape=[], name="learning_rate")
self._sum_grad_W_in = tf.placeholder("float", shape=[3 * 3 * 12, self._n_hidden], name="sum_grad_W_in")
self._sum_grad_b_in = tf.placeholder("float", shape=[self._n_hidden], name="sum_grad_b_in")
self._sum_grad_W_out = tf.placeholder("float", shape=[self._n_hidden, 1], name="sum_grad_W_out")
self._sum_conv2d_kernel = tf.placeholder("float", shape=[3, 3, 1, 12], name="sum_conv2d_kernel")
self._sum_conv2d_bias = tf.placeholder("float", shape=[12], name="sum_conv2d_bias")
self._W_in = tf.Variable(tf.random_normal([3 * 3 * 12, self._n_hidden], 0.0, 0.1), name="W_in")
self._b_in = tf.Variable(tf.zeros([self._n_hidden]), name="b_in")
self._W_out = tf.Variable(tf.random_normal([self._n_hidden, self._n_output], 0.0, 0.1), name="W_out")
self._input_reshaped = tf.reshape(self._input, shape=[1, self._n_input_rows, self._n_input_cols, 1])
# Convolutional Layer
# Computes 12 features using a 3x3 filter with ReLU activation.
# Padding is added to preserve width and height.
# Input Tensor Shape (for the 2x2 board): [1, 5, 5, 1] (batch size, width, height, channels)
# Output Tensor Shape: [1, 5, 5, 12]
self._conv = tf.layers.conv2d(
inputs=self._input_reshaped,
filters=12,
kernel_size=[3, 3],
strides=(1, 1),
padding="same",
kernel_initializer=tf.random_normal_initializer(0.0, 0.1),
activation=tf.nn.relu)
# Pooling Layer
# Max pooling layer with a 3x3 filter and stride of 1
# Input Tensor Shape: [1, 5, 5, 12]
# Output Tensor Shape: [1, 3, 3, 12]
self._pool = tf.layers.max_pooling2d(
inputs=self._conv,
pool_size=[3, 3],
strides=1)
self._conv_flat = tf.reshape(self._pool, [1, 3 * 3 * 12])
dense_layer = tf.nn.tanh(tf.matmul(self._conv_flat, self._W_in) + self._b_in)
self._prediction = tf.nn.sigmoid(tf.matmul(dense_layer, self._W_out))
self._conv2d_kernel = [v for v in tf.global_variables() if v.name == 'conv2d/kernel:0'][0]
self._conv2d_bias = [v for v in tf.global_variables() if v.name == 'conv2d/bias:0'][0]
self._gradients = tf.gradients(self._prediction, [self._W_in, self._b_in, self._W_out,
self._conv2d_kernel, self._conv2d_bias])
self._update_W_in = self._W_in.assign(self._W_in + self._lr * self._error * self._sum_grad_W_in)
self._update_b_in = self._b_in.assign(self._b_in + self._lr * self._error * self._sum_grad_b_in)
self._update_W_out = self._W_out.assign(self._W_out + self._lr * self._error * self._sum_grad_W_out)
self._update_conv2d_kernel = self._conv2d_kernel.assign(self._conv2d_kernel + self._lr * self._error * self._sum_conv2d_kernel)
self._update_conv2d_bias = self._conv2d_bias.assign(self._conv2d_bias + self._lr * self._error * self._sum_conv2d_bias)
self._sess.run(tf.global_variables_initializer())
self.reset_history_buffer()
def get_architecture(self):
return "5x5-conv(3x3, relu, 12)-maxpool(3x3)-tanh(300)-sigmoid(1)"
def reset_history_buffer(self):
self._prediction_buffer = []
self._prediction_gradient_buffer = []
def get_last_prediction(self):
if len(self._prediction_buffer) > 0:
return self._prediction_buffer[-1]
def get_last_prediction_gradient(self):
if len(self._prediction_gradient_buffer) > 0:
return self._prediction_gradient_buffer[-1]
def select_edge(self, board_state):
zero_indices = []
for i in range(len(board_state)):
if board_state[i] == 0:
zero_indices.append(i)
if random.random() < self._epsilon:
random_index = random.choice(zero_indices)
# store history
new_state = [x for x in board_state]
new_state[random_index] = 1
new_state = convert_board_state_to_edge_matrix(self._board_size, new_state)
new_state_value, gradients = self._sess.run([self._prediction, self._gradients],
feed_dict={self._input: new_state})
self._prediction_buffer.append(new_state_value[0][0])
self._prediction_gradient_buffer.append(gradients)
return random_index
else:
best_value = 0.0
best_value_gradient = None
best_state_index = zero_indices[0]
for zero_index in zero_indices:
new_state = [x for x in board_state]
new_state[zero_index] = 1
new_state = convert_board_state_to_edge_matrix(self._board_size, new_state)
new_state_value, gradients = self._sess.run([self._prediction, self._gradients],
feed_dict={self._input: new_state})
if new_state_value >= best_value:
best_value = new_state_value
best_value_gradient = gradients
best_state_index = zero_index
# store history
self._prediction_buffer.append(best_value[0][0])
self._prediction_gradient_buffer.append(best_value_gradient)
return best_state_index
def get_epsilon(self):
return self._epsilon
def set_epsilon(self, eps):
self._epsilon = eps
def get_learning_rate(self):
return self._learning_rate
def set_learning_rate(self, lr):
self._learning_rate = lr
def update(self, prediction_history, prediction_gradient_history):
if len(prediction_history) > 1:
error = prediction_history[-1] - prediction_history[-2]
sum_grad_W_in = np.sum(prediction_gradient_history[:-1], axis=0)[0]
sum_grad_b_in = np.sum(prediction_gradient_history[:-1], axis=0)[1]
sum_grad_W_out = np.sum(prediction_gradient_history[:-1], axis=0)[2]
sum_conv2d_kernel = np.sum(prediction_gradient_history[:-1], axis=0)[3]
sum_conv2d_bias = np.sum(prediction_gradient_history[:-1], axis=0)[4]
self._update_params(error, sum_grad_W_in, sum_grad_b_in, sum_grad_W_out, sum_conv2d_kernel, sum_conv2d_bias)
def update_terminal(self, prediction_history, prediction_gradient_history, target):
error = target - prediction_history[-1]
sum_grad_W_in = np.sum(prediction_gradient_history, axis=0)[0]
sum_grad_b_in = np.sum(prediction_gradient_history, axis=0)[1]
sum_grad_W_out = np.sum(prediction_gradient_history, axis=0)[2]
sum_conv2d_kernel = np.sum(prediction_gradient_history, axis=0)[3]
sum_conv2d_bias = np.sum(prediction_gradient_history, axis=0)[4]
self._update_params(error, sum_grad_W_in, sum_grad_b_in, sum_grad_W_out, sum_conv2d_kernel, sum_conv2d_bias)
def update_offline(self, prediction_history, prediction_gradient_history, target):
if len(prediction_history) > 0:
for i in range(1, len(prediction_history) + 1):
prev = prediction_history[i - 1]
last = prediction_history[i] if i < len(prediction_history) else target
error = last - prev
sum_grad_W_in = np.sum(prediction_gradient_history[:i], axis=0)[0]
sum_grad_b_in = np.sum(prediction_gradient_history[:i], axis=0)[1]
sum_grad_W_out = np.sum(prediction_gradient_history[:i], axis=0)[2]
sum_conv2d_kernel = np.sum(prediction_gradient_history[:i], axis=0)[3]
sum_conv2d_bias = np.sum(prediction_gradient_history[:i], axis=0)[4]
self._update_params(error, sum_grad_W_in, sum_grad_b_in, sum_grad_W_out, sum_conv2d_kernel, sum_conv2d_bias)
def _update_params(self, error, sum_grad_W_in, sum_grad_b_in, sum_grad_W_out, sum_conv2d_kernel, sum_conv2d_bias):
self._sess.run([self._update_W_in, self._update_b_in, self._update_W_out,
self._update_conv2d_kernel, self._update_conv2d_bias],
feed_dict={self._lr: self._learning_rate, self._error: error,
self._sum_grad_W_in: sum_grad_W_in, self._sum_grad_b_in: sum_grad_b_in,
self._sum_grad_W_out: sum_grad_W_out, self._sum_conv2d_kernel: sum_conv2d_kernel,
self._sum_conv2d_bias: sum_conv2d_bias})
def print_params(self, f):
params = self._sess.run([self._W_in])
f.write("W_in: %s\n" % params[0].tolist())
params = self._sess.run([self._b_in])
f.write("b_in: %s\n" % params[0].tolist())
params = self._sess.run([self._W_out])
f.write("W_out: %s\n" % params[0].tolist())
params = self._sess.run([self._conv2d_kernel])
f.write("conv2d_kernel: %s\n" % params[0].tolist())
params = self._sess.run([self._conv2d_bias])
f.write("conv2d_bias: %s\n" % params[0].tolist())
def print_gradients(self):
print(self._prediction_gradient_buffer)
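# Rough usage sketch (hypothetical -- the deepdab game loop and the reward convention below
# are assumptions, not part of this file):
#
#   policy = TDOneGradientPolicyCNNV2c(board_size=2)
#   policy.set_learning_rate(0.01)
#   policy.set_epsilon(0.1)
#   # play one episode, calling policy.select_edge(board_state) for every move; each call
#   # appends the value prediction and its gradients to the policy's history buffers
#   # at the end of the episode, update towards the observed outcome (e.g. 1.0 for a win):
#   policy.update_offline(policy._prediction_buffer, policy._prediction_gradient_buffer, 1.0)
#   policy.reset_history_buffer()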
|
the-stack_0_15660 | import numpy as np
import find_best_threshold as fbt
def random_booster(X, y, T):
"""
The function ``random_booster`` uses random thresholds and indices to train a
classifier. It performs ``T`` rounds of boosted decision stumps to classify
the data ``X``, which is an m-by-n matrix of m training examples of dimension n.
The returned parameters are ``theta``, a ``T``-dimensional parameter vector,
``feature_inds``, the chosen feature indices (a ``T``-dimensional vector taking
values in ``{0, 1, ..., n-1}``), and ``thresholds``, which are real-valued
thresholds. The resulting classifier can be evaluated on an n-dimensional
example ``x`` as
``
theta' * sgn(x(feature_inds) - thresholds)
``
"""
rows, cols = X.shape
p_dist = np.ones(rows)
p_dist = p_dist / np.sum(p_dist)
thetas = np.zeros(T)
feature_indices = np.zeros(T, dtype='int')
thresholds = np.zeros(T)
# We use floor instead of ceil because we are indexing from zero,
# whereas MATLAB indexes from one.
for t in range(T):
index_t = int(np.floor(cols * np.random.random()))
threshold_t = X[int(np.floor(rows * np.random.random())), index_t] + 1e-8 * np.random.random()
Wplus = p_dist.T.dot(y * np.sign(X[:, index_t] - threshold_t) == 1)
Wminus = p_dist.T.dot(y * np.sign(X[:, index_t] - threshold_t) == -1)
theta_t = 0.5 * np.log(Wplus / Wminus)
thetas[t] = theta_t
feature_indices[t] = index_t
thresholds[t] = threshold_t
thresholds_per_example = np.repeat(thresholds[:(t+1)].T, rows).reshape((rows,t+1))
p_dist = np.exp(-y * (thetas[:(t+1)].T.dot(np.sign(X[:, feature_indices[:(t+1)]] - thresholds_per_example).T)))
p_dist = p_dist / np.sum(p_dist)
return (thetas, feature_indices, thresholds)
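# Hypothetical usage sketch (X_train / y_train / X_test are assumed to exist, with labels
# in {-1, +1}):
#
#   thetas, feature_inds, thresholds = random_booster(X_train, y_train, T=200)
#   # evaluate the boosted classifier on new examples:
#   scores = np.sign(X_test[:, feature_inds] - thresholds).dot(thetas)
#   predictions = np.sign(scores)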
|
the-stack_0_15661 | import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="monk_gluon_cuda102", # Replace with your own username
version="0.0.1",
author="Tessellate Imaging",
author_email="[email protected]",
description="Monk Classification Library - Cuda102 - backends - mxnet-gluon",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/Tessellate-Imaging/monk_v1",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
"Environment :: GPU :: NVIDIA CUDA :: 10.2",
],
install_requires=[
'scipy',
'scikit-learn',
'scikit-image',
'opencv-python',
'pillow==6.0.0',
'tqdm',
'gpustat',
'psutil',
'pandas',
'GPUtil',
'mxnet-cu102==1.6.0',
'gluoncv==0.6',
'torch==1.4.0',
'tabulate',
'netron',
'networkx',
'matplotlib',
'pylg',
'ipywidgets'
],
python_requires='>=3.6',
)
|
the-stack_0_15664 | import json
from code import EmotionModeltrainer, EmotionClassifier
from code import get_emotion_trainingdata, get_testdata, get_models, write_classification_report
# PROMPT TRAINING SETTINGS FROM USER
modelnr = input('What is the model number? (type number)\n')
teid = True if input('Do you want to extent the MELD with the TEID? (y/n)\n') == 'y' else False
classifier = input('Which classifier do you want to use? (naivebayes/svm)\n')
representations = input('What representations do you want to use? (embedding/bow/tfidf)\n')
frequency_threshold = int(input('What frequency threshold do you want to use? (type number)\n'))
if representations == 'embedding':
embedding_model = input('What embedding model do you want to use? (glove-wiki-gigaword-300/glove-twitter-200/word2vec-google-news-300)\n')
else:
embedding_model = None
if embedding_model == 'glove-twitter-200':
dimensions = 200
else:
dimensions = 300
stopwords = True if input('Do you want to exclude stopwords? (y/n)\n') == 'y' else False
balanced_data = True if input('Do you want to balance (over-sampling) the training data? (y/n)\n') == 'y' else False
# WRITE SETTINGS
settings = {'modelnr': modelnr, 'teid': teid, 'classifier': classifier, 'representations': representations, \
'frequency_threshold': frequency_threshold, 'embedding_model': embedding_model, 'dimensions': dimensions, \
'stopwords': stopwords, 'balance_data': balanced_data}
models_path = f"./models/emotion/{modelnr}"
settings_path = f"{models_path}/settings_{modelnr}.json"
with open(settings_path, 'w') as outfile:
json.dump(settings, outfile)
# TRAIN MODEL
print('Training model...')
training_texts, training_labels = get_emotion_trainingdata(MELD_path='./data/MELD/train_sent_emo.csv', TEID=teid, TEID_path='./data/TEID')
modeltrainer = EmotionModeltrainer(training_texts, training_labels, settings, models_path)
modeltrainer.run()
# GET TEST DATA
texts, labels, topics = get_testdata(filepath='./data/test_set.csv')
# OPTION TO GET BASELINE CLASSIFICATION REPORT
if input("Do you want to generate a baseline classification report before testing a model? (y/n)\n") == 'y':
baseline_result = ['neutral' for i in range(len(labels))]
write_classification_report(labels, baseline_result, './models/emotion/baseline_classification_report.csv')
# TEST MODEL
print('Testing model ...')
models = get_models(models_path, settings)
predictions = []
for text in texts:
emotion_classifier = EmotionClassifier(text, settings, models)
prediction = emotion_classifier.predict()
predictions.append(prediction)
write_classification_report(labels, predictions, f'{models_path}/classification_report.csv')
print(f"Classification report is saved in '{models_path}/classification_report.csv'!")
|
the-stack_0_15666 | # coding: utf-8
import os
from setuptools import setup
with open(os.path.join(os.path.dirname(__file__), 'README.md')) as readme:
README = readme.read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name='django-info_screen',
version='0.10',
description='Django simple info screen application',
author='Olli-Pekka Puolitaival',
author_email='[email protected]',
url='https://github.com/OPpuolitaival/django-info_screen',
license='MIT',
long_description=README,
packages=['info_screen'],
install_requires=[
],
classifiers=[
'Framework :: Django',
'Development Status :: 3 - Alpha',
'Environment :: Web Environment',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Engineering :: Visualization',
],
include_package_data=True,
)
|
the-stack_0_15667 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
### TODO
## a label can have multiple representative points (e.g., arrangement on a torus)
from __future__ import print_function
import numpy as np
import pandas as pd
import matplotlib as mpl
mpl.use('Agg')
import sys
import chainer
import chainer.functions as F
import chainer.links as L
from chainer import training,datasets,iterators,Variable
from chainer.training import extensions
from chainer.dataset import dataset_mixin, convert, concat_examples
#from chainerui.extensions import CommandsExtension
from chainerui.utils import save_args
import os,glob,random,datetime,argparse
from consts import optim,dtypes
from arrangement import *
from cosshift import CosineShift
def plot_log(f,a,summary):
a.set_yscale('log')
# evaluator
class Evaluator(extensions.Evaluator):
name = "myval"
def __init__(self, *args, **kwargs):
import warnings
warnings.filterwarnings("ignore", category=UserWarning)
params = kwargs.pop('params')
super(Evaluator, self).__init__(*args, **kwargs)
self.args = params['args']
self.hist_top_n = params['top_n']
self.val_ranking = sub_ranking(params['ranking'][:,1:], self.args.focus_labels) # remove ID column
self.val_hist = rank_hist(self.val_ranking,self.hist_top_n)
self.count = 0
def evaluate(self, save=False):
coords = self.get_target('coords')
if self.eval_hook:
self.eval_hook(self)
if(self.args.gpu>-1):
pdat = coords.xp.asnumpy(coords.W.data)
else:
pdat = coords.W.data
cinstance = pdat[self.args.nlabel:]
clabel = pdat[:self.args.nlabel]
if self.args.focus_labels is not None:
clabel = clabel[self.args.focus_labels]
ranking = reconst_ranking(cinstance,clabel)
acc = compare_rankings(ranking,self.val_ranking)
hist,err = estimate_vol(clabel,self.hist_top_n)
corr = np.corrcoef(hist.ravel(),self.val_hist.ravel())[0,1]
KL = symmetrisedKL(hist.ravel(),self.val_hist.ravel())
with open(os.path.join(self.args.outdir,"accuracy.txt"), 'a') as f:
print("accuracy: {}, corr: {}, KL: {} \n".format(acc,corr,KL), file=f)
self.count += 1
loss_radius = F.average(coords.W ** 2)
if self.args.save_evaluation or save:
np.savetxt(os.path.join(self.args.outdir,"labels{:0>4}.csv".format(self.count)), pdat[:self.args.nlabel], fmt='%1.5f', delimiter=",")
np.savetxt(os.path.join(self.args.outdir,"instances{:0>4}.csv".format(self.count)), cinstance, fmt='%1.5f', delimiter=",")
full_ranking = np.insert(ranking, 0, np.arange(self.args.ninstance), axis=1) ## add instance id
np.savetxt(os.path.join(self.args.outdir,"ranking{:0>4}.csv".format(self.count)), full_ranking, fmt='%d', delimiter=",")
#plot_arrangements(pdat[:self.args.nlabel],cinstance,fname=os.path.join(self.args.outdir,"count{:0>4}.jpg".format(self.count)),size=5)
save_plot(pdat[:self.args.nlabel],cinstance,os.path.join(self.args.outdir,"count{:0>4}.jpg".format(self.count)))
print("accuracy: {}, corr: {}, KL: {} \n".format(acc,corr,KL))
return {"myval/radius":loss_radius, "myval/corr": corr, "myval/acc1": acc[0], "myval/acc2": acc[1], "myval/accN": acc[-1], "myval/KL": KL}
## updater
class Updater(chainer.training.StandardUpdater):
def __init__(self, *args, **kwargs):
self.coords = kwargs.pop('models')
params = kwargs.pop('params')
super(Updater, self).__init__(*args, **kwargs)
self.args = params['args']
self.adjust_start = int(self.args.repel_start*self.args.epoch) # adjusting repel weight after this epoch
self.lambda_repel_instance = 0 # self.args.lambda_repel_instance
self.lambda_repel_label = self.args.lambda_repel_label
self.pairwise = params['pairwise']
def update_core(self):
opt = self.get_optimizer('opt')
batch = self.get_iterator('main').next()
# dat = self.converter(batch)
# print(pid)
pid,b1,b2 = self.pairwise[batch,0],self.pairwise[batch,1],self.pairwise[batch,2]
label = self.coords.W[:self.args.nlabel]
instance = self.coords.W[self.args.nlabel:]
xp = self.coords.xp
loss,loss_repel_b, loss_repel_p, loss_box = 0,0,0,0
# interpolation of repelling force among instances and among labels
if self.is_new_epoch and self.epoch>=self.adjust_start:
if self.adjust_start>=0:
t = (self.epoch-self.adjust_start) / (self.args.epoch-self.adjust_start) # [0,1]
self.lambda_repel_instance = self.args.lambda_repel_instance * np.cos(0.5*np.pi*(1-t)) # increase
self.lambda_repel_label = self.args.lambda_repel_label * np.cos(0.5*np.pi*(t)) # decrease
else:
self.lambda_repel_instance = self.args.lambda_repel_instance
self.lambda_repel_label = self.args.lambda_repel_label
chainer.report({'lambda_repel': self.lambda_repel_instance}, self.coords)
## order consistency loss
# arccos (spherical)
# dpos = F.arccos(F.sum(instance[pid]*label[b1],axis=1))
# dneg = F.arccos(F.sum(instance[pid]*label[b2],axis=1))
# dpos = -F.sum(instance[pid]*label[b1],axis=1)
# dneg = -F.sum(instance[pid]*label[b2],axis=1)
# loss_ord = F.average(F.relu(dpos-dneg+self.args.margin))
# Euclidean order consistency
loss_ord = F.triplet(instance[pid],label[b1],label[b2], margin=self.args.margin )
chainer.report({'loss_ord': loss_ord}, self.coords)
loss += self.args.lambda_ord * loss_ord
# repelling force among instances
if self.args.lambda_repel_instance>0:
p = np.random.choice(self.args.ninstance,min(self.args.batchsize,self.args.ninstance), replace=False)
# loss_repel_p = F.average((F.matmul(instance[p],instance[p],transb=True)+1)**2) # spherical
# loss_repel_p = F.average(F.relu(F.matmul(instance[p],instance[p],transb=True)-self.args.repel_margin))
dist_mat = F.sum((F.expand_dims(instance[p],axis=0) - F.expand_dims(instance[p],axis=1))**2,axis=2) # distance squared
loss_repel_p = F.average( xp.tri(len(p),k=-1)/(dist_mat+1e-6) ) # strictly lower triangular
chainer.report({'loss_p': loss_repel_p}, self.coords)
# repelling force among labels
if self.args.lambda_repel_label>0:
# loss_repel_b = F.average((F.matmul(label,label,transb=True)+1)**2)
# loss_repel_b = F.average(F.relu(F.matmul(label,label,transb=True)-self.args.repel_margin)) # spherical
dist_mat = F.sum((F.expand_dims(label,axis=0) - F.expand_dims(label,axis=1))**2,axis=2)
# dist_mat += self.args.nlabel*xp.eye(self.args.nlabel)
loss_repel_b = F.average( xp.tri(self.args.nlabel,k=-1)/(dist_mat+1e-6) )
chainer.report({'loss_b': loss_repel_b}, self.coords)
loss += self.lambda_repel_instance * loss_repel_p + self.lambda_repel_label * loss_repel_b
# loss_radius = F.average(self.instance.W ** 2)
# chainer.report({'loss_R': loss_radius}, self.instance)
## force from boundary
if self.args.lambda_ball>0: # coordinates should be in the unit ball
loss_domain = F.average(F.relu(F.sum(label**2, axis=1)-1)) # for labels
p = np.random.choice(self.args.ninstance,min(self.args.batchsize,self.args.ninstance), replace=False)
loss_domain += F.average(F.relu(F.sum(instance[p]**2, axis=1)-1)) # for instances
chainer.report({'loss_domain': loss_domain}, self.coords)
loss += self.args.lambda_ball * loss_domain
elif self.args.lambda_box>0: # coordinates should be in [-1, 1]
loss_domain = F.average(F.relu(label-1)+F.relu(-label-1)) # for labels
p = np.random.choice(self.args.ninstance,min(self.args.batchsize,self.args.ninstance), replace=False)
loss_domain += F.average(F.relu(instance[p]-1)+F.relu(-instance[p]-1)) # for randomly selected instances
chainer.report({'loss_domain': loss_domain}, self.coords)
loss += self.args.lambda_box * loss_domain
self.coords.cleargrads()
loss.backward()
opt.update(loss=loss)
## normalise to norm=1 for spherical
# self.coords.W.data /= xp.sqrt(xp.sum(self.coords.W.data**2,axis=1,keepdims=True))
## clip to the unit box
# self.coords.W.data = xp.clip(self.coords.W.data, -1 ,1)
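# Summary of the loss assembled in update_core(): for each sampled comparison (instance pid
# ranks label b1 above label b2) a Euclidean triplet loss pushes pid's embedding closer to
# b1 than to b2 by at least `margin`; inverse-squared-distance terms repel instances from
# one another and labels from one another, the two repelling weights being cross-faded with
# a cosine schedule once the epoch passes repel_start * epoch; and an optional ball/box
# penalty keeps all coordinates inside the unit domain.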
def main():
# command line argument parsing
parser = argparse.ArgumentParser(description='Ranking learning')
parser.add_argument('train', help='Path to ranking csv file')
parser.add_argument('--val', default=None, help='Path to ranking csv file')
parser.add_argument('--label', '-l', help='Path to initial label coordinates csv file')
parser.add_argument('--instance', '-i', help='Path to initial point coordinates csv file')
parser.add_argument('--outdir', '-o', default='result', help='Directory to output the result')
#
parser.add_argument('--top_n', '-tn', type=int, default=99,
help='Use only top n rankings for each person')
parser.add_argument('--batchsize', '-bs', type=int, default=50,
help='Number of samples in each mini-batch')
parser.add_argument('--epoch', '-e', type=int, default=100,
help='Number of sweeps over the dataset to train')
parser.add_argument('--gpu', '-g', type=int, default=-1,
help='GPU ID (negative value indicates CPU)')
parser.add_argument('--dim', '-d', type=int, default=2,
help='Output dimension')
parser.add_argument('--margin', '-m', type=float, default=0.01,
help='margin to the hyperplane boundary')
parser.add_argument('--learning_rate', '-lr', type=float, default=1e-2,
help='learning rate')
parser.add_argument('--learning_rate_drop', '-ld', type=float, default=1,
help='how many times to half learning rate')
parser.add_argument('--learning_rate_annealing', '-la', type=str, choices=['cos','exp','none'], default='cos',
help='annealing strategy')
parser.add_argument('--optimizer', '-op',choices=optim.keys(),default='Adam',help='optimizer')
parser.add_argument('--lambda_ord', '-lo', type=float, default=10,
help='weight for order consistency')
parser.add_argument('--lambda_repel_instance', '-lri', type=float, default=1,
help='weight for repelling force between instances')
parser.add_argument('--lambda_repel_label', '-lrl', type=float, default=0,
help='weight for repelling force between labels')
parser.add_argument('--lambda_box', type=float, default=0,help='box domain containment loss')
parser.add_argument('--lambda_ball', '-lb', type=float, default=1,help='ball domain containment loss')
parser.add_argument('--repel_start', '-rs', type=float, default=0.3,
help='start increasing repelling weight after this times the total epochs')
# validation
parser.add_argument('--val_top_n', '-vtn', type=int, default=5,
help='Use only top n rankings for each person in the evaluation')
parser.add_argument('--focus_labels', '-fl', default=None, type=int, nargs="*", help='indices of focusing labels for validation')
parser.add_argument('--vis_freq', '-vf', type=int, default=10,
help='evaluation frequency in epochs')
parser.add_argument('--save_evaluation', '-se', action='store_true',help='output evaluation results')
parser.add_argument('--mpi', action='store_true',help='parallelise with MPI')
args = parser.parse_args()
args.outdir = os.path.join(args.outdir, datetime.datetime.now().strftime('%m%d_%H%M'))
chainer.config.autotune = True
## instance id should be 0,1,2,...,m-1
## label id should be 0,1,2,...,n-1
ranking = np.loadtxt(args.train,delimiter=",").astype(np.int32)
if args.val:
val_ranking = np.loadtxt(args.val,delimiter=",").astype(np.int32)
else:
val_ranking = ranking
pairwise_comparisons = make_pairwise_comparison(ranking, args.top_n)
args.nlabel = int(max(np.max(pairwise_comparisons[:,1]),np.max(pairwise_comparisons[:,2]))+1)
args.ninstance = int(np.max(pairwise_comparisons[:,0])+1)
if args.batchsize <= 0:
args.batchsize = min(pairwise_comparisons//100, 200)
## ChainerMN
if args.mpi:
import chainermn
if args.gpu >= 0:
comm = chainermn.create_communicator('hierarchical')
chainer.cuda.get_device(comm.intra_rank).use()
else:
comm = chainermn.create_communicator('naive')
if comm.rank == 0:
primary = True
print(args)
chainer.print_runtime_info()
else:
primary = False
#print("process {}".format(comm.rank))
else:
primary = True
print(args)
chainer.print_runtime_info()
if args.gpu >= 0:
chainer.cuda.get_device(args.gpu).use()
if primary:
print("#labels {}, #instances {}, #ineq {}".format(args.nlabel,args.ninstance,len(pairwise_comparisons)))
save_args(args, args.outdir)
# if args.mpi:
# if comm.rank == 0:
# train = chainermn.scatter_dataset(train, comm, shuffle=True)
# else:
# train = chainermn.scatter_dataset(None, comm, shuffle=True)
#train_iter = chainermn.iterators.create_multi_node_iterator(iterators.SerialIterator(train, args.batchsize), comm)
train_iter = iterators.SerialIterator(range(len(pairwise_comparisons)), args.batchsize, shuffle=True)
## initialise the parameters
if args.label:
xb = np.loadtxt(args.label, delimiter=",")
#print("initial label coordinates loaded from: ", args.label)
elif args.lambda_box>0:
xb = random_from_box(args.dim,args.nlabel)
else:
xb = random_from_ball(args.dim,args.nlabel)
#xb = random_from_sphere(args.dim,args.nlabel, norm=0.9)
if args.instance:
xpl = np.loadtxt(args.instance, delimiter=",")
#print("initial instance coordinates loaded from: ", args.instance)
elif args.lambda_box>0:
xb = random_from_box(args.dim,args.ninstance)
else:
xpl = random_from_ball(args.dim,args.ninstance)
X = np.concatenate([xb,xpl])
# X /= np.sqrt(np.sum(X**2,axis=1,keepdims=True)) # spherical
coords = L.Parameter(X.astype(np.float32))
# Set up an optimizer
optimizer = optim[args.optimizer](args.learning_rate)
if args.mpi:
optimizer = chainermn.create_multi_node_optimizer(optimizer, comm)
optimizer.setup(coords)
if args.gpu >= 0:
coords.to_gpu()
updater = Updater(
models=coords,
iterator={'main': train_iter},
optimizer={'opt': optimizer},
device=args.gpu,
params={'args': args, 'pairwise': pairwise_comparisons}
)
trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=args.outdir)
if primary:
evaluator = Evaluator(train_iter, {'coords':coords}, params={'args': args, 'top_n': args.val_top_n, 'ranking': val_ranking}, device=args.gpu)
if args.vis_freq > 0:
trainer.extend(evaluator,trigger=(args.vis_freq, 'epoch'))
log_interval = max(50000//args.batchsize,10), 'iteration'
if extensions.PlotReport.available():
trainer.extend(extensions.PlotReport(['opt/loss_ord','opt/loss_p','opt/loss_b','opt/loss_domain','opt/loss_R'], #,'myval/radius'],
'epoch', file_name='loss.jpg',postprocess=plot_log))
trainer.extend(extensions.PlotReport(['myval/corr','myval/acc1','myval/acc2','myval/accN'],
'epoch', file_name='loss_val.jpg'))
trainer.extend(extensions.PlotReport(['myval/KL'],
'epoch', file_name='loss_val_KL.jpg'))
trainer.extend(extensions.PrintReport([
'epoch', 'lr','opt/loss_ord', 'opt/loss_p', 'opt/loss_b','opt/loss_domain','myval/corr', 'myval/acc1', 'myval/accN', 'myval/KL' #'elapsed_time', 'opt/lambda_repel',
]),trigger=log_interval)
trainer.extend(extensions.LogReport(trigger=(1, 'epoch')))
trainer.extend(extensions.ProgressBar(update_interval=10))
trainer.extend(extensions.observe_lr('opt'), trigger=(1, 'epoch'))
# trainer.extend(extensions.ParameterStatistics(coords))
## annealing
if args.learning_rate_annealing=='cos':
if args.optimizer in ['Adam','AdaBound','Eve']:
lr_target = 'eta'
else:
lr_target = 'lr'
trainer.extend(CosineShift(lr_target, args.epoch//args.learning_rate_drop, optimizer=optimizer), trigger=(1, 'epoch'))
elif args.learning_rate_annealing=='exp':
if args.optimizer in ['SGD','Momentum','CMomentum','AdaGrad','RMSprop','NesterovAG']:
trainer.extend(extensions.ExponentialShift('lr', 0.5, optimizer=optimizer), trigger=(args.epoch/args.learning_rate_drop, 'epoch'))
elif args.optimizer in ['Adam','AdaBound','Eve']:
trainer.extend(extensions.ExponentialShift("alpha", 0.5, optimizer=optimizer), trigger=(args.epoch/args.learning_rate_drop, 'epoch'))
with open(os.path.join(args.outdir,"args.txt"), 'w') as fh:
fh.write(" ".join(sys.argv))
trainer.run()
if primary:
evaluator.evaluate(save=True)
if __name__ == '__main__':
main()
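# Example invocation (script name and CSV path are placeholders); each row of the ranking CSV
# is "instance_id, best_label_id, 2nd_label_id, ...":
#
#   python ranking_train.py rankings.csv -d 2 -e 100 -bs 50 -o result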
|
the-stack_0_15671 | # -*- coding: utf-8 -*-
import logging
from discord.ext import commands
from handlers.calendar import Calendar, Event
from infra.manager import SecretManager
class EventMeBot(commands.Cog):
def __init__(self, bot_client):
self._bot = bot_client
self._subcommands = ['new']
self._cal_service = Calendar(SecretManager())
self.__logger = logging.getLogger(__name__)
@commands.group(name='ev', help='create a calendar event')
async def ev(self, ctx):
await ctx.channel.send(f'Subcommands: {", ".join(self._subcommands)}')
@ev.command(name='new')
async def new(self, ctx, name, start, end):
"""create a new event: -ev new <name> <date in format YYYYMMDDHHMM> <end in format XH or XM where H is hour and M is minutes>
Example: -ev new test 202104222230 1H
"""
event = None
try:
event = Event(name=name, start=start, end=end)
except ValueError as e:
self.__logger.error(f'Exception occurred with the event: {e}')
await ctx.channel.send('Unrecognized event')
if not event:
return
try:
result = self._cal_service.create_event(event)
except Exception as e:
self.__logger.error(f'Exception occurred: {e}')
await ctx.channel.send('Error creating event in calendar')
else:
await ctx.channel.send(f'Event created! see it in your calendar on {result["htmlLink"]}')
@commands.Cog.listener()
async def on_command_error(self, ctx, error):
self.__logger.error(f'Something went wrong in Discord: {error}. Context: {ctx}')
def setup(client):
client.add_cog(EventMeBot(client))
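# This cog is expected to be loaded from the main bot module, e.g. via
# client.load_extension('eventmebot') (the module path is an assumption), after which
# "-ev new <name> <YYYYMMDDHHMM> <1H|30M>" becomes available in chat.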
|
the-stack_0_15672 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for unit-testing Keras."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
import numpy as np
from tensorflow.python import keras
from tensorflow.python import tf2
from tensorflow.python.eager import context
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import test_util
from tensorflow.python.keras.optimizer_v2 import adadelta as adadelta_v2
from tensorflow.python.keras.optimizer_v2 import adagrad as adagrad_v2
from tensorflow.python.keras.optimizer_v2 import adam as adam_v2
from tensorflow.python.keras.optimizer_v2 import adamax as adamax_v2
from tensorflow.python.keras.optimizer_v2 import gradient_descent as gradient_descent_v2
from tensorflow.python.keras.optimizer_v2 import nadam as nadam_v2
from tensorflow.python.keras.optimizer_v2 import rmsprop as rmsprop_v2
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util import tf_inspect
def get_test_data(train_samples,
test_samples,
input_shape,
num_classes,
random_seed=None):
"""Generates test data to train a model on.
Arguments:
train_samples: Integer, how many training samples to generate.
test_samples: Integer, how many test samples to generate.
input_shape: Tuple of integers, shape of the inputs.
num_classes: Integer, number of classes for the data and targets.
random_seed: Integer, random seed used by numpy to generate data.
Returns:
A tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`.
"""
if random_seed is not None:
np.random.seed(random_seed)
num_sample = train_samples + test_samples
templates = 2 * num_classes * np.random.random((num_classes,) + input_shape)
y = np.random.randint(0, num_classes, size=(num_sample,))
x = np.zeros((num_sample,) + input_shape, dtype=np.float32)
for i in range(num_sample):
x[i] = templates[y[i]] + np.random.normal(loc=0, scale=1., size=input_shape)
return ((x[:train_samples], y[:train_samples]),
(x[train_samples:], y[train_samples:]))
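# Example (hypothetical): a two-class toy problem with 10-dimensional inputs.
#   (x_train, y_train), (x_test, y_test) = get_test_data(
#       train_samples=200, test_samples=50, input_shape=(10,), num_classes=2)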
@test_util.use_deterministic_cudnn
def layer_test(layer_cls, kwargs=None, input_shape=None, input_dtype=None,
input_data=None, expected_output=None,
expected_output_dtype=None, expected_output_shape=None,
validate_training=True, adapt_data=None):
"""Test routine for a layer with a single input and single output.
Arguments:
layer_cls: Layer class object.
kwargs: Optional dictionary of keyword arguments for instantiating the
layer.
input_shape: Input shape tuple.
input_dtype: Data type of the input data.
input_data: Numpy array of input data.
expected_output: Numpy array of the expected output.
expected_output_dtype: Data type expected for the output.
expected_output_shape: Shape tuple for the expected shape of the output.
validate_training: Whether to attempt to validate training on this layer.
This might be set to False for non-differentiable layers that output
string or integer values.
adapt_data: Optional data for an 'adapt' call. If None, adapt() will not
be tested for this layer. This is only relevant for PreprocessingLayers.
Returns:
The output data (Numpy array) returned by the layer, for additional
checks to be done by the calling code.
Raises:
ValueError: if `input_shape is None`.
"""
if input_data is None:
if input_shape is None:
raise ValueError('input_shape is None')
if not input_dtype:
input_dtype = 'float32'
input_data_shape = list(input_shape)
for i, e in enumerate(input_data_shape):
if e is None:
input_data_shape[i] = np.random.randint(1, 4)
input_data = 10 * np.random.random(input_data_shape)
if input_dtype[:5] == 'float':
input_data -= 0.5
input_data = input_data.astype(input_dtype)
elif input_shape is None:
input_shape = input_data.shape
if input_dtype is None:
input_dtype = input_data.dtype
if expected_output_dtype is None:
expected_output_dtype = input_dtype
# instantiation
kwargs = kwargs or {}
layer = layer_cls(**kwargs)
# Test adapt, if data was passed.
if adapt_data is not None:
layer.adapt(adapt_data)
# test get_weights , set_weights at layer level
weights = layer.get_weights()
layer.set_weights(weights)
# test and instantiation from weights
if 'weights' in tf_inspect.getargspec(layer_cls.__init__):
kwargs['weights'] = weights
layer = layer_cls(**kwargs)
# test in functional API
x = keras.layers.Input(shape=input_shape[1:], dtype=input_dtype)
y = layer(x)
if keras.backend.dtype(y) != expected_output_dtype:
raise AssertionError('When testing layer %s, for input %s, found output '
'dtype=%s but expected to find %s.\nFull kwargs: %s' %
(layer_cls.__name__,
x,
keras.backend.dtype(y),
expected_output_dtype,
kwargs))
def assert_shapes_equal(expected, actual):
"""Asserts that the output shape from the layer matches the actual shape."""
if len(expected) != len(actual):
raise AssertionError(
'When testing layer %s, for input %s, found output_shape='
'%s but expected to find %s.\nFull kwargs: %s' %
(layer_cls.__name__, x, actual, expected, kwargs))
for expected_dim, actual_dim in zip(expected, actual):
if isinstance(expected_dim, tensor_shape.Dimension):
expected_dim = expected_dim.value
if isinstance(actual_dim, tensor_shape.Dimension):
actual_dim = actual_dim.value
if expected_dim is not None and expected_dim != actual_dim:
raise AssertionError(
'When testing layer %s, for input %s, found output_shape='
'%s but expected to find %s.\nFull kwargs: %s' %
(layer_cls.__name__, x, actual, expected, kwargs))
if expected_output_shape is not None:
assert_shapes_equal(tensor_shape.TensorShape(expected_output_shape),
y.shape)
# check shape inference
model = keras.models.Model(x, y)
computed_output_shape = tuple(
layer.compute_output_shape(
tensor_shape.TensorShape(input_shape)).as_list())
computed_output_signature = layer.compute_output_signature(
tensor_spec.TensorSpec(shape=input_shape, dtype=input_dtype))
actual_output = model.predict(input_data)
actual_output_shape = actual_output.shape
assert_shapes_equal(computed_output_shape, actual_output_shape)
assert_shapes_equal(computed_output_signature.shape, actual_output_shape)
if computed_output_signature.dtype != actual_output.dtype:
raise AssertionError(
'When testing layer %s, for input %s, found output_dtype='
'%s but expected to find %s.\nFull kwargs: %s' %
(layer_cls.__name__, x, actual_output.dtype,
computed_output_signature.dtype, kwargs))
if expected_output is not None:
np.testing.assert_allclose(actual_output, expected_output,
rtol=1e-3, atol=1e-6)
# test serialization, weight setting at model level
model_config = model.get_config()
recovered_model = keras.models.Model.from_config(model_config)
if model.weights:
weights = model.get_weights()
recovered_model.set_weights(weights)
output = recovered_model.predict(input_data)
np.testing.assert_allclose(output, actual_output, rtol=1e-3, atol=1e-6)
# test training mode (e.g. useful for dropout tests)
  # Rebuild the model to avoid the graph being reused between predict() and
  # train_on_batch(). See b/120160788 for more details. This should be
  # mitigated after 2.0.
if validate_training:
model = keras.models.Model(x, layer(x))
if _thread_local_data.run_eagerly is not None:
model.compile(
'rmsprop',
'mse',
weighted_metrics=['acc'],
run_eagerly=should_run_eagerly())
else:
model.compile('rmsprop', 'mse', weighted_metrics=['acc'])
model.train_on_batch(input_data, actual_output)
# test as first layer in Sequential API
layer_config = layer.get_config()
layer_config['batch_input_shape'] = input_shape
layer = layer.__class__.from_config(layer_config)
# Test adapt, if data was passed.
if adapt_data is not None:
layer.adapt(adapt_data)
model = keras.models.Sequential()
model.add(layer)
actual_output = model.predict(input_data)
actual_output_shape = actual_output.shape
for expected_dim, actual_dim in zip(computed_output_shape,
actual_output_shape):
if expected_dim is not None:
if expected_dim != actual_dim:
raise AssertionError(
'When testing layer %s **after deserialization**, '
'for input %s, found output_shape='
'%s but expected to find inferred shape %s.\nFull kwargs: %s' %
(layer_cls.__name__,
x,
actual_output_shape,
computed_output_shape,
kwargs))
if expected_output is not None:
np.testing.assert_allclose(actual_output, expected_output,
rtol=1e-3, atol=1e-6)
# test serialization, weight setting at model level
model_config = model.get_config()
recovered_model = keras.models.Sequential.from_config(model_config)
if model.weights:
weights = model.get_weights()
recovered_model.set_weights(weights)
output = recovered_model.predict(input_data)
np.testing.assert_allclose(output, actual_output, rtol=1e-3, atol=1e-6)
# for further checks in the caller function
return actual_output
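# Illustrative usage sketch (not part of the original module). The routine
# above is assumed here to be exposed as `layer_test`, following the usual
# TensorFlow testing-utils naming; the layer class and shapes below are picked
# purely for the example:
#
#   layer_test(keras.layers.Dense,
#              kwargs={'units': 3},
#              input_shape=(2, 4),
#              expected_output_dtype='float32')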
_thread_local_data = threading.local()
_thread_local_data.model_type = None
_thread_local_data.run_eagerly = None
_thread_local_data.experimental_run_tf_function = None
@tf_contextlib.contextmanager
def model_type_scope(value):
"""Provides a scope within which the model type to test is equal to `value`.
The model type gets restored to its original value upon exiting the scope.
Arguments:
value: model type value
Yields:
The provided value.
"""
previous_value = _thread_local_data.model_type
try:
_thread_local_data.model_type = value
yield value
finally:
# Restore model type to initial value.
_thread_local_data.model_type = previous_value
@tf_contextlib.contextmanager
def run_eagerly_scope(value):
"""Provides a scope within which we compile models to run eagerly or not.
The boolean gets restored to its original value upon exiting the scope.
Arguments:
value: Bool specifying if we should run models eagerly in the active test.
Should be True or False.
Yields:
The provided value.
"""
previous_value = _thread_local_data.run_eagerly
try:
_thread_local_data.run_eagerly = value
yield value
finally:
    # Restore run_eagerly to its initial value.
_thread_local_data.run_eagerly = previous_value
def should_run_eagerly():
"""Returns whether the models we are testing should be run eagerly."""
if _thread_local_data.run_eagerly is None:
raise ValueError('Cannot call `should_run_eagerly()` outside of a '
'`run_eagerly_scope()` or `run_all_keras_modes` '
'decorator.')
return _thread_local_data.run_eagerly and context.executing_eagerly()
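# Illustrative sketch (not part of the original module): within the scope,
# should_run_eagerly() reports the value passed to run_eagerly_scope(),
# combined with whether eager execution is currently active.
#
#   with run_eagerly_scope(True):
#     assert should_run_eagerly() == context.executing_eagerly()
#   with run_eagerly_scope(False):
#     assert not should_run_eagerly()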
@tf_contextlib.contextmanager
def experimental_run_tf_function_scope(value):
"""Provides a scope within which we compile models to run with distribution.
The boolean gets restored to its original value upon exiting the scope.
Arguments:
value: Bool specifying if we should run models with default distribution
in the active test. Should be True or False.
Yields:
The provided value.
"""
previous_value = _thread_local_data.experimental_run_tf_function
try:
_thread_local_data.experimental_run_tf_function = value
yield value
finally:
    # Restore experimental_run_tf_function to its initial value.
_thread_local_data.experimental_run_tf_function = previous_value
def should_run_tf_function():
"""Returns whether the models we are testing should be run distributed."""
if _thread_local_data.experimental_run_tf_function is None:
raise ValueError(
'Cannot call `should_run_tf_function()` outside of a '
'`experimental_run_tf_function_scope()` or `run_all_keras_modes` '
'decorator.')
return (_thread_local_data.experimental_run_tf_function and
context.executing_eagerly())
def get_model_type():
"""Gets the model type that should be tested."""
if _thread_local_data.model_type is None:
raise ValueError('Cannot call `get_model_type()` outside of a '
'`model_type_scope()` or `run_with_all_model_types` '
'decorator.')
return _thread_local_data.model_type
def get_small_sequential_mlp(num_hidden, num_classes, input_dim=None):
model = keras.models.Sequential()
if input_dim:
model.add(keras.layers.Dense(num_hidden, activation='relu',
input_dim=input_dim))
else:
model.add(keras.layers.Dense(num_hidden, activation='relu'))
activation = 'sigmoid' if num_classes == 1 else 'softmax'
model.add(keras.layers.Dense(num_classes, activation=activation))
return model
def get_small_functional_mlp(num_hidden, num_classes, input_dim):
inputs = keras.Input(shape=(input_dim,))
outputs = keras.layers.Dense(num_hidden, activation='relu')(inputs)
activation = 'sigmoid' if num_classes == 1 else 'softmax'
outputs = keras.layers.Dense(num_classes, activation=activation)(outputs)
return keras.Model(inputs, outputs)
class _SmallSubclassMLP(keras.Model):
"""A subclass model based small MLP."""
def __init__(self, num_hidden, num_classes):
super(_SmallSubclassMLP, self).__init__()
self.layer_a = keras.layers.Dense(num_hidden, activation='relu')
activation = 'sigmoid' if num_classes == 1 else 'softmax'
self.layer_b = keras.layers.Dense(num_classes, activation=activation)
def call(self, inputs, **kwargs):
x = self.layer_a(inputs)
return self.layer_b(x)
class _SmallSubclassMLPCustomBuild(keras.Model):
"""A subclass model small MLP that uses a custom build method."""
def __init__(self, num_hidden, num_classes):
super(_SmallSubclassMLPCustomBuild, self).__init__()
self.layer_a = None
self.layer_b = None
self.num_hidden = num_hidden
self.num_classes = num_classes
def build(self, input_shape):
self.layer_a = keras.layers.Dense(self.num_hidden, activation='relu')
activation = 'sigmoid' if self.num_classes == 1 else 'softmax'
self.layer_b = keras.layers.Dense(self.num_classes, activation=activation)
def call(self, inputs, **kwargs):
x = self.layer_a(inputs)
return self.layer_b(x)
def get_small_subclass_mlp(num_hidden, num_classes):
return _SmallSubclassMLP(num_hidden, num_classes)
def get_small_subclass_mlp_with_custom_build(num_hidden, num_classes):
return _SmallSubclassMLPCustomBuild(num_hidden, num_classes)
def get_small_mlp(num_hidden, num_classes, input_dim):
"""Get a small mlp of the model type specified by `get_model_type`."""
model_type = get_model_type()
if model_type == 'subclass':
return get_small_subclass_mlp(num_hidden, num_classes)
if model_type == 'subclass_custom_build':
return get_small_subclass_mlp_with_custom_build(num_hidden, num_classes)
if model_type == 'sequential':
return get_small_sequential_mlp(num_hidden, num_classes, input_dim)
if model_type == 'functional':
return get_small_functional_mlp(num_hidden, num_classes, input_dim)
raise ValueError('Unknown model type {}'.format(model_type))
class _SubclassModel(keras.Model):
"""A Keras subclass model."""
def __init__(self, layers):
super(_SubclassModel, self).__init__()
# Note that clone and build doesn't support lists of layers in subclassed
# models. Adding each layer directly here.
for i, layer in enumerate(layers):
setattr(self, self._layer_name_for_i(i), layer)
self.num_layers = len(layers)
def _layer_name_for_i(self, i):
return 'layer{}'.format(i)
def call(self, inputs, **kwargs):
x = inputs
for i in range(self.num_layers):
layer = getattr(self, self._layer_name_for_i(i))
x = layer(x)
return x
class _SubclassModelCustomBuild(keras.Model):
"""A Keras subclass model that uses a custom build method."""
def __init__(self, layer_generating_func):
super(_SubclassModelCustomBuild, self).__init__()
self.all_layers = None
self._layer_generating_func = layer_generating_func
def build(self, input_shape):
layers = []
for layer in self._layer_generating_func():
layers.append(layer)
self.all_layers = layers
def call(self, inputs, **kwargs):
x = inputs
for layer in self.all_layers:
x = layer(x)
return x
def get_model_from_layers(layers, input_shape=None, input_dtype=None):
"""Builds a model from a sequence of layers."""
model_type = get_model_type()
if model_type == 'subclass':
return _SubclassModel(layers)
if model_type == 'subclass_custom_build':
layer_generating_func = lambda: layers
return _SubclassModelCustomBuild(layer_generating_func)
if model_type == 'sequential':
model = keras.models.Sequential()
if input_shape:
model.add(keras.layers.InputLayer(input_shape=input_shape,
dtype=input_dtype))
for layer in layers:
model.add(layer)
return model
if model_type == 'functional':
if not input_shape:
raise ValueError('Cannot create a functional model from layers with no '
'input shape.')
inputs = keras.Input(shape=input_shape, dtype=input_dtype)
outputs = inputs
for layer in layers:
outputs = layer(outputs)
return keras.Model(inputs, outputs)
raise ValueError('Unknown model type {}'.format(model_type))
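# Illustrative sketch (not part of the original module): the same stack of
# layers can be built as each supported model type. The Dense layer and the
# input shape are assumptions made for this example only.
#
#   for _model_type in ('sequential', 'functional', 'subclass'):
#     with model_type_scope(_model_type):
#       _model = get_model_from_layers([keras.layers.Dense(3)],
#                                      input_shape=(4,))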
class _MultiIOSubclassModel(keras.Model):
"""Multi IO Keras subclass model."""
def __init__(self, branch_a, branch_b, shared_input_branch=None,
shared_output_branch=None):
super(_MultiIOSubclassModel, self).__init__()
self._shared_input_branch = shared_input_branch
self._branch_a = branch_a
self._branch_b = branch_b
self._shared_output_branch = shared_output_branch
def call(self, inputs, **kwargs):
if self._shared_input_branch:
for layer in self._shared_input_branch:
inputs = layer(inputs)
a = inputs
b = inputs
else:
a, b = inputs
for layer in self._branch_a:
a = layer(a)
for layer in self._branch_b:
b = layer(b)
outs = [a, b]
if self._shared_output_branch:
for layer in self._shared_output_branch:
outs = layer(outs)
return outs
class _MultiIOSubclassModelCustomBuild(keras.Model):
"""Multi IO Keras subclass model that uses a custom build method."""
def __init__(self, branch_a_func, branch_b_func,
shared_input_branch_func=None,
shared_output_branch_func=None):
super(_MultiIOSubclassModelCustomBuild, self).__init__()
self._shared_input_branch_func = shared_input_branch_func
self._branch_a_func = branch_a_func
self._branch_b_func = branch_b_func
self._shared_output_branch_func = shared_output_branch_func
self._shared_input_branch = None
self._branch_a = None
self._branch_b = None
self._shared_output_branch = None
def build(self, input_shape):
if self._shared_input_branch_func():
self._shared_input_branch = self._shared_input_branch_func()
self._branch_a = self._branch_a_func()
self._branch_b = self._branch_b_func()
if self._shared_output_branch_func():
self._shared_output_branch = self._shared_output_branch_func()
def call(self, inputs, **kwargs):
if self._shared_input_branch:
for layer in self._shared_input_branch:
inputs = layer(inputs)
a = inputs
b = inputs
else:
a, b = inputs
for layer in self._branch_a:
a = layer(a)
for layer in self._branch_b:
b = layer(b)
outs = a, b
if self._shared_output_branch:
for layer in self._shared_output_branch:
outs = layer(outs)
return outs
def get_multi_io_model(
branch_a,
branch_b,
shared_input_branch=None,
shared_output_branch=None):
"""Builds a multi-io model that contains two branches.
The produced model will be of the type specified by `get_model_type`.
To build a two-input, two-output model:
Specify a list of layers for branch a and branch b, but do not specify any
shared input branch or shared output branch. The resulting model will apply
each branch to a different input, to produce two outputs.
The first value in branch_a must be the Keras 'Input' layer for branch a,
and the first value in branch_b must be the Keras 'Input' layer for
branch b.
example usage:
```
branch_a = [Input(shape=(2,), name='a'), Dense(), Dense()]
branch_b = [Input(shape=(3,), name='b'), Dense(), Dense()]
model = get_multi_io_model(branch_a, branch_b)
```
To build a two-input, one-output model:
Specify a list of layers for branch a and branch b, and specify a
shared output branch. The resulting model will apply
each branch to a different input. It will then apply the shared output
branch to a tuple containing the intermediate outputs of each branch,
to produce a single output. The first layer in the shared_output_branch
must be able to merge a tuple of two tensors.
The first value in branch_a must be the Keras 'Input' layer for branch a,
and the first value in branch_b must be the Keras 'Input' layer for
branch b.
example usage:
```
input_branch_a = [Input(shape=(2,), name='a'), Dense(), Dense()]
input_branch_b = [Input(shape=(3,), name='b'), Dense(), Dense()]
shared_output_branch = [Concatenate(), Dense(), Dense()]
model = get_multi_io_model(input_branch_a, input_branch_b,
shared_output_branch=shared_output_branch)
```
To build a one-input, two-output model:
Specify a list of layers for branch a and branch b, and specify a
shared input branch. The resulting model will take one input, and apply
the shared input branch to it. It will then respectively apply each branch
to that intermediate result in parallel, to produce two outputs.
The first value in the shared_input_branch must be the Keras 'Input' layer
for the whole model. Branch a and branch b should not contain any Input
layers.
example usage:
```
shared_input_branch = [Input(shape=(2,), name='in'), Dense(), Dense()]
output_branch_a = [Dense(), Dense()]
output_branch_b = [Dense(), Dense()]
  model = get_multi_io_model(output_branch_a, output_branch_b,
shared_input_branch=shared_input_branch)
```
Args:
branch_a: A sequence of layers for branch a of the model.
branch_b: A sequence of layers for branch b of the model.
shared_input_branch: An optional sequence of layers to apply to a single
input, before applying both branches to that intermediate result. If set,
the model will take only one input instead of two. Defaults to None.
shared_output_branch: An optional sequence of layers to merge the
intermediate results produced by branch a and branch b. If set,
the model will produce only one output instead of two. Defaults to None.
Returns:
A multi-io model of the type specified by `get_model_type`, specified
by the different branches.
"""
# Extract the functional inputs from the layer lists
if shared_input_branch:
inputs = shared_input_branch[0]
shared_input_branch = shared_input_branch[1:]
else:
inputs = branch_a[0], branch_b[0]
branch_a = branch_a[1:]
branch_b = branch_b[1:]
model_type = get_model_type()
if model_type == 'subclass':
return _MultiIOSubclassModel(branch_a, branch_b, shared_input_branch,
shared_output_branch)
if model_type == 'subclass_custom_build':
return _MultiIOSubclassModelCustomBuild((lambda: branch_a),
(lambda: branch_b),
(lambda: shared_input_branch),
(lambda: shared_output_branch))
if model_type == 'sequential':
raise ValueError('Cannot use `get_multi_io_model` to construct '
'sequential models')
if model_type == 'functional':
if shared_input_branch:
a_and_b = inputs
for layer in shared_input_branch:
a_and_b = layer(a_and_b)
a = a_and_b
b = a_and_b
else:
a, b = inputs
for layer in branch_a:
a = layer(a)
for layer in branch_b:
b = layer(b)
outputs = a, b
if shared_output_branch:
for layer in shared_output_branch:
outputs = layer(outputs)
return keras.Model(inputs, outputs)
raise ValueError('Unknown model type {}'.format(model_type))
_V2_OPTIMIZER_MAP = {
'adadelta': adadelta_v2.Adadelta,
'adagrad': adagrad_v2.Adagrad,
'adam': adam_v2.Adam,
'adamax': adamax_v2.Adamax,
'nadam': nadam_v2.Nadam,
'rmsprop': rmsprop_v2.RMSprop,
'sgd': gradient_descent_v2.SGD
}
def get_v2_optimizer(name, **kwargs):
"""Get the v2 optimizer requested.
  This is only necessary until v2 optimizers are the default, as we are testing
  in Eager, and Eager + v1 optimizers fail tests. When we are in v2, the strings
  alone should be sufficient, and this mapping can theoretically be removed.
Args:
name: string name of Keras v2 optimizer.
**kwargs: any kwargs to pass to the optimizer constructor.
Returns:
Initialized Keras v2 optimizer.
Raises:
ValueError: if an unknown name was passed.
"""
try:
return _V2_OPTIMIZER_MAP[name](**kwargs)
except KeyError:
raise ValueError(
'Could not find requested v2 optimizer: {}\nValid choices: {}'.format(
name, list(_V2_OPTIMIZER_MAP.keys())))
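# Illustrative usage (not part of the original module): any keyword arguments
# are forwarded verbatim to the optimizer constructor, e.g.
#
#   opt = get_v2_optimizer('adam', learning_rate=1e-3)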
def get_expected_metric_variable_names(var_names, name_suffix=''):
"""Returns expected metric variable names given names and prefix/suffix."""
if tf2.enabled() or context.executing_eagerly():
    # In V1 eager mode and in V2, variable names are not made unique.
return [n + ':0' for n in var_names]
# In V1 graph mode variable names are made unique using a suffix.
return [n + name_suffix + ':0' for n in var_names]
|
the-stack_0_15673 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import print_function, unicode_literals
import frappe
no_cache = 1
def get_context(context):
if frappe.flags.in_migrate:
return
context.http_status_code = 500
try:
context["button_color"]=frappe.get_doc("Website Settings").button_color
except:
context["button_color"]="#2595ec"
if not context["button_color"]:
context["button_color"]="#2595ec"
print(frappe.get_traceback())
return {"error": frappe.get_traceback().replace("<", "<").replace(">", ">")}
|
the-stack_0_15677 | from typing import List
import databases
import sqlalchemy
from fastapi import FastAPI
from pydantic import BaseModel
# SQLAlchemy specific code, as with any other app
DATABASE_URL = "sqlite:///./test.db"
# DATABASE_URL = "postgresql://user:password@postgresserver/db"
database = databases.Database(DATABASE_URL)
metadata = sqlalchemy.MetaData()
notes = sqlalchemy.Table(
"notes",
metadata,
sqlalchemy.Column("id", sqlalchemy.Integer, primary_key=True),
sqlalchemy.Column("text", sqlalchemy.String),
sqlalchemy.Column("completed", sqlalchemy.Boolean),
)
engine = sqlalchemy.create_engine(
DATABASE_URL, connect_args={"check_same_thread": False}
)
metadata.create_all(engine)
class NoteIn(BaseModel):
text: str
completed: bool
class Note(BaseModel):
id: int
text: str
completed: bool
app = FastAPI()
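# Illustrative note (not part of the original file): assuming this module is
# saved as main.py and uvicorn is installed, the API can be served with
#
#   uvicorn main:app --reload
#
# after which POST /notes/ inserts a row and GET /notes/ lists the stored notes.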
@app.on_event("startup")
async def startup():
await database.connect()
@app.on_event("shutdown")
async def shutdown():
await database.disconnect()
@app.get("/notes/", response_model=List[Note])
async def read_notes():
query = notes.select()
return await database.fetch_all(query)
@app.post("/notes/", response_model=Note)
async def create_note(note: NoteIn):
query = notes.insert().values(text=note.text, completed=note.completed)
last_record_id = await database.execute(query)
return {**note.dict(), "id": last_record_id} |
the-stack_0_15679 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
from inference_pass_test import InferencePassTest
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid.core import PassVersionChecker
from paddle.fluid.core import AnalysisConfig
class TRTNearestInterpTest(InferencePassTest):
def setUp(self):
self.set_params()
with fluid.program_guard(self.main_program, self.startup_program):
if self.data_layout == 'NCHW':
shape = [
-1, self.channels, self.origin_shape[0],
self.origin_shape[1]
]
else:
shape = [
-1, self.origin_shape[0], self.origin_shape[1],
self.channels
]
data = fluid.data(name='data', shape=shape, dtype='float32')
resize_out = self.append_nearest_interp(data)
out = fluid.layers.batch_norm(resize_out, is_test=True)
if self.data_layout == 'NCHW':
shape = [
self.bs, self.channels, self.origin_shape[0],
self.origin_shape[1]
]
else:
shape = [
self.bs, self.origin_shape[0], self.origin_shape[1],
self.channels
]
self.feeds = {'data': np.random.random(shape).astype('float32'), }
self.enable_trt = True
self.trt_parameters = TRTNearestInterpTest.TensorRTParam(
1 << 30, self.bs, 1, AnalysisConfig.Precision.Float32, False, False)
self.fetch_list = [out]
def set_params(self):
self.bs = 4
self.scale = 1
self.channels = 3
self.origin_shape = (32, 32) # HW
self.resize_shape = (64, 64) # HW
self.align_corners = True
self.data_layout = 'NCHW'
def append_nearest_interp(self, data):
if self.scale > 0.:
return fluid.layers.resize_nearest(
data,
scale=self.scale,
align_corners=self.align_corners,
data_format=self.data_layout)
return fluid.layers.resize_nearest(
data,
out_shape=self.resize_shape,
align_corners=self.align_corners,
data_format=self.data_layout)
def test_check_output(self):
if core.is_compiled_with_cuda():
use_gpu = True
self.check_output_with_option(use_gpu, flatten=True)
self.assertTrue(
PassVersionChecker.IsCompatible('tensorrt_subgraph_pass'))
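# The subclasses below only override set_params(): each one re-runs the same
# TensorRT nearest-interpolation check with a different combination of scale,
# output shape, align_corners and data layout (NCHW / NHWC).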
class TRTNearestInterpTest1(TRTNearestInterpTest):
def set_params(self):
self.bs = 4
self.scale = -1
self.channels = 3
self.origin_shape = (32, 32) # HW
self.resize_shape = (64, 64) # HW
self.align_corners = True
self.data_layout = 'NCHW'
class TRTNearestInterpTest2(TRTNearestInterpTest):
def set_params(self):
self.bs = 4
self.scale = 2.
self.channels = 3
self.origin_shape = (32, 32) # HW
self.resize_shape = (64, 64) # HW
self.align_corners = False
self.data_layout = 'NCHW'
class TRTNearestInterpTest3(TRTNearestInterpTest):
def set_params(self):
self.bs = 4
self.scale = -1
self.channels = 3
self.origin_shape = (32, 32) # HW
self.resize_shape = (64, 64) # HW
self.align_corners = False
self.data_layout = 'NCHW'
class TRTNearestInterpTest4(TRTNearestInterpTest):
def set_params(self):
self.bs = 4
self.scale = -1
self.channels = 3
self.origin_shape = (32, 32) # HW
self.resize_shape = (47, 48) # HW
self.align_corners = False
self.data_layout = 'NCHW'
class TRTNearestInterpTest5(TRTNearestInterpTest):
def set_params(self):
self.bs = 4
self.scale = -1
self.channels = 3
self.origin_shape = (32, 32) # HW
self.resize_shape = (64, 64) # HW
self.align_corners = True
self.data_layout = 'NHWC'
class TRTNearestInterpTest6(TRTNearestInterpTest):
def set_params(self):
self.bs = 4
self.scale = 2.
self.channels = 3
self.origin_shape = (32, 32) # HW
self.resize_shape = (64, 64) # HW
self.align_corners = False
self.data_layout = 'NHWC'
class TRTNearestInterpTest7(TRTNearestInterpTest):
def set_params(self):
self.bs = 4
self.scale = -1
self.channels = 3
self.origin_shape = (32, 32) # HW
self.resize_shape = (64, 64) # HW
self.align_corners = False
self.data_layout = 'NHWC'
class TRTNearestInterpTest8(TRTNearestInterpTest):
def set_params(self):
self.bs = 4
self.scale = -1
self.channels = 3
self.origin_shape = (32, 32) # HW
self.resize_shape = (47, 48) # HW
self.align_corners = False
self.data_layout = 'NHWC'
class TRTNearestInterpTest9(TRTNearestInterpTest):
def set_params(self):
self.bs = 4
self.scale = -1
self.channels = 3
self.origin_shape = (32, 32) # HW
self.resize_shape = (47, 48) # HW
self.align_corners = False
self.data_layout = 'NHWC'
if __name__ == "__main__":
unittest.main()
|
the-stack_0_15680 | #from os import path
import tkinter as tk
from tkinter.filedialog import * # for the file dialogs
from PIL import Image as Img
from PIL import ImageTk
import lib.DataTool as DT
from tkinter import messagebox
def export_page(fileToExport):
"""
[Description]
    Function handling the Export data action: exports the selected database to a CSV file.
    :param fileToExport: name of the database to export ("vehicule", "client" or "tarif")
:return:
"""
if fileToExport == "vehicule":
DT.export_bdd(DT.dfv, "./data/CSV_export_vehicule.csv")
MsgboxText = """
/!\ La base de donnée {} a été exporté avec succès /!\
path:../Gestionnaire_Automonile/data/CSV_export_vehicule.csv
""".format(fileToExport)
messagebox.showwarning(title="Export base de donnée", message=MsgboxText)
elif fileToExport == "client":
DT.export_bdd(DT.dfc, "./data/CSV_export_client.csv")
MsgboxText = """
/!\ La base de donnée {} a été exporté avec succès /!\
path:../Gestionnaire_Automonile/data/CSV_export_client.csv
""".format(fileToExport)
messagebox.showwarning(title="Export base de donnée", message=MsgboxText)
elif fileToExport == "tarif":
DT.export_bdd(DT.dft, "./data/CSV_export_tarif.csv")
MsgboxText = """
/!\ La base de donnée {} a été exporté avec succès /!\
path:../Gestionnaire_Automonile/data/CSV_export_tarif.csv
""".format(fileToExport)
messagebox.showwarning(title="Export base de donnée", message=MsgboxText)
else:
pass
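# Illustrative call (not part of the original module): exporting the client
# database writes ./data/CSV_export_client.csv and shows a confirmation box.
#
#   export_page("client")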
def import_page(master):
"""
[Description]
    Function generating the Import data page.
    :param master: master refers to the parent window
:return:
"""
app = tk.Toplevel(master)
h = app.winfo_screenheight()
w = app.winfo_screenwidth()
screen = str(round(w*0.748)) +"x" + str(round(h*0.91)) + "+" + str(round(w*0.246)) + "+" + str(round(h*0.052))
app.geometry(screen)
app.attributes("-toolwindow", 1)# Supprime les boutons Réduire/Agrandir
app.transient(master)
app.resizable(False, False)
app.title("Import DataBase")
# - - - - - - - - - - - - - - - - - -
    # Creation of the window and its associated widgets
# - - - - - - - - - - - - - - - - - -
file = tk.StringVar()
file.set("Pas de fichier pour l'instant")
    # Create a Label showing the selected file path
screen = tk.Label(app, textvariable=file, width=70)
screen.pack()
    # Ask the user for the path of the file to import
filename = askopenfilename(title="Importer une Base de Donnée", filetypes=[('csv files', '.csv'), ('all files', '.*')])
    # Update the displayed file path
file.set(filename)
app.mainloop() |
the-stack_0_15682 | """Tests for instantiating new synchronized objects."""
# pylint: disable=unused-variable,singleton-comparison
from dataclasses import dataclass, field
from typing import Dict
from datafiles import Missing, datafile
from datafiles.utils import logbreak, write
from . import xfail_with_pep_563
@datafile("../tmp/sample.yml", manual=True)
class SampleWithDefaults:
foo: int = 1
bar: str = "a"
@dataclass
class NestedSample:
name: str
score: float
@datafile("../tmp/sample.yml", manual=True)
class SampleWithDefaultsAndNesting:
nested: NestedSample
name: str = ""
score: float = 0.0
@datafile("../tmp/sample.yml", manual=True)
class SampleWithFactoryDefaults:
a: float
b: float
c: float = field(default_factory=lambda: 42)
@datafile("../tmp/sample.yml", manual=True)
class SampleWithComputedDefaults:
a: float
b: float
c: float = field(init=False)
def __post_init__(self):
self.c = self.a + self.b
def describe_existing_file():
def it_wins_when_no_init_values(expect):
write(
"tmp/sample.yml",
"""
foo: 2
bar: b
""",
)
sample = SampleWithDefaults()
expect(sample.foo) == 2
expect(sample.bar) == "b"
def it_loses_against_init_values(expect):
write(
"tmp/sample.yml",
"""
foo: 3
bar: c
""",
)
sample = SampleWithDefaults(4, "d")
expect(sample.foo) == 4
expect(sample.bar) == "d"
def it_wins_against_default_init_values(expect):
write(
"tmp/sample.yml",
"""
bar: e
""",
)
sample = SampleWithDefaults(foo=5)
expect(sample.foo) == 5
expect(sample.bar) == "e"
def it_merges_with_nested_value(expect):
write(
"tmp/sample.yml",
"""
name: foo
score: 7
""",
)
sample = SampleWithDefaultsAndNesting(
name="", score=0.0, nested=NestedSample(name="bar", score=8)
)
expect(sample.name) == "foo"
expect(sample.score) == 7.0
expect(sample.nested.name) == "bar"
expect(sample.nested.score) == 8.0
def describe_nonexisting_file():
@datafile("../tmp/sample.yml")
class SampleAutomatic:
pass
SampleManual = SampleWithDefaults
def it_is_created_automatically_by_default(expect):
sample = SampleAutomatic()
expect(sample.datafile.exists).is_(True)
def it_is_not_created_automatically_when_manual(expect):
sample = SampleManual()
expect(sample.datafile.exists).is_(False)
def describe_factory_defaults():
def when_no_file(expect):
sample = SampleWithFactoryDefaults(1.2, 3.4)
expect(sample.a) == 1.2
expect(sample.b) == 3.4
expect(sample.c) == 42.0
def when_file_exists(expect):
write(
"tmp/sample.yml",
"""
a: 1.0
b: 2.0
c: 9.9
""",
)
sample = SampleWithFactoryDefaults(1.2, 3.4)
expect(sample.a) == 1.2
expect(sample.b) == 3.4
expect(sample.c) == 9.9
def describe_missing_attributes():
@xfail_with_pep_563
def when_dataclass(expect):
@dataclass
class Name:
value: str
@datafile("../tmp/samples/{self.key}.yml")
@dataclass
class Sample:
key: int
name: Name
value: float = 0.0
sample = Sample(42, Name("Widget"))
logbreak("Loading missing 'name' dataclass")
sample2 = Sample(42, Missing) # type: ignore
expect(sample2.name.value) == "Widget"
def with_none_defaults(expect):
@datafile("../tmp/sample.yml")
class Config:
name: str = None # type: ignore
channels: Dict[str, str] = None # type: ignore
config = Config.objects.get_or_create()
expect(config.name) == ""
expect(config.channels) == {}
expect(config.datafile.path.exists()) == True
|
the-stack_0_15685 | #!/usr/bin/env python3
"""
Retrieve datasets from Eurostat.
Allow for updates without checking metadata.
@author: giuseppeperonato
"""
import json
import logging
import os
import sys
import pandas as pd
import pandasdmx as sdmx
import requests
import utilities
# Constants
logging.basicConfig(level=logging.INFO)
ISRASTER = False
TRANSL = {"EL": "GR", "UK": "GB"}
QUERIES = {
6: dict(
provider="ESTAT",
stat_id="nrg_d_hhq",
dimensions=["SIEC", "NRG_BAL"],
filters={"UNIT": "GWH"},
),
9: dict(
provider="ESTAT",
stat_id="nrg_chddr2_m",
dimensions=["INDIC_NRG"],
filters={"UNIT": "NR"},
parameters={"startPeriod": 1970, "endPeriod": pd.Timestamp.today().year},
),
22: dict(
provider="ESTAT",
stat_id="nrg_ind_eff",
dimensions=["NRG_BAL"],
filters={"UNIT": "MTOE"},
),
42: dict(
provider="ESTAT", stat_id="cens_11dwob_r3", dimensions=["BUILDING", "HOUSING"]
),
47: dict(
provider="ESTAT",
stat_id="nrg_pc_204",
dimensions=["CONSOM", "TAX"],
filters={"UNIT": "KWH", "CURRENCY": "EUR"},
),
48: dict(
provider="ESTAT",
stat_id="nama_10_co3_p3",
dimensions=["COICOP"],
filters={"UNIT": "CP_MEUR", "COICOP": "CP045"},
),
49: dict(
provider="ESTAT",
stat_id="t2020_rd320",
dimensions=["SIEC"],
filters={"UNIT": "PC"},
),
50: dict(provider="ESTAT", stat_id="tgs00004", dimensions=[], filters={}),
}
QUERY_FIELDS = {
6: dict(
[],
), # empty list means all; None means do not use query fields.
9: dict(
[],
), # empty list means all; None means do not use query fields.
22: dict(
[],
), # empty list means all; None means do not use query fields.
42: dict(
[],
), # empty list means all; None means do not use query fields.
47: dict(
[],
), # empty list means all; None means do not use query fields.
48: dict(
[],
), # empty list means all; None means do not use query fields.
49: dict(
[],
), # empty list means all; None means do not use query fields.
50: dict(
[],
), # empty list means all; None means do not use query fields.
}
QUERY_PARAMETERS = {
6: {"temporal_granularity": "year", "is_tiled": False, "is_raster": False},
9: {
"temporal_granularity": "month",
"is_tiled": False,
"is_raster": False,
"levels": ["NUTS3", "NUTS2", "NUTS1", "country"],
},
22: {"temporal_granularity": "year", "is_tiled": False, "is_raster": False},
42: {
"temporal_granularity": "custom",
"is_tiled": False,
"is_raster": False,
"levels": ["NUTS3", "NUTS2", "NUTS1", "country"],
},
47: {"temporal_granularity": "semester", "is_tiled": False, "is_raster": False},
48: {"temporal_granularity": "year", "is_tiled": False, "is_raster": False},
49: {"temporal_granularity": "year", "is_tiled": False, "is_raster": False},
50: {"temporal_granularity": "year", "is_tiled": False, "is_raster": False},
}
DB_URL = utilities.DB_URL
def get(provider, stat_id, dimensions=[], filters={}, parameters={}):
"""
    Query the Eurostat database via SDMX.
    Parameters
    ----------
    provider : str
        SDMX data provider identifier (e.g. "ESTAT").
    stat_id : str
        Identifier of the Eurostat dataset to retrieve.
    dimensions : list of str, optional
        Dimensions to aggregate as the variable name. The default is [].
    filters : dict, optional
        Limit the queries to some values. The default is {}.
    parameters : dict, optional
        Additional query parameters such as startPeriod and endPeriod.
        The default is {}.
Returns
-------
enermaps_data : DataFrame
Data value following Enermaps schema.
"""
stat = sdmx.Request(provider)
metadata = stat.datastructure("DSD_{}".format(stat_id))
if len(filters) > 0:
if len(parameters) > 0:
dfs = []
for year in range(
int(parameters["startPeriod"]), int(parameters["endPeriod"]) + 1
):
logging.info("Retrieving year {}".format(year))
resp = stat.data(
stat_id,
key=filters,
params={"startPeriod": str(year), "endPeriod": str(year)},
)
data = resp.to_pandas()
if len(data) > 0:
dfs.append(data)
data = pd.concat(dfs, axis=0)
else:
resp = stat.data(stat_id, key=filters)
data = resp.to_pandas()
else:
try:
resp = stat.data(stat_id)
data = resp.to_pandas()
        except requests.exceptions.HTTPError as e:
            logging.error(e)
            return None
if len(data) > 0:
# Remove multi-index
data = data.reset_index()
# Translate codes to names using metadata codelist
data_transl = data.copy()
if provider == "OECD":
pass
else:
for key in metadata.codelist.keys():
column = key.replace("CL_", "")
if column != "GEO":
try:
data_transl[column] = data_transl[column].replace(
sdmx.to_pandas(metadata.codelist[key]).to_dict()
)
except KeyError:
pass
# Remove lines with no values
data_transl = data_transl.dropna()
# Translate frequency to hours
if "FREQ" in data_transl.columns:
freq = {
"Daily": 24,
"Weekly": 168,
"Quarterly": 2190,
"Annual": 8760,
"Semi-annual": 4380,
"Monthly": 730,
"Half-year": 4380,
}
data_transl["FREQ"] = data_transl["FREQ"].replace(freq)
# Translate biannual strings to dates
data_transl["TIME_PERIOD"] = data_transl["TIME_PERIOD"].str.replace(
"B1", "01-01"
)
data_transl["TIME_PERIOD"] = data_transl["TIME_PERIOD"].str.replace(
"B2", "07-01"
)
# Create final EnerMaps data table
enermaps_data = pd.DataFrame(
columns=[
"start_at",
"fields",
"variable",
"value",
"ds_id",
"fid",
"dt",
"z",
"israster",
]
)
# Attribute the fields that remain the same
enermaps_data[["start_at", "dt", "fid", "unit", "value"]] = data_transl[
["TIME_PERIOD", "FREQ", "GEO", "UNIT", "value"]
]
# Aggregate the dimensions into a single variable
if len(dimensions) > 0:
enermaps_data["variable"] = data_transl[dimensions].agg(" : ".join, axis=1)
else:
enermaps_data["variable"] = "default"
# Year to datetime
enermaps_data["start_at"] = pd.to_datetime(enermaps_data["start_at"])
enermaps_data["israster"] = ISRASTER
# Country codes to ISO-3166
enermaps_data["fid"] = enermaps_data["fid"].replace(TRANSL)
return enermaps_data
else:
logging.error("No data returned.")
return None
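# Illustrative sketch (not part of the original pipeline): any of the QUERIES
# defined above can be fetched directly, assuming the ESTAT SDMX endpoint is
# reachable from the current network.
#
#   example_data = get(**QUERIES[22])
#   print(example_data.head())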
if __name__ == "__main__":
datasets = pd.read_csv("datasets.csv", index_col=[0])
script_name = os.path.basename(sys.argv[0])
ds_ids, isForced = utilities.parser(script_name, datasets)
for ds_id in ds_ids:
logging.info(
"{} - {}".format(ds_id, datasets.loc[ds_id, "Title (with Hyperlink)"])
)
if utilities.datasetExists(
ds_id,
DB_URL,
):
if isForced:
utilities.removeDataset(ds_id, DB_URL)
logging.info("Removed existing dataset")
else:
logging.error("Dataset already existing. Use --force to replace it.")
if not utilities.datasetExists(
ds_id,
DB_URL,
):
data = get(**QUERIES[ds_id])
metadata = datasets.loc[ds_id].fillna("").to_dict()
# Add parameters as metadata
(
metadata["parameters"],
metadata["default_parameters"],
) = utilities.get_query_metadata(
data, QUERY_FIELDS[ds_id], QUERY_PARAMETERS[ds_id]
)
metadata = json.dumps(metadata)
dataset = pd.DataFrame(
[
{
"ds_id": ds_id,
"metadata": metadata,
"shared_id": datasets.loc[ds_id, "shared_id"],
}
]
)
utilities.toPostgreSQL(
dataset,
DB_URL,
schema="datasets",
)
data["ds_id"] = ds_id
data["israster"] = False
utilities.toPostgreSQL(
data,
DB_URL,
schema="data",
)
|
the-stack_0_15687 | """Code for bucketing annotations by time frame and document."""
import collections
import datetime
from urllib.parse import urlparse
import newrelic.agent
from pyramid import i18n
from h import links, presenters
_ = i18n.TranslationStringFactory(__package__)
class DocumentBucket:
def __init__(self, document, annotations=None):
self.annotations = []
self.tags = set()
self.users = set()
self.uri = None
self.title = document.title
presented_document = presenters.DocumentHTMLPresenter(document)
if presented_document.web_uri:
parsed = urlparse(presented_document.web_uri)
self.uri = parsed.geturl()
self.domain = parsed.netloc
else:
self.domain = _("Local file")
if annotations:
self.update(annotations)
@property
def annotations_count(self):
return len(self.annotations)
def incontext_link(self, request):
"""
Return a link to view this bucket's annotations in context.
The bouncer service and Hypothesis client do not currently provide
direct links to view a document's annotations without specifying a
specific annotation, so here we just link to the first annotation in the
document.
"""
if not len(self.annotations):
return None
return links.incontext_link(request, self.annotations[0])
def append(self, annotation):
self.annotations.append(annotation)
self.tags.update(set(annotation.tags))
self.users.add(annotation.userid)
def update(self, annotations):
for annotation in annotations:
self.append(annotation)
def __eq__(self, other):
return (
self.annotations == other.annotations
and self.tags == other.tags
and self.users == other.users
and self.uri == other.uri
and self.domain == other.domain
and self.title == other.title
)
class Timeframe:
"""
A timeframe into which annotations can be bucketed.
Any annotations that are added into a timeframe bucket will be further
bucketed by their documents, within the timeframe.
"""
def __init__(self, label, cutoff_time):
self.label = label
self.cutoff_time = cutoff_time
self.document_buckets = collections.OrderedDict()
@newrelic.agent.function_trace()
def append(self, annotation):
"""
Append an annotation to its document bucket in this timeframe.
This doesn't check whether the annotation's time is within this
timeframe, the caller is required to do that.
"""
document_bucket = self.document_buckets.get(annotation.document)
if document_bucket is None:
document_bucket = DocumentBucket(annotation.document)
self.document_buckets[annotation.document] = document_bucket
document_bucket.append(annotation)
def within_cutoff(self, annotation):
"""
Return True if annotation is within this timeframe's cutoff time.
Return ``True`` if the given annotation is newer than this timeframe's
cutoff time, meaning that the annotation can be bucketed into this
timeframe.
Return ``False`` if the given annotation is older than this timeframe's
cutoff time and the next timeframe needs to be generated in order to
bucket the annotation.
Note that this method returning ``True`` does not necessarily mean that
the annotation *should* be bucketed in this timeframe, since the
annotation may also be within the cutoff times of previous timeframes.
It's up to the caller to handle this.
"""
return annotation.updated >= self.cutoff_time
def __repr__(self):
return '{class_} "{label}" with {n} document buckets'.format(
class_=self.__class__, label=self.label, n=len(self.document_buckets)
)
class TimeframeGenerator:
def __init__(self):
self.timeframes = [
Timeframe(_("Last 7 days"), utcnow() - datetime.timedelta(days=7))
]
@newrelic.agent.function_trace()
def next(self, annotation):
"""
Return the next timeframe to be used for bucketing annotations.
:param annotation: the next annotation to be bucketed, the returned
timeframe is guaranteed to be the correct timeframe for this
annotation
"""
while self.timeframes:
timeframe = self.timeframes.pop(0)
if timeframe.within_cutoff(annotation):
return timeframe
cutoff_time = datetime.datetime(
year=annotation.updated.year, month=annotation.updated.month, day=1
)
timeframe = Timeframe(annotation.updated.strftime("%b %Y"), cutoff_time)
return timeframe
@newrelic.agent.function_trace()
def bucket(annotations):
"""
Return the given annotations bucketed by timeframe and document.
:param annotations: A chronologically-ordered list of annotations.
This list of annotations is assumed to be sorted most recently updated
annotation first, otherwise the bucketing algorithm will not return the
right results.
"""
if not annotations:
return []
generator = TimeframeGenerator()
timeframes = [generator.next(annotations[0])]
for annotation in annotations:
if not timeframes[-1].within_cutoff(annotation):
timeframes.append(generator.next(annotation))
timeframes[-1].append(annotation)
return timeframes
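# Illustrative sketch (not part of the original module), assuming `annotations`
# is a list of annotation objects sorted most recently updated first:
#
#   for timeframe in bucket(annotations):
#       for doc_bucket in timeframe.document_buckets.values():
#           print(timeframe.label, doc_bucket.title, doc_bucket.annotations_count)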
def utcnow():
return datetime.datetime.utcnow()
|
the-stack_0_15689 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""Bot to find all pages on the wiki with mixed latin and cyrilic alphabets."""
#
# (C) Pywikibot team, 2006-2014
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, print_function, unicode_literals
__version__ = '$Id: 1a58aab22d569f164b5ef034d9301f003cf2ab20 $'
import codecs
import os
import re
import sys
import pywikibot
from pywikibot import i18n
from pywikibot.data import api
from pywikibot.tools import first_lower, first_upper
from scripts.category import CategoryMoveRobot as CategoryMoveBot
if sys.version_info[0] > 2:
xrange = range
#
# Permutations code was taken from
# https://code.activestate.com/recipes/190465/
#
def xuniqueCombinations(items, n):
if n == 0:
yield []
else:
for i in xrange(len(items)):
for cc in xuniqueCombinations(items[i + 1:], n - 1):
yield [items[i]] + cc
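# For example, xuniqueCombinations(['a', 'b', 'c'], 2) yields
# ['a', 'b'], ['a', 'c'] and ['b', 'c'].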
# End of permutation code
#
#
# Windows Concole colors
# This code makes this script Windows ONLY!!!
# Feel free to adapt it to another platform
#
# Adapted from https://code.activestate.com/recipes/496901/
#
STD_OUTPUT_HANDLE = -11
FOREGROUND_BLUE = 0x01 # text color contains blue.
FOREGROUND_GREEN = 0x02 # text color contains green.
FOREGROUND_RED = 0x04 # text color contains red.
FOREGROUND_INTENSITY = 0x08 # text color is intensified.
BACKGROUND_BLUE = 0x10 # background color contains blue.
BACKGROUND_GREEN = 0x20 # background color contains green.
BACKGROUND_RED = 0x40 # background color contains red.
BACKGROUND_INTENSITY = 0x80 # background color is intensified.
FOREGROUND_WHITE = FOREGROUND_BLUE | FOREGROUND_GREEN | FOREGROUND_RED
try:
import ctypes
std_out_handle = ctypes.windll.kernel32.GetStdHandle(STD_OUTPUT_HANDLE)
except:
std_out_handle = None
def SetColor(color):
if std_out_handle:
try:
return ctypes.windll.kernel32.SetConsoleTextAttribute(
std_out_handle, color)
except:
pass
if color == FOREGROUND_BLUE:
print('(b:', end=' ')
if color == FOREGROUND_GREEN:
print('(g:', end=' ')
if color == FOREGROUND_RED:
print('(r:', end=' ')
# end of console code
class CaseChecker(object):
"""Case checker."""
# These words are always in one language, even though they could be typed
# in both
alwaysInLocal = [u'СССР', u'Как', u'как']
alwaysInLatin = [u'II', u'III']
localUpperLtr = u'ЁІЇЎАБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯҐ'
localLowerLtr = u'ёіїўабвгдежзийклмнопрстуфхцчшщъыьэюяґ'
localLtr = localUpperLtr + localLowerLtr
localSuspects = u'АВЕКМНОРСТХІЁЇаеорсухіёї'
latinSuspects = u'ABEKMHOPCTXIËÏaeopcyxiëï'
# possibly try to fix one character mistypes in an alternative keyboard
# layout
localKeyboard = u'йцукенгшщзфывапролдячсмить'
latinKeyboard = u'qwertyuiopasdfghjklzxcvbnm'
romanNumChars = u'IVXLMC'
# all letters that may be used as suffixes after roman numbers: "Iый"
romannumSuffixes = localLowerLtr
romanNumSfxPtrn = re.compile(
u'^[' + romanNumChars + ']+[' + localLowerLtr + ']+$')
whitelists = {
'ru': u'ВП:КЛ/Проверенные',
}
latLtr = u'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
lclClrFnt = u'<font color=green>'
latClrFnt = u'<font color=brown>'
suffixClr = u'</font>'
wordBreaker = re.compile(r'[ _\-/\|#[\]():]')
stripChars = u' \t,'
titles = True
links = False
aplimit = None
apfrom = u''
title = None
replace = False
stopAfter = -1
wikilog = None
wikilogfile = 'wikilog.txt'
failedTitles = 'failedTitles.txt'
nosuggestions = 'nosuggestions.txt'
doFailed = False
titleList = None
autonomous = False
namespaces = []
filterredir = 'nonredirects'
def __init__(self):
for arg in pywikibot.handle_args():
if arg.startswith('-from'):
if arg.startswith('-from:'):
self.apfrom = arg[6:]
else:
self.apfrom = pywikibot.input(u'Which page to start from: ')
elif arg.startswith('-reqsize:'):
self.aplimit = int(arg[9:])
elif arg == '-links':
self.links = True
elif arg == '-linksonly':
self.links = True
self.titles = False
elif arg == '-replace':
self.replace = True
elif arg == '-redir':
self.filterredir = 'all'
elif arg == '-redironly':
self.filterredir = 'redirects'
elif arg.startswith('-limit:'):
self.stopAfter = int(arg[7:])
elif arg == '-autonomous' or arg == '-a':
self.autonomous = True
elif arg.startswith('-ns:'):
self.namespaces.append(int(arg[4:]))
elif arg.startswith('-wikilog:'):
self.wikilogfile = arg[9:]
elif arg.startswith('-failedlog:'):
self.failedTitles = arg[11:]
elif arg == '-failed':
self.doFailed = True
else:
pywikibot.output(u'Unknown argument %s.' % arg)
pywikibot.showHelp()
sys.exit()
if self.namespaces == [] and not self.doFailed:
if self.apfrom == u'':
# 0 should be after templates ns
self.namespaces = [14, 10, 12, 0]
else:
self.namespaces = [0]
if self.aplimit is None:
self.aplimit = 200 if self.links else 'max'
if not self.doFailed:
self.queryParams = {'action': 'query',
'generator': 'allpages',
'gaplimit': self.aplimit,
'gapfilterredir': self.filterredir}
else:
self.queryParams = {'action': 'query'}
if self.apfrom != u'':
pywikibot.output(u'Argument "-from" is ignored with "-failed"')
propParam = 'info'
if self.links:
propParam += '|links|categories'
self.queryParams['pllimit'] = 'max'
self.queryParams['cllimit'] = 'max'
self.queryParams['prop'] = propParam
self.site = pywikibot.Site()
if len(self.localSuspects) != len(self.latinSuspects):
raise ValueError(u'Suspects must be the same size')
if len(self.localKeyboard) != len(self.latinKeyboard):
raise ValueError(u'Keyboard info must be the same size')
if not os.path.isabs(self.wikilogfile):
self.wikilogfile = pywikibot.config.datafilepath(self.wikilogfile)
self.wikilog = self.OpenLogFile(self.wikilogfile)
if not os.path.isabs(self.failedTitles):
self.failedTitles = pywikibot.config.datafilepath(self.failedTitles)
if self.doFailed:
with codecs.open(self.failedTitles, 'r', 'utf-8') as f:
self.titleList = [self.Page(t) for t in f]
self.failedTitles += '.failed'
self.lclToLatDict = dict([(ord(self.localSuspects[i]),
self.latinSuspects[i])
for i in xrange(len(self.localSuspects))])
self.latToLclDict = dict([(ord(self.latinSuspects[i]),
self.localSuspects[i])
for i in xrange(len(self.localSuspects))])
if self.localKeyboard is not None:
self.lclToLatKeybDict = dict(
[(ord(self.localKeyboard[i]),
self.latinKeyboard[i])
for i in xrange(len(self.localKeyboard))])
self.latToLclKeybDict = dict(
[(ord(self.latinKeyboard[i]),
self.localKeyboard[i])
for i in xrange(len(self.localKeyboard))])
else:
self.lclToLatKeybDict = {}
self.latToLclKeybDict = {}
badPtrnStr = u'([%s][%s]|[%s][%s])' \
% (self.latLtr, self.localLtr, self.localLtr, self.latLtr)
self.badWordPtrn = re.compile(u'[%s%s]*%s[%s%s]*'
% (self.latLtr, self.localLtr,
badPtrnStr, self.latLtr,
self.localLtr))
# Get whitelist
self.knownWords = set()
self.seenUnresolvedLinks = set()
# TODO: handle "continue"
if self.site.code in self.whitelists:
wlpage = self.whitelists[self.site.code]
pywikibot.output(u'Loading whitelist from %s' % wlpage)
wlparams = {
'action': 'query',
'prop': 'links',
'titles': wlpage,
'redirects': '',
'indexpageids': '',
'pllimit': 'max',
}
req = api.Request(site=self.site, parameters=wlparams)
data = req.submit()
if len(data['query']['pageids']) == 1:
pageid = data['query']['pageids'][0]
links = data['query']['pages'][pageid]['links']
allWords = [nn for n in links
for nn in self.FindBadWords(n['title'])]
self.knownWords = set(allWords)
else:
raise ValueError(u'The number of pageids is not 1')
pywikibot.output(u'Loaded whitelist with %i items'
% len(self.knownWords))
if len(self.knownWords) > 0:
pywikibot.log(u'Whitelist: %s'
% u', '.join([self.MakeLink(i, False)
for i in self.knownWords]))
else:
pywikibot.output(u'Whitelist is not known for language %s'
% self.site.code)
def RunQuery(self, params):
while True:
# Get data
req = api.Request(**params)
data = req.submit()
# Process received data
yield data
# Clear any continuations first
if 'clcontinue' in params:
del params['clcontinue']
if 'plcontinue' in params:
del params['plcontinue']
if 'query-continue' not in data:
if 'gapcontinue' in params:
del params['gapcontinue']
break
qc = data['query-continue']
# First continue properties only, once done, continue with allpages
if 'categories' in qc or 'links' in qc:
if 'categories' in qc:
params.update(qc['categories'])
if 'links' in qc:
params.update(qc['links'])
elif 'allpages' in qc:
params.update(qc['allpages'])
else:
raise ValueError(u'Unexpected query-continue values: %s' % qc)
continue
def Run(self):
try:
self.lastLetter = ''
if not self.doFailed:
for namespace in self.namespaces:
self.currentTitle = None
self.queryParams['gapnamespace'] = namespace
self.queryParams['gapfrom'] = self.apfrom
for data in self.RunQuery(self.queryParams):
self.ProcessDataBlock(data)
else:
self.currentTitle = None
batchSize = 10
for batchStart in range(0, len(self.titleList), batchSize):
self.queryParams['titles'] = self.titleList[
batchStart:batchStart + batchSize]
for data in self.RunQuery(self.queryParams):
self.ProcessDataBlock(data)
except:
pywikibot.output(u'Exception at Title = %s, Next = %s'
% (self.currentTitle, self.apfrom))
try:
import traceback
pywikibot.output(traceback.format_exc())
except:
pywikibot.output(u'Unable to print exception info')
raise
def ProcessDataBlock(self, data):
if 'query' not in data or 'pages' not in data['query']:
return
firstItem = True
for pageID, page in data['query']['pages'].items():
printed = False
title = page['title']
self.currentTitle = title
if 'missing' in page:
continue
if firstItem:
if self.lastLetter != title[0]:
pywikibot.ui.output('Processing %s\n' % title)
self.lastLetter = title[0]
firstItem = False
if self.titles:
err = self.ProcessTitle(title)
if err:
changed = False
if self.replace:
if len(err[1]) == 1:
newTitle = err[1][0]
editSummary = i18n.twtranslate(
self.site, "casechecker-rename")
dst = self.Page(newTitle)
if 'redirect' in page:
src = self.Page(title)
redir = src.getRedirectTarget()
redirTitle = redir.title(asLink=True,
textlink=True)
if not dst.exists():
src.move(newTitle, editSummary,
movesubpages=True)
changed = True
replErrors = False
for p in src.getReferences(
follow_redirects=False):
if p.namespace() == 2:
continue
oldText = p.text
newText = self.ReplaceLink(oldText, title,
newTitle)
if not self.PutNewPage(
p, newText, [
self.MakeMoveSummary(title,
newTitle)]):
replErrors = True
if not replErrors:
editSummary = i18n.twtranslate(
self.site, "casechecker-delete-summary")
newText = i18n.twtranslate(
self.site,
"casechecker-delete-reason", redirTitle)
if newText:
src.text = u'{{delete}}\n\n' + newText
src.save(editSummary, minor=False)
changed = True
elif not dst.exists():
src = self.Page(title)
if page['ns'] == 14:
dst = self.Page(newTitle)
bot = CategoryMoveBot(
src.title(withNamespace=False),
dst.title(withNamespace=False),
self.autonomous,
editSummary + u' ' +
self.MakeMoveSummary(title, newTitle),
True)
bot.run()
else:
src.move(newTitle, editSummary,
movesubpages=True)
changed = True
if not changed:
if len(err[1]) > 0:
self.AppendLineToLog(self.failedTitles, title)
else:
self.AddNoSuggestionTitle(title)
self.WikiLog(u"* " + err[0])
printed = True
if self.links:
allLinks = None
if 'links' in page:
allLinks = page['links']
if 'categories' in page:
if allLinks:
allLinks = allLinks + page['categories']
else:
allLinks = page['categories']
if allLinks:
pageObj = None
pageTxt = None
msg = []
foundSuggestions = False
for l in allLinks:
ltxt = l['title']
err = self.ProcessTitle(ltxt)
if err:
if len(err[1]) > 0:
foundSuggestions = True
elif self.AddNoSuggestionTitle(ltxt):
continue
newTitle = None
if self.replace:
newTitle = self.PickTarget(title, ltxt, err[1])
if newTitle:
if pageObj is None:
pageObj = self.Page(title)
pageTxt = pageObj.get()
msg.append(self.MakeMoveSummary(ltxt,
newTitle))
pageTxt = self.ReplaceLink(pageTxt, ltxt,
newTitle)
if not newTitle:
if not printed:
self.WikiLog(u"* %s: link to %s"
% (self.MakeLink(title, False),
err[0]))
printed = True
else:
self.WikiLog(u"** link to %s" % err[0])
if pageObj is not None:
if self.PutNewPage(pageObj, pageTxt, msg):
# done, no need to log anything
foundSuggestions = False
if foundSuggestions:
self.AppendLineToLog(self.failedTitles, title)
if self.stopAfter > 0:
self.stopAfter -= 1
if self.stopAfter == 0:
raise ValueError(u'Stopping because we are done')
def WikiLog(self, text):
pywikibot.output(text)
self.wikilog.write(text + u'\n')
self.wikilog.flush()
def FindBadWords(self, title):
for m in self.badWordPtrn.finditer(title):
yield title[m.span()[0]:m.span()[1]]
def ProcessTitle(self, title):
badWords = list(self.FindBadWords(title))
if len(badWords) > 0:
# Allow known words, allow any roman numerals with local suffixes
badWords = set([i for i in badWords
if i not in self.knownWords and
self.romanNumSfxPtrn.match(i) is not None])
if len(badWords) == 0 or self.Page(title).isImage():
return
count = 0
ambigBadWords = set()
ambigBadWordsCount = 0
mapLcl = {}
mapLat = {}
for badWord in badWords:
# See if it would make sense to treat the whole word as either
            # Cyrillic or Latin
mightBeLat = mightBeLcl = True
for l in badWord:
if l in self.localLtr:
if mightBeLat and l not in self.localSuspects:
mightBeLat = False
else:
if mightBeLcl and l not in self.latinSuspects:
mightBeLcl = False
if l not in self.latLtr:
raise ValueError(u'Assert failed')
# Some words are well known and frequently mixed-typed
if mightBeLcl and mightBeLat:
if badWord in self.alwaysInLocal:
mightBeLat = False
elif badWord in self.alwaysInLatin:
mightBeLcl = False
if mightBeLcl:
mapLcl[badWord] = badWord.translate(self.latToLclDict)
if mightBeLat:
mapLat[badWord] = badWord.translate(self.lclToLatDict)
if mightBeLcl and mightBeLat:
ambigBadWords.add(badWord)
# Cannot do len(ambigBadWords) because they might be duplicates
ambigBadWordsCount += 1
if not mightBeLcl and not mightBeLat:
# try to match one of the knownWords
bwLen = len(badWord)
kw = [w for w in self.knownWords if len(w) == bwLen]
for p in xrange(bwLen):
if len(kw) == 0:
break
c = badWord[p]
co = ord(c)
if co in self.latToLclDict:
c2 = self.latToLclDict[co]
elif co in self.lclToLatDict:
c2 = self.lclToLatDict[co]
else:
c2 = None
kw = [w for w in kw if p < len(w) and
(w[p] == c or (c2 is not None and w[p] == c2))]
if len(kw) > 1:
pywikibot.output(u"Word '%s' could be treated as more than "
u"one known words" % badWord)
elif len(kw) == 1:
mapLcl[badWord] = kw[0]
count += 1
infoText = self.MakeLink(title)
possibleAlternatives = []
if len(mapLcl) + len(mapLat) - ambigBadWordsCount < count:
# We cannot auto-translate - offer a list of suggested words
suggestions = list(mapLcl.values()) + list(mapLat.values())
if len(suggestions) > 0:
infoText += u", word suggestions: " + u', '.join(
[self.ColorCodeWord(t) for t in suggestions])
else:
infoText += u", no suggestions"
else:
# Replace all unambiguous bad words
            for k, v in list(mapLat.items()) + list(mapLcl.items()):
if k not in ambigBadWords:
title = title.replace(k, v)
if len(ambigBadWords) == 0:
                # There is no ambiguity, we can safely convert
possibleAlternatives.append(title)
infoText += u", convert to " + self.MakeLink(title)
else:
# Try to pick 0, 1, 2, ..., len(ambiguous words) unique
# combinations from the bad words list, and convert just the
                # picked words to Cyrillic, while converting all the other
                # words to Latin characters.
for itemCntToPick in xrange(0, len(ambigBadWords) + 1):
title2 = title
for uc in xuniqueCombinations(list(ambigBadWords),
itemCntToPick):
wordsToLat = ambigBadWords.copy()
for bw in uc:
title2 = title2.replace(bw, mapLcl[bw])
wordsToLat.remove(bw)
for bw in wordsToLat:
title2 = title2.replace(bw, mapLat[bw])
possibleAlternatives.append(title2)
if len(possibleAlternatives) > 0:
infoText += u", can be converted to " + u', '.join(
[self.MakeLink(t) for t in possibleAlternatives])
else:
infoText += u", no suggestions"
return (infoText, possibleAlternatives)
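    # Choose the best replacement title from the candidates: prefer a single existing
    # page, then redirects that all share one target; otherwise ask the operator
    # (unless running autonomously, in which case nothing is returned).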
def PickTarget(self, title, original, candidates):
if len(candidates) == 0:
return
if len(candidates) == 1:
return candidates[0]
pagesDontExist = []
pagesRedir = {}
pagesExist = []
for newTitle in candidates:
dst = self.Page(newTitle)
if not dst.exists():
pagesDontExist.append(newTitle)
elif dst.isRedirectPage():
pagesRedir[newTitle] = dst.getRedirectTarget().title()
else:
pagesExist.append(newTitle)
if len(pagesExist) == 1:
return pagesExist[0]
elif len(pagesExist) == 0 and len(pagesRedir) > 0:
if len(pagesRedir) == 1:
return list(pagesRedir.keys())[0]
t = None
for v in pagesRedir.values():
if not t:
t = v # first item
elif t != v:
break
else:
# all redirects point to the same target
# pick the first one, doesn't matter what it is
return list(pagesRedir.keys())[0]
if not self.autonomous:
pywikibot.output(u'Could not auto-decide for page %s. Which link '
u'should be chosen?' % self.MakeLink(title, False))
pywikibot.output(u'Original title: ', newline=False)
self.ColorCodeWord(original + "\n", True)
count = 1
for t in candidates:
if t in pagesDontExist:
msg = u'missing'
elif t in pagesRedir:
msg = u'Redirect to ' + pagesRedir[t]
else:
msg = u'page exists'
self.ColorCodeWord(u' %d: %s (%s)\n' % (count, t, msg), True)
count += 1
answers = [('skip', 's')] + [(str(i), i) for i in range(1, count)]
choice = pywikibot.input_choice(u'Which link to choose?', answers)
if choice != 's':
return candidates[int(choice) - 1]
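    # Highlight the local-alphabet and Latin runs of a word, either printing it with
    # console colors (toScreen=True) or returning a <b>-wrapped markup string.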
def ColorCodeWord(self, word, toScreen=False):
if not toScreen:
res = u"<b>"
lastIsCyr = word[0] in self.localLtr
if lastIsCyr:
if toScreen:
SetColor(FOREGROUND_GREEN)
else:
res += self.lclClrFnt
else:
if toScreen:
SetColor(FOREGROUND_RED)
else:
res += self.latClrFnt
for l in word:
if l in self.localLtr:
if not lastIsCyr:
if toScreen:
SetColor(FOREGROUND_GREEN)
else:
res += self.suffixClr + self.lclClrFnt
lastIsCyr = True
elif l in self.latLtr:
if lastIsCyr:
if toScreen:
SetColor(FOREGROUND_RED)
else:
res += self.suffixClr + self.latClrFnt
lastIsCyr = False
if toScreen:
pywikibot.output(l, newline=False)
else:
res += l
if toScreen:
SetColor(FOREGROUND_WHITE)
else:
return res + self.suffixClr + u"</b>"
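    # Record a title for which no suggestion could be made: count its backlinks via the
    # API and append a summary line to the no-suggestions log. Returns True if the
    # title was already seen before.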
def AddNoSuggestionTitle(self, title):
if title in self.seenUnresolvedLinks:
return True
self.seenUnresolvedLinks.add(title)
params = {
'action': 'query',
'list': 'backlinks',
'bltitle': title,
'bllimit': '50',
}
req = api.Request(site=self.site, parameters=params)
data = req.submit()
cl = 0
redirs = 0
if 'backlinks' in data['query']:
bl = data['query']['backlinks']
cl = len(bl)
redirs = len([i for i in bl if 'redirect' in i])
if cl > 0 and 'query-continue' in data:
count = '50+'
else:
count = str(cl if cl > 0 else 'no backlinks')
self.AppendLineToLog(self.nosuggestions, u'* %s (%s%s)'
% (self.MakeLink(title), count, u', %d redirects'
% redirs if redirs > 0 else u''))
return False
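    # Save the corrected page text with a translated edit summary; log an error if the
    # text is unchanged or the page cannot be saved. Returns True only on a successful save.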
def PutNewPage(self, pageObj, pageTxt, msg):
title = pageObj.title(asLink=True, textlink=True)
coloredMsg = u', '.join([self.ColorCodeWord(m) for m in msg])
if pageObj.text == pageTxt:
self.WikiLog(u"* Error: Text replacement failed in %s (%s)"
% (self.MakeLink(title, False), coloredMsg))
else:
pywikibot.output(u'Case Replacements: %s' % u', '.join(msg))
pageObj.text = pageTxt
try:
pageObj.save(
u'%s: %s'
% (i18n.twtranslate(
self.site,
"casechecker-replacement-summary"),
self.site.mediawiki_message(u"comma-separator").join(msg)))
return True
except KeyboardInterrupt:
raise
except (pywikibot.LockedPage, pywikibot.PageNotSaved):
self.WikiLog(u"* Error: Could not save updated page %s (%s)"
% (self.MakeLink(title, False), coloredMsg))
return False
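    # Build a translated edit summary describing a link replacement from one title to another.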
def MakeMoveSummary(self, fromTitle, toTitle):
return i18n.twtranslate(self.site, "casechecker-replacement-linklist",
{'source': fromTitle, 'target': toTitle})
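    # Build a wikilink for the title, prefixing ':' outside the main namespace and
    # optionally appending a color-coded rendition of the title.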
def MakeLink(self, title, colorcode=True):
prf = u'' if self.Page(title).namespace() == 0 else u':'
cc = u'|««« %s »»»' % self.ColorCodeWord(title) if colorcode else u''
return u"[[%s%s%s]]" % (prf, title, cc)
def OpenLogFile(self, filename):
try:
return codecs.open(filename, 'a', 'utf-8')
except IOError:
return codecs.open(filename, 'w', 'utf-8')
def AppendLineToLog(self, filename, text):
with self.OpenLogFile(filename) as f:
f.write(text + u'\n')
def Page(self, title):
return pywikibot.Page(self.site, title)
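    # Replace oldtxt with newtxt inside text, word by word, matching both the
    # lowercase-first and uppercase-first variants of each word.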
def ReplaceLink(self, text, oldtxt, newtxt):
frmParts = [s.strip(self.stripChars)
for s in self.wordBreaker.split(oldtxt)]
toParts = [s.strip(self.stripChars)
for s in self.wordBreaker.split(newtxt)]
if len(frmParts) != len(toParts):
raise ValueError(u'Splitting parts do not match counts')
for i in xrange(0, len(frmParts)):
if len(frmParts[i]) != len(toParts[i]):
raise ValueError(u'Splitting parts do not match word length')
if len(frmParts[i]) > 0:
text = text.replace(first_lower(frmParts[i]), first_lower(toParts[i]))
text = text.replace(first_upper(frmParts[i]), first_upper(toParts[i]))
return text
if __name__ == "__main__":
bot = CaseChecker()
bot.Run()
|
the-stack_0_15696 | #!/usr/bin/env python
import rospy
import numpy as np
import matplotlib.pyplot as plt
from sklearn.neighbors import NearestNeighbors # for KNN algorithm
from scipy.optimize import differential_evolution
import copy
import pandas as pd
from nav_msgs.msg import OccupancyGrid, MapMetaData
from tf.transformations import quaternion_from_euler
import rosbag
import rospkg
from map_matcher import send_map_ros_msg, rotate_map, ParticleFilterMapMatcher, likelihood, DEMapMatcher, RANSACMapMatcher, OccupancyGrid2LandmarksArray
rospack = rospkg.RosPack()
packadge_path = rospack.get_path('sequential_map_merging')
file_path = packadge_path + '/maps/map10v3.bag'
origin_publisher = rospy.Publisher('origin_map', OccupancyGrid, queue_size = 10)
global_publisher = rospy.Publisher('global_map', OccupancyGrid, queue_size = 10)
target_publisher = rospy.Publisher('target_map', OccupancyGrid, queue_size = 10)
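# Offline map-matching pipeline: replay the two robots' /map topics from a rosbag,
# convert each OccupancyGrid into landmark point arrays, align robot 2's map to
# robot 1's with a particle-filter map matcher, and publish the original, target
# and merged maps for visualization.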
if __name__ == '__main__':
bag = rosbag.Bag(file_path)
init, init1, init2 = 1, 1, 1
err_pf = []
err_de = []
rospy.init_node('offline_map_matcher')
for topic, msg, t in bag.read_messages(topics=['/ABot1/map', '/ABot2/map']):
if rospy.is_shutdown():
break
if topic == '/ABot1/map':
map1_msg = msg
landMarksArray1, landMarksArray1_empty = OccupancyGrid2LandmarksArray(map1_msg, filter_map = 1000)
scale1 = msg.info.resolution
            # OccupancyGrid2LandmarksArray returns the string "empty" when no landmarks are found;
            # an explicit isinstance check avoids element-wise comparison when an array is returned
            if not isinstance(landMarksArray1, str):
if init1 == 1:
cm1 = np.sum(np.transpose(landMarksArray1),axis=1)/len(landMarksArray1)
landMarksArray1 = landMarksArray1 - cm1
landMarksArray1_empty = landMarksArray1_empty - cm1
nbrs = NearestNeighbors(n_neighbors= 1, algorithm='kd_tree').fit(landMarksArray1)
nbrs_empty = NearestNeighbors(n_neighbors= 1, algorithm='kd_tree').fit(landMarksArray1_empty)
init1 = 0
else:
continue
if topic == '/ABot2/map':
map2_msg = msg
landMarksArray2, landMarksArray2_empty = OccupancyGrid2LandmarksArray(map2_msg)
scale2 = msg.info.resolution
            if not isinstance(landMarksArray2, str):
if init2 == 1:
cm2 = np.sum(np.transpose(landMarksArray2),axis=1)/len(landMarksArray2)
landMarksArray2 = landMarksArray2 - cm2
landMarksArray2_empty = landMarksArray2_empty - cm2
init2 = 0
else:
continue
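        # Once both maps have produced landmarks: initialize the particle filter on the
        # first pass, then run its predict/update cycle for every new pair of maps.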
if init == 1 and init1 == 0 and init2 == 0:
model = ParticleFilterMapMatcher(nbrs, landMarksArray2, Np = 1000)
#X_de = DEMapMatcher(nbrs, landMarksArray2)
init = 0
elif init == 0 and init1 == 0 and init2 == 0:
model.predict()
#model.update(landMarksArray2, nbrs, nbrs_empty, scale1)
model.update(landMarksArray2, nbrs, origin_empty_map_nbrs=None , res = scale1)
#X_de = DEMapMatcher(nbrs, landMarksArray2, X_de)
#X_ransac = RANSACMapMatcher(landMarksArray1, landMarksArray2)
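            # After enough updates have accumulated, resample the particles and refine the
            # relative pose estimate; the maps are then rotated, merged, and published.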
if model.indicate == model.N_history:
model.resample()
X_pf = model.refinement(landMarksArray2, nbrs, res = scale1, Np = 2000)
print(X_pf)
rotated_map = rotate_map(landMarksArray2, X_pf)
rotated_empty_map = rotate_map(landMarksArray2_empty, X_pf)
estimated_global_map = np.concatenate([landMarksArray1,rotated_map], axis=0)
estimated_global_empty_map = np.concatenate([landMarksArray1_empty,rotated_empty_map], axis=0)
send_map_ros_msg(estimated_global_map, estimated_global_empty_map, global_publisher,frame_id='pf_map', resolution=scale1)
send_map_ros_msg(landMarksArray1, landMarksArray1_empty, origin_publisher,frame_id='/robot1/map', resolution=scale1)
send_map_ros_msg(landMarksArray2,landMarksArray2_empty , target_publisher,frame_id='/robot2/map', resolution=scale2)
|