id | content
---|---
50399
|
from mmcv.runner import checkpoint
from mmdet.apis.inference import init_detector, LoadImage, inference_detector
import easymd
config = 'config.py'
#checkpoints = './checkpoints/pseg_r101_r50_latest.pth'
checkpoints = "path/to/pth"
img = '000000322864.jpg'
model = init_detector(config, checkpoint=checkpoints)
results = inference_detector(model, './datasets/coco/val2017/' + img)
|
50448
|
import argparse
from pathlib import Path
import tarfile
from zipfile import ZipFile
from scanf import scanf
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('tarball_output')
parser.add_argument('tex_files',nargs='*')
args = parser.parse_args()
files = []
for tex_file in args.tex_files:
tex_file_path = Path(tex_file)
if tex_file_path.exists():
files.append(str(tex_file_path))
pdf_path = tex_file_path.with_suffix('.pdf')
if pdf_path.exists() and str(pdf_path) not in files:
files.append(str(pdf_path))
# parse dep_file generated with \RequirePackage{snapshot}
dep_file = tex_file_path.with_suffix('.dep')
if dep_file.exists():
with open(str(dep_file),'r') as f:
for line in f:
if '*{file}' not in line:
continue
patterns = (
    '*{file} {%s}{0000/00/00 v0.0}',
    '*{file} {%s} {0000/00/00 v0.0}',
    '*{file} {%s}{Graphic v0.0}',
    '*{file} {%s} {Graphic v0.0}',
)
match = None
for pattern in patterns:
    match = scanf(pattern, line, collapseWhitespace=True)
    if match is not None:
        break
if match is None:
    continue
filename, = match
path = Path(filename)
if path.suffix in ['.png','.pdf','.tex','.bbl','.cls'] and path.exists():
if str(path) not in files:
files.append(str(path))
print("FILES IN TARBALL:\n")
for myfile in files:
print(myfile)
# make tarball from files
output_path = Path(args.tarball_output)
if output_path.suffix == '.gz':
with tarfile.open(args.tarball_output, 'w:gz', dereference=True) as tar:
for this_file in files:
tar.add(this_file)
elif output_path.suffix == '.zip':
with ZipFile(args.tarball_output, 'w') as myzip:
for this_file in files:
myzip.write(this_file)
else:
raise Exception('unrecognized output suffix')
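# Example invocation (script name hypothetical): the output suffix selects the archive
# format ('.gz' builds a gzipped tarball, '.zip' a zip file):
#
#   python make_tarball.py paper.tar.gz main.tex supplement.tex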
|
50458
|
import numpy as np
from arbol import aprint
from dexp.processing.utils.scatter_gather_i2v import scatter_gather_i2v
from dexp.utils.backends import Backend
from dexp.utils.testing.testing import execute_both_backends
from dexp.utils.timeit import timeit
@execute_both_backends
def test_scatter_gather_i2v(ndim=3, length_xy=128, splits=4):
xp = Backend.get_xp_module()
rng = np.random.default_rng()
image1 = rng.uniform(0, 1, size=(length_xy,) * ndim)
image2 = rng.uniform(0, 1, size=(length_xy,) * ndim)
def f(x, y):
return xp.stack([x.min(), x.max()]), xp.stack([y.max(), y.mean(), y.min()])
with timeit("scatter_gather(f)"):
chunks = (length_xy // splits,) * ndim
result1, result2 = scatter_gather_i2v(f, (image1, image2), tiles=chunks, margins=8)
assert result1.ndim == ndim + 1
assert result2.ndim == ndim + 1
assert result1.shape[:-1] == result2.shape[:-1]
assert result1.shape[-1] == 2
assert result2.shape[-1] == 3
result1 -= (0, 1) # expected stats from uniform distribution
result1 = Backend.to_numpy(result1)
error = np.linalg.norm(result1.ravel(), ord=1) / result1.size
aprint(f"Error = {error}")
assert error < 0.001
result2 -= (1, 0.5, 0) # expected stats from uniform distribution
result2 = Backend.to_numpy(result2)
error = np.linalg.norm(result2.ravel(), ord=1) / result2.size
aprint(f"Error = {error}")
assert error < 0.001
|
50540
|
import os
from keras.layers import Input, Dense, Conv2D, MaxPooling2D, UpSampling2D
from keras.models import Model
from keras import backend as K
def model():
input_img = Input(shape=(6, 20, 20))
x = Conv2D(filters = 32, kernel_size = (3, 3), strides = (1,1), padding = 'same', activation='relu')(input_img)
x = MaxPooling2D((2, 2), padding='same')(x)
x = Conv2D(16, (3, 3), activation='relu', padding='same')(x)
x = MaxPooling2D((2, 2), padding='same')(x)
x = Conv2D(8, (3, 3), activation='relu', padding='same')(x)
encoded = MaxPooling2D((2, 2), padding='same')(x)
# at this point the input has been downsampled by three rounds of 2x2 max-pooling (the often-quoted (4, 4, 8), i.e. 128-dimensional, shape only holds for 28x28 inputs)
x = Conv2D(8, (3, 3), activation='relu', padding='same')(encoded)
x = UpSampling2D((2, 2))(x)
x = Conv2D(16, (3, 3), activation='relu', padding='same')(x)
x = UpSampling2D((2, 2))(x)
x = Conv2D(32, (3, 3), activation='relu')(x)
x = UpSampling2D((2, 2))(x)
decoded = Conv2D(6, (3, 3), activation='sigmoid', padding='same')(x)
autoencoder = Model(input_img, decoded)
autoencoder.compile(optimizer='Adam', loss='mean_squared_error')
return autoencoder, encoded, decoded
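# A quick shape sanity check for the (6, 20, 20) input used above: build the model and
# print its summary (a minimal sketch relying only on the imports above).
if __name__ == '__main__':
    autoencoder, encoded, decoded = model()
    autoencoder.summary()  # check whether the decoder output shape matches the input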
|
50568
|
import time
from celery import chain
from celery_app import app
@app.task
def add(x, y):
return x + y
'''
ref. http://docs.celeryq.org/en/latest/userguide/tasks.html#avoid-launching-synchronous-subtasks
'''
def chain_demo(x, y):
# add_demo -> mul_demo -> insert_db_demo
chain(add_demo.s(x, y), mul_demo.s(10), insert_db_demo.s())()
@app.task
def add_demo(x, y):
time.sleep(3)
return x + y
@app.task
def mul_demo(x, y):
time.sleep(3)
return x * y
@app.task(ignore_result=True)
def insert_db_demo(result):
print('insert db , result {}'.format(result))
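# Worked example: chain_demo(2, 3) enqueues add_demo(2, 3) -> mul_demo(result, 10) -> insert_db_demo(result),
# so with a broker and worker running, the final task prints "insert db , result 50" ((2 + 3) * 10).
# A minimal sketch, assuming this module is importable as `tasks` (hypothetical name):
#
#   from tasks import chain_demo
#   chain_demo(2, 3)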
|
50580
|
import numpy as np
X = 2 * np.random.randn(100, 5)
y = 2.5382 * np.cos(X[:, 3]) + X[:, 0] ** 2 - 0.5
from pysr import PySRRegressor
model = PySRRegressor(
niterations=40,
binary_operators=["+", "*"],
unary_operators=[
"cos",
"exp",
"sin",
"inv(x) = 1/x", # Custom operator (julia syntax)
],
model_selection="best",
loss="loss(x, y) = (x - y)^2", # Custom loss function (julia syntax)
)
model.fit(X, y)
print(model)
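# After fitting, the selected equation can be reused like a scikit-learn model. A minimal
# sketch; method names as in recent PySR releases, worth checking against the installed version:
y_pred = model.predict(X)  # evaluate the selected equation on the inputs
best_equation = model.sympy()  # the selected equation as a SymPy expression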
|
50591
|
import unittest
import os
import warnings
from ml4ir.base.data import ranklib_helper
import pandas as pd
warnings.filterwarnings("ignore")
INPUT_FILE = "ml4ir/applications/ranking/tests/data/ranklib/train/sample.txt"
OUTPUT_FILE = "ml4ir/applications/ranking/tests/data/ranklib/train/sample_ml4ir.csv"
QUERY_ID_NAME = 'qid'
RELEVANCE_NAME = 'relevance'
KEEP_ADDITIONAL_INFO = 1
GL_2_CLICKS = 1
NON_ZERO_FEATURES_ONLY = 0
class TestRanklibConversion(unittest.TestCase):
def setUp(self):
pass
def test_conversion(self):
"""Convert ranklib dataset to a csv"""
ranklib_helper.ranklib_to_csv(INPUT_FILE, OUTPUT_FILE, KEEP_ADDITIONAL_INFO,
GL_2_CLICKS, NON_ZERO_FEATURES_ONLY, QUERY_ID_NAME, RELEVANCE_NAME)
df = pd.read_csv(OUTPUT_FILE)
assert QUERY_ID_NAME in df.columns and RELEVANCE_NAME in df.columns
assert df[QUERY_ID_NAME].nunique() == 49
if KEEP_ADDITIONAL_INFO == 1:
assert len(df.columns) >= 138
else:
assert len(df.columns) == 138
if GL_2_CLICKS == 1:
assert sorted(list(df[RELEVANCE_NAME].unique())) == [0, 1]
def tearDown(self):
# Delete output file
os.remove(OUTPUT_FILE)
if __name__ == "__main__":
unittest.main()
|
50626
|
from django.apps import AppConfig
class OpenbookAuthConfig(AppConfig):
name = 'openbook_auth'
|
50627
|
from typing import Dict
from lightbus.exceptions import (
UnknownApi,
InvalidApiRegistryEntry,
EventNotFound,
MisconfiguredApiOptions,
InvalidApiEventConfiguration,
)
__all__ = ["Api", "Event"]
class ApiRegistry:
def __init__(self):
self._apis: Dict[str, Api] = dict()
def add(self, api: "Api"):
if isinstance(api, type):
raise InvalidApiRegistryEntry(
"An attempt was made to add a type to the API registry. This "
"is probably because you are trying to add the API class, rather "
"than an instance of the API class.\n"
"\n"
"Use bus.client.register_api(MyApi()), rather than bus.client.register_api(MyApi)"
)
self._apis[api.meta.name] = api
def get(self, name) -> "Api":
try:
return self._apis[name]
except KeyError:
raise UnknownApi(
"An API named '{}' was requested from the registry but the "
"registry does not recognise it. Maybe the incorrect API name "
"was specified, or maybe the API has not been registered.".format(name)
)
def remove(self, name) -> None:
try:
del self._apis[name]
except KeyError:
raise UnknownApi(
"An attempt was made to remove an API named '{}' from the registry, but the API "
"could not be found. Maybe the incorrect API name "
"was specified, or maybe the API has not been registered.".format(name)
)
def public(self):
return [api for api in self._apis.values() if not api.meta.internal]
def internal(self):
return [api for api in self._apis.values() if api.meta.internal]
def all(self):
return list(self._apis.values())
def names(self):
return list(self._apis.keys())
class ApiOptions:
name: str
internal: bool = False
version: int = 1
def __init__(self, options):
for k, v in options.items():
if not k.startswith("_"):
setattr(self, k, v)
class ApiMetaclass(type):
""" API Metaclass
Validates options in the API's Meta class and populates the
API class' `meta` attribute.
"""
def __init__(cls, name, bases=None, dict_=None):
is_api_base_class = name == "Api" and not bases
if is_api_base_class:
super(ApiMetaclass, cls).__init__(name, bases, dict_)
else:
options = dict_.get("Meta", None)
if options is None:
raise MisconfiguredApiOptions(
f"API class {name} does not contain a class named 'Meta'. Each API definition "
f"must contain a child class named 'Meta' which can contain configurations options. "
f"For example, the 'name' option is required and specifies "
f"the name used to access the API on the bus."
)
cls.sanity_check_options(name, options)
cls.meta = ApiOptions(cls.Meta.__dict__.copy())
super(ApiMetaclass, cls).__init__(name, bases, dict_)
if cls.meta.name == "default" or cls.meta.name.startswith("default."):
raise MisconfiguredApiOptions(
f"API class {name} is named 'default', or starts with 'default.'. "
f"This is a reserved name and is not allowed, please change it to something else."
)
def sanity_check_options(cls, name, options):
if not getattr(options, "name", None):
raise MisconfiguredApiOptions(
"API class {} does not specify a name option with its "
"'Meta' options."
"".format(name)
)
class Api(metaclass=ApiMetaclass):
class Meta:
name = None
def get_event(self, name) -> "Event":
event = getattr(self, name, None)
if isinstance(event, Event):
return event
else:
raise EventNotFound("Event named {}.{} could not be found".format(self, name))
def __str__(self):
return self.meta.name
class Event:
def __init__(self, parameters=tuple()):
# Ensure you update the __copy__() method if adding other instance variables below
if isinstance(parameters, str):
raise InvalidApiEventConfiguration(
f"You appear to have passed a string value of {repr(parameters)} "
f"for your API's event's parameters. This should be a list or a tuple, "
f"not a string. You probably missed a comma when defining your "
f"tuple of parameter names."
)
self.parameters = parameters
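# The checks above imply the intended usage: a concrete API subclass declares a `Meta` inner
# class with a `name`, exposes its events as Event attributes, and an *instance* (never the
# class itself) is registered with the bus client. A minimal sketch:
class SupportCaseApi(Api):
    case_created = Event(parameters=("id", "sender"))

    class Meta:
        name = "support.case"

# bus.client.register_api(SupportCaseApi())  # register an instance, not the class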
|
50629
|
import logging
from aioscrapy.utils.reqser import request_to_dict, request_from_dict
from .serializ import PickleCompat
logger = logging.getLogger(__name__)
_to_str = lambda x: x if isinstance(x, str) else str(x)
class Base(object):
"""Per-spider base queue class"""
def __init__(self, server, spider, key, serializer=None):
"""Initialize per-spider redis queue.
Parameters
----------
server : StrictRedis
Redis client instance.
spider : Spider
Scrapy spider instance.
key: str
Redis key where to put and get messages.
serializer : object
Serializer object with ``loads`` and ``dumps`` methods.
"""
if serializer is None:
# Backward compatibility.
# TODO: deprecate pickle.
serializer = PickleCompat
if not hasattr(serializer, 'loads'):
raise TypeError("serializer does not implement 'loads' function: %r"
% serializer)
if not hasattr(serializer, 'dumps'):
raise TypeError("serializer '%s' does not implement 'dumps' function: %r"
% serializer)
self.server = server
self.spider = spider
self.key = key % {'spider': spider.name}
self.serializer = serializer
def _encode_request(self, request):
"""Encode a request object"""
obj = request_to_dict(request, self.spider)
return self.serializer.dumps(obj)
def _decode_request(self, encoded_request):
"""Decode an request previously encoded"""
obj = self.serializer.loads(encoded_request)
return request_from_dict(obj, self.spider)
def __len__(self):
"""Return the length of the queue"""
raise Exception('please use len()')
async def len(self):
raise NotImplementedError
async def push(self, request):
"""Push a request"""
raise NotImplementedError
async def pop(self, timeout=0):
"""Pop a request"""
raise NotImplementedError
async def clear(self):
"""Clear queue/stack"""
await self.server.delete(self.key)
class FifoQueue(Base):
"""Per-spider FIFO queue"""
async def len(self):
return await self.server.llen(self.key)
async def push(self, request):
"""Push a request"""
await self.server.lpush(self.key, self._encode_request(request))
async def pop(self, timeout=0):
"""Pop a request"""
if timeout > 0:
data = await self.server.brpop(self.key, timeout)
if isinstance(data, tuple):
data = data[1]
else:
data = await self.server.rpop(self.key)
if data:
return self._decode_request(data)
class PriorityQueue(Base):
"""Per-spider priority queue abstraction using redis' sorted set"""
async def len(self):
return await self.server.zcard(self.key)
async def push(self, request):
"""Push a request"""
data = self._encode_request(request)
score = request.priority
# We don't use zadd method as the order of arguments change depending on
# whether the class is Redis or StrictRedis, and the option of using
# kwargs only accepts strings, not bytes.
await self.server.zadd(self.key, {data: score})
async def pop(self, timeout=0):
"""
Pop a request
timeout is not supported in this queue class
"""
# use atomic range/remove using multi/exec
async with self.server.pipeline(transaction=True) as pipe:
results, count = await (
pipe.zrange(self.key, 0, 0)
.zremrangebyrank(self.key, 0, 0)
.execute()
)
if results:
return self._decode_request(results[0])
class LifoQueue(Base):
"""Per-spider LIFO queue."""
async def len(self):
return await self.server.llen(self.key)
async def push(self, request):
"""Push a request"""
await self.server.lpush(self.key, self._encode_request(request))
async def pop(self, timeout=0):
"""Pop a request"""
if timeout > 0:
data = await self.server.blpop(self.key, timeout)
if isinstance(data, tuple):
data = data[1]
else:
data = await self.server.lpop(self.key)
if data:
return self._decode_request(data)
# TODO: Deprecate the use of these names.
SpiderQueue = FifoQueue
SpiderStack = LifoQueue
SpiderPriorityQueue = PriorityQueue
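# The Base docstring above only requires the serializer to expose `loads` and `dumps`, so a
# JSON-backed stand-in would pass the hasattr checks in __init__ (a sketch, not part of this
# codebase; pickle remains the safer default for arbitrary request payloads):
import json

class JsonCompat:
    loads = staticmethod(json.loads)
    dumps = staticmethod(json.dumps)

# queue = FifoQueue(server, spider, key='%(spider)s:requests', serializer=JsonCompat)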
|
50657
|
import unittest
from argo_client.interaction import ArgoException
from pathlib import Path
import io
import os
import time
import cryptol
import cryptol.cryptoltypes
from cryptol.single_connection import *
from cryptol.bitvector import BV
from BitVector import * #type: ignore
# Tests of the core server functionality, less focused
# on intricate Cryptol specifics per se.
class BasicServerTests(unittest.TestCase):
@classmethod
def setUpClass(self):
self.c = cryptol.connect(verify=False)
def test_extend_search_path(self):
# Test that extending the search path acts as expected w.r.t. loads
c = self.c
c.extend_search_path(str(Path('tests','cryptol','test-files', 'test-subdir')))
c.load_module('Bar').result()
ans1 = c.eval("theAnswer").result()
ans2 = c.eval("id theAnswer").result()
self.assertEqual(ans1, ans2)
def test_logging(self):
c = self.c
c.extend_search_path(str(Path('tests','cryptol','test-files', 'test-subdir')))
c.load_module('Bar').result()
log_buffer = io.StringIO()
c.logging(on=True, dest=log_buffer)
_ = c.eval("theAnswer").result()
contents = log_buffer.getvalue()
self.assertEqual(len(contents.strip().splitlines()), 2,
msg=f'log contents: {str(contents.strip().splitlines())}')
_ = c.eval("theAnswer").result()
def test_check_timeout(self):
c = self.c
c.load_file(str(Path('tests','cryptol','test-files', 'examples','AES.cry'))).result()
t1 = time.time()
with self.assertRaises(ArgoException):
c.check("\\(bv : [256]) -> ~ (~ (~ (~bv))) == bv", num_tests="all", timeout=1.0).result()
t2 = time.time()
self.assertLess(t2 - t1, 2.0)
t1 = time.time()
with self.assertRaises(ArgoException):
c.check("\\(bv : [256]) -> ~ (~ (~ (~bv))) == bv", num_tests="all", timeout=5.0).result()
t2 = time.time()
self.assertLess(t2 - t1, 7)
t1 = time.time()
c.check("\\(bv : [256]) -> ~ (~ (~ (~bv))) == bv", num_tests=10, timeout=5.0).result()
t2 = time.time()
self.assertLess(t2 - t1, 5)
def test_interrupt(self):
# Check if this test is using a local server, if not we assume it's a remote HTTP server
if os.getenv('CRYPTOL_SERVER') is not None:
c = self.c
c.load_file(str(Path('tests','cryptol','test-files', 'examples','AES.cry')))
t1 = time.time()
c.check("\\(bv : [256]) -> ~ (~ (~ (~bv))) == bv", num_tests="all", timeout=30.0)
# ^ .result() intentionally omitted so we don't wait on its result and can interrupt
# it on the next line. We add a timeout just in case, so the test eventually fails rather than hanging.
time.sleep(.5)
c.interrupt()
self.assertTrue(c.safe("aesEncrypt").result())
t2 = time.time()
self.assertLess(t2 - t1, 15.0) # ensure the interrupt ended things and not the timeout
elif os.getenv('CRYPTOL_SERVER_URL') is not None:
c = self.c
other_c = cryptol.connect(verify=False)
# Since this is the HTTP server, due to client implementation details
# the requests don't return until they get a response, so we fork
# to interrupt the server
newpid = os.fork()
if newpid == 0:
time.sleep(5)
other_c.interrupt()
os._exit(0)
c.load_file(str(Path('tests','cryptol','test-files', 'examples','AES.cry')))
t1 = time.time()
c.check("\\(bv : [256]) -> ~ (~ (~ (~bv))) == bv", num_tests="all", timeout=60.0)
self.assertTrue(c.safe("aesEncrypt").result())
t2 = time.time()
self.assertLess(t2 - t1, 20.0) # ensure the interrupt ended things and not the timeout
else:
# Otherwise fail... since this shouldn't be possible
self.assertFalse("Impossible")
def test_prove_timeout(self):
c = self.c
c.load_file(str(Path('tests','cryptol','test-files', 'examples','AES.cry')))
pt = BV(size=128, value=0x3243f6a8885a308d313198a2e0370734)
key = BV(size=128, value=<KEY>)
ct = c.call("aesEncrypt", (pt, key)).result()
expected_ct = BV(size=128, value=0x3925841d02dc09fbdc118597196a0b32)
self.assertEqual(ct, expected_ct)
decrypted_ct = c.call("aesDecrypt", (ct, key)).result()
self.assertEqual(pt, decrypted_ct)
pt = BV(size=128, value=0x00112233445566778899aabbccddeeff)
key = BV(size=128, value=0x000102030405060708090a0b0c0d0e0f)
ct = c.call("aesEncrypt", (pt, key)).result()
expected_ct = BV(size=128, value=0x69c4e0d86a7b0430d8cdb78070b4c55a)
self.assertEqual(ct, expected_ct)
decrypted_ct = c.call("aesDecrypt", (ct, key)).result()
self.assertEqual(pt, decrypted_ct)
self.assertTrue(c.safe("aesEncrypt").result())
self.assertTrue(c.safe("aesDecrypt").result())
self.assertTrue(c.check("AESCorrect").result().success)
t1 = time.time()
with self.assertRaises(ArgoException):
c.prove("AESCorrect", timeout=1.0).result()
t2 = time.time()
# check the timeout worked
self.assertGreaterEqual(t2 - t1, 1.0)
self.assertLess(t2 - t1, 5.0)
# make sure things are still working
self.assertTrue(c.safe("aesEncrypt").result())
# set the timeout at the connection level
c.timeout = 1.0
t1 = time.time()
with self.assertRaises(ArgoException):
c.prove("AESCorrect").result()
t2 = time.time()
# check the timeout worked
self.assertGreaterEqual(t2 - t1, 1.0)
self.assertLess(t2 - t1, 5.0)
# make sure things are still working
c.timeout = None
self.assertTrue(c.safe("aesEncrypt").result())
c.timeout = 1.0
t1 = time.time()
with self.assertRaises(ArgoException):
# override timeout with longer time
c.prove("AESCorrect", timeout=5.0).result()
t2 = time.time()
self.assertGreaterEqual(t2 - t1, 5.0)
self.assertLess(t2 - t1, 10.0)
# make sure things are still working
c.timeout = None
self.assertTrue(c.safe("aesEncrypt").result())
class BasicLoggingServerTests(unittest.TestCase):
# Connection to cryptol
log_buffer = None
@classmethod
def setUpClass(self):
self.log_buffer = io.StringIO()
connect(verify=False, log_dest = self.log_buffer)
def test_logging(self):
extend_search_path(str(Path('tests','cryptol','test-files', 'test-subdir')))
load_module('Bar')
_ = cry_eval("theAnswer")
content_lines = self.log_buffer.getvalue().strip().splitlines()
self.assertEqual(len(content_lines), 6,
msg=f'log contents: {str(content_lines)}')
if __name__ == "__main__":
unittest.main()
|
50674
|
from __future__ import annotations
import typing
from ctc import spec
from ctc.protocols import balancer_utils
from .. import analytics_spec
async def async_compute_buybacks(
blocks: list[int], verbose: bool = False
) -> analytics_spec.MetricGroup:
return {
'name': 'Buybacks',
'metrics': {
'buybacks_usd': (await async_compute_tribe_buybacks_usd(blocks)),
},
}
async def async_compute_tribe_buybacks_usd(
blocks: list[int], swaps: typing.Optional[spec.DataFrame] = None
) -> analytics_spec.MetricData:
from ctc.toolbox import pd_utils
# load swaps
if swaps is None:
swaps = await balancer_utils.async_get_pool_swaps(
pool_address='0xc1382fe6e17bcdbc3d35f73f5317fbf261ebeecd'
)
swaps = typing.cast(
spec.DataFrame,
swaps.droplevel('transaction_index').droplevel('log_index'),
)
# filter tribe buys
fei = '0x956f47f50a910163d8bf957cf5846d573e7f87ca'
tribe_buys: typing.Any = swaps[swaps['arg__tokenOut'] == fei] # type: ignore
tribe_buys = tribe_buys['arg__amountOut'].map(float) / 1e18
cummulative_tribe_buys = tribe_buys.cumsum()
# cummulative_tribe_buys = evm.interpolate_block_series(
# start_block=min(blocks),
# pre_fill_value=0,
# series=cummulative_tribe_buys,
# end_block=max(blocks),
# )
cummulative_tribe_buys = pd_utils.interpolate_series(
series=cummulative_tribe_buys,
start_index=min(blocks),
end_index=max(blocks),
pre_fill_value=0,
)
# filter tribe sells
tribe_sells_df = swaps[swaps['arg__tokenIn'] == fei] # type: ignore
if len(tribe_sells_df) > 0:
tribe_sells = tribe_sells_df['arg__amountIn'].map(float) / 1e18
cummulative_tribe_sells = tribe_sells.cumsum()
# cummulative_tribe_sells = evm.interpolate_block_series(
# start_block=min(blocks),
# pre_fill_value=0,
# series=cummulative_tribe_sells,
# end_block=max(blocks),
# )
cummulative_tribe_sells = pd_utils.interpolate_series(
series=cummulative_tribe_sells,
start_index=min(blocks),
end_index=max(blocks),
pre_fill_value=0,
)
net_tribe_buys = cummulative_tribe_buys - cummulative_tribe_sells
else:
net_tribe_buys = cummulative_tribe_buys
return {
'name': 'Buybacks USD',
'values': [net_tribe_buys[block] for block in blocks],
'units': 'FEI',
}
|
50689
|
class NamedValue:
# defining __slots__ in a mixin doesn't play nicely with builtin types
# so a low overhead approach would have to use collections.namedtuple
# style templated code generation
def __new__(cls, *args, **kwds):
name, *args = args
self = super().__new__(cls, *args, **kwds)
self._name = name
return self
def __init__(self, *args, **kwds):
name, *args = args
super().__init__(*args, **kwds)
@property
def __name__(self):
return self._name
def __repr__(self):
# repr() is updated to include the name and type info
return "{}({!r}, {})".format(type(self).__name__,
self.__name__,
super().__repr__())
def __str__(self):
# str() is unchanged, even if it relies on the repr() fallback
base = super()
base_str = base.__str__
if base_str.__objclass__ is object:
return base.__repr__()
return base_str()
# Example usage
>>> class NamedFloat(NamedValue, float):
... pass
...
>>> import math
>>> tau = NamedFloat('tau', 2*math.pi)
>>> tau
NamedFloat(tau, 6.283185307179586)
>>> print(tau)
6.283185307179586
>>> class NamedList(NamedValue, list):
... pass
...
>>> data = NamedList('data', [])
>>> data
NamedList('data', [])
>>> print(data)
[]
|
50716
|
import collections
from supriya.enums import CalculationRate
from supriya.synthdefs import UGen
class PulseDivider(UGen):
"""
::
>>> pulse_divider = supriya.ugens.PulseDivider.ar(
... div=2,
... start=0,
... trigger=0,
... )
>>> pulse_divider
PulseDivider.ar()
"""
### CLASS VARIABLES ###
_ordered_input_names = collections.OrderedDict(
    [('trigger', 0), ('div', 2), ('start', 0)]
)
_valid_calculation_rates = None
### INITIALIZER ###
def __init__(
self,
calculation_rate=None,
div=2,
start=0,
trigger=0,
):
UGen.__init__(
self,
calculation_rate=calculation_rate,
div=div,
start=start,
trigger=trigger,
)
### PUBLIC METHODS ###
@classmethod
def ar(
cls,
div=2,
start=0,
trigger=0,
):
"""
Constructs an audio-rate PulseDivider.
::
>>> pulse_divider = supriya.ugens.PulseDivider.ar(
... div=2,
... start=0,
... trigger=0,
... )
>>> pulse_divider
PulseDivider.ar()
Returns ugen graph.
"""
import supriya.synthdefs
calculation_rate = supriya.CalculationRate.AUDIO
ugen = cls._new_expanded(
calculation_rate=calculation_rate,
div=div,
start=start,
trigger=trigger,
)
return ugen
@classmethod
def kr(
cls,
div=2,
start=0,
trigger=0,
):
"""
Constructs a control-rate PulseDivider.
::
>>> pulse_divider = supriya.ugens.PulseDivider.kr(
... div=2,
... start=0,
... trigger=0,
... )
>>> pulse_divider
PulseDivider.kr()
Returns ugen graph.
"""
import supriya.synthdefs
calculation_rate = supriya.CalculationRate.CONTROL
ugen = cls._new_expanded(
calculation_rate=calculation_rate,
div=div,
start=start,
trigger=trigger,
)
return ugen
### PUBLIC PROPERTIES ###
@property
def div(self):
"""
Gets `div` input of PulseDivider.
::
>>> pulse_divider = supriya.ugens.PulseDivider.ar(
... div=2,
... start=0,
... trigger=0,
... )
>>> pulse_divider.div
2.0
Returns ugen input.
"""
index = tuple(self._ordered_input_names).index('div')
return self._inputs[index]
@property
def start(self):
"""
Gets `start` input of PulseDivider.
::
>>> pulse_divider = supriya.ugens.PulseDivider.ar(
... div=2,
... start=0,
... trigger=0,
... )
>>> pulse_divider.start
0.0
Returns ugen input.
"""
index = tuple(self._ordered_input_names).index('start')
return self._inputs[index]
@property
def trigger(self):
"""
Gets `trigger` input of PulseDivider.
::
>>> pulse_divider = supriya.ugens.PulseDivider.ar(
... div=2,
... start=0,
... trigger=0,
... )
>>> pulse_divider.trigger
0.0
Returns ugen input.
"""
index = tuple(self._ordered_input_names).index('trigger')
return self._inputs[index]
|
50760
|
import time,os,math,inspect,re,sys,random,argparse
from env import SenseEnv
from torch.autograd import Variable
import numpy as np
from itertools import count
from collections import namedtuple
from tensorboardX import SummaryWriter
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.autograd as autograd
writer = SummaryWriter()
SavedAction = namedtuple('SavedAction', ['action', 'value'])
class Policy(nn.Module):
def __init__(self,observation_space_n,action_space_n):
super(Policy, self).__init__()
self.affine1 = nn.Linear(observation_space_n, 256)
self.action1 = nn.Linear(256, 128)
self.value1 = nn.Linear(256, 128)
self.action_head = nn.Linear(128, action_space_n)
self.value_head = nn.Linear(128, 1)
self.saved_actions = []
self.rewards = []
self.init_weights()
def init_weights(self):
self.affine1.weight.data.uniform_(-0.1, 0.1)
self.action1.weight.data.uniform_(-0.1, 0.1)
self.value1.weight.data.uniform_(-0.1, 0.1)
def forward(self, x):
x = F.relu(self.affine1(x))
xa = F.relu(self.action1(x))
xv = F.relu(self.value1(x))
action_scores = self.action_head(xa)
state_values = self.value_head(xv)
return F.softmax(action_scores), state_values
class CNN(nn.Module):
def __init__(self,classification_n):
super(CNN, self).__init__()
self.layer1 = nn.Sequential(
nn.Conv2d(1, 16, kernel_size=5, padding=2),
nn.BatchNorm2d(16),
nn.ReLU(),
nn.MaxPool2d(2))
self.layer2 = nn.Sequential(
nn.Conv2d(16, 32, kernel_size=5, padding=2),
nn.BatchNorm2d(32),
nn.ReLU(),
nn.MaxPool2d(2))
#self.fc = nn.Linear(7*7*32, 2)
self.fc = nn.Linear(80000, classification_n)
def forward(self, x):
x = x.unsqueeze(1).float()
out = self.layer1(x)
out = self.layer2(out)
#print("size before",out.size())
out = out.view(out.size(0), -1)
#print("size after",out.size())
out = self.fc(out)
return out
parser = argparse.ArgumentParser(description='SenseNet actor-critic example')
parser.add_argument('--gamma', type=float, default=0.99, metavar='G', help='discount factor (default: 0.99)')
parser.add_argument('--epsilon', type=float, default=0.6, metavar='G', help='epsilon value for random action (default: 0.6)')
parser.add_argument('--seed', type=int, default=42, metavar='N', help='random seed (default: 42)')
parser.add_argument('--batch_size', type=int, default=42, metavar='N', help='batch size (default: 42)')
parser.add_argument('--log-interval', type=int, default=10, metavar='N', help='interval between training status logs (default: 10)')
parser.add_argument('--render', action='store_true', help='render the environment')
parser.add_argument('--debug', action='store_true', help='turn on debug mode')
parser.add_argument('--gpu', action='store_true', help='use GPU')
parser.add_argument('--log', type=str, help='log experiment to tensorboard')
parser.add_argument('--model_path', type=str, help='path to store/retrieve model at')
parser.add_argument('--mode', type=str, default="train", help='train/test/all model')
args = parser.parse_args()
def select_action(state,n_actions,epsilon=0.6):
if np.random.rand() < epsilon:
return np.random.choice(n_actions)
else:
state = torch.from_numpy(state).float().unsqueeze(0)
probs, state_value = model(Variable(state))
action = probs.multinomial()
model.saved_actions.append(SavedAction(action, state_value))
return action.data[0][0]
def finish_episode():
R = 0
saved_actions = model.saved_actions
value_loss = 0
rewards = []
for r in model.rewards[::-1]:
R = r + args.gamma * R
rewards.insert(0, R)
rewards = torch.Tensor(rewards)
rewards = (rewards - rewards.mean()) / (rewards.std() + np.finfo(np.float32).eps)
for (action, value), r in zip(saved_actions, rewards):
reward = r - value.data[0,0]
action.reinforce(reward)
value_loss += F.smooth_l1_loss(value, Variable(torch.Tensor([r])))
optimizer.zero_grad()
final_nodes = [value_loss] + list(map(lambda p: p.action, saved_actions))
gradients = [torch.ones(1)] + [None] * len(saved_actions)
autograd.backward(final_nodes, gradients)
optimizer.step()
del model.rewards[:]
del model.saved_actions[:]
#train
env = SenseEnv(vars(args))
print("action space: ",env.action_space())
model = Policy(env.observation_space(),env.action_space_n())
cnn = CNN(env.classification_n())
if args.gpu and torch.cuda.is_available():
model.cuda()
cnn.cuda()
if args.model_path:
if os.path.exists(args.model_path+"/model.pkl"):
print("loading pretrained models")
model.load_state_dict(torch.load(args.model_path+"/model.pkl"))
cnn.load_state_dict(torch.load(args.model_path+"/cnn.pkl"))
criterion = nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
classifier_criterion = nn.CrossEntropyLoss()
classifier_optimizer = torch.optim.Adam(cnn.parameters(), lr=0.001)
running_reward = 0
batch = []
labels = []
total_steps = 0
if args.mode == "train" or args.mode == "all":
for i_episode in count(1000):
observation = env.reset()
print("episode: ", i_episode)
for t in range(1000):
action = select_action(observation,env.action_space_n(),args.epsilon)
observation, reward, done, info = env.step(action)
model.rewards.append(reward)
if env.is_touching():
print("touching!")
#print("batch size", len(batch))
if len(batch) > args.batch_size:
#TODO GPU support
#batch = torch.from_numpy(np.asarray(batch))
batch = torch.LongTensor(torch.from_numpy(np.asarray(batch)))
labels = torch.from_numpy(np.asarray(labels))
#labels = torch.LongTensor(torch.from_numpy(np.asarray(labels)))
if args.gpu and torch.cuda.is_available():
batch = batch.cuda()
labels = labels.cuda()
batch = Variable(batch)
labels = Variable(labels)
classifier_optimizer.zero_grad()
outputs = cnn(batch)
loss = classifier_criterion(outputs, labels)
loss.backward()
classifier_optimizer.step()
print ('Loss: %.4f' %(loss.data[0]))
if args.log:
writer.add_scalar(args.log + "/loss",loss.data[0],total_steps)
batch = []
labels = []
else:
batch.append(observation.reshape(200,200))
labels.append(env.class_label)
if done:
break
running_reward = running_reward * 0.99 + t * 0.01
print("running reward ", running_reward)
total_steps +=1
finish_episode()
if i_episode % args.log_interval == 0:
if args.log:
writer.add_scalar(args.log+"/reward",running_reward,total_steps)
print('Episode {}\tLast length: {:5d}\tAverage length: {:.2f}'.format(i_episode, t, running_reward))
if running_reward > 5000: #env.spec.reward_threshold:
print("Solved! Running reward is now {} and the last episode runs to {} time steps!".format(running_reward, t))
break
if args.model_path:
torch.save(model.state_dict(), os.path.join(args.model_path, 'model.pkl'))
torch.save(cnn.state_dict(), os.path.join(args.model_path, 'cnn.pkl'))
elif args.mode == "test" or args.mode == "all":
#test
test_labels = []
predicted_labels = []
steps_to_guess = []
correct = 0
total = 0
max_steps = 500
for i_episode in range(100):
guesses = []
print("testing on a new object")
observation = env.reset()
for t in range(max_steps):
action = select_action(observation,env.action_space_n(),args.epsilon)
observation, reward, done, info = env.step(action)
model.rewards.append(reward)
# classify when touching (or at the final step, if no guess has been made yet)
if (t >= max_steps-1 and len(guesses) == 0) or env.is_touching():
x = [observation.reshape(200,200)]
x = torch.LongTensor(torch.from_numpy(np.asarray(x)))
x = Variable(x)
output = cnn(x)
prob, predicted = torch.max(output.data, 1)
correct += int(predicted[0][0] == env.class_label)
total += 1
print("predicted ", predicted[0][0], " with prob ", prob[0][0], " correct answer is: ",env.class_label)
print('Accuracy of the network: %d %%' % (100 * correct / total ))
else:
for i_episode in range(100):
observation = env.reset()
for t in range(1000):
env.render()
action = np.random.choice(env.action_space_n())
observation,reward,done,info = env.step(action)
print(observation)
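# Example invocation (script name hypothetical; requires the SenseEnv environment imported above):
#
#   python actor_critic.py --mode train --epsilon 0.6 --batch_size 42 --log run1 --model_path ./models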
|
50793
|
import unittest
from lib_db import DBClient
class TestDBClient(unittest.TestCase):
client: DBClient
def setUp(self) -> None:
self.client = DBClient()
def test_integrity(self):
all_peer_ids = set(self.client.get_all_peer_ids())
online_peer_ids = set(self.client.get_online_peer_ids())
self.assertTrue(online_peer_ids.issubset(all_peer_ids))
offline_peer_ids = set(self.client.get_offline_peer_ids())
self.assertTrue(offline_peer_ids.issubset(all_peer_ids))
all_entering_peer_ids = set(self.client.get_all_entering_peer_ids())
self.assertTrue(all_entering_peer_ids.issubset(all_peer_ids))
self.assertTrue(all_entering_peer_ids.isdisjoint(online_peer_ids))
self.assertTrue(all_entering_peer_ids.isdisjoint(offline_peer_ids))
all_leaving_peer_ids = set(self.client.get_all_leaving_peer_ids())
self.assertTrue(all_leaving_peer_ids.issubset(all_peer_ids))
self.assertTrue(all_leaving_peer_ids.isdisjoint(online_peer_ids))
self.assertTrue(all_leaving_peer_ids.isdisjoint(offline_peer_ids))
# The following isn't necessarily true, but it is very unlikely not to hold
self.assertTrue(len(all_entering_peer_ids.intersection(all_leaving_peer_ids)) > 0)
only_entering_peer_ids = set(self.client.get_only_entering_peer_ids())
self.assertTrue(only_entering_peer_ids.issubset(all_peer_ids))
self.assertTrue(only_entering_peer_ids.isdisjoint(online_peer_ids))
self.assertTrue(only_entering_peer_ids.isdisjoint(offline_peer_ids))
self.assertTrue(only_entering_peer_ids.isdisjoint(all_leaving_peer_ids))
self.assertTrue(only_entering_peer_ids.issubset(all_entering_peer_ids))
only_leaving_peer_ids = set(self.client.get_only_leaving_peer_ids())
self.assertTrue(only_leaving_peer_ids.issubset(all_peer_ids))
self.assertTrue(only_leaving_peer_ids.isdisjoint(online_peer_ids))
self.assertTrue(only_leaving_peer_ids.isdisjoint(offline_peer_ids))
self.assertTrue(only_leaving_peer_ids.isdisjoint(all_entering_peer_ids))
self.assertTrue(only_leaving_peer_ids.issubset(all_leaving_peer_ids))
ephemeral_peer_ids = set(self.client.get_ephemeral_peer_ids())
self.assertTrue(ephemeral_peer_ids.issubset(all_entering_peer_ids))
self.assertTrue(ephemeral_peer_ids.issubset(all_leaving_peer_ids))
dangling_peer_ids = set(self.client.get_dangling_peer_ids())
self.assertTrue(dangling_peer_ids.issubset(all_peer_ids))
self.assertTrue(dangling_peer_ids.isdisjoint(online_peer_ids))
self.assertTrue(dangling_peer_ids.isdisjoint(offline_peer_ids))
self.assertTrue(dangling_peer_ids.issubset(all_entering_peer_ids))
self.assertTrue(dangling_peer_ids.issubset(all_leaving_peer_ids))
oneoff_peer_ids = set(self.client.get_oneoff_peer_ids())
self.assertTrue(oneoff_peer_ids.issubset(all_peer_ids))
self.assertTrue(oneoff_peer_ids.isdisjoint(online_peer_ids))
self.assertTrue(oneoff_peer_ids.isdisjoint(offline_peer_ids))
self.assertTrue(oneoff_peer_ids.isdisjoint(dangling_peer_ids))
self.assertTrue(oneoff_peer_ids.issubset(all_entering_peer_ids))
self.assertTrue(oneoff_peer_ids.issubset(all_leaving_peer_ids))
calculated_all_peer_ids = oneoff_peer_ids | online_peer_ids | offline_peer_ids | only_entering_peer_ids | only_leaving_peer_ids | dangling_peer_ids
self.assertEqual(len(all_peer_ids), len(calculated_all_peer_ids))
self.assertEqual(all_peer_ids, calculated_all_peer_ids)
def test_get_all_peer_ids_for_all_agent_versions(self):
all_agent_versions = self.client.get_all_agent_versions()
all_peer_ids_by_all_agent_versions = set(self.client.get_peer_ids_for_agent_versions(all_agent_versions))
online_peer_ids = set(self.client.get_online_peer_ids())
all_entering_peer_ids = set(self.client.get_all_entering_peer_ids())
dangling_peer_ids = set(self.client.get_dangling_peer_ids())
self.assertTrue(online_peer_ids.issubset(all_peer_ids_by_all_agent_versions))
self.assertTrue(all_entering_peer_ids.issubset(all_peer_ids_by_all_agent_versions))
self.assertTrue(dangling_peer_ids.issubset(all_peer_ids_by_all_agent_versions))
# Now there can be nodes that started their session before
# the beginning of the time interval, were then "crawlable" (we
# could extract the agent version) and then left.
left_peer_ids = all_peer_ids_by_all_agent_versions - online_peer_ids - all_entering_peer_ids - dangling_peer_ids
only_leaving_peer_ids = set(self.client.get_only_leaving_peer_ids())
self.assertTrue(left_peer_ids.issubset(only_leaving_peer_ids))
# TODO: there is a minor bug in the time calculation of session start/ends. When that's fixed:
# self.assertEqual(left_peer_ids, only_leaving_peer_ids)
def test_agent_version_queries(self):
agent_version_distribution = self.client.get_agent_versions_distribution()
agent_version = agent_version_distribution[0][0]
agent_version_count = agent_version_distribution[0][1]
peer_ids_by_agent_version = self.client.get_peer_ids_for_agent_versions([agent_version])
self.assertEqual(agent_version_count, len(peer_ids_by_agent_version))
agent_versions_for_peer_ids = self.client.get_agent_versions_for_peer_ids(peer_ids_by_agent_version)
self.assertEqual(agent_versions_for_peer_ids[0][1],
agent_version_count) # we only queried for peers with one agent
def test_geo_integrity(self):
import pandas as pd
all_peer_ids = set(self.client.get_all_peer_ids())
no_public_ip_peer_ids = set(self.client.get_no_public_ip_peer_ids())
self.assertTrue(no_public_ip_peer_ids.issubset(all_peer_ids))
countries = self.client.get_countries()
countries_peer_ids = set(pd.DataFrame(countries, columns=["peer_id", "country"])["peer_id"].unique())
self.assertTrue(countries_peer_ids.issubset(all_peer_ids))
self.assertTrue(countries_peer_ids.isdisjoint(no_public_ip_peer_ids))
countries_with_relays = self.client.get_countries_with_relays()
countries_with_relays_peer_ids = set(
pd.DataFrame(countries_with_relays, columns=["peer_id", "country"])["peer_id"].unique())
self.assertTrue(countries_with_relays_peer_ids.issubset(all_peer_ids))
self.assertTrue(countries_with_relays_peer_ids.isdisjoint(no_public_ip_peer_ids))
self.assertTrue(countries_peer_ids.issubset(countries_with_relays_peer_ids))
unresolved_peer_ids = set(self.client.get_unresolved_peer_ids())
self.assertTrue(unresolved_peer_ids.issubset(all_peer_ids))
self.assertTrue(unresolved_peer_ids.isdisjoint(no_public_ip_peer_ids))
self.assertTrue(unresolved_peer_ids.isdisjoint(countries_peer_ids))
self.assertTrue(unresolved_peer_ids.isdisjoint(countries_with_relays_peer_ids))
calculated_all = no_public_ip_peer_ids | countries_peer_ids | countries_with_relays_peer_ids | unresolved_peer_ids
self.assertEqual(all_peer_ids, calculated_all)
def test_flatten(self):
flattened = DBClient._DBClient__flatten([(1,), (2,)])
self.assertListEqual(flattened, [1, 2])
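# Note: `_DBClient__flatten` above is just Python's name mangling for a method declared as
# `__flatten` on DBClient; the same pattern applies to any class, e.g.:
#
#   class C:
#       @staticmethod
#       def __flatten(pairs):
#           return [x for (x,) in pairs]
#
#   C._C__flatten([(1,), (2,)])  # -> [1, 2]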
if __name__ == '__main__':
unittest.main()
|
50807
|
import json
from flask import Flask, request
import requests
# Token that has to be generated from webhook page portal
ACCESS_TOKEN = "random <PASSWORD>"
# Token that has to be added for verification with developer portal
VERIFICATION_TOKEN = "abc"
# Identifier payloads for initial button
C19INDIA = "C19INDIA"
app = Flask(__name__)
# This get endpoint is for verification with messenger app
@app.route('/webhook', methods=['GET'])
def webhook():
verify_token = request.args.get("hub.verify_token")
if verify_token == VERIFICATION_TOKEN:
return request.args.get("hub.challenge")
return 'Unable to authorise.'
@app.route("/webhook", methods=['POST'])
def webhook_handle():
data = request.get_json()
if data["object"] == "page": # To verify that the request is being originated from a page
for entry in data["entry"]:
for event in entry["messaging"]:
if event.get("message"): # somebody typed a message
process_message(event)
# user clicked/tapped "postback" button in earlier message
elif event.get("postback"):
process_postback(event)
return 'ok'
def process_message(event):
# the facebook ID of the person sending you the message
sender_id = event["sender"]["id"]
# could receive text or attachment but not both
if "text" in event["message"]:
send_initial_menu(sender_id)
def send_initial_menu(sender_id):
message_data = json.dumps({
"recipient": {
"id": sender_id
},
"message": {
"attachment": {
"type": "template",
"payload": {
"template_type": "generic",
"elements": [{
"title": "Covid India Stats",
"subtitle": "Get the covid19 stats of Indian states",
"buttons": [{
"type": "web_url",
"url": "https://www.worldometers.info/coronavirus/country/india/",
"title": "Open Worldometer India"
}, {
"type": "postback",
"title": "Get Stats By Indian States",
"payload": C19INDIA,
}],
}]
}
}
}
})
call_send_api(message_data)
def send_state_list(sender_id):
message_data = json.dumps({
"recipient": {
"id": sender_id
},
"message": {
"attachment": {
"type": "template",
"payload": {
"template_type": "generic",
"elements": [{
"title": "Select State",
"buttons": create_state_list(1)
}, {
"title": "Select State",
"buttons": create_state_list(2)
}, {
"title": "Select State",
"buttons": create_state_list(3)
}, {
"title": "Select State",
"buttons": create_state_list(4)
}, {
"title": "Select State",
"buttons": create_state_list(5)
}, {
"title": "Select State",
"buttons": create_state_list(6)
}, {
"title": "Select State",
"buttons": create_state_list(7)
}, {
"title": "Select State",
"buttons": create_state_list(8)
}, {
"title": "Select State",
"buttons": create_state_list(9)
}, {
"title": "Select State",
"buttons": create_state_list(10)
}]
}
}
}
})
call_send_api(message_data)
def create_state_list(index):
state_list = ["Maharashtra", "Kerala", "Karnataka", "Andhra Pradesh", "Tamil Nadu", "Delhi", "Uttar Pradesh",
"West Bengal", "Odisha", "Rajasthan", "Chhattisgarh", "Telangana", "Haryana", "Gujarat", "Bihar",
"Madhya Pradesh", "Assam", "Punjab", "Jharkhand", "Uttarakhand", "Himachal Pradesh", "Goa", "Tripura",
"Manipur", "<NAME>", "Meghalaya", "Nagaland", "Sikkim", "Mizoram"]
payload_list = []
start_index = 0 + 3 * (index - 1)
end_index = 29 if (start_index + 3) > 29 else (start_index + 3)
for i in range(start_index, end_index):
postback = {}
postback["type"] = "postback"
postback["title"] = state_list[i]
postback["payload"] = state_list[i]
payload_list.append(postback)
return payload_list
def get_stats_send(sender_id, state):
response = json.loads(requests.get(
"https://api.covid19india.org/data.json").text)
list_state = response['statewise']
for i in list_state:
if i['state'] == state:
x = i
break
message_data = json.dumps({
"recipient": {
"id": sender_id
},
"message": {
"text": "ACTIVE CASES: {}\nCONFIRMED CASES: {}\nDEATHS: {}\nRECOVERED: {}".format(x['active'],
x['confirmed'],
x['deaths'],
x['recovered'])
}
})
call_send_api(message_data)
def process_postback(event):
sender_id = event["sender"]["id"]
payload = event["postback"]["payload"]
if payload == C19INDIA:
send_state_list(sender_id)
else:
get_stats_send(sender_id, payload)
def call_send_api(message_data):
params = {
"access_token": ACCESS_TOKEN
}
headers = {
"Content-Type": "application/json"
}
r = requests.post("https://graph.facebook.com/v5.0/me/messages",
params=params, headers=headers, data=message_data)
if __name__ == "__main__":
app.run()
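# The GET handler above implements Messenger's verification handshake: when `hub.verify_token`
# matches VERIFICATION_TOKEN, the challenge is echoed back. With the app running locally on the
# default Flask port, it can be exercised directly (a sketch):
#
#   import requests
#   resp = requests.get("http://localhost:5000/webhook",
#                       params={"hub.verify_token": "abc", "hub.challenge": "12345"})
#   print(resp.text)  # -> 12345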
|
50809
|
import os
from pathlib import Path
from django.apps import apps as django_apps
from django.conf import settings
from django.core.management.base import BaseCommand, CommandParser, DjangoHelpFormatter
from django.db.models import Model
from rich.align import Align
from rich.bar import Bar
from rich.console import Console
from rich.padding import Padding
from rich.style import Style
from rich.table import Table
from ._field_attr_utils import (
get_field_column,
get_field_db_type,
get_field_name,
get_field_name_on_reverse_model,
get_field_type,
get_field_type_on_reverse_model,
get_field_verbose_name,
get_related_model,
get_related_name,
)
from ._info_classes import FieldOther, FieldRelation, FieldReverseRelation, Method, ModelInfo
from ._method_attr_utils import get_method_docstring, get_method_file, get_method_line_number, get_method_signature
from ._model_attr_utils import (
get_model_base_manager,
get_model_database_table,
get_model_default_manager,
get_model_docstring,
get_model_file,
get_model_is_abstract,
get_model_is_managed,
get_model_is_proxy,
get_model_line_number,
get_model_name,
get_model_verbose_name,
)
console = Console(record=True)
DEFAULT_DJANGO_METHODS = (
"_check_column_name_clashes",
"_check_constraints",
"_check_default_pk",
"_check_field_name_clashes",
"_check_fields",
"_check_id_field",
"_check_index_together",
"_check_indexes",
"_check_local_fields",
"_check_long_column_names",
"_check_m2m_through_same_relationship",
"_check_managers",
"_check_model",
"_check_model_name_db_lookup_clashes",
"_check_ordering",
"_check_property_name_related_field_accessor_clashes",
"_check_single_primary_key",
"_check_swappable",
"_check_unique_together",
"_do_insert",
"_do_update",
"_get_expr_references",
"_get_FIELD_display",
"_get_next_or_previous_by_FIELD",
"_get_next_or_previous_in_order",
"_get_pk_val",
"_get_unique_checks",
"_meta",
"_perform_date_checks",
"_perform_unique_checks",
"_prepare_related_fields_for_save",
"_save_parents",
"_save_table",
"_set_pk_val",
"check",
"clean",
"clean_fields",
"date_error_message",
"delete",
"from_db",
"full_clean",
"get_absolute_url",
"get_deferred_fields",
"prepare_database_save",
"refresh_from_db",
"save",
"save_base",
"serializable_value",
"unique_error_message",
"validate_unique",
)
class Command(BaseCommand):
"""
A management command which lists models within your project, and optionally, details about model fields and methods
Verbosity outputs:
0 Model names only - Convenient when you just need a list of all your project's models in one place
1 Model names, field names, and non-dunder/common method names
2 * Model names, field names & details, and non-dunder/common method names & details
3 Model names, field names & details, and all method names & full details
* Verbosity of 2 is default
"""
help = "List out the fields and methods for each model"
def create_parser(self, prog_name, subcommand, **kwargs):
"""
Create and return the ``ArgumentParser`` which will be used to
parse the arguments to this command.
Reimplemented to allow new default verbosity of 2
"""
parser = CommandParser(
prog="%s %s" % (os.path.basename(prog_name), subcommand),
description=self.help or None,
formatter_class=DjangoHelpFormatter,
missing_args_message=getattr(self, "missing_args_message", None),
called_from_command_line=getattr(self, "_called_from_command_line", None),
**kwargs,
)
parser.add_argument("--version", action="version", version=self.get_version())
parser.add_argument(
"--settings",
help=(
"The Python path to a settings module, e.g. "
'"myproject.settings.main". If this isn\'t provided, the '
"DJANGO_SETTINGS_MODULE environment variable will be used."
),
)
parser.add_argument(
"--pythonpath",
help='A directory to add to the Python path, e.g. "/home/djangoprojects/myproject".',
)
parser.add_argument("--traceback", action="store_true", help="Raise on CommandError exceptions")
parser.add_argument(
"--no-color",
action="store_true",
help="Don't colorize the command output.",
)
parser.add_argument(
"--force-color",
action="store_true",
help="Force colorization of the command output.",
)
if self.requires_system_checks:
parser.add_argument(
"--skip-checks",
action="store_true",
help="Skip system checks.",
)
self.add_arguments(parser)
return parser
def add_arguments(self, parser):
super().add_arguments(parser)
parser.add_argument(
"-v",
"--verbosity",
default=2,
type=int,
choices=[0, 1, 2, 3],
help="Verbosity level: "
"0 Model names only - Convenient when you just need a list of all your project's models in one place, "
"1 Model names + field names +non-dunder/common method names, "
"2 (default) Model names + field names & details + non-dunder/common method names & details, "
"3 Model names + field names & details + all method names & details.",
)
parser.add_argument(
"-e",
"--export",
nargs="?",
type=str,
default=None,
help="Filename to export. The filename must have a file extension of `.txt`, `.html`, or `htm`",
)
parser.add_argument(
"-f",
"--filter",
nargs="+",
type=str,
default=None,
help="Provide one or more apps or models, to which the results will be limited. "
"Input should be in the form `appname` or `appname.Modelname`.",
)
def model_info(self, options):
section_style = Style(color="green", bold=True, underline=True)
subsection_style = Style(color="green", bold=True)
def get_options() -> tuple:
VERBOSITY = options.get("verbosity", None)
if VERBOSITY is None:
VERBOSITY = (
getattr(settings, "MODEL_INFO_VERBOSITY", 2)
if type(getattr(settings, "MODEL_INFO_VERBOSITY", 2)) is int
else 2
)
FILTER = options.get("filter", None)
if FILTER is None:
FILTER = (
getattr(settings, "MODEL_INFO_FILTER", None)
if type(getattr(settings, "MODEL_INFO_FILTER", None)) is list
else None
)
FILENAME = (
options.get("export")
if options.get("export", None) is not None and type(options.get("export", None)) is str
else None
)
return VERBOSITY, FILTER, FILENAME
VERBOSITY, FILTER, FILENAME = get_options()
def build_model_objects(model) -> ModelInfo:
"""
Given a model, returns a ModelInfo object
"""
new_model = ModelInfo()
new_model.model_name.value = get_model_name(model)
new_model.verbose_name.value = get_model_verbose_name(model)
new_model.docstring.value = get_model_docstring(model)
new_model.is_abstract.value = get_model_is_abstract(model)
new_model.is_proxy.value = get_model_is_proxy(model)
new_model.is_managed.value = get_model_is_managed(model)
new_model.database_table.value = get_model_database_table(model)
new_model.base_manager.value = get_model_base_manager(model)
new_model.default_manager.value = get_model_default_manager(model)
new_model.file.value = get_model_file(model)
new_model.line_number.value = get_model_line_number(model)
return new_model
def build_field_objects(field_list: list) -> tuple:
"""
Given a list of model fields, returns a tuple of FieldRelation,
FieldReverseRelation, and FieldOther object lists
"""
fields_relation = []
fields_reverse_relation = []
fields_other = []
for field in field_list:
# Identify the kind of field this is, and build associated object
if hasattr(field, "related_model") and field.related_model is not None:
if "reverse_related" in field.__class__.__module__.__str__():
# Build a FieldReverseRelation object
new_field = FieldReverseRelation()
new_field.name = get_related_name(field)
new_field.field_type = get_field_type(field)
new_field.field_db_type = get_field_db_type(field)
new_field.related_model = get_related_model(field)
new_field.field_name_on_related_model = get_field_name_on_reverse_model(field)
new_field.field_type_on_related_model = get_field_type_on_reverse_model(field)
fields_reverse_relation.append(new_field)
else:
# Build a FieldRelation object
new_field = FieldRelation()
new_field.name = get_field_name(field)
new_field.field_type = get_field_type(field)
new_field.field_column = get_field_column(field)
new_field.field_db_type = get_field_db_type(field)
new_field.related_model = get_related_model(field)
new_field.related_name = get_related_name(field)
fields_relation.append(new_field)
else:
# Build a FieldOther object
new_field = FieldOther()
new_field.name = get_field_name(field)
new_field.field_type = get_field_type(field)
new_field.field_column = get_field_column(field)
new_field.field_db_type = get_field_db_type(field)
new_field.field_verbose_name = get_field_verbose_name(field)
fields_other.append(new_field)
return (
fields_relation,
fields_reverse_relation,
fields_other,
)
def build_method_objects(method_list: list, model: Model) -> tuple:
"""
Given a list of model methods, returns a tuple of MethodCommonDjango,
MethodDunder, MethodOther, MethodOtherPrivate object lists
"""
method_dunder = []
method_common_django = []
method_other_private = []
method_other = []
for method in method_list:
# Build the object, and assign to the correct list
new_method = Method()
new_method.name = method
if VERBOSITY > 1:
new_method.method_signature = get_method_signature(method, model, VERBOSITY)
if VERBOSITY > 2:
new_method.method_docstring = get_method_docstring(method, model)
new_method.method_file = get_method_file(method, model)
new_method.method_line_number = get_method_line_number(method, model)
if method.startswith("__") and method.endswith("__"):
# Dunder methods
method_dunder.append(new_method)
elif method in DEFAULT_DJANGO_METHODS:
# Common Django methods
method_common_django.append(new_method)
elif method.startswith("_"):
# Other Private methods
method_other_private.append(new_method)
else:
# Other methods
method_other.append(new_method)
return (
method_dunder,
method_common_django,
method_other_private,
method_other,
)
def _fill_table(info_table: Table, info_object_list: list or None, info_type: type, column_count: int):
"""
Given a rich table, a list of info objects, the expected object type, and a column count, fills the table with one sorted row per object and returns it.
"""
if isinstance(info_object_list, list) and all(isinstance(row, info_type) for row in info_object_list):
sorted_field_object_list = sorted(info_object_list, key=lambda x: x.name)
for row in sorted_field_object_list:
if VERBOSITY >= 2:
info_table.add_row(*row.render_row(column_count=column_count))
else:
info_table.add_row(*row.render_simple_row())
else:
info_table.add_row("none")
return info_table
def _print_table(table):
console.print(Padding(table, (1, 0, 0, 8)))
def render_model_table(info_object_list: list or None, info_type: type):
"""Provided a list of FieldRelation objects, prints the resulting sorted table to console"""
model_table = Table(title="Model Details")
row_count = 2
if VERBOSITY > 1:
row_count = 5
if VERBOSITY > 2:
row_count = 11
model_table.add_column("Key", justify="left", style="blue", no_wrap=True)
model_table.add_column("Value", justify="left", style="magenta")
if isinstance(info_object_list, ModelInfo):
for row in info_object_list.render_rows(row_count):
new_row = tuple(row)
model_table.add_row(new_row[0], new_row[1])
else:
model_table.add_row("none")
_print_table(model_table)
def render_field_relations_table(info_object_list: list or None, info_type: type):
"""Provided a list of FieldRelation objects, prints the resulting sorted table to console"""
field_table = Table(title="Relations")
column_count = 1
field_table.add_column("Field Name", justify="left", style="yellow", no_wrap=True)
if VERBOSITY >= 2:
column_count = 6
field_table.add_column("Field Type", justify="left", style="magenta")
field_table.add_column("Database Column", justify="left", style="magenta")
field_table.add_column("Database Type", justify="left", style="magenta")
field_table.add_column("Related Model", justify="right", style="dark_red")
field_table.add_column("Related Name", justify="right", style="dark_red")
field_table = _fill_table(field_table, info_object_list, info_type, column_count)
_print_table(field_table)
def render_field_reverse_relations_table(info_object_list: list or None, info_type: type):
"""Provided a list of FieldReverseRelation objects, prints the resulting sorted table to console"""
field_table = Table(title="Reverse Relations")
column_count = 1
field_table.add_column("Related Name", justify="left", style="yellow", no_wrap=True)
if VERBOSITY >= 2:
column_count = 7
field_table.add_column("Field Type", justify="left", style="magenta")
field_table.add_column("Database Type", justify="left", style="magenta")
field_table.add_column("Related Model", justify="right", style="dark_red")
field_table.add_column("Field Name on Related Model", justify="left", style="dark_red")
field_table.add_column("Field Type on Related Model", justify="left", style="dark_red")
field_table = _fill_table(field_table, info_object_list, info_type, column_count)
_print_table(field_table)
def render_field_others_table(info_object_list: list or None, info_type: type):
"""Provided a list of FieldOther objects, prints the resulting sorted table to console"""
field_table = Table(title="Other Fields")
column_count = 1
field_table.add_column("Field Name", justify="left", style="yellow", no_wrap=True)
if VERBOSITY >= 2:
column_count = 6
field_table.add_column("Field Type", justify="left", style="magenta")
field_table.add_column("Database Column", justify="left", style="magenta")
field_table.add_column("Database Type", justify="left", style="magenta")
field_table.add_column("Verbose Name", justify="left", style="white")
field_table = _fill_table(field_table, info_object_list, info_type, column_count)
_print_table(field_table)
def render_method_table(info_object_list: list or None, info_type: str):
"""Provided a list of Method objects, prints the resulting sorted table to console"""
method_table = Table(title=info_type)
column_count = 1
method_table.add_column("Method Name", justify="left", style="cyan", no_wrap=True)
if VERBOSITY > 1:
column_count = 2
method_table.add_column("Signature", justify="left", style="magenta")
if VERBOSITY > 2:
column_count = 5
method_table.add_column("Docstring", justify="left", style="magenta")
method_table.add_column("File", justify="left", style="magenta")
method_table.add_column("Line Number", justify="left", style="magenta")
method_table = _fill_table(method_table, info_object_list, Method, column_count)
_print_table(method_table)
def get_model_list():
if FILTER is not None:
model_list = []
for filter_item in FILTER:
if filter_item.count(".") == 0:
# Get the models and add to the list
# model_list.append(django_apps.get_app_config(filter_item).get_models())
try:
app_models = [x for x in django_apps.get_app_config(filter_item).get_models()]
except LookupError as e:
print(f"Error while looking up `{filter_item}`: {e}")
else:
model_list.extend(app_models)
elif filter_item.count(".") == 1:
# Add to the model list
try:
filter_model = django_apps.get_model(filter_item)
except LookupError as e:
print(f"Error while looking up `{filter_item}`: {e}")
else:
model_list.append(filter_model)
else:
model_list = sorted(
django_apps.get_models(), key=lambda x: (x._meta.app_label, x._meta.object_name), reverse=False
)
return model_list
model_list = get_model_list()
for model in model_list:
if VERBOSITY > 0:
console.print(Padding("", (1, 0, 0, 0)))
console.print(Padding("", (0, 0, 0, 0), style=section_style))
console.print(Padding("", (0, 0, 0, 0)))
console.print(f"{model._meta.label}", style=section_style)
if VERBOSITY > 0:
def process_model():
model_info = build_model_objects(model)
render_model_table(model_info, list)
process_model()
def process_fields():
console.print(Padding("Fields:", (1, 0, 0, 4), style=subsection_style))
field_list = model._meta.get_fields(include_hidden=True)
fields_relation, fields_reverse_relation, fields_other = build_field_objects(field_list)
render_field_relations_table(fields_relation, FieldRelation)
render_field_reverse_relations_table(fields_reverse_relation, FieldReverseRelation)
render_field_others_table(fields_other, FieldOther)
process_fields()
def get_clean_method_list():
"""
                Remove any method names that start with an uppercase character, are blank, or are not callable
"""
return [
method_name
for method_name in dir(model)
if method_name is not None
and not method_name == ""
and not method_name[0].isupper()
and hasattr(model, method_name)
and callable(getattr(model, method_name))
]
method_list = get_clean_method_list()
def process_methods():
if VERBOSITY == 3:
console.print(Padding("Methods (all):", (1, 0, 0, 4), style=subsection_style))
else:
console.print(Padding("Methods (non-private/internal):", (1, 0, 0, 4), style=subsection_style))
method_dunder, method_common_django, method_other_private, method_other = build_method_objects(
method_list, model
)
if VERBOSITY > 1:
render_method_table(method_dunder, "Dunder Methods")
render_method_table(method_common_django, "Common Django Methods")
render_method_table(method_other_private, "Other Private methods")
render_method_table(method_other, "Other Methods")
process_methods()
self.stdout.write("\n")
console.print(f"\nTotal Models Listed: {len(model_list)}\n", style=section_style)
console.print(Align(Bar(size=0.1, begin=0.0, end=0.0, width=100), align="center"), style="red")
def process_export():
"""If a FILENAME was provided in options, try to save the appropriate type of file"""
if FILENAME is not None:
extension = Path(FILENAME).suffixes
if len(extension) > 0:
if any(x in extension[-1] for x in ["htm", "html"]):
console.save_html(path=FILENAME)
# Using print() to avoid exporting following line
print(f"Saved as {FILENAME}")
elif "txt" in extension[-1]:
console.save_text(path=FILENAME)
# Using print() to avoid exporting following line
print(f"Saved as {FILENAME}")
process_export()
def handle(self, *args, **options):
self.model_info(options)
|
50865
|
from torch.autograd import Variable
import torch.nn.functional as F
import scripts.utils as utils
import torch.nn as nn
import numpy as np
import torch
class CrossEntropy2d(nn.Module):
def __init__(self, size_average=True, ignore_label=255):
super(CrossEntropy2d, self).__init__()
self.size_average = size_average
self.ignore_label = ignore_label
def forward(self, predict, target, weight=None):
"""
Args:
predict:(n, c, h, w)
target:(n, h, w)
weight (Tensor, optional): a manual rescaling weight given to each class.
If given, has to be a Tensor of size "nclasses"
"""
assert not target.requires_grad
assert predict.dim() == 4
assert target.dim() == 3
assert predict.size(0) == target.size(0), "{0} vs {1} ".format(predict.size(0), target.size(0))
assert predict.size(2) == target.size(1), "{0} vs {1} ".format(predict.size(2), target.size(1))
        assert predict.size(3) == target.size(2), "{0} vs {1} ".format(predict.size(3), target.size(2))
n, c, h, w = predict.size()
target_mask = (target >= 0) * (target != self.ignore_label)
target = target[target_mask]
predict = predict.transpose(1, 2).transpose(2, 3).contiguous()
predict = predict[target_mask.view(n, h, w, 1).repeat(1, 1, 1, c)].view(-1, c)
loss = F.cross_entropy(predict, target, weight=weight, size_average=self.size_average)
return loss
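# --- Illustrative usage sketch (not part of the original module) --------- #
# A minimal example of driving CrossEntropy2d with randomly generated
# logits and labels; shapes follow the docstring above: predict is
# (n, c, h, w) and target is (n, h, w) with integer class indices
# (pixels equal to ignore_label are dropped).
def _example_cross_entropy2d():
    criterion = CrossEntropy2d(size_average=True, ignore_label=255)
    predict = torch.randn(2, 5, 8, 8, requires_grad=True)       # logits
    target = torch.randint(0, 5, (2, 8, 8), dtype=torch.long)   # labels
    loss = criterion(predict, target)
    loss.backward()  # gradients flow back into `predict`
    return loss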
def cross_entropy2d(input, target, weight=None, size_average=True):
# 1. input: (n, c, h, w), target: (n, h, w)
n, c, h, w = input.size()
# 2. log_p: (n, c, h, w)
log_p = F.log_softmax(input, dim=1)
# 3. log_p: (n*h*w, c) - contiguous() required if transpose() is used before view().
log_p = log_p.transpose(1, 2).transpose(2, 3).contiguous().view(-1, c)
log_p = log_p[target.view(n * h * w, 1).repeat(1, c) >= 0]
log_p = log_p.view(-1, c)
# 4. target: (n*h*w,)
mask = target >= 0
target = target[mask]
loss = F.nll_loss(log_p, target, ignore_index=250, weight=weight, size_average=False)
if size_average:
loss /= mask.data.sum()
# loss /= mask.sum().data[0]
return loss
def bootstrapped_cross_entropy2d(input, target, K, weight=None, size_average=False):
"""A categorical cross entropy loss for 4D tensors.
We assume the following layout: (batch, classes, height, width)
Args:
input: The outputs.
target: The predictions.
K: The number of pixels to select in the bootstrapping process.
The total number of pixels is determined as 512 * multiplier.
Returns:
The pixel-bootstrapped cross entropy loss.
"""
batch_size = input.size()[0]
def _bootstrap_xentropy_single(input, target, K, weight=None, size_average=False):
n, c, h, w = input.size()
# 1. The log softmax. log_p: (n, c, h, w)
log_p = F.log_softmax(input, dim=1)
# 2. log_p: (n*h*w, c) - contiguous() required if transpose() is used before view().
log_p = log_p.transpose(1, 2).transpose(2, 3).contiguous().view(-1, c)
log_p = log_p[target.view(n * h * w, 1).repeat(1, c) >= 0]
log_p = log_p.view(-1, c)
# 3. target: (n*h*w,)
mask = target >= 0
target = target[mask]
loss = F.nll_loss(log_p, target, weight=weight, ignore_index=250,
reduce=False, size_average=size_average)
# For each element in the batch, collect the top K worst predictions
topk_loss, _ = loss.topk(K)
reduced_topk_loss = topk_loss.sum() / K
return reduced_topk_loss
loss = 0.0
# Bootstrap from each image not entire batch
for i in range(batch_size):
loss += _bootstrap_xentropy_single(input=torch.unsqueeze(input[i], 0),
target=torch.unsqueeze(target[i], 0),
K=K,
weight=weight,
size_average=size_average)
return loss / float(batch_size)
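# --- Illustrative usage sketch (not part of the original module) --------- #
# bootstrapped_cross_entropy2d keeps only the K highest-loss pixels per
# image, so K must not exceed h * w of a single image (here 16 * 16 = 256).
def _example_bootstrapped_ce():
    logits = torch.randn(2, 5, 16, 16, requires_grad=True)
    labels = torch.randint(0, 5, (2, 16, 16), dtype=torch.long)
    return bootstrapped_cross_entropy2d(logits, labels, K=64)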
class FocalLoss2D(nn.Module):
"""
Focal Loss, which is proposed in:
"Focal Loss for Dense Object Detection (https://arxiv.org/abs/1708.02002v2)"
"""
def __init__(self, num_classes=19, ignore_label=250, alpha=0.25, gamma=2, size_average=True):
"""
Loss(x, class) = - \alpha (1-softmax(x)[class])^gamma \log(softmax(x)[class])
:param num_classes: (int) num of the classes
:param ignore_label: (int) ignore label
:param alpha: (1D Tensor or Variable) the scalar factor
:param gamma: (float) gamma > 0;
reduces the relative loss for well-classified examples (probabilities > .5),
putting more focus on hard, mis-classified examples
:param size_average: (bool): By default, the losses are averaged over observations for each mini-batch.
If the size_average is set to False, the losses are
instead summed for each mini-batch.
"""
super(FocalLoss2D, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.num_classes = num_classes
self.ignore_label = ignore_label
self.size_average = size_average
self.one_hot = Variable(torch.eye(self.num_classes))
def forward(self, cls_preds, cls_targets):
"""
:param cls_preds: (n, c, h, w)
:param cls_targets: (n, h, w)
:return:
"""
assert not cls_targets.requires_grad
assert cls_targets.dim() == 3
assert cls_preds.size(0) == cls_targets.size(0), "{0} vs {1} ".format(cls_preds.size(0), cls_targets.size(0))
assert cls_preds.size(2) == cls_targets.size(1), "{0} vs {1} ".format(cls_preds.size(2), cls_targets.size(1))
        assert cls_preds.size(3) == cls_targets.size(2), "{0} vs {1} ".format(cls_preds.size(3), cls_targets.size(2))
if cls_preds.is_cuda:
self.one_hot = self.one_hot.cuda()
n, c, h, w = cls_preds.size()
# +++++++++++++++++++++++++++++++++++++++++++++++++++ #
# 1. target reshape and one-hot encode
# +++++++++++++++++++++++++++++++++++++++++++++++++++ #
# 1.1. target: (n*h*w,)
cls_targets = cls_targets.view(n * h * w, 1)
target_mask = (cls_targets >= 0) * (cls_targets != self.ignore_label)
cls_targets = cls_targets[target_mask]
cls_targets = self.one_hot.index_select(dim=0, index=cls_targets)
# +++++++++++++++++++++++++++++++++++++++++++++++++++ #
# 2. compute focal loss for multi-classification
# +++++++++++++++++++++++++++++++++++++++++++++++++++ #
# 2.1. The softmax. prob: (n, c, h, w)
prob = F.softmax(cls_preds, dim=1)
# 2.2. prob: (n*h*w, c) - contiguous() required if transpose() is used before view().
prob = prob.transpose(1, 2).transpose(2, 3).contiguous().view(-1, c)
prob = prob[target_mask.repeat(1, c)]
prob = prob.view(-1, c) # (n*h*w, c)
probs = torch.clamp((prob * cls_targets).sum(1).view(-1, 1), min=1e-8, max=1.0)
batch_loss = -self.alpha * (torch.pow((1 - probs), self.gamma)) * probs.log()
if self.size_average:
loss = batch_loss.mean()
else:
loss = batch_loss.sum()
return loss
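# --- Illustrative usage sketch (not part of the original module) --------- #
# FocalLoss2D expects raw class scores of shape (n, c, h, w) and integer
# labels of shape (n, h, w); num_classes must match the channel dimension.
def _example_focal_loss():
    criterion = FocalLoss2D(num_classes=5, ignore_label=250)
    cls_preds = torch.randn(2, 5, 8, 8, requires_grad=True)
    cls_targets = torch.randint(0, 5, (2, 8, 8), dtype=torch.long)
    return criterion(cls_preds, cls_targets)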
class SemanticEncodingLoss(nn.Module):
def __init__(self, num_classes=19, ignore_label=250, alpha=0.25):
super(SemanticEncodingLoss, self).__init__()
self.alpha = alpha
self.num_classes = num_classes
self.ignore_label = ignore_label
def unique_encode(self, cls_targets):
batch_size, _, _ = cls_targets.size()
target_mask = (cls_targets >= 0) * (cls_targets != self.ignore_label)
cls_targets = [cls_targets[idx].masked_select(target_mask[idx]) for idx in np.arange(batch_size)]
# unique_cls = [np.unique(label.numpy(), return_counts=True) for label in cls_targets]
unique_cls = [np.unique(label.numpy()) for label in cls_targets]
encode = np.zeros((batch_size, self.num_classes), dtype=np.uint8)
for idx in np.arange(batch_size):
np.put(encode[idx], unique_cls[idx], 1)
return torch.from_numpy(encode).float()
def forward(self, predicts, enc_cls_target, size_average=True):
se_loss = F.binary_cross_entropy_with_logits(predicts, enc_cls_target, weight=None,
size_average=size_average)
return self.alpha * se_loss
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #
# Lovasz-Softmax
# <NAME> 2018 ESAT-PSI KU Leuven
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #
def lovasz_grad(gt_sorted):
"""
Computes gradient of the Lovasz extension w.r.t sorted errors
See Alg. 1 in paper
"""
p = len(gt_sorted)
gts = gt_sorted.sum()
intersection = gts - gt_sorted.float().cumsum(0)
union = gts + (1 - gt_sorted).float().cumsum(0)
jaccard = 1. - intersection / union
if p > 1: # cover 1-pixel case
jaccard[1:p] = jaccard[1:p] - jaccard[0:-1]
return jaccard
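# Worked example (comment only): for gt_sorted = [1, 1, 0] we get
#   gts = 2, intersection = [1, 0, 0], union = [2, 2, 3],
#   jaccard = [0.5, 1.0, 1.0]  ->  after differencing: [0.5, 0.5, 0.0],
# i.e. the first two sorted errors each carry half of the Jaccard gradient
# and the last one contributes nothing.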
def iou_binary(preds, labels, EMPTY=1., ignore=None, per_image=True):
"""
IoU for foreground class
binary: 1 foreground, 0 background
"""
if not per_image:
preds, labels = (preds,), (labels,)
ious = []
for pred, label in zip(preds, labels):
intersection = ((label == 1) & (pred == 1)).sum()
union = ((label == 1) | ((pred == 1) & (label != ignore))).sum()
if not union:
iou = EMPTY
else:
iou = float(intersection) / union
ious.append(iou)
    iou = utils.mean(ious)  # mean across images if per_image
return 100 * iou
def iou(preds, labels, C, EMPTY=1., ignore=None, per_image=False):
"""
Array of IoU for each (non ignored) class
"""
if not per_image:
preds, labels = (preds,), (labels,)
ious = []
for pred, label in zip(preds, labels):
iou = []
for i in range(C):
if i != ignore: # The ignored label is sometimes among predicted classes (ENet - CityScapes)
intersection = ((label == i) & (pred == i)).sum()
union = ((label == i) | ((pred == i) & (label != ignore))).sum()
if not union:
iou.append(EMPTY)
else:
iou.append(float(intersection) / union)
ious.append(iou)
    ious = list(map(utils.mean, zip(*ious)))  # mean across images if per_image
return 100 * np.array(ious)
def lovasz_softmax(probas, labels, only_present=False, per_image=False, ignore=None):
"""
Multi-class Lovasz-Softmax loss
probas: [B, C, H, W] Variable, class probabilities at each prediction (between 0 and 1)
labels: [B, H, W] Tensor, ground truth labels (between 0 and C - 1)
only_present: average only on classes present in ground truth
per_image: compute the loss per image instead of per batch
ignore: void class labels
"""
if per_image:
        loss = utils.mean(lovasz_softmax_flat(*flatten_probas(prob.unsqueeze(0), lab.unsqueeze(0), ignore),
                                              only_present=only_present)
                          for prob, lab in zip(probas, labels))
else:
loss = lovasz_softmax_flat(*flatten_probas(probas, labels, ignore), only_present=only_present)
return loss
def lovasz_softmax_flat(probas, labels, only_present=False):
"""
Multi-class Lovasz-Softmax loss
probas: [P, C] Variable, class probabilities at each prediction (between 0 and 1)
labels: [P] Tensor, ground truth labels (between 0 and C - 1)
only_present: average only on classes present in ground truth
"""
C = probas.size(1)
losses = []
for c in range(C):
fg = (labels == c).float() # foreground for class c
if only_present and fg.sum() == 0:
continue
errors = (fg - probas[:, c]).abs()
errors_sorted, perm = torch.sort(errors, 0, descending=True)
perm = perm.data
fg_sorted = fg[perm]
losses.append(torch.dot(errors_sorted, lovasz_grad(fg_sorted)))
return utils.mean(losses)
def flatten_probas(scores, labels, ignore=None):
"""
Flattens predictions in the batch
"""
B, C, H, W = scores.size()
scores = scores.permute(0, 2, 3, 1).contiguous().view(-1, C) # B * H * W, C = P, C
labels = labels.view(-1)
if ignore is None:
return scores, labels
valid = (labels != ignore)
vscores = scores[valid.nonzero().squeeze()]
vlabels = labels[valid]
return vscores, vlabels
if __name__ == "__main__":
from torch.autograd import Variable
while True:
dummy_in = Variable(torch.randn(2, 3, 32, 32), requires_grad=True)
dummy_gt = Variable(torch.LongTensor(2, 32, 32).random_(0, 3))
dummy_in = F.softmax(dummy_in, dim=1)
loss = lovasz_softmax(dummy_in, dummy_gt, ignore=255)
print(loss.data[0])
|
50915
|
import lightnion as lnn
import nacl.public
import base64
def hand(guard, encode=True):
identity = base64.b64decode(guard['router']['identity'] + '====')
onion_key = base64.b64decode(guard['ntor-onion-key'] + '====')
ephemeral_key, payload = lnn.crypto.ntor.hand(identity, onion_key)
if encode:
payload = str(base64.b64encode(payload), 'utf8')
return payload, (onion_key, ephemeral_key, identity)
def shake(payload, material):
payload = base64.b64decode(payload)
onion_key, ephemeral_key, identity = material
material = lnn.crypto.ntor.shake(ephemeral_key, payload,
identity, onion_key, length=92)
return lnn.crypto.ntor.kdf(material)
|
50921
|
from __future__ import absolute_import
import torch
from torch import nn
from torch.autograd import Variable
from torch.nn import functional as F
from scipy.stats import norm
import numpy as np
class VirtualCE(nn.Module):
def __init__(self, beta=0.1):
super(VirtualCE, self).__init__()
self.beta = beta
def forward(self, inputs, targets):
# norm first
n = inputs.shape[0]
inputs = F.normalize(inputs, p=2)
allPids = targets.cpu().numpy().tolist()
# All Centers
centerHash = {
pid: F.normalize(inputs[targets == pid, :].mean(dim=0, keepdim=True), p=2).detach() for pid in set(allPids)
}
allCenters = torch.autograd.Variable(torch.cat(list(centerHash.values()))).cuda()
centerPID = torch.from_numpy(np.asarray(list(centerHash.keys())))
# sampler vs center
samplerCenter = torch.autograd.Variable(torch.cat([allCenters[centerPID == pid, :] for pid in allPids])).cuda()
# inputs--(128*1024), allCenters--(32*1024)
vce = torch.diag(torch.exp(samplerCenter.mm(inputs.t()) / self.beta)) # 1*128
centerScore = torch.exp(allCenters.mm(inputs.t()) / self.beta).sum(dim=0) # 32(center number)*128->1*128
return -torch.log(vce.div(centerScore)).mean()
class VirtualKCE(nn.Module):
def __init__(self, beta=0.1):
super(VirtualKCE, self).__init__()
self.beta = beta
def forward(self, inputs, targets):
# norm first
n = inputs.shape[0]
inputs = F.normalize(inputs, p=2)
allPids = targets.cpu().numpy().tolist()
# All Centers
centerHash = {
pid: F.normalize(inputs[targets == pid, :].mean(dim=0, keepdim=True), p=2).detach() for pid in set(allPids)
}
allCenters = torch.autograd.Variable(torch.cat(list(centerHash.values()))).cuda()
centerPID = torch.from_numpy(np.asarray(list(centerHash.keys())))
samplerCenter = torch.autograd.Variable(torch.cat([allCenters[centerPID == pid, :] for pid in allPids])).cuda()
# inputs--(128*1024), allCenters--(32*1024)
vce = torch.diag(torch.exp(samplerCenter.mm(inputs.t()) / self.beta)) # 1*128
centerScore = torch.exp(allCenters.mm(inputs.t()) / self.beta).sum(dim=0) # 32*128->1*128
kNegScore = torch.diag(inputs.mm(inputs.t()))
return -torch.log(vce.div(kNegScore + centerScore)).mean()
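# --- Illustrative usage note (not part of the original module) ----------- #
# Both losses move the per-identity centers to the GPU internally, so they
# assume CUDA feature/label tensors. A typical call looks like (sketch):
#   feats = torch.randn(16, 64, device='cuda', requires_grad=True)  # 16 samples, 64-dim features
#   pids = torch.randint(0, 4, (16,), device='cuda')                # 4 identities in the batch
#   loss = VirtualCE(beta=0.1)(feats, pids)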
|
50941
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from datas.benchmark import Benchmark
from datas.div2k import DIV2K
from models.ecbsr import ECBSR
from torch.utils.data import DataLoader
import math
import argparse, yaml
import utils
import os
from tqdm import tqdm
import logging
import sys
import time
parser = argparse.ArgumentParser(description='ECBSR')
## yaml configuration files
parser.add_argument('--config', type=str, default=None, help = 'pre-config file for training')
## parameters for ecbsr
parser.add_argument('--scale', type=int, default=2, help = 'scale for sr network')
parser.add_argument('--colors', type=int, default=1, help = '1(Y channels of YCbCr)')
parser.add_argument('--m_ecbsr', type=int, default=4, help = 'number of ecb')
parser.add_argument('--c_ecbsr', type=int, default=8, help = 'channels of ecb')
parser.add_argument('--idt_ecbsr', type=int, default=0, help = 'incorporate identity mapping in ecb or not')
parser.add_argument('--act_type', type=str, default='prelu', help = 'prelu, relu, splus, rrelu')
parser.add_argument('--pretrain', type=str, default=None, help = 'path of pretrained model')
## parameters for model training
parser.add_argument('--patch_size', type=int, default=64, help = 'patch size of HR image')
parser.add_argument('--batch_size', type=int, default=32, help = 'batch size of training data')
parser.add_argument('--data_repeat', type=int, default=1, help = 'times of repetition for training data')
parser.add_argument('--data_augment', type=int, default=1, help = 'data augmentation for training')
parser.add_argument('--epochs', type=int, default=600, help = 'number of epochs')
parser.add_argument('--test_every', type=int, default=1, help = 'test the model every N epochs')
parser.add_argument('--log_every', type=int, default=1, help = 'print log of loss, every N steps')
parser.add_argument('--log_path', type=str, default="./experiments/")
parser.add_argument('--lr', type=float, default=5e-4, help = 'learning rate of optimizer')
parser.add_argument('--store_in_ram', type=int, default=0, help = 'store the whole training data in RAM or not')
## hardware specification
parser.add_argument('--gpu_id', type=int, default=0, help = 'gpu id for training')
parser.add_argument('--threads', type=int, default=1, help = 'number of threads for training')
## dataset specification
parser.add_argument('--div2k_hr_path', type=str, default='/Users/xindongzhang/Documents/SRData/DIV2K/DIV2K_train_HR', help = '')
parser.add_argument('--div2k_lr_path', type=str, default='/Users/xindongzhang/Documents/SRData/DIV2K/DIV2K_train_LR_bicubic', help = '')
parser.add_argument('--set5_hr_path', type=str, default='/Users/xindongzhang/Documents/SRData/benchmark/Set5/HR', help = '')
parser.add_argument('--set5_lr_path', type=str, default='/Users/xindongzhang/Documents/SRData/benchmark/Set5/LR_bicubic', help = '')
parser.add_argument('--set14_hr_path', type=str, default='/Users/xindongzhang/Documents/SRData/benchmark/Set14/HR', help = '')
parser.add_argument('--set14_lr_path', type=str, default='/Users/xindongzhang/Documents/SRData/benchmark/Set14/LR_bicubic', help = '')
parser.add_argument('--b100_hr_path', type=str, default='/Users/xindongzhang/Documents/SRData/benchmark/B100/HR', help = '')
parser.add_argument('--b100_lr_path', type=str, default='/Users/xindongzhang/Documents/SRData/benchmark/B100/LR_bicubic', help = '')
parser.add_argument('--u100_hr_path', type=str, default='/Users/xindongzhang/Documents/SRData/benchmark/Urban100/HR', help = '')
parser.add_argument('--u100_lr_path', type=str, default='/Users/xindongzhang/Documents/SRData/benchmark/Urban100/LR_bicubic', help = '')
if __name__ == '__main__':
args = parser.parse_args()
if args.config:
opt = vars(args)
yaml_args = yaml.load(open(args.config), Loader=yaml.FullLoader)
opt.update(yaml_args)
if args.colors == 3:
raise ValueError("ECBSR is trained and tested with colors=1.")
device = None
if args.gpu_id >= 0 and torch.cuda.is_available():
print("use cuda & cudnn for acceleration!")
print("the gpu id is: {}".format(args.gpu_id))
device = torch.device('cuda:{}'.format(args.gpu_id))
torch.backends.cudnn.benchmark = True
else:
print("use cpu for training!")
device = torch.device('cpu')
torch.set_num_threads(args.threads)
div2k = DIV2K(
args.div2k_hr_path,
args.div2k_lr_path,
train=True,
augment=args.data_augment,
scale=args.scale,
colors=args.colors,
patch_size=args.patch_size,
repeat=args.data_repeat,
store_in_ram=args.store_in_ram
)
set5 = Benchmark(args.set5_hr_path, args.set5_lr_path, scale=args.scale, colors=args.colors, store_in_ram=args.store_in_ram)
set14 = Benchmark(args.set14_hr_path, args.set14_lr_path, scale=args.scale, colors=args.colors, store_in_ram=args.store_in_ram)
b100 = Benchmark(args.b100_hr_path, args.b100_lr_path, scale=args.scale, colors=args.colors, store_in_ram=args.store_in_ram)
u100 = Benchmark(args.u100_hr_path, args.u100_lr_path, scale=args.scale, colors=args.colors, store_in_ram=args.store_in_ram)
train_dataloader = DataLoader(dataset=div2k, num_workers=args.threads, batch_size=args.batch_size, shuffle=True, pin_memory=True, drop_last=True)
valid_dataloaders = []
valid_dataloaders += [{'name': 'set5', 'dataloader': DataLoader(dataset=set5, batch_size=1, shuffle=False)}]
valid_dataloaders += [{'name': 'set14', 'dataloader': DataLoader(dataset=set14, batch_size=1, shuffle=False)}]
valid_dataloaders += [{'name': 'b100', 'dataloader': DataLoader(dataset=b100, batch_size=1, shuffle=False)}]
valid_dataloaders += [{'name': 'u100', 'dataloader': DataLoader(dataset=u100, batch_size=1, shuffle=False)}]
## definitions of model, loss, and optimizer
model = ECBSR(module_nums=args.m_ecbsr, channel_nums=args.c_ecbsr, with_idt=args.idt_ecbsr, act_type=args.act_type, scale=args.scale, colors=args.colors).to(device)
loss_func = nn.L1Loss()
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
if args.pretrain is not None:
print("load pretrained model: {}!".format(args.pretrain))
model.load_state_dict(torch.load(args.pretrain))
else:
print("train the model from scratch!")
## auto-generate the output logname
timestamp = utils.cur_timestamp_str()
experiment_name = "ecbsr-x{}-m{}c{}-{}-{}".format(args.scale, args.m_ecbsr, args.c_ecbsr, args.act_type, timestamp)
experiment_path = os.path.join(args.log_path, experiment_name)
if not os.path.exists(experiment_path):
os.makedirs(experiment_path)
experiment_model_path = os.path.join(experiment_path, 'models')
if not os.path.exists(experiment_model_path):
os.makedirs(experiment_model_path)
log_name = os.path.join(experiment_path, "log.txt")
sys.stdout = utils.ExperimentLogger(log_name, sys.stdout)
stat_dict = utils.get_stat_dict()
    ## save training parameters
exp_params = vars(args)
exp_params_name = os.path.join(experiment_path, 'config.yml')
with open(exp_params_name, 'w') as exp_params_file:
yaml.dump(exp_params, exp_params_file, default_flow_style=False)
timer_start = time.time()
for epoch in range(args.epochs):
epoch_loss = 0.0
stat_dict['epochs'] = epoch
model = model.train()
print("##===========Epoch: {}=============##".format(epoch))
for iter, batch in enumerate(train_dataloader):
optimizer.zero_grad()
lr, hr = batch
lr, hr = lr.to(device), hr.to(device)
sr = model(lr)
loss = loss_func(sr, hr)
loss.backward()
optimizer.step()
epoch_loss += float(loss)
if (iter + 1) % args.log_every == 0:
cur_steps = (iter+1)*args.batch_size
total_steps = len(train_dataloader.dataset)
fill_width = math.ceil(math.log10(total_steps))
cur_steps = str(cur_steps).zfill(fill_width)
epoch_width = math.ceil(math.log10(args.epochs))
cur_epoch = str(epoch).zfill(epoch_width)
avg_loss = epoch_loss / (iter + 1)
stat_dict['losses'].append(avg_loss)
timer_end = time.time()
duration = timer_end - timer_start
timer_start = timer_end
print("Epoch:{}, {}/{}, loss: {:.4f}, time: {:.3f}".format(cur_epoch, cur_steps, total_steps, avg_loss, duration))
if (epoch + 1) % args.test_every == 0:
torch.set_grad_enabled(False)
test_log = ""
model = model.eval()
for valid_dataloader in valid_dataloaders:
avg_psnr = 0.0
avg_ssim = 0.0
name = valid_dataloader['name']
loader = valid_dataloader['dataloader']
for lr, hr in tqdm(loader, ncols=80):
lr, hr = lr.to(device), hr.to(device)
sr = model(lr)
# crop
hr = hr[:, :, args.scale:-args.scale, args.scale:-args.scale]
sr = sr[:, :, args.scale:-args.scale, args.scale:-args.scale]
# quantize
hr = hr.clamp(0, 255)
sr = sr.clamp(0, 255)
# calculate psnr
psnr = utils.calc_psnr(sr, hr)
ssim = utils.calc_ssim(sr, hr)
avg_psnr += psnr
avg_ssim += ssim
avg_psnr = round(avg_psnr/len(loader), 2)
avg_ssim = round(avg_ssim/len(loader), 4)
stat_dict[name]['psnrs'].append(avg_psnr)
stat_dict[name]['ssims'].append(avg_ssim)
if stat_dict[name]['best_psnr']['value'] < avg_psnr:
stat_dict[name]['best_psnr']['value'] = avg_psnr
stat_dict[name]['best_psnr']['epoch'] = epoch
if stat_dict[name]['best_ssim']['value'] < avg_ssim:
stat_dict[name]['best_ssim']['value'] = avg_ssim
stat_dict[name]['best_ssim']['epoch'] = epoch
test_log += "[{}-X{}], PSNR/SSIM: {:.2f}/{:.4f} (Best: {:.2f}/{:.4f}, Epoch: {}/{})\n".format(
name, args.scale, float(avg_psnr), float(avg_ssim),
stat_dict[name]['best_psnr']['value'], stat_dict[name]['best_ssim']['value'],
stat_dict[name]['best_psnr']['epoch'], stat_dict[name]['best_ssim']['epoch'])
# print log & flush out
print(test_log)
sys.stdout.flush()
# save model
saved_model_path = os.path.join(experiment_model_path, 'model_x{}_{}.pt'.format(args.scale, epoch))
torch.save(model.state_dict(), saved_model_path)
torch.set_grad_enabled(True)
            # save stat dict
stat_dict_name = os.path.join(experiment_path, 'stat_dict.yml')
with open(stat_dict_name, 'w') as stat_dict_file:
yaml.dump(stat_dict, stat_dict_file, default_flow_style=False)
|
50970
|
if __name__ == '__main__' and __package__ is None:
import sys
from os import path
sys.path.append( path.dirname( path.dirname( path.abspath(__file__) ) ))
import os
import torch
from utils.model_serialization import strip_prefix_if_present
from utils import zipreader
import argparse
from tqdm import tqdm
import pickle
import cv2
import numpy as np
parser = argparse.ArgumentParser(description="PyTorch Keypoints Training")
parser.add_argument(
"--src",
default="~/datasets",
help="source model",
type=str,
)
parser.add_argument(
"--dst",
default="~/local/datasets/h36m/undistortedimages",
help="dst model",
type=str,
)
parser.add_argument(
"--anno",
default="~/datasets/h36m/annot/h36m_validation.pkl",
type=str,
)
args = parser.parse_args()
src = os.path.expanduser(args.src)
dst = os.path.expanduser(args.dst)
with open(os.path.expanduser(args.anno), 'rb') as f:
data = pickle.load(f)
for db_rec in tqdm(data):
path = db_rec['image']
image_dir = 'images.zip@'
image_file = os.path.join(src, db_rec['source'], image_dir, 'images', db_rec['image'])
output_path = os.path.join(dst, path)
if os.path.exists(output_path):
continue
output_dir = os.path.dirname(output_path)
os.makedirs(output_dir, exist_ok=True)
data_numpy = zipreader.imread(
image_file, cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION)
camera = db_rec['camera']
K = np.array([
[float(camera['fx']), 0, float(camera['cx'])],
[0, float(camera['fy']), float(camera['cy'])],
[0, 0, 1.],
])
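    # cv2.undistort expects the distortion coefficients in the order
    # (k1, k2, p1, p2, k3), which is how they are assembled below.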
distCoeffs = np.array([float(i) for i in [camera['k'][0], camera['k'][1], camera['p'][0], camera['p'][1], camera['k'][2]]])
data_numpy = cv2.undistort(data_numpy, K, distCoeffs)
#cv2.imwrite(output_path, data_numpy, [int(cv2.IMWRITE_JPEG_QUALITY), 100])
#cv2.imwrite(output_path, data_numpy)
cv2.imwrite(output_path, data_numpy, [int(cv2.IMWRITE_JPEG_QUALITY), 90])
|
50984
|
import logging
import tflite
import numpy as np
from tflite2onnx import mapping
from tflite2onnx.op.common import Operator
from tflite2onnx.op.binary import PowerWrapper
logger = logging.getLogger('tflite2onnx')
class Rsqrt(Operator):
    # Lower RSQRT to a Sqrt op whose output is fed into an appended Pow(x, -1).
TypeMapping = {
tflite.BuiltinOperator.RSQRT: 'Sqrt',
}
def __init__(self, TFactory, index):
super().__init__(TFactory, index)
self.setInited()
@property
def type(self):
if self.status.uninitialized:
return 'Sqrt'
else:
op = self.tflite
opcode = self.model.OperatorCodes(op.OpcodeIndex()).BuiltinCode()
assert(opcode in self.TypeMapping)
return self.TypeMapping[opcode]
def parse(self):
logger.debug("Parsing %s...", self.type)
op = self.tflite
opcode = self.model.OperatorCodes(op.OpcodeIndex()).BuiltinCode()
assert(opcode in self.TypeMapping)
assert(op.InputsLength() == 1)
assert(op.OutputsLength() == 1)
self.parseInput(0)
self.parseOutput(0)
# invert square root result
self.appendInvert()
self.setParsed()
def propagatableTensors(self):
return self.inputs + self.outputs
def transform(self):
pass
def appendInvert(self):
invert = PowerWrapper(self.TFactory, -1)
invert_name = 'TFLITE2ONNX_Invert_%s' % self.outputs[0].name
invert_t = self.TFactory.getWithRef(self.outputs[0], invert_name, True)
invert_t.setParsed()
invert_t.addProducer(self)
invert_t.addConsumer(invert)
pow_t = 'TFLITE2ONNX_PowData_%s' % self.outputs[0].name
pow_t = self.TFactory.getWithRef(self.outputs[0], pow_t, True)
pow_dtype = mapping.DTYPE_ONNX2NAME[pow_t.dtype]
pow_t.data = np.full(shape=pow_t.shape, fill_value=-1, dtype=pow_dtype)
pow_t.setParsed()
pow_t.addConsumer(invert)
invert.inputs.append(invert_t)
invert.inputs.append(pow_t)
invert.outputs.append(self.outputs[0])
self.replaceOutput(self.outputs[0], invert_t)
invert.setParsed()
self.post.append(invert)
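# Numerical sanity note (comment only, not part of the converter): the
# lowering above relies on the identity rsqrt(x) = 1 / sqrt(x) = pow(sqrt(x), -1),
# e.g. with NumPy:
#   x = np.array([0.25, 1.0, 4.0], dtype=np.float32)
#   np.allclose(np.power(np.sqrt(x), -1.0), 1.0 / np.sqrt(x))  # -> True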
|
51024
|
from savu.plugins.plugin_tools import PluginTools
class PyfaiAzimuthalIntegratorTools(PluginTools):
"""1D azimuthal integrator by pyFAI
"""
|
51031
|
from abc import abstractmethod
class BaseCriterion:
def __init__(self, **kwargs):
pass
def __call__(self, ground_truth, predictions):
return self.compute_criterion(ground_truth, predictions)
@abstractmethod
def compute_criterion(self, ground_truth, predictions):
raise NotImplementedError
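# --- Illustrative sketch (not part of the original module) --------------- #
# A minimal concrete criterion showing the intended subclassing pattern:
# implement compute_criterion() and instances become callables.
class MeanAbsoluteErrorCriterion(BaseCriterion):
    """Example criterion: mean absolute error over paired numeric sequences."""
    def compute_criterion(self, ground_truth, predictions):
        return sum(abs(g - p) for g, p in zip(ground_truth, predictions)) / len(ground_truth)
# Usage sketch: MeanAbsoluteErrorCriterion()([1.0, 2.0], [1.5, 1.0])  # -> 0.75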
|
51065
|
import wx
from gui.textutil import CopyFont, default_font
#from gui.toolbox import prnt
from wx import EXPAND,ALL,TOP,VERTICAL,ALIGN_CENTER_HORIZONTAL,ALIGN_CENTER_VERTICAL,LI_HORIZONTAL
ALIGN_CENTER = ALIGN_CENTER_HORIZONTAL|ALIGN_CENTER_VERTICAL
TOPLESS = ALL & ~TOP
bgcolors = [
wx.Color(238, 238, 238),
wx.Color(255, 255, 255),
]
hovbgcolor = wx.Color(220, 220, 220)
#def printlist(list):
# prnt(list)
class VisualListEditorList(wx.VListBox):
text_alignment = ALIGN_CENTER
min_width = 150
def __init__(self,
parent,
list2sort,
prettynames = None,
listcallback = None,
ischecked = None # if given, a function of one argument that determines if an argument is checked
):
wx.VListBox.__init__(self, parent)
self.Font = default_font()
self.item_height = self.Font.Height + 12
self.oldlist = None
self.prettynames = prettynames or {}
self.listcallback = listcallback
self.SetList(list2sort)
self.setup_checkboxes(ischecked)
self.BackgroundColour = wx.WHITE
self._hovered = -1
Bind = self.Bind
Bind(wx.EVT_MOTION, self.OnMotion)
Bind(wx.EVT_LEFT_DOWN, self.OnLeftDown)
Bind(wx.EVT_LEFT_UP, self.OnLeftUp)
Bind(wx.EVT_PAINT,self.OnPaint)
def CalcMinWidth(self):
return self.min_width
def SetList(self, seq):
self.thelist = seq[:]
self.SetItemCount(len(self.thelist))
height = self.OnMeasureItem(0) * self.ItemCount
self.SetMinSize(wx.Size(self.CalcMinWidth(), height))
self.RefreshAll()
def OnPaint(self,event):
event.Skip()
srect= wx.Rect(*self.Rect)
srect.Inflate(1,1)
pcdc = wx.ClientDC(self.Parent)
pcdc.Brush = wx.TRANSPARENT_BRUSH
pcdc.Pen = wx.Pen(wx.Colour(213,213,213))
pcdc.DrawRectangleRect(srect)
def GetHovered(self):
return self._hovered
def GetItem(self, n):
return self.thelist[n]
def SetHovered(self,i):
slist = self.thelist
if i >= len(slist):
return
n = self._hovered
self._hovered = i
if n != -1:
self.RefreshLine(n)
if i != -1:
self.RefreshLine(i)
Hovered = property(GetHovered,SetHovered)
def OnMeasureItem(self,n):
return self.item_height
def OnDrawBackground(self,dc,rect,n):
dc.Brush = wx.Brush(hovbgcolor if self.Hovered == n else bgcolors[n % len(bgcolors)])
dc.Pen = wx.TRANSPARENT_PEN
dc.DrawRectangleRect(rect)
def OnDrawItem(self,dc,rect,n):
elem = self.thelist[n]
self._draw_checkbox(dc, rect, n)
if hasattr(self.prettynames, '__call__'):
name = self.prettynames(elem)
else:
name = self.prettynames.get(self.thelist[n], _('(Unnamed Panel)'))
dc.Font = self.Font
dc.DrawLabel(name, rect, self.text_alignment)
def OnMotion(self,event):
rect = self.ClientRect
wap = wx.FindWindowAtPointer()
mp = event.Position
hit = self.HitTest(mp)
dragging = event.Dragging()
selection = self.Selection
thelist = self.thelist
checked = self.checked
if hit != -1:
cursor = wx.CURSOR_ARROW if self._over_checkbox(mp, hit) else wx.CURSOR_HAND
self.SetCursor(wx.StockCursor(cursor))
if not dragging:
if not rect.Contains(mp) or not wap == self:
while self.HasCapture():
self.ReleaseMouse()
self.Hovered = -1
return
elif not self.HasCapture():
self.CaptureMouse()
if dragging and -1 not in (selection, hit) and hit != selection:
self.Selection = hit
item = thelist[selection]
if checked is not None:
item_checked = checked[selection]
thelist.pop(selection)
thelist.insert(hit, item)
if checked is not None:
checked.pop(selection)
checked.insert(hit, item_checked)
self.Refresh()
self.Hovered = hit
def OnLeftDown(self,event):
mp = event.Position
self.oldlist = list(self.thelist)
hit = self.HitTest(mp)
if hit != -1 and self._over_checkbox(mp, hit):
self.checked[hit] = not self.checked[hit]
self.listcallback(self.thelist, self.checked)
self.Refresh()
else:
self.Selection = hit
def OnLeftUp(self,event):
if self.oldlist and self.oldlist != self.thelist and self.listcallback:
if self.checked is not None:
self.listcallback(self.thelist, self.checked)
else:
self.listcallback(self.thelist)
self.Selection = -1
self.oldlist = None
#
# checkbox support
#
def setup_checkboxes(self, ischecked):
if ischecked is not None:
self.checked = [ischecked(e) for e in self.thelist]
else:
self.checked = None
self.checkbox_border = 5
self.checkbox_size = 16
self.checkbox_rect = wx.Rect(self.checkbox_border, (self.item_height - self.checkbox_size) / 2, self.checkbox_size, self.checkbox_size)
def _draw_checkbox(self, dc, rect, n):
if self.checked is None:
return
flag = wx.CONTROL_CHECKED if self.checked[n] else 0
# draw a checkbox
cbrect = wx.Rect(*self.checkbox_rect)
cbrect.Offset((rect.x, rect.y))
wx.RendererNative.Get().DrawCheckBox(self, dc, cbrect, flag)
rect.x = rect.x + self.checkbox_size + self.checkbox_border * 2
def _over_checkbox(self, mp, hit):
if self.checked is None: return False
hitmp = mp - (0, hit * self.item_height)
return self.checkbox_rect.Contains(hitmp)
class VisualListEditorListWithLinks(VisualListEditorList):
    '''
    A "visual list editor" which draws clickable links on the right.
    Subclasses override LinksForRow(n), returning a list of
    ("Link Text", link_func) tuples, where link_func is a callable taking
    one argument: the row's item.
    Subclasses should also call PaintLinks(dc, rect, n) in their EVT_PAINT
    handlers.
    '''
link_padding = 5
def LinksForRow(self, n):
'''Overridden by subclasses'''
return []
def PaintLinks(self, dc, rect, n):
        '''Should be called by subclasses' EVT_PAINT handlers.'''
dc.Font = self.Font
dc.TextForeground = wx.BLUE
for (link_text, func), rect in self.LinkRectsForRow(n):
rect.y += n * self.item_height
dc.DrawLabel(link_text, rect, wx.ALIGN_CENTER_VERTICAL)
def __init__(self, *a, **k):
VisualListEditorList.__init__(self, *a, **k)
self.Bind(wx.EVT_LEFT_DOWN, self.__leftdown)
def __leftdown(self, e):
mp = e.Position
hit = self.HitTest(mp)
link = self._link_hittest(mp, hit)
if link:
link_text, link_func = link
return link_func(self.thelist[hit])
e.Skip()
def _link_hittest(self, mp, hit):
if hit == -1: return
mp = mp - (0, hit * self.item_height)
for link, rect in self.LinkRectsForRow(hit):
if rect.Contains(mp):
return link
def LinkRectsForRow(self, hit):
links = self.LinksForRow(hit)
dc = wx.ClientDC(self)
dc.Font = self.Font
# calculate link rects from the right.
p = self.ClientRect.TopRight
rects = []
for link_text, func in reversed(links):
w, h = dc.GetTextExtent(link_text)
w += self.link_padding
r = wx.Rect(p.x - w, p.y, w, self.item_height)
rects.append(((link_text, func), r))
p.x -= w
rects.reverse() # restore left to right order.
return rects
class VisualListEditor(wx.Dialog):
dialog_style = wx.DEFAULT_DIALOG_STYLE | wx.FRAME_FLOAT_ON_PARENT
def __init__(self, parent, list2sort, prettynames=None, listcallback=None,
title=_("Arrange Panels"),
listclass = VisualListEditorList,
ischecked = None):
wx.Dialog.__init__(self, parent, -1, title, style = self.dialog_style)
Bind = self.Bind
Bind(wx.EVT_CLOSE, self.Done)
# construct
panel = wx.Panel(self)
text = wx.StaticText(panel, -1, _('Drag and drop to reorder'), style = ALIGN_CENTER)
text.Font = CopyFont(text.Font, weight=wx.BOLD)
self.vle = vle = listclass(panel, list2sort, prettynames, listcallback, ischecked=ischecked)
Bind(wx.EVT_KEY_DOWN, self.OnKeyDown)
self.vle.Bind(wx.EVT_KEY_DOWN, self.OnKeyDown)
hline = wx.StaticLine(panel,style = LI_HORIZONTAL)
done = wx.Button(panel,-1, _('Done'))
done.Bind(wx.EVT_BUTTON,self.Done)
# layout
main_sizer = self.Sizer = wx.BoxSizer(VERTICAL)
main_sizer.Add(panel, 1, EXPAND)
s = panel.Sizer = wx.BoxSizer(VERTICAL)
border_size = 6
s.AddMany([(text, 0, EXPAND|ALL, border_size),
(vle, 1, EXPAND|TOPLESS, border_size),
(hline, 0, EXPAND|TOPLESS, border_size),
(done, 0, EXPAND|TOPLESS, border_size)])
self.Fit()
def SetList(self, seq):
return self.vle.SetList(seq)
def Done(self, event):
self.Hide()
self.Destroy()
def OnKeyDown(self, e):
if e.KeyCode == wx.WXK_ESCAPE:
self.Close()
else:
e.Skip()
|
51068
|
factors = [1, 2, 4]
pads = [32, 64, 128, 256, 512]
gen_scope = "gen"
dis_scope = "dis"
outputs_prefix = "output_"
lr_key = "lr"
hr_key = "hr"
lr_input_name = "lr_input"
hr_input_name = "hr_input"
pretrain_key = "pretrain"
train_key = "train"
epoch_key = "per_epoch"
|
51117
|
def is_string(thing):
try:
return isinstance(thing, basestring)
except NameError:
return isinstance(thing, str)
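# Behaviour note: on Python 2 this accepts both str and unicode (via
# basestring); on Python 3 the NameError branch falls back to plain str,
# so bytes objects are not considered strings there.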
|
51142
|
from __future__ import print_function
from __future__ import division
import os
import sys
import time
import datetime
import os.path as osp
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
from torch.optim import lr_scheduler
from args import argument_parser, image_dataset_kwargs, optimizer_kwargs
from torchreid.data_manager import ImageDataManager
from torchreid import models
from torchreid.losses import CrossEntropyLoss, DeepSupervision
from torchreid.utils.iotools import save_checkpoint, check_isfile
from torchreid.utils.avgmeter import AverageMeter
from torchreid.utils.loggers import Logger, RankLogger
from torchreid.utils.torchtools import count_num_param, open_all_layers, open_specified_layers
from torchreid.utils.reidtools import visualize_ranked_results
from torchreid.eval_metrics import evaluate
from torchreid.optimizers import init_optimizer
from torchreid.regularizers import get_regularizer
from torchreid.losses.wrapped_cross_entropy_loss import WrappedCrossEntropyLoss
from torchreid.models.tricks.dropout import DropoutOptimizer
import logging
logging.basicConfig(level=os.environ.get('LOGLEVEL', 'CRITICAL'))
# global variables
parser = argument_parser()
args = parser.parse_args()
dropout_optimizer = DropoutOptimizer(args)
os.environ['TORCH_HOME'] = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '.torch'))
def accuracy(output, target, topk=(1,)):
"""Computes the accuracy over the k top predictions for
the specified values of k.
Args:
output (torch.Tensor): prediction matrix with shape (batch_size, num_classes).
target (torch.LongTensor): ground truth labels with shape (batch_size).
topk (tuple, optional): accuracy at top-k will be computed. For example,
topk=(1, 5) means accuracy at top-1 and top-5 will be computed.
Returns:
list: accuracy at top-k.
Examples::
>>> from torchreid import metrics
>>> metrics.accuracy(output, target)
"""
maxk = max(topk)
batch_size = target.size(0)
if isinstance(output, (tuple, list)):
output = output[0]
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
acc = correct_k.mul_(100.0 / batch_size)
res.append(acc)
return res
def get_criterions(num_classes: int, use_gpu: bool, args) -> ('criterion', 'fix_criterion', 'switch_criterion', 'htri_param_controller'):
from torchreid.losses.wrapped_triplet_loss import WrappedTripletLoss
from torchreid.regularizers.param_controller import HtriParamController
htri_param_controller = HtriParamController()
if 'htri' in args.criterion:
fix_criterion = WrappedTripletLoss(num_classes, use_gpu, args, htri_param_controller)
switch_criterion = WrappedTripletLoss(num_classes, use_gpu, args, htri_param_controller)
else:
fix_criterion = WrappedCrossEntropyLoss(num_classes=num_classes, use_gpu=use_gpu, label_smooth=args.label_smooth)
switch_criterion = WrappedCrossEntropyLoss(num_classes=num_classes, use_gpu=use_gpu, label_smooth=args.label_smooth)
if args.criterion == 'xent':
criterion = WrappedCrossEntropyLoss(num_classes=num_classes, use_gpu=use_gpu, label_smooth=args.label_smooth)
elif args.criterion == 'spectral':
from torchreid.losses.spectral_loss import SpectralLoss
criterion = SpectralLoss(num_classes=num_classes, use_gpu=use_gpu, label_smooth=args.label_smooth, penalty_position=args.penalty_position)
elif args.criterion == 'batch_spectral':
from torchreid.losses.batch_spectral_loss import BatchSpectralLoss
criterion = BatchSpectralLoss(num_classes=num_classes, use_gpu=use_gpu, label_smooth=args.label_smooth)
elif args.criterion == 'lowrank':
from torchreid.losses.lowrank_loss import LowRankLoss
criterion = LowRankLoss(num_classes=num_classes, use_gpu=use_gpu, label_smooth=args.label_smooth)
elif args.criterion == 'singular':
from torchreid.losses.singular_loss import SingularLoss
criterion = SingularLoss(num_classes=num_classes, use_gpu=use_gpu, label_smooth=args.label_smooth, penalty_position=args.penalty_position)
elif args.criterion == 'htri':
criterion = WrappedTripletLoss(num_classes=num_classes, use_gpu=use_gpu, args=args, param_controller=htri_param_controller)
elif args.criterion == 'singular_htri':
from torchreid.losses.singular_triplet_loss import SingularTripletLoss
criterion = SingularTripletLoss(num_classes, use_gpu, args, htri_param_controller)
elif args.criterion == 'incidence':
from torchreid.losses.incidence_loss import IncidenceLoss
criterion = IncidenceLoss()
elif args.criterion == 'incidence_xent':
from torchreid.losses.incidence_xent_loss import IncidenceXentLoss
criterion = IncidenceXentLoss(num_classes, use_gpu, args.label_smooth)
else:
        raise RuntimeError('Unknown criterion {!r}'.format(args.criterion))
if args.fix_custom_loss:
fix_criterion = criterion
if args.switch_loss < 0:
criterion, switch_criterion = switch_criterion, criterion
return criterion, fix_criterion, switch_criterion, htri_param_controller
def main():
global args, dropout_optimizer
torch.manual_seed(args.seed)
if not args.use_avai_gpus:
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
use_gpu = torch.cuda.is_available()
if args.use_cpu:
use_gpu = False
log_name = 'log_test.txt' if args.evaluate else 'log_train.txt'
sys.stderr = sys.stdout = Logger(osp.join(args.save_dir, log_name))
print("==========\nArgs:{}\n==========".format(args))
if use_gpu:
print("Currently using GPU {}".format(args.gpu_devices))
cudnn.benchmark = True
torch.cuda.manual_seed_all(args.seed)
else:
print("Currently using CPU, however, GPU is highly recommended")
print("Initializing image data manager")
dm = ImageDataManager(use_gpu, **image_dataset_kwargs(args))
trainloader, testloader_dict = dm.return_dataloaders()
print("Initializing model: {}".format(args.arch))
model = models.init_model(name=args.arch, num_classes=dm.num_train_pids, loss={'xent'}, use_gpu=use_gpu, dropout_optimizer=dropout_optimizer)
print(model)
print("Model size: {:.3f} M".format(count_num_param(model)))
# criterion = WrappedCrossEntropyLoss(num_classes=dm.num_train_pids, use_gpu=use_gpu, label_smooth=args.label_smooth)
criterion, fix_criterion, switch_criterion, htri_param_controller = get_criterions(dm.num_train_pids, use_gpu, args)
regularizer, reg_param_controller = get_regularizer(args.regularizer)
optimizer = init_optimizer(model.parameters(), **optimizer_kwargs(args))
scheduler = lr_scheduler.MultiStepLR(optimizer, milestones=args.stepsize, gamma=args.gamma)
if args.load_weights and check_isfile(args.load_weights):
# load pretrained weights but ignore layers that don't match in size
try:
checkpoint = torch.load(args.load_weights)
except Exception as e:
print(e)
checkpoint = torch.load(args.load_weights, map_location={'cuda:0': 'cpu'})
# dropout_optimizer.set_p(checkpoint.get('dropout_p', 0))
# print(list(checkpoint.keys()), checkpoint['dropout_p'])
pretrain_dict = checkpoint['state_dict']
model_dict = model.state_dict()
pretrain_dict = {k: v for k, v in pretrain_dict.items() if k in model_dict and model_dict[k].size() == v.size()}
model_dict.update(pretrain_dict)
model.load_state_dict(model_dict)
print("Loaded pretrained weights from '{}'".format(args.load_weights))
if args.resume and check_isfile(args.resume):
checkpoint = torch.load(args.resume)
state = model.state_dict()
state.update(checkpoint['state_dict'])
model.load_state_dict(state)
# args.start_epoch = checkpoint['epoch'] + 1
print("Loaded checkpoint from '{}'".format(args.resume))
print("- start_epoch: {}\n- rank1: {}".format(args.start_epoch, checkpoint['rank1']))
if use_gpu:
model = nn.DataParallel(model, device_ids=list(range(len(args.gpu_devices.split(','))))).cuda()
extract_train_info(model, trainloader)
def extract_train_info(model, trainloader):
model.eval()
os.environ['fake'] = '1'
accs = [AverageMeter() for _ in range(3)]
with torch.no_grad():
for imgs, pids, _, paths in trainloader:
xent_features = model(imgs.cuda())[1]
for i, xent_feature in enumerate(xent_features):
accs[i].update(
accuracy(xent_feature, pids.cuda())[0].item(),
pids.size(0),
)
with open(args.load_weights + '.acc', 'w') as f:
print(*(acc.avg for acc in accs), file=f)
if __name__ == '__main__':
main()
|
51147
|
import sys
from typing import Optional, Type
from traceback_with_variables.print import print_exc, Format
def global_print_exc(fmt: Optional[Format] = None) -> None:
sys.excepthook = lambda e_cls, e, tb: print_exc(e=e, fmt=fmt)
def global_print_exc_in_ipython(fmt: Optional[Format] = None) -> None:
try:
import IPython
except ModuleNotFoundError:
raise ValueError("IPython not found")
IPython.core.interactiveshell.InteractiveShell.showtraceback = \
lambda self, *args, **kwargs: print_exc(num_skipped_frames=1, fmt=fmt)
def is_ipython_global(name: str, type_: Type, filename: str, is_global: bool) -> bool:
return is_global and (
name in ['In', 'Out', 'get_ipython', 'exit', 'quit']
or name.startswith('_')
)
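# Usage note (sketch): call global_print_exc() once at program start-up so
# that any uncaught exception is rendered by print_exc (which includes local
# variable values in the traceback); inside IPython/Jupyter use
# global_print_exc_in_ipython() instead, since IPython installs its own
# traceback handler.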
|
51175
|
from torch import nn
import torch.nn.functional as F
from model.basic import DownSampling, SSnbt, APN
class LEDNet(nn.Module):
def __init__(self, nclass, drop=0.1):
super(LEDNet, self).__init__()
self.encoder = nn.Sequential(
DownSampling(3, 29), SSnbt(32, 1, 0.1 * drop), SSnbt(32, 1, 0.1 * drop), SSnbt(32, 1, 0.1 * drop),
DownSampling(32, 32), SSnbt(64, 1, 0.1 * drop), SSnbt(64, 1, 0.1 * drop),
DownSampling(64, 64), SSnbt(128, 1, drop), SSnbt(128, 2, drop), SSnbt(128, 5, drop),
SSnbt(128, 9, drop), SSnbt(128, 2, drop), SSnbt(128, 5, drop), SSnbt(128, 9, drop), SSnbt(128, 17, drop)
)
self.decoder = APN(128, nclass)
def forward(self, x):
_, _, h, w = x.shape
x = self.encoder(x)
x = self.decoder(x)
return F.interpolate(x, size=(h, w), mode='bilinear', align_corners=True)
if __name__ == '__main__':
net = LEDNet(21)
import torch
a = torch.randn(2, 3, 554, 253)
out = net(a)
print(out.shape)
|
51181
|
from vulnscan_parser.models.vsfinding import VSFinding
class TestsslFinding(VSFinding):
def __init__(self):
super().__init__()
self.ignored_dict_props.extend(['vulnerability', 'ports'])
self.ignored_dict_props.remove('finding')
self.vulnerability = None
self._cwe = []
self._cve = []
self.finding = ''
@property
def cwe(self):
if not self._cwe:
return self.vulnerability.cwe
return self._cwe
@cwe.setter
def cwe(self, value):
if value != self.vulnerability.cwe:
self._cwe = value
@property
def cve(self):
if not self._cve:
return self.vulnerability.cve
return self._cve
@cve.setter
def cve(self, value):
if value != self.vulnerability.cve:
self._cve = value
@property
def name(self):
return self.vulnerability.name
def to_serializable_dict(self):
result = self.vulnerability.to_serializable_dict()
result.update(self.host.to_serializable_dict())
result.update(super().to_serializable_dict())
# remove unnecessary attrs
result.pop('hostnames')
return result
|
51187
|
from fest import utils
class SomeClass:
pass
def test_future():
fut = utils.Future(iter('abcdefg'))
ret = fut.filter(lambda x: x < 'e').execute()
exp = list('abcd')
assert ret == exp
def test_digest():
ret = {'fizz': 'buzz'}
assert utils.digest(ret) == 'f45195aef08daea1be5dbb1c7feb5763c5bc7b37'
def test_logger():
obj = SomeClass()
ret = utils.logger(obj)
exp = 'tests.utils_test.SomeClass'
assert ret.name == exp
|
51197
|
import time
from umqtt.robust import MQTTClient
def sub_cb(topic, msg):
print((topic, msg))
c = MQTTClient("umqtt_client", "localhost")
# Print diagnostic messages when retries/reconnects happens
c.DEBUG = True
c.set_callback(sub_cb)
# Connect to server, requesting not to clean session for this
# client. If there was no existing session (False return value
# from connect() method), we perform the initial setup of client
# session - subscribe to needed topics. Afterwards, these
# subscriptions will be stored server-side, and will be persistent,
# (as we use clean_session=False).
#
# There can be a problem when a session for a given client exists,
# but doesn't have subscriptions a particular application expects.
# In this case, a session needs to be cleaned first. See
# example_reset_session.py for an obvious way how to do that.
#
# In an actual application, it's up to the developer how to manage
# these issues. One extreme is to have an external "provisioning" phase,
# where the initial session setup, and any further management of a
# session, is done by external tools. This saves resources on a small
# embedded device. The other extreme is to have the application perform
# the setup itself (e.g., clean the session, then re-create it on each
# restart). This example takes a middle ground between these two
# approaches: the initial session setup is done by the application, but
# if anything goes wrong, there's an external tool to clean the session.
if not c.connect(clean_session=False):
print("New session being set up")
c.subscribe(b"foo_topic")
while 1:
c.wait_msg()
c.disconnect()
|
51206
|
from moai.monads.human.pose.openpose import (
Split as OpenposeSplit,
JointMap as OpenposeJointMap
)
__all__ = [
'OpenposeSplit',
'OpenposeJointMap',
]
|
51224
|
import sys
import asyncio
import zmq
import zmq.asyncio
from zmq.auth import Authenticator
from zmq.auth.thread import _inherit_docstrings, ThreadAuthenticator, \
AuthenticationThread
# Copying code from zqm classes since no way to inject these dependencies
class MultiZapAuthenticator(Authenticator):
"""
`Authenticator` supports only one ZAP socket in a single process, this lets
you have multiple ZAP sockets
"""
count = 0
def __init__(self, context=None, encoding='utf-8', log=None):
MultiZapAuthenticator.count += 1
super().__init__(context=context, encoding=encoding, log=log)
def start(self):
"""Create and bind the ZAP socket"""
self.zap_socket = self.context.socket(zmq.REP)
self.zap_socket.linger = 1
zapLoc = 'inproc://zeromq.zap.{}'.format(MultiZapAuthenticator.count)
self.zap_socket.bind(zapLoc)
self.log.debug('Starting ZAP at {}'.format(zapLoc))
def stop(self):
"""Close the ZAP socket"""
if self.zap_socket:
self.log.debug(
'Stopping ZAP at {}'.format(self.zap_socket.LAST_ENDPOINT))
super().stop()
@_inherit_docstrings
class ThreadMultiZapAuthenticator(ThreadAuthenticator):
def start(self):
"""Start the authentication thread"""
# create a socket to communicate with auth thread.
self.pipe = self.context.socket(zmq.PAIR)
self.pipe.linger = 1
self.pipe.bind(self.pipe_endpoint)
authenticator = MultiZapAuthenticator(self.context, encoding=self.encoding,
log=self.log)
self.thread = AuthenticationThread(self.context, self.pipe_endpoint,
encoding=self.encoding, log=self.log,
authenticator=authenticator)
self.thread.start()
        # Event.wait: changed in Python 2.7; previously the method always returned None.
if sys.version_info < (2, 7):
self.thread.started.wait(timeout=10)
else:
if not self.thread.started.wait(timeout=10):
raise RuntimeError("Authenticator thread failed to start")
class AsyncioAuthenticator(MultiZapAuthenticator):
"""ZAP authentication for use in the asyncio IO loop"""
def __init__(self, context=None, loop=None):
super().__init__(context)
self.loop = loop or asyncio.get_event_loop()
self.__poller = None
self.__task = None
# TODO: Remove this commented method later
# @asyncio.coroutine
# def __handle_zap(self):
# while True:
# events = yield from self.__poller.poll()
# if self.zap_socket in dict(events):
# msg = yield from self.zap_socket.recv_multipart()
# self.handle_zap_message(msg)
async def __handle_zap(self):
while True:
events = await self.__poller.poll()
if self.zap_socket in dict(events):
msg = await self.zap_socket.recv_multipart()
self.handle_zap_message(msg)
def start(self):
"""Start ZAP authentication"""
super().start()
self.__poller = zmq.asyncio.Poller()
self.__poller.register(self.zap_socket, zmq.POLLIN)
self.__task = asyncio.ensure_future(self.__handle_zap())
def stop(self):
"""Stop ZAP authentication"""
if self.__task:
self.__task.cancel()
if self.__poller:
self.__poller.unregister(self.zap_socket)
self.__poller = None
super().stop()
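# A hedged usage sketch of AsyncioAuthenticator (not part of the original
# module). Addresses and credentials are placeholders; allow() and
# configure_plain() come from zmq.auth.Authenticator, which this class
# ultimately inherits from.
#
#   ctx = zmq.asyncio.Context()
#   auth = AsyncioAuthenticator(context=ctx)
#   auth.start()                       # binds the ZAP socket and starts polling
#   auth.allow("127.0.0.1")            # whitelist an address
#   auth.configure_plain(domain="*", passwords={"user": "secret"})
#   server = ctx.socket(zmq.REP)
#   server.plain_server = True         # requests on this socket go through ZAP
#   server.bind("tcp://127.0.0.1:5555")
#   ...
#   auth.stop()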
|
51227
|
import numpy as np
import unittest
import pytest
from pysph.base.particle_array import ParticleArray
import pysph.tools.mesh_tools as G
from pysph.base.utils import get_particle_array
# Data of a unit length cube
def cube_data():
points = np.array([[0., 0., 0.],
[0., 1., 0.],
[1., 1., 0.],
[1., 0., 0.],
[0., 0., 1.],
[0., 1., 1.],
[1., 0., 1.],
[1., 1., 1.]])
x_cube, y_cube, z_cube = points.T
cells = np.array([[0, 1, 2],
[0, 2, 3],
[0, 4, 5],
[0, 5, 1],
[0, 3, 6],
[0, 6, 4],
[4, 6, 7],
[4, 7, 5],
[3, 2, 7],
[3, 7, 6],
[1, 5, 7],
[1, 7, 2]])
normals = np.array([[0., 0., -1.],
[0., 0., -1.],
[-1., 0., 0.],
[-1., 0., 0.],
[0., -1., 0.],
[0., -1., 0.],
[0., 0., 1.],
[0., 0., 1.],
[1., 0., 0.],
[1., 0., 0.],
[0., 1., 0.],
[0., 1., 0.]])
vectors = np.zeros((len(cells), 3, 3))
for i, cell in enumerate(cells):
idx1, idx2, idx3 = cell
vector = np.array([[x_cube[idx1], y_cube[idx1], z_cube[idx1]],
[x_cube[idx2], y_cube[idx2], z_cube[idx2]],
[x_cube[idx3], y_cube[idx3], z_cube[idx3]]])
vectors[i] = vector
return x_cube, y_cube, z_cube, cells, normals, vectors
class TestGeometry(unittest.TestCase):
def test_in_triangle(self):
assert(G._in_triangle(0.5, 0.5, 0.0, 0.0, 1.5, 0.0, 0.0, 1.5) is True)
assert(G._in_triangle(1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0) is False)
def test_interp_2d(self):
# Check interpolation between two points on line y=x
dx = 0.1
r = G._interp_2d(np.array([0., 0.]), np.array([1., 1.]), dx)
# Check if all points satisfy y=x
np.testing.assert_array_almost_equal(
r[:, 0] - r[:, 1], np.zeros(r.shape[0]))
# Check if distance between consecutive points is lesser than dx
np.testing.assert_array_less(np.linalg.norm(r[1:] - r[0:-1], axis=1),
np.ones(r.shape[0] - 1) * dx)
def test_fill_triangle(self):
triangle = np.array([[0., 0., 0.],
[1., 0., 0.],
[0., 1., 0.]])
dx_triangle = 0.1
x, y, z = G._fill_triangle(triangle, dx_triangle)
EPS = np.finfo(float).eps
np.testing.assert_array_less(-x, np.zeros(x.shape[0]) + EPS)
np.testing.assert_array_less(-y, np.zeros(x.shape[0]) + EPS)
np.testing.assert_array_less(-(x + y), np.ones(x.shape[0]) + EPS)
np.testing.assert_almost_equal(z, np.zeros(x.shape[0]))
def test_fill_triangle_throws_zero_area_triangle_exception(self):
self.assertRaises(G.ZeroAreaTriangleException, G._fill_triangle,
np.zeros((3, 3)), 0.5)
def test_fill_triangle_throws_polygon_mesh_error(self):
self.assertRaises(G.PolygonMeshError, G._fill_triangle,
np.zeros((4, 3)), 0.5)
def test_get_points_from_mgrid(self):
"""Find neighbouring particles around a unit cube"""
h = 0.1
x_cube, y_cube, z_cube, cells, normals, vectors = cube_data()
x, y, z, x_list, y_list, z_list, vectors = \
G._get_surface_mesh(x_cube, y_cube, z_cube, cells, h, uniform=True)
pa_mesh = ParticleArray(name='mesh', x=x, y=y, z=z, h=h)
offset = h
x_grid, y_grid, z_grid = np.meshgrid(
np.arange(x.min() - offset, x.max() + offset, h),
np.arange(y.min() - offset, y.max() + offset, h),
np.arange(z.min() - offset, z.max() + offset, h))
pa_grid = ParticleArray(name='grid', x=x_grid, y=y_grid, z=z_grid, h=h)
x_grid, y_grid, z_grid = G.get_points_from_mgrid(
pa_grid, pa_mesh, x_list, y_list, z_list, 1, h, vectors, normals
)
for i in range(x.shape[0]):
assert((x[i] ** 2 + y[i] ** 2 + z[i] ** 2) <= 4)
def _cube_assert(self, x, y, z, h):
"""Check if x,y,z lie within surface of thickness `h` of a unit cube"""
def surface1(x, y, z): return min(abs(x), abs(1 - x)) < h and \
y > -h and y < 1 + h and z > -h and z < 1 + h
def on_surface(x, y, z): return surface1(x, y, z) or \
surface1(y, x, z) or surface1(z, x, y)
for i in range(x.shape[0]):
assert on_surface(x[i], y[i], z[i])
def test_get_surface_mesh(self):
"""Check if mesh is generated correctly for unit cube"""
x_cube, y_cube, z_cube, cells, normals, vectors = cube_data()
x, y, z = G._get_surface_mesh(x_cube, y_cube, z_cube, cells, 0.1)
h = np.finfo(float).eps
self._cube_assert(x, y, z, h)
def test_get_surface_points(self):
"""Check if surface is generated correctly for unit cube"""
h = 0.1
x_cube, y_cube, z_cube, cells, normals, vectors = cube_data()
x, y, z = G.surface_points(x_cube, y_cube, z_cube, cells, h)
self._cube_assert(x, y, z, h)
def test_get_surface_points_uniform(self):
"""Check if uniform surface is generated correctly for unit cube"""
h = 0.1
x_cube, y_cube, z_cube, cells, normals, vectors = cube_data()
x, y, z = G.surf_points_uniform(x_cube, y_cube, z_cube,
cells, normals, 1.0, 1.0)
self._cube_assert(x, y, z, h)
def test_prism(self):
tri_normal = np.array([0, -1, 0])
tri_points = np.array([[0, 0, 0], [1, 0, 0], [0, 0, 1]])
h = 1/1.5
prism_normals, prism_points, prism_face_centres = \
G.prism(tri_normal, tri_points, h)
assert np.array([-1, 0, 0]) in prism_normals
assert np.array([0, 1, 0]) in prism_points
assert np.array([0.5, 0.5, 0]) in prism_face_centres
if __name__ == "__main__":
unittest.main()
|
51268
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
import torch
import numpy as np
from torch.cuda.amp import autocast, GradScaler
from src.opts import opt
from src.dataset import Dataset
from src.losses import CtdetLoss
from src.utils.logger import Logger
from src.utils.average_meter import AverageMeter, TimeMeter
from src.model import get_model, load_model, save_model
def train(model, train_loader, criterion, optimizer, logger, opt, epoch, scaler, time_stats):
model.train()
avg_loss_stats = {l: AverageMeter()
for l in ['loss', 'hm_loss', 'wh_loss', 'off_loss']}
for iter_id, batch in enumerate(train_loader):
# to cuda
for k in batch:
batch[k] = batch[k].to(device=opt.device, non_blocking=True)
# amp
with autocast():
output = model(batch['input'])
loss_stats = criterion(output, batch)
loss = loss_stats['loss'].mean()
optimizer.zero_grad()
scaler.scale(loss).backward()
scaler.step(optimizer)
scaler.update()
# else:
# # no amp
# output = model(batch['input'])
# loss_stats = criterion(output, batch)
# loss = loss_stats['loss'].mean()
# optimizer.zero_grad()
# loss.backward()
# optimizer.step()
info = f'train : [{epoch}][{iter_id}/{len(train_loader)}] |'
for l in avg_loss_stats:
avg_loss_stats[l].update(
loss_stats[l].mean().item(), batch['input'].size(0))
info += f'|{l} {avg_loss_stats[l].avg:.4f} '
time_stats.update(epoch, iter_id)
info += f'|left_time: {time_stats.left_time:.1f} hour'
# log
if iter_id % 100 == 0:
logger.write(info)
def val(model, val_loader, criterion, logger, opt, epoch):
with torch.no_grad():
model.eval()
torch.cuda.empty_cache()
avg_loss_stats = {l: AverageMeter()
for l in ['loss', 'hm_loss', 'wh_loss', 'off_loss']}
for iter_id, batch in enumerate(val_loader):
for k in batch:
batch[k] = batch[k].to(device=opt.device, non_blocking=True)
output = model(batch['input'])
loss_stats = criterion(output, batch)
info = f'val : [{epoch}][{iter_id}/{len(val_loader)}] |'
for l in avg_loss_stats:
avg_loss_stats[l].update(
loss_stats[l].mean().item(), batch['input'].size(0))
info += f'|{l} {avg_loss_stats[l].avg:.4f} '
# log
if iter_id % 100 == 0:
logger.write(info)
def main():
torch.manual_seed(317)
    torch.backends.cudnn.benchmark = True
train_logger = Logger(opt, "train")
val_logger = Logger(opt, "val")
start_epoch = 0
print('Creating model...')
model = get_model(opt.arch, opt.heads).to(opt.device)
optimizer = torch.optim.Adam(model.parameters(), opt.lr)
criterion = CtdetLoss(opt)
print('Loading model...')
if opt.load_model != '':
model, optimizer, start_epoch = load_model(
model, opt.load_model, optimizer, opt.lr, opt.lr_step)
model = torch.nn.DataParallel(model)
# amp
scaler = GradScaler()
print('Setting up data...')
train_dataset = Dataset(opt, 'train')
val_dataset = Dataset(opt, 'val')
train_loader = torch.utils.data.DataLoader(
train_dataset,
batch_size=opt.batch_size,
shuffle=True,
num_workers=16,
pin_memory=True,
drop_last=True
)
val_loader = torch.utils.data.DataLoader(
val_dataset,
batch_size=1,
shuffle=False,
num_workers=1,
pin_memory=True
)
# cal left time
time_stats = TimeMeter(opt.num_epochs, len(train_loader))
for epoch in range(start_epoch + 1, opt.num_epochs + 1):
print('train...')
train(model, train_loader, criterion, optimizer,
train_logger, opt, epoch, scaler, time_stats)
if epoch % opt.val_intervals == 0:
print('val...')
val(model, val_loader, criterion, val_logger, opt, epoch)
save_model(os.path.join(opt.save_dir, f'model_{epoch}.pth'),
epoch, model, optimizer)
# update learning rate
if epoch in opt.lr_step:
lr = opt.lr * (0.1 ** (opt.lr_step.index(epoch) + 1))
print('Drop LR to', lr)
for param_group in optimizer.param_groups:
param_group['lr'] = lr
# without optimizer
save_model(os.path.join(opt.save_dir, 'model_final.pth'), epoch, model)
if __name__ == '__main__':
main()
|
51357
|
import os
import itertools
import re
from typing import List, Optional, Tuple, Dict, Callable, Any, NamedTuple
from string import Template
from typing import List
from tokenizers import Tokenizer, Encoding
dirname = os.path.dirname(__file__)
css_filename = os.path.join(dirname, "visualizer-styles.css")
with open(css_filename) as f:
css = f.read()
class Annotation:
start: int
end: int
    label: str
def __init__(self, start: int, end: int, label: str):
self.start = start
self.end = end
self.label = label
AnnotationList = List[Annotation]
PartialIntList = List[Optional[int]]
class CharStateKey(NamedTuple):
token_ix: Optional[int]
anno_ix: Optional[int]
class CharState:
char_ix: Optional[int]
def __init__(self, char_ix):
self.char_ix = char_ix
self.anno_ix: Optional[int] = None
self.tokens: List[int] = []
@property
def token_ix(self):
return self.tokens[0] if len(self.tokens) > 0 else None
@property
def is_multitoken(self):
"""
BPE tokenizers can output more than one token for a char
"""
return len(self.tokens) > 1
def partition_key(self) -> CharStateKey:
return CharStateKey(
token_ix=self.token_ix,
anno_ix=self.anno_ix,
)
class Aligned:
pass
class EncodingVisualizer:
"""
Build an EncodingVisualizer
Args:
tokenizer (:class:`~tokenizers.Tokenizer`):
A tokenizer instance
default_to_notebook (:obj:`bool`):
Whether to render html output in a notebook by default
annotation_converter (:obj:`Callable`, `optional`):
An optional (lambda) function that takes an annotation in any format and returns
an Annotation object
"""
    unk_token_regex = re.compile(r"(.{1}\b)?(unk|oov)(\b.{1})?", flags=re.IGNORECASE)
def __init__(
self,
tokenizer: Tokenizer,
default_to_notebook: bool = True,
annotation_converter: Optional[Callable[[Any], Annotation]] = None,
):
if default_to_notebook:
try:
from IPython.core.display import display, HTML
except ImportError as e:
raise Exception(
"""We couldn't import IPython utils for html display.
Are you running in a notebook?
You can also pass `default_to_notebook=False` to get back raw HTML
"""
)
self.tokenizer = tokenizer
self.default_to_notebook = default_to_notebook
        self.annotation_converter = annotation_converter
def __call__(
self,
text: str,
annotations: AnnotationList = [],
default_to_notebook: Optional[bool] = None,
) -> Optional[str]:
"""
Build a visualization of the given text
Args:
text (:obj:`str`):
The text to tokenize
annotations (:obj:`List[Annotation]`, `optional`):
                An optional list of annotations of the text. They can either be an Annotation class
or anything else if you instantiated the visualizer with a converter function
default_to_notebook (:obj:`bool`, `optional`, defaults to `False`):
If True, will render the html in a notebook. Otherwise returns an html string.
Returns:
The HTML string if default_to_notebook is False, otherwise (default) returns None and
renders the HTML in the notebook
"""
final_default_to_notebook = self.default_to_notebook
if default_to_notebook is not None:
final_default_to_notebook = default_to_notebook
if final_default_to_notebook:
try:
from IPython.core.display import display, HTML
except ImportError as e:
raise Exception(
"""We couldn't import IPython utils for html display.
Are you running in a notebook?"""
)
        if self.annotation_converter is not None:
            annotations = list(map(self.annotation_converter, annotations))
encoding = self.tokenizer.encode(text)
html = EncodingVisualizer.__make_html(text, encoding, annotations)
if final_default_to_notebook:
display(HTML(html))
else:
return html
@staticmethod
def calculate_label_colors(annotations: AnnotationList) -> Dict[str, str]:
"""
Generates a color palette for all the labels in a given set of annotations
Args:
annotations (:obj:`Annotation`):
A list of annotations
Returns:
:obj:`dict`: A dictionary mapping labels to colors in HSL format
"""
if len(annotations) == 0:
return {}
labels = set(map(lambda x: x.label, annotations))
num_labels = len(labels)
h_step = int(255 / num_labels)
if h_step < 20:
h_step = 20
s = 32
l = 64
h = 10
colors = {}
for label in sorted(
labels
): # sort so we always get the same colors for a given set of labels
            colors[label] = f"hsl({h},{s}%,{l}%)"
h += h_step
return colors
@staticmethod
def consecutive_chars_to_html(
consecutive_chars_list: List[CharState],
text: str,
encoding: Encoding,
):
"""
Converts a list of "consecutive chars" into a single HTML element.
Chars are consecutive if they fall under the same word, token and annotation.
        The CharState class has a "partition_key" method that makes it easy to
        check whether two chars are consecutive.
Args:
consecutive_chars_list (:obj:`List[CharState]`):
A list of CharStates that have been grouped together
text (:obj:`str`):
The original text being processed
encoding (:class:`~tokenizers.Encoding`):
The encoding returned from the tokenizer
Returns:
:obj:`str`: The HTML span for a set of consecutive chars
"""
first = consecutive_chars_list[0]
if first.char_ix is None:
            # it's a special token
stoken = encoding.tokens[first.token_ix]
# special tokens are represented as empty spans. We use the data attribute and css
# magic to display it
return f'<span class="special-token" data-stoken={stoken}></span>'
# We're not in a special token so this group has a start and end.
last = consecutive_chars_list[-1]
start = first.char_ix
end = last.char_ix + 1
span_text = text[start:end]
css_classes = [] # What css classes will we apply on the resulting span
data_items = {} # What data attributes will we apply on the result span
if first.token_ix is not None:
# We can either be in a token or not (e.g. in white space)
css_classes.append("token")
if first.is_multitoken:
css_classes.append("multi-token")
if first.token_ix % 2:
# We use this to color alternating tokens.
# A token might be split by an annotation that ends in the middle of it, so this
# lets us visually indicate a consecutive token despite its possible splitting in
# the html markup
css_classes.append("odd-token")
else:
# Like above, but a different color so we can see the tokens alternate
css_classes.append("even-token")
if (
EncodingVisualizer.unk_token_regex.search(encoding.tokens[first.token_ix])
is not None
):
# This is a special token that is in the text. probably UNK
css_classes.append("special-token")
# TODO is this the right name for the data attribute ?
data_items["stok"] = encoding.tokens[first.token_ix]
else:
# In this case we are looking at a group/single char that is not tokenized.
# e.g. white space
css_classes.append("non-token")
css = f'''class="{' '.join(css_classes)}"'''
data = ""
for key, val in data_items.items():
data += f' data-{key}="{val}"'
return f"<span {css} {data} >{span_text}</span>"
@staticmethod
def __make_html(text: str, encoding: Encoding, annotations: AnnotationList) -> str:
char_states = EncodingVisualizer.__make_char_states(text, encoding, annotations)
current_consecutive_chars = [char_states[0]]
prev_anno_ix = char_states[0].anno_ix
spans = []
label_colors_dict = EncodingVisualizer.calculate_label_colors(annotations)
cur_anno_ix = char_states[0].anno_ix
if cur_anno_ix is not None:
# If we started in an annotation make a span for it
anno = annotations[cur_anno_ix]
label = anno.label
color = label_colors_dict[label]
spans.append(f'<span class="annotation" style="color:{color}" data-label="{label}">')
for cs in char_states[1:]:
cur_anno_ix = cs.anno_ix
if cur_anno_ix != prev_anno_ix:
# If we've transitioned in or out of an annotation
spans.append(
# Create a span from the current consecutive characters
EncodingVisualizer.consecutive_chars_to_html(
current_consecutive_chars,
text=text,
encoding=encoding,
)
)
current_consecutive_chars = [cs]
if prev_anno_ix is not None:
# if we transitioned out of an annotation close it's span
spans.append("</span>")
if cur_anno_ix is not None:
# If we entered a new annotation make a span for it
anno = annotations[cur_anno_ix]
label = anno.label
color = label_colors_dict[label]
spans.append(
f'<span class="annotation" style="color:{color}" data-label="{label}">'
)
prev_anno_ix = cur_anno_ix
if cs.partition_key() == current_consecutive_chars[0].partition_key():
                # If the current character is in the same "group" as the previous one
current_consecutive_chars.append(cs)
else:
# Otherwise we make a span for the previous group
spans.append(
EncodingVisualizer.consecutive_chars_to_html(
current_consecutive_chars,
text=text,
encoding=encoding,
)
)
                # And reset the consecutive_char_list to form a new group
current_consecutive_chars = [cs]
# All that's left is to fill out the final span
# TODO I think there is an edge case here where an annotation's span might not close
spans.append(
EncodingVisualizer.consecutive_chars_to_html(
current_consecutive_chars,
text=text,
encoding=encoding,
)
)
res = HTMLBody(spans) # Send the list of spans to the body of our html
return res
@staticmethod
def __make_anno_map(text: str, annotations: AnnotationList) -> PartialIntList:
"""
Args:
text (:obj:`str`):
The raw text we want to align to
annotations (:obj:`AnnotationList`):
A (possibly empty) list of annotations
Returns:
            A list of length len(text) whose entry at index i is None if there is no annotation
            on character i, or k, the index of the annotation that covers index i (where k is an
            index into the list of annotations)
"""
annotation_map = [None] * len(text)
for anno_ix, a in enumerate(annotations):
for i in range(a.start, a.end):
annotation_map[i] = anno_ix
return annotation_map
@staticmethod
def __make_char_states(
text: str, encoding: Encoding, annotations: AnnotationList
) -> List[CharState]:
"""
        For each character in the original text, we emit a tuple representing its "state":
* which token_ix it corresponds to
* which word_ix it corresponds to
* which annotation_ix it corresponds to
Args:
text (:obj:`str`):
The raw text we want to align to
annotations (:obj:`List[Annotation]`):
A (possibly empty) list of annotations
encoding: (:class:`~tokenizers.Encoding`):
The encoding returned from the tokenizer
Returns:
:obj:`List[CharState]`: A list of CharStates, indicating for each char in the text what
            its state is
"""
annotation_map = EncodingVisualizer.__make_anno_map(text, annotations)
# Todo make this a dataclass or named tuple
char_states: List[CharState] = [CharState(char_ix) for char_ix in range(len(text))]
for token_ix, token in enumerate(encoding.tokens):
offsets = encoding.token_to_chars(token_ix)
if offsets is not None:
start, end = offsets
for i in range(start, end):
char_states[i].tokens.append(token_ix)
for char_ix, anno_ix in enumerate(annotation_map):
char_states[char_ix].anno_ix = anno_ix
return char_states
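# A hedged usage sketch of EncodingVisualizer (not part of the upstream module).
# The tokenizer name is a placeholder and Tokenizer.from_pretrained needs
# network access; the annotation offsets are illustrative only.
#
#   tokenizer = Tokenizer.from_pretrained("bert-base-uncased")
#   visualizer = EncodingVisualizer(tokenizer, default_to_notebook=False)
#   annotations = [Annotation(start=0, end=12, label="ORG")]
#   html = visualizer("Hugging Face is based in NYC", annotations=annotations)
#   # `html` is a full HTML document string because default_to_notebook=False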
def HTMLBody(children: List[str], css_styles=css) -> str:
"""
Generates the full html with css from a list of html spans
Args:
children (:obj:`List[str]`):
A list of strings, assumed to be html elements
css_styles (:obj:`str`, `optional`):
Optional alternative implementation of the css
Returns:
:obj:`str`: An HTML string with style markup
"""
children_text = "".join(children)
return f"""
<html>
<head>
<style>
{css_styles}
</style>
</head>
<body>
<div class="tokenized-text" dir=auto>
{children_text}
</div>
</body>
</html>
"""
|
51372
|
import os
import re
import pyblish.api
from avalon import aftereffects
class CollectExtensionVersion(pyblish.api.ContextPlugin):
""" Pulls and compares version of installed extension.
    It is recommended to use the same extension as provided with the Openpype code.
    Please use Anastasiy’s Extension Manager or ZXPInstaller to update the
    extension in case of an error.
You can locate extension.zxp in your installed Openpype code in
`repos/avalon-core/avalon/aftereffects`
"""
    # This technically should be a validator, but other collectors might be
    # impacted by the use of an obsolete extension, so a collector that runs
    # first was chosen.
order = pyblish.api.CollectorOrder - 0.5
label = "Collect extension version"
hosts = ["aftereffects"]
optional = True
active = True
def process(self, context):
installed_version = aftereffects.stub().get_extension_version()
if not installed_version:
raise ValueError("Unknown version, probably old extension")
manifest_url = os.path.join(os.path.dirname(aftereffects.__file__),
"extension", "CSXS", "manifest.xml")
if not os.path.exists(manifest_url):
self.log.debug("Unable to locate extension manifest, not checking")
return
expected_version = None
with open(manifest_url) as fp:
content = fp.read()
found = re.findall(r'(ExtensionBundleVersion=")([0-9\.]+)(")',
content)
if found:
expected_version = found[0][1]
if expected_version != installed_version:
msg = (
"Expected version '{}' found '{}'\n Please update"
" your installed extension, it might not work properly."
).format(expected_version, installed_version)
raise ValueError(msg)
|
51391
|
import requests
url = 'http://127.0.0.1:9000/api/comments'
resp = requests.post(
url,
data={
"name": "wnn",
"email": "<EMAIL>",
"comments": "comment",
"page_id":"2"
}
)
print(resp.text)
|
51427
|
from django import template
from django_extras.utils import humanize
register = template.Library()
@register.filter(is_safe=True)
def describe_seconds(value):
"""
Convert a seconds value into a human readable (ie week, day, hour) value.
:param value: integer value of the number of seconds.
:return: a string with the humanized value.
"""
return humanize.describe_seconds(value)
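# A hedged usage sketch (not part of the original module). Called directly,
# the filter just delegates to the helper it wraps; in a template it is applied
# after loading the tag library that registers it (the library name is not
# shown here):
#
#   >>> describe_seconds(90061)
#   # -> a human-readable string along the lines of "1 day, 1 hour, ...";
#   #    the exact wording comes from django_extras.utils.humanize
#   # Template usage: {{ uptime_seconds|describe_seconds }}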
|
51516
|
from django.urls import path
from . import views
urlpatterns = [
path(
"model-attrs/<int:content_type_id>/",
views.model_attrs,
name="django_signals_model_attrs",
),
]
|
51550
|
from glacier import glacier
def f1(name: str, verbose: bool = False) -> None:
pass
def f2(name: str, verbose: bool = False) -> None:
pass
def f3(name: str, verbose: bool = False) -> None:
pass
if __name__ == '__main__':
glacier({
'run': f1,
'build': f2,
'test': f3,
})
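# A hedged note (not part of the original script): glacier turns the dict keys
# into subcommands, so the script would be invoked roughly as
#
#   python this_script.py run <name> --verbose
#   python this_script.py build <name>
#
# where the exact mapping of `name` and `verbose` to positional arguments or
# options follows glacier's own conventions; the script filename is a placeholder.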
|
51565
|
import pickle
import torch
import numpy as np
from transformers import RobertaModel, RobertaTokenizerFast
from retriever_train.dataset_config import DATASET_CONFIG, BASE_CONFIG
from retriever_train.data_utils import Instance
from retriever_train.utils import init_parent_model
class PrefixSuffixWrapper(object):
def __init__(self, model_path, config_only=False):
self.model_path = model_path
self.args = torch.load("{}/training_args.bin".format(self.model_path))
if "prefix_truncate_dir" not in self.args:
            # hack for backward compatibility
self.args.prefix_truncate_dir = "left"
self.args.device = torch.cuda.current_device()
if self.args.data_dir in DATASET_CONFIG:
self.config = DATASET_CONFIG[self.args.data_dir]
else:
self.config = BASE_CONFIG
print(self.config)
if not config_only:
self.model, self.tokenizer = init_parent_model(checkpoint_dir=model_path,
args=self.args,
model_class=RobertaModel,
tokenizer_class=RobertaTokenizerFast)
def preprocess_sentences(self, contexts, vectors_type="prefix"):
args = self.args
tokenizer = self.tokenizer
instances = []
all_context_ids = []
for context in contexts:
context = " ".join(context.split())
context_ids = tokenizer(context)["input_ids"]
if vectors_type == "suffix":
placeholder_prefix_ids = tokenizer("left context <mask> right context")["input_ids"]
all_context_ids.append([placeholder_prefix_ids, context_ids])
else:
all_context_ids.append([context_ids, context_ids])
instance = Instance(
self.args, self.config, all_context_ids
)
instance.preprocess(tokenizer)
return instance
def encode_batch(self, contexts, vectors_type="prefix"):
args = self.args
instance = self.preprocess_sentences(contexts, vectors_type)
input_tensors = {
"prefices": torch.tensor(instance.prefices).unsqueeze(0),
"prefix_masks": torch.tensor(instance.prefix_masks).unsqueeze(0),
"suffices": torch.tensor(instance.suffices).unsqueeze(0),
"suffix_masks": torch.tensor(instance.suffix_masks).unsqueeze(0)
}
return self.model.get_vectors(input_tensors, vectors_type=vectors_type)
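# A hedged usage sketch (not part of the original module). The checkpoint path
# is a placeholder and a CUDA device is assumed, since the constructor uses
# torch.cuda.current_device():
#
#   wrapper = PrefixSuffixWrapper("/path/to/retriever_checkpoint")
#   prefix_vectors = wrapper.encode_batch(["some input sentence"],
#                                         vectors_type="prefix")
#   suffix_vectors = wrapper.encode_batch(["a candidate continuation"],
#                                         vectors_type="suffix")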
|
51590
|
import time
import base64
import hashlib
import hmac
from requests.auth import AuthBase
class CoinbaseProAuth(AuthBase):
"""Request authorization.
Provided by Coinbase Pro:
https://docs.pro.coinbase.com/?python#signing-a-message
"""
def __init__(self, api_key, secret_key, passphrase):
self.api_key = api_key
self.secret_key = secret_key
self.passphrase = passphrase
def __call__(self, request):
timestamp = str(time.time())
message = timestamp + request.method + request.path_url + (request.body or "")
message = message.encode("ascii")
hmac_key = base64.b64decode(self.secret_key)
signature = hmac.new(hmac_key, message, hashlib.sha256)
signature_b64 = base64.b64encode(signature.digest())
request.headers.update(
{
"CB-ACCESS-SIGN": signature_b64,
"CB-ACCESS-TIMESTAMP": timestamp,
"CB-ACCESS-KEY": self.api_key,
"CB-ACCESS-PASSPHRASE": self.passphrase,
"Content-Type": "application/json",
}
)
return request
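# A hedged usage sketch (not part of the original module). Credentials are
# placeholders and the endpoint follows the Coinbase Pro docs referenced in the
# class docstring:
#
#   import requests
#   auth = CoinbaseProAuth("API_KEY", "API_SECRET", "PASSPHRASE")
#   resp = requests.get("https://api.pro.coinbase.com/accounts", auth=auth)
#   print(resp.json())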
|
51599
|
from django.contrib import admin
from . import models
class SightingAdmin(admin.ModelAdmin):
list_display = ('superhero', 'power', 'location', 'sighted_on')
date_hierarchy = 'sighted_on'
search_fields = ['superhero']
ordering = ['superhero']
admin.site.register(models.Origin)
admin.site.register(models.Location)
admin.site.register(models.Sighting, SightingAdmin)
|
51601
|
expected_output = {
"tag": {
"VRF1": {
"hostname_db": {
"hostname": {
"7777.77ff.eeee": {"hostname": "R7", "level": 2},
"2222.22ff.4444": {"hostname": "R2", "local_router": True},
}
}
},
"test": {
"hostname_db": {
"hostname": {
"9999.99ff.3333": {"hostname": "R9", "level": 2},
"8888.88ff.1111": {"hostname": "R8", "level": 2},
"7777.77ff.eeee": {"hostname": "R7", "level": 2},
"5555.55ff.aaaa": {"hostname": "R5", "level": 2},
"3333.33ff.6666": {"hostname": "R3", "level": 2},
"1111.11ff.2222": {"hostname": "R1", "level": 1},
"2222.22ff.4444": {"hostname": "R2", "local_router": True},
}
}
},
}
}
|
51656
|
def howdoyoudo():
global helvar
if helvar <= 2:
i01.mouth.speak("I'm fine thank you")
helvar += 1
elif helvar == 3:
i01.mouth.speak("you have already said that at least twice")
i01.moveArm("left",43,88,22,10)
i01.moveArm("right",20,90,30,10)
i01.moveHand("left",0,0,0,0,0,119)
i01.moveHand("right",0,0,0,0,0,119)
sleep(2)
relax()
helvar += 1
elif helvar == 4:
i01.mouth.speak("what is your problem stop saying how do you do all the time")
i01.moveArm("left",30,83,22,10)
i01.moveArm("right",40,85,30,10)
i01.moveHand("left",130,180,180,180,180,119)
i01.moveHand("right",130,180,180,180,180,119)
sleep(2)
relax()
helvar += 1
elif helvar == 5:
i01.mouth.speak("i will ignore you if you say how do you do one more time")
unhappy()
sleep(4)
relax()
helvar += 1
|
51702
|
import aoareader as reader
import torch
import time
import argparse
import os
from preprocess import get_stories, vectorize_stories
parser = argparse.ArgumentParser(description="test.py")
parser.add_argument('-testdata', default='data/test.txt.pt',
                    help='Path to test.txt.pt; it will be used if it exists.')
parser.add_argument('-dict', default="data/dict.pt",
help='Path to the dictionary file, default value: data/dict.pt')
parser.add_argument('-out', default='data/result.txt',
help='output file name.')
parser.add_argument('-model', required=True, help='path to the saved model.')
testopt = parser.parse_args()
print(testopt)
def load_testdata(testfile, vocab_dict, with_answer=True):
if os.path.exists(testfile + '.pt'):
return torch.load(testfile + '.pt')
else:
testd = {}
with open(testfile, 'r') as tf:
tlines = tf.readlines()
test_stories = get_stories(tlines, with_answer=with_answer)
testd['documents'], testd['querys'], testd['answers'], testd['candidates'] = vectorize_stories(test_stories, vocab_dict)
torch.save(testd, testfile + '.pt')
return testd
def evaluate(model, data, vocab_dict):
def acc(answers, pred_answers):
num_correct = (answers == pred_answers).sum().squeeze().data[0]
return num_correct
model.eval()
answers = []
total_correct = 0
total = 0
for i in range(len(data)):
(batch_docs, batch_docs_len, doc_mask), (batch_querys, batch_querys_len, query_mask), batch_answers , candidates = data[i]
pred_answers, _ = model(batch_docs, batch_docs_len, doc_mask,
batch_querys, batch_querys_len, query_mask,
candidates=candidates, answers=batch_answers)
answers.extend(pred_answers.data)
num_correct = acc(batch_answers, pred_answers)
total_in_minibatch = batch_answers.size(0)
total_correct += num_correct
total += total_in_minibatch
del pred_answers
    print("Evaluating on test set:\nAccuracy {:.2%}".format(total_correct / total))
return vocab_dict.convert2word(answers)
def main():
print("Loading dict", testopt.dict)
vocab_dict = torch.load(testopt.dict)
print("Loading test data")
test_data = torch.load(testopt.testdata)
print("Loading model from ", testopt.model)
ckp = torch.load(testopt.model)
opt = ckp['opt']
model_state = ckp['model']
if opt.gpu:
torch.cuda.set_device(opt.gpu)
test_dataset = reader.Dataset(test_data, opt.batch_size, True, volatile=True)
print(' * vocabulary size = %d' %
(vocab_dict.size()))
print(' * number of test samples. %d' %
len(test_data['candidates']))
print(' * maximum batch size. %d' % opt.batch_size)
print('Building model...')
model = reader.AoAReader(vocab_dict, dropout_rate=opt.dropout, embed_dim=opt.embed_size, hidden_dim=opt.gru_size)
# no way on CPU
model.cuda()
# load state
model.load_state_dict(model_state)
print('Evaluate on test data')
    answers = evaluate(model, test_dataset, vocab_dict)
with open(testopt.out, 'w') as out:
print('\n'.join(answers), file=out)
if __name__ == '__main__':
main()
|
51719
|
STRING_51_CHARS = "SFOTYFUZTMDSOULXMKVFDOBQWNBAVGANMVLXQQZZQZQHBLJRZNY"
STRING_301_CHARS = (
"ZFOMVKXETILJKBZPVKOYAUPNYWWWUICNEVXVPWNAMGCNHDBRMATGPMUHUZHUJKFWWLXBQXVDNCGJHAPKEK"
"DZCXKBXEHWCWBYDIGNYXTOFWWNLPBTVIGTNQKIQDHUAHZPWQDKKCHERBYKLAUOOKJXJJLGOPSCRVEHCOAD"
"BFYKJTXHMPPYWQVXCVGNNSXLNIHVKTVMEOIRXQDPLHIDZBAHUEDWXKXILEBOLILOYGZLNGCNXKWMFJWYYI"
"PIDUKJVGKTUERTPRMMMVZNAAOMZJFXFSEENCAMBOUJMYXTPHJEOPKDB"
)
STRING_3001_CHARS = (
"<KEY>"
"<KEY>BPRALVWQEYTFBK<KEY>RALDRZHKPGTWZAXOUFQJKOGTMYSFEDBEQQXIGKZMXNKDCEN"
"LSVHNGWVCIDMNSIZTBWBBVUMLPHRUCIZLZBFEGNFXZNJEZBUTNHNCYWWYSJSJDNOPPGHUPZLPJWDKEATZO"
"UGKZEGFTFBGZDNRITDFBDJLYDGETUHBDGFEELBJBDMSRBVFPXMRJXWULONCZRZZBNFOPARFNXPQONKEIKG"
"QDPJWCMGYSEIBAOLJNWPJVUSMJGCSQBLGZCWXJOYJHIZMNFMTLUQFGEBOONOZMGBWORFEUGYIUJAKLVAJZ"
"FTNOPOZNMUJPWRMGPKNQSBMZQRJXLRQJPYYUXLFUPICAFTXDTQIUOQRCSLWPHHUZAOPVTBRCXWUIXMFGYT"
"RBKPWJJXNQPLIAZAOKIMDWCDZABPLNOXYOZZBTHSDIPXXBKXKOSYYCITFSMNVIOCNGEMRKRBPCLBOCXBZQ"
"VVWKNJBPWQNJOJWAGAIBOBFRVDWLXVBLMBSXYLOAWMPLKJOVHABNNIFTKTKBIIBOSHYQZRUFPPPRDQPMUV"
"WMSWBLRUHKEMUFHIMZRUNNITKWYIWRXYPGFPXMNOABRWXGQFCWOYMMBYRQQLOIBFENIZBUIWLMDTIXCPXW"
"NNHBSRPSMCQIMYRCFCPLQQGVOHYZOUGFEXDTOETUKQAXOCNGYBYPYWDQHYOKPCCORGRNHXZAA<KEY>"
"CM<KEY>"
"<KEY>"
"OLHPFFSWTZGYPAZJXRRPATWXKRDFQJRAEOBFNIWVZDKLNYXUFBOAWSDSKFYYRTADBBYHEWNZSTDXAAOQCD"
"WARSJZONQXRACMNBXZSEWZYBWADNDVRXBNJPJZQUNDYLBASCLCPFJWAMJUQAHBUZYDTIQPBPNJVVOHISZP"
"VGBDNXFIHYCABTSVNVILZUPPZXMPPZVBRTRHDGHTXXLBIYTMRDOUBYBVHVVKQAXAKISFJNUTRZKOCACJAX"
"ZXRRKMFOKYBHFUDBIXFAQSNUTYFNVQNGYWPJZGTLQUMOWXKKTUZGOUXAOVLQMMNKKECQCCOBNPPPXZYWZU"
"WHLHZQDIETDDPXWTILXGAYJKPHBXPLRFDPDSHFUPOIWRQDWQQNARPHPVKJPXZGGXOUVBYZSLUPVIJKWKNF"
"WMFKWYSYJJCCSCALMVPYIPHDKRXOWTUAYJFTAANCTVYDNSSIHGCWGKLDHFFBFSIFBMGHHFHZQSWOWZXOUW"
"PKNICGXPFMFIESHPDDMGSSWGBIAQVBANHLGDBYENRLSUARJXLQWPMOUSUKIIVXICBJPSWOEZPEUAJSLITV"
"XEQWSRENUJRJHPLBPFMBRPKGQNSYFWVLFLSQGGETKDUGYOLNFSMRVAZLQOAEKCUGNFEXRUDYSKBOQPYJAH"
"QHEIMSAAMTTYVJTHZDGQEITLERRYYQCTEQPTYQPHLMBDPCZZNNJYLGAGNXONCTIBSXEHXPYWBCTEEZLIYI"
"FMPYONXRVLSGZOEDZIMVDDPRXBKCKEPHOVLRBSPKMLZPXNRZVSSSYAOMGSVJODUZAJDYLGUZAFJMCOVGQX"
"ZUWQJENTEWQRFZYQTVEAHFQUWBUCFWHGRTMNQQFSPKKYYUBJVXKFQCCMBNGWNTRFGFKBFWTTPNDTGGWTAK"
"EOTXUPGFXOVWTOERFQSEZWVUYMGHVBQZIKIBJCNMKTZANNNOVMYTFLQYVNKTVZHFUJTPWNQWRYKGMYRYDC"
"WNTCUCYJCWXMMOJXUJSDWJKTTYOBFJFLBUCECGTVWKELCBDIKDUDOBLZLHYJQTVHXSUAFHDFDMETLHHEEJ"
"XJYWEOTXAUOZARSSQTBBXULKBBSTQHMJAAOUDIQCCETFWAINYIJCGXCILMDCAUYDMNZBDKIPVRCKCYKOIG"
"JHBLUHPOLDBWREFAZVEFFSOQQHMCXQYCQGMBHYKHJDBZXRAXLVZNYQXZEQYRSZHKKGCSOOEGNPFZDNGIMJ"
"QCXAEWWDYIGTQMJKBTMGSJAJCKIODCAEXVEGYCUBEEGCMARPJIKNAROJHYHKKTKGKKRVVSVYADCJXGSXAR"
"KGOUSUSZGJGFIKJDKJUIRQVSAHSTBCVOWZJDCCBWNNCBIYTCNOUPEYACCEWZNGETBTDJWQIEWRYIQXOZKP"
"ULDPCINLDFFPNORJHOZBSSYPPYNZTLXBRFZGBECKTTNVIHYNKGBXTTIXIKRBGVAPNWBPFNCGWQMZHBAHBX"
"MFEPSWVBUDLYDIVLZFHXTQJWUNWQHSWSCYFXQQSVORFQGUQIHUAJYFLBNBKJPOEIPYATRMNMGUTTVBOUHE"
"ZKXVAUEXCJYSCZEMGWTPXMQJEUWYHTFJQTBOQBEPQIPDYLBPIKKGPVYPOVLPPHYNGNWFTNQCDAATJVKRHC"
"OZGEBPFZZDPPZOWQCDFQZJAMXLVREYJQQFTQJKHMLRFJCVPVCTSVFVAGDVNXIGINSGHKGTWCKXNRZCZFVX"
"FPKZHPOMJTQOIVDIYKEVIIBAUHEDGOUNPCPMVLTZQLICXKKIYRJASBNDUZAONDDLQNVRXGWNQAOWSJSFWU"
"YWTTLOVXIJYERRZQCJMRZHCXEEAKYCLEICUWOJUXWHAPHQJDTBVRPVWTMCJRAUYCOTFXLLIQLOBASBMPED"
"KLDZDWDYAPXCKLZMEFIAOFYGFLBMURWVBFJDDEFXNIQOORYRMNROGVCOESSHSNIBNFRHPSWVAUQQVDMAHX"
"STDOVZMZEFRRFCKOLDOOFVOBCPRRLGYFJNXVPPUZONOSALUUI"
)
|
51753
|
import numpy as np
from numba import jit
from numba.core import types
from numba.tests.support import TestCase, tag
import unittest
# Array overlaps involving a displacement
def array_overlap1(src, dest, k=1):
assert src.shape == dest.shape
dest[k:] = src[:-k]
def array_overlap2(src, dest, k=1):
assert src.shape == dest.shape
dest[:-k] = src[k:]
def array_overlap3(src, dest, k=1):
assert src.shape == dest.shape
dest[:,:-k] = src[:,k:]
def array_overlap4(src, dest, k=1):
assert src.shape == dest.shape
dest[:,k:] = src[:,:-k]
def array_overlap5(src, dest, k=1):
assert src.shape == dest.shape
dest[...,:-k] = src[...,k:]
def array_overlap6(src, dest, k=1):
assert src.shape == dest.shape
dest[...,k:] = src[...,:-k]
# Array overlaps involving an in-place reversal
def array_overlap11(src, dest):
assert src.shape == dest.shape
dest[::-1] = src
def array_overlap12(src, dest):
assert src.shape == dest.shape
dest[:] = src[::-1]
def array_overlap13(src, dest):
assert src.shape == dest.shape
dest[:,::-1] = src
def array_overlap14(src, dest):
assert src.shape == dest.shape
dest[:] = src[:,::-1]
def array_overlap15(src, dest):
assert src.shape == dest.shape
dest[...,::-1] = src
def array_overlap16(src, dest):
assert src.shape == dest.shape
dest[:] = src[...,::-1]
class TestArrayOverlap(TestCase):
def check_overlap(self, pyfunc, min_ndim, have_k_argument=False):
N = 4
def vary_layouts(orig):
yield orig.copy(order='C')
yield orig.copy(order='F')
a = orig[::-1].copy()[::-1]
assert not a.flags.c_contiguous and not a.flags.f_contiguous
yield a
def check(pyfunc, cfunc, pydest, cdest, kwargs):
pyfunc(pydest, pydest, **kwargs)
cfunc(cdest, cdest, **kwargs)
self.assertPreciseEqual(pydest, cdest)
cfunc = jit(nopython=True)(pyfunc)
# Check for up to 3d arrays
for ndim in range(min_ndim, 4):
shape = (N,) * ndim
orig = np.arange(0, N**ndim).reshape(shape)
            # Note we cannot copy an 'A' layout array exactly (bitwise),
# so instead we call vary_layouts() twice
for pydest, cdest in zip(vary_layouts(orig), vary_layouts(orig)):
if have_k_argument:
for k in range(1, N):
check(pyfunc, cfunc, pydest, cdest, dict(k=k))
else:
check(pyfunc, cfunc, pydest, cdest, {})
def check_overlap_with_k(self, pyfunc, min_ndim):
self.check_overlap(pyfunc, min_ndim=min_ndim, have_k_argument=True)
def test_overlap1(self):
self.check_overlap_with_k(array_overlap1, min_ndim=1)
def test_overlap2(self):
self.check_overlap_with_k(array_overlap2, min_ndim=1)
def test_overlap3(self):
self.check_overlap_with_k(array_overlap3, min_ndim=2)
def test_overlap4(self):
self.check_overlap_with_k(array_overlap4, min_ndim=2)
def test_overlap5(self):
self.check_overlap_with_k(array_overlap5, min_ndim=1)
def test_overlap6(self):
self.check_overlap_with_k(array_overlap6, min_ndim=1)
def test_overlap11(self):
self.check_overlap(array_overlap11, min_ndim=1)
def test_overlap12(self):
self.check_overlap(array_overlap12, min_ndim=1)
def test_overlap13(self):
self.check_overlap(array_overlap13, min_ndim=2)
def test_overlap14(self):
self.check_overlap(array_overlap14, min_ndim=2)
def test_overlap15(self):
self.check_overlap(array_overlap15, min_ndim=1)
def test_overlap16(self):
self.check_overlap(array_overlap16, min_ndim=1)
if __name__ == '__main__':
unittest.main()
|
51796
|
import datetime as dt
from stpmex.utils import strftime, strptime
def test_strftime():
assert strftime(dt.date(2020, 4, 20)) == '20200420'
def test_strptime():
assert strptime('20200420') == dt.date(2020, 4, 20)
|
51814
|
import random
import numpy as np
from pybullet_planning import multiply, interval_generator
from pybullet_planning import Pose, Point, Euler
def get_random_direction_generator(**kwargs):
lower = [-np.pi, -np.pi]
upper = [+np.pi, +np.pi]
for [roll, pitch] in interval_generator(lower, upper, **kwargs):
pose = Pose(euler=Euler(roll=roll, pitch=pitch))
yield pose
def get_enumeration_pose_generator(pose_list, shuffle=False):
if shuffle:
random.shuffle(pose_list)
for p in pose_list:
yield p
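# A hedged usage sketch (not part of the original module). Both generators are
# lazy, so poses are pulled with next() or iteration:
#
#   random_poses = get_random_direction_generator()
#   pose = next(random_poses)          # a pybullet_planning Pose tuple
#
#   enum_poses = get_enumeration_pose_generator(
#       [Pose(point=Point(z=0.1))], shuffle=True)
#   for candidate_pose in enum_poses:
#       pass  # try each candidate pose in turn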
|
51861
|
from bytewax import Dataflow, run
flow = Dataflow()
flow.map(lambda x: x * x)
flow.capture()
if __name__ == "__main__":
for epoch, y in sorted(run(flow, enumerate(range(10)))):
print(y)
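# Note (not part of the original example): enumerate() gives each input its own
# epoch, so the sorted output is simply the squares 0, 1, 4, ..., 81 printed one
# per line.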
|
51867
|
import angr
######################################
# recv
######################################
class recv(angr.SimProcedure):
#pylint:disable=arguments-differ,unused-argument
def run(self, fd, dst, length, flags):
simfd = self.state.posix.get_fd(fd)
if simfd is None:
return -1
return simfd.read(dst, length)
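# A hedged usage sketch (not part of the original module); the binary path is a
# placeholder. SimProcedures like this are normally installed by hooking the
# corresponding symbol on a project:
#
#   proj = angr.Project("/path/to/binary", auto_load_libs=False)
#   proj.hook_symbol("recv", recv())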
|
51909
|
import unittest
class TestLogoMain(unittest.TestCase):
def test_imports(self):
try:
from dreamcoder.domains.logo.main import (
animateSolutions,
dreamFromGrammar,
list_options,
outputDreams,
enumerateDreams,
visualizePrimitives,
Flatten,
LogoFeatureCNN,
main
)
except Exception:
self.fail('Unable to import logo module')
if __name__ == '__main__':
unittest.main()
|
51915
|
import shutil
import subprocess # nosec # have to use subprocess
import warnings
from collections import Counter
from copy import deepcopy
from os import listdir, makedirs
from os.path import abspath, basename, dirname, exists, isfile, join
from subprocess import PIPE # nosec # have to use subprocess
from tempfile import mkdtemp
import f90nml
import numpy as np
import pandas as pd
from dateutil.relativedelta import relativedelta
from openscm_units import unit_registry
from scmdata import run_append
from .config import _wine_installed, config
from .errors import InvalidTemporalResError, NoReaderWriterError
from .io import MAGICCData, read_cfg_file
from .io.utils import _get_openscm_var_from_filepath
from .scenarios import zero_emissions
from .utils import get_date_time_string
IS_WINDOWS = config["is_windows"]
class WineNotInstalledError(Exception):
"""Exception raised if wine is not installed but is required"""
def _copy_files(source, target, recursive=False):
"""
Copy all the files in source directory to target.
    If ``recursive``, subdirectories are copied as well; otherwise they are ignored.
"""
if recursive:
shutil.copytree(source, target)
return
source_files = listdir(source)
if not exists(target):
makedirs(target)
for filename in source_files:
full_filename = join(source, filename)
if isfile(full_filename):
shutil.copy(full_filename, target)
def _clean_value(v):
if isinstance(v, str):
return v.strip()
elif isinstance(v, list):
if isinstance(v[0], str):
return [i.replace("\0", "").strip().replace("\n", "") for i in v]
return v
class MAGICCBase(object):
"""
Provides access to the MAGICC binary and configuration.
To enable multiple MAGICC 'setups' to be configured independently,
the MAGICC directory containing the input files, configuration
and binary is copied to a new folder. The configuration in this
MAGICC copy can then be edited without impacting other instances or your
original MAGICC distribution.
    A ``MAGICC`` instance first has to be set up by calling
``create_copy``. If many model runs are being performed this step only has
to be performed once. The ``run`` method can then be called many times
without re-copying the files each time. Between each call to ``run``, the
configuration files can be updated to perform runs with different
configurations.
Parameters
----------
root_dir : str
If ``root_dir`` is supplied, an existing MAGICC 'setup' is
used.
"""
version = None
_scen_file_name = "SCENARIO.SCEN7"
def __init__(self, root_dir=None, strict=True):
"""
Initialise
Parameters
----------
root_dir : str
Root directory of the MAGICC package. If ``None``, a temporary
            copy of MAGICC is made based on the result of
            ``self.get_executable()``.
strict: bool
If True, enforce the configuration checks, otherwise a warning
is raised if any invalid configuration is found and the run is
continued. Setting ``strict=False`` is only recommended for
experienced users of MAGICC.
"""
self.root_dir = root_dir
self.config = None
self.executable = self.get_executable()
self.strict = strict
if root_dir is not None:
self.is_temp = False
else:
# Create a temp directory
self.is_temp = True
def __enter__(self):
if self.is_temp and self.run_dir is None:
self.create_copy()
return self
def __exit__(self, *args, **kwargs):
self.remove_temp_copy()
def create_copy(self):
"""
Initialises a temporary directory structure and copy of MAGICC
configuration files and binary.
The root folder and ``bin`` folders are copied (not recursively). The
``run`` folder is copied recursively.
"""
if self.executable is None or not isfile(self.executable):
raise FileNotFoundError(
"Could not find MAGICC{} executable: {}".format(
self.version, self.executable
)
)
if self.is_temp:
if self.root_dir is not None:
raise AssertionError(
"A temp copy for this instance has already been created"
)
self.root_dir = mkdtemp(prefix="pymagicc-")
if exists(self.run_dir):
raise Exception("A copy of MAGICC has already been created.")
if not exists(self.root_dir):
makedirs(self.root_dir)
exec_dir = basename(self.original_dir)
# Copy a subset of folders from the MAGICC `original_dir`
# Also copy anything which is in the root of the MAGICC distribution
# Assumes that the MAGICC binary is in a folder one level below the root
# of the MAGICC distribution. i.e. /run/magicc.exe or /bin/magicc
dirs_to_copy = [".", "bin"]
dirs_to_copy_recursive = ["run"]
# Check that the executable is in a valid sub directory
if exec_dir not in dirs_to_copy + dirs_to_copy_recursive:
raise AssertionError("binary must be in bin/ or run/ directory")
for d in dirs_to_copy + dirs_to_copy_recursive:
source_dir = abspath(join(self.original_dir, "..", d))
if exists(source_dir):
_copy_files(
source_dir,
join(self.root_dir, d),
recursive=d in dirs_to_copy_recursive,
)
# Create an empty out dir
# MAGICC assumes that the 'out' directory already exists
makedirs(join(self.root_dir, "out"))
# Create basic configuration files so magicc can run
self.set_years()
self.set_config()
@property
def binary_name(self):
"""
Name of the MAGICC binary file
Returns
-------
str
Name of the binary file
"""
return basename(self.executable)
@property
def original_dir(self):
"""
Directory of the MAGICC package.
This is the directory which contains the ``run`` and ``out`` folders.
Returns
-------
str
Path of the MAGICC package
"""
return dirname(self.executable)
@property
def run_dir(self):
"""
Run directory of the MAGICC package.
This path always ends in ``run``.
Returns
-------
str
Path of the run directory
"""
if self.root_dir is None:
return None
return join(self.root_dir, "run")
@property
def out_dir(self):
"""
Output directory of the MAGICC package.
This path always ends in ``out``.
Returns
-------
str
Path of the output directory
"""
if self.root_dir is None:
return None
return join(self.root_dir, "out")
@property
def default_config(self):
"""
Default configuration for a run
Returns
-------
:obj:`f90nml.Namelist`
Namelist object containing the default configuration
"""
base = f90nml.read(join(self.run_dir, "MAGCFG_DEFAULTALL.CFG"))
user = f90nml.read(join(self.run_dir, "MAGCFG_USER.CFG"))
self._default_config = deepcopy(base)
def _deep_update(b, o):
for k, v in o.items():
if isinstance(v, dict):
_deep_update(b[k], v)
else:
b.update(o)
_deep_update(self._default_config, user)
return self._default_config
def run(self, scenario=None, only=None, debug=False, **kwargs):
"""
Run MAGICC and parse the output.
As a reminder, putting ``out_parameters=1`` will cause MAGICC to write out its
parameters into ``out/PARAMETERS.OUT`` and they will then be read into
``output.metadata["parameters"]`` where ``output`` is the returned object.
        Any logged output from running MAGICC will be in ``output.metadata["stderr"]``.
        For MAGICC7 and above, the level of logging can be controlled with the
``debug`` argument.
Any subannual files output by MAGICC will be ignored by this function. These
files can be read in manually using :class:`pymagicc.io.MAGICCData` directly.
Parameters
----------
scenario : :obj:`pymagicc.io.MAGICCData`
Scenario to run. If None MAGICC will simply run with whatever config has
already been set.
only : list of str
If not None, only extract variables in this list.
debug: {True, False, "verbose"}
If true, MAGICC will run in debug mode with the maximum amount of logging.
If "verbose", MAGICC will be run in verbose mode.
kwargs
Other config values to pass to MAGICC for the run
Returns
-------
:obj:`pymagicc.io.MAGICCData`
MAGICCData object containing that data in its ``df`` attribute and
metadata and parameters (depending on the value of ``include_parameters``)
in its ``metadata`` attribute.
Raises
------
ValueError
If no output is found which matches the list specified in ``only``.
subprocess.CalledProcessError
If MAGICC fails to run. Check the 'stderr' key of the result's `metadata`
attribute to inspect the results output from MAGICC.
ValueError
The user attempts to use ``debug`` with MAGICC6
"""
if not exists(self.root_dir):
raise FileNotFoundError(self.root_dir)
if self.executable is None:
raise ValueError(
"MAGICC executable not found, try setting an environment variable `MAGICC_EXECUTABLE_{}=/path/to/binary`".format(
self.version
)
)
if scenario is not None:
kwargs = self.set_emission_scenario_setup(scenario, kwargs)
yr_config = {}
if "startyear" in kwargs:
yr_config["startyear"] = kwargs.pop("startyear")
if "endyear" in kwargs:
yr_config["endyear"] = kwargs.pop("endyear")
if yr_config:
self.set_years(**yr_config)
# should be able to do some other nice metadata stuff re how magicc was run
# etc. here
kwargs.setdefault("rundate", get_date_time_string())
self.update_config(**kwargs)
self.check_config()
exec_dir = basename(self.original_dir)
command = [join(self.root_dir, exec_dir, self.binary_name)]
if self.version >= 7:
if debug == "verbose":
command.append("--verbose")
elif debug:
command.append("--debug")
elif debug:
raise ValueError("MAGICC6 has no debug capability")
if not IS_WINDOWS and self.binary_name.endswith(".exe"): # pragma: no cover
if not _wine_installed:
raise WineNotInstalledError(
"Wine is not installed but is required to run `.exe` binaries"
)
command.insert(0, "wine")
try:
res = subprocess.run( # nosec # on Windows shell=True is required
command,
check=True,
# thank you https://stackoverflow.com/a/53209196 for Python 3.6 hack
stdout=PIPE,
stderr=PIPE,
cwd=self.run_dir,
shell=IS_WINDOWS,
)
except subprocess.CalledProcessError as exc:
print("stderr:\n{}".format(exc.stderr.decode()))
raise exc
outfiles = self._get_output_filenames()
read_cols = {"climate_model": ["MAGICC{}".format(self.version)]}
if scenario is not None:
read_cols["model"] = scenario["model"].unique().tolist()
read_cols["scenario"] = scenario["scenario"].unique().tolist()
else:
read_cols.setdefault("model", ["unspecified"])
read_cols.setdefault("scenario", ["unspecified"])
mdata = []
for filepath in outfiles:
if filepath.startswith("DAT_VOLCANIC_RF.") or "SUBANN" in filepath:
warnings.warn(
"Not reading file: {}. Monthly data are not read in automatically by `run`. "
"Use `MAGICCData` instead.".format(filepath)
)
continue
try:
openscm_var = _get_openscm_var_from_filepath(filepath)
if only is None or openscm_var in only:
tempdata = MAGICCData(
join(self.out_dir, filepath), columns=deepcopy(read_cols)
)
mdata.append(tempdata)
except (NoReaderWriterError, InvalidTemporalResError):
# TODO: something like warnings.warn("Could not read {}".format(filepath))
continue
if not mdata and only is not None:
raise ValueError("No output found for only={}".format(only))
if not mdata:
if self.strict:
raise ValueError("No output found. Check configuration")
else:
# No data was loaded return an empty MAGICCData object
mdata = MAGICCData(
data={},
columns={
"model": [],
"unit": [],
"variable": [],
"region": [],
"scenario": [],
},
)
else:
mdata = run_append(mdata)
try:
run_paras = self.read_parameters()
self.config = run_paras
mdata.metadata["parameters"] = run_paras
except FileNotFoundError:
pass
mdata.metadata["stderr"] = res.stderr.decode("ascii")
levels_to_warn = ["WARNING", "ERROR", "FATAL"]
for level in levels_to_warn:
if "<{}>".format(level) in mdata.metadata["stderr"]:
warnings.warn(
"magicc logged a {} message. Check the 'stderr' key of the "
"result's `metadata` attribute.".format(level)
)
return mdata
def _get_output_filenames(self):
outfiles = [f for f in listdir(self.out_dir) if f != "PARAMETERS.OUT"]
bin_out = [
f.split(".")[0]
for f in outfiles
if f.startswith("DAT_") and f.endswith(".BINOUT")
]
extras = []
for f in outfiles:
var_name, ext = f.split(".")
if ext != "BINOUT" and var_name not in bin_out:
extras.append(f)
return [f + ".BINOUT" for f in bin_out] + extras
def _check_failed(self, msg):
if self.strict:
raise ValueError(msg)
else:
warnings.warn(msg)
def check_config(self):
"""Check that our MAGICC ``.CFG`` files are set to safely work with PYMAGICC
For further detail about why this is required, please see :ref:`MAGICC flags`.
Raises
------
ValueError
If we are not certain that the config written by PYMAGICC will overwrite
all other config i.e. that there will be no unexpected behaviour. A
ValueError will also be raised if the user tries to use more than one
scenario file.
"""
cfg_error_msg = (
"PYMAGICC is not the only tuning model that will be used by "
"`MAGCFG_USER.CFG`: your run is likely to fail/do odd things"
)
emisscen_error_msg = (
"You have more than one `FILE_EMISSCEN_X` flag set. Using more than "
"one emissions scenario is hard to debug and unnecessary with "
"Pymagicc's Dataframe scenario input. Please combine all your "
"scenarios into one Dataframe with Pymagicc and Pandas, then feed "
"this single Dataframe into Pymagicc's run API."
)
nml_to_check = "nml_allcfgs"
usr_cfg = read_cfg_file(join(self.run_dir, "MAGCFG_USER.CFG"))
for k in usr_cfg[nml_to_check]:
if k.startswith("file_tuningmodel"):
first_tuningmodel = k in ["file_tuningmodel", "file_tuningmodel_1"]
if first_tuningmodel:
if usr_cfg[nml_to_check][k] != "PYMAGICC":
self._check_failed(cfg_error_msg)
elif usr_cfg[nml_to_check][k] not in ["USER", ""]:
self._check_failed(cfg_error_msg)
elif k.startswith("file_emisscen_"):
if usr_cfg[nml_to_check][k] not in ["NONE", ""]:
self._check_failed(emisscen_error_msg)
self._check_config()
def write(self, mdata, name):
"""Write an input file to disk
Parameters
----------
mdata : :obj:`pymagicc.io.MAGICCData`
A MAGICCData instance with the data to write
name : str
The name of the file to write. The file will be written to the MAGICC
instance's run directory i.e. ``self.run_dir``
"""
mdata.write(join(self.run_dir, name), self.version)
def read_parameters(self):
"""
Read a parameters.out file
Returns
-------
dict
A dictionary containing all the configuration used by MAGICC
"""
param_fname = join(self.out_dir, "PARAMETERS.OUT")
if not exists(param_fname):
raise FileNotFoundError("No PARAMETERS.OUT found")
with open(param_fname) as nml_file:
parameters = dict(f90nml.read(nml_file))
for group in ["nml_years", "nml_allcfgs", "nml_outputcfgs"]:
parameters[group] = dict(parameters[group])
for k, v in parameters[group].items():
parameters[group][k] = _clean_value(v)
parameters[group.replace("nml_", "")] = parameters.pop(group)
self.config = parameters
return parameters
def remove_temp_copy(self):
"""
Removes a temporary copy of the MAGICC version shipped with Pymagicc.
"""
if self.is_temp and self.root_dir is not None:
shutil.rmtree(self.root_dir)
self.root_dir = None
def set_config(
self,
filename="MAGTUNE_PYMAGICC.CFG",
top_level_key="<KEY>",
conflict="warn",
**kwargs,
):
"""
Create a configuration file for MAGICC.
Writes a fortran namelist in run_dir.
Parameters
----------
filename : str
Name of configuration file to write
top_level_key : str
Name of namelist to be written in the
configuration file
conflict : {'warn', 'ignore'}
If 'warn', when a flag needs to be replaced by a different name (because,
for example, the flag name changed between MAGICC versions), a warning is
raised. If 'ignore', no warning is raised when a replacement is required.
kwargs
Other parameters to pass to the configuration file. No
validation on the parameters is performed.
Returns
-------
dict
The contents of the namelist which was written to file
Warning
-------
If a key is renamed, a warning is raised
Raises
------
ValueError
An invalid value for ``conflict`` is supplied
"""
kwargs = self._check_and_format_config(kwargs)
fname = join(self.run_dir, filename)
conf = {top_level_key: kwargs}
conf = self._fix_legacy_keys(conf, conflict=conflict)
f90nml.write(conf, fname, force=True)
return conf
def update_config(
self,
filename="MAGTUNE_PYMAGICC.CFG",
top_level_key="<KEY>",
conflict="warn",
**kwargs,
):
"""Updates a configuration file for MAGICC
Updates the contents of a fortran namelist in the run directory,
creating a new namelist if none exists.
Parameters
----------
filename : str
Name of configuration file to write
top_level_key : str
Name of namelist to be written in the
configuration file
conflict : {'warn', 'ignore'}
If 'warn', when a flag needs to be replaced by a different name (because,
for example, the flag name changed between MAGICC versions), a warning is
raised. If 'ignore', no warning is raised when a replacement is required.
kwargs
Other parameters to pass to the configuration file. No
validation on the parameters is performed.
Returns
-------
dict
The contents of the namelist which was written to file
Warning
-------
If a key is renamed, a warning is raised
Raises
------
ValueError
An invalid value for ``conflict`` is supplied
"""
kwargs = self._check_and_format_config(kwargs)
fname = join(self.run_dir, filename)
if exists(fname):
conf = f90nml.read(fname)
else:
conf = {top_level_key: {}}
conf[top_level_key].update(kwargs)
conf = self._fix_legacy_keys(conf, conflict=conflict)
f90nml.write(conf, fname, force=True)
return conf
def _fix_legacy_keys(self, conf, conflict="warn"):
"""
Go through config and fix any keys which are misnamed.
For example, fix any keys which have been renamed between MAGICC versions to
match the new names.
Parameters
----------
conf :obj:`f90nml.Namelist`
Configuration to check
conflict : {'warn', 'ignore'}
If 'warn', when a conflict is found, a warning is raised. If 'ignore', no
warning is raised when a conflict is found.
Returns
-------
:obj:`f90nml.Namelist`
Configuration with updated keys
Warning
-------
If a key is renamed, a warning is raised
Raises
------
ValueError
An invalid value for ``conflict`` is supplied
"""
valid_conflicts = ["warn", "ignore"]
if conflict not in valid_conflicts:
raise ValueError("`conflict` must be one of: {}".format(valid_conflicts))
cfg_key = "<KEY>"
if cfg_key not in conf:
return conf
new_conf = deepcopy(conf)
for wrong_key, right_key in self._config_renamings.items():
if wrong_key in new_conf[cfg_key]:
new_conf[cfg_key][right_key] = new_conf[cfg_key].pop(wrong_key)
if conflict == "warn":
warnings.warn(
"Altering config flag {} to {}".format(wrong_key, right_key)
)
return new_conf
def set_zero_config(self):
"""Set config such that radiative forcing and temperature output will be zero
        This method is intended as a convenience only; it does not handle everything in
        an obvious way. Adjusting the parameter settings still requires great care and
        may behave unexpectedly.
"""
# zero_emissions is imported from scenarios module
# TODO: setup MAGICC6 so it puts extra variables in right place and hence
# warning about ignoring some data disappears
zero_emissions.write(join(self.run_dir, self._scen_file_name), self.version)
time = zero_emissions.filter(variable="Emissions|CH4", region="World")[
"time"
].values
no_timesteps = len(time)
        # value doesn't actually matter as calculations are done from differences, but
        # a sensible value is chosen nonetheless
co2_conc_pi = 722
co2_conc = co2_conc_pi * np.ones(no_timesteps)
co2_conc_df = pd.DataFrame(
{
"time": time,
"scenario": "idealised",
"model": "unspecified",
"climate_model": "unspecified",
"variable": "Atmospheric Concentrations|CO2",
"unit": "ppm",
"todo": "SET",
"region": "World",
"value": co2_conc,
}
)
co2_conc_writer = MAGICCData(co2_conc_df)
co2_conc_filename = "HIST_CONSTANT_CO2_CONC.IN"
co2_conc_writer.metadata = {
"header": "Constant pre-industrial CO2 concentrations"
}
co2_conc_writer.write(join(self.run_dir, co2_conc_filename), self.version)
ch4_conc_pi = 722
ch4_conc = ch4_conc_pi * np.ones(no_timesteps)
ch4_conc_df = pd.DataFrame(
{
"time": time,
"scenario": "idealised",
"model": "unspecified",
"climate_model": "unspecified",
"variable": "Atmospheric Concentrations|CH4",
"unit": "ppb",
"todo": "SET",
"region": "World",
"value": ch4_conc,
}
)
ch4_conc_writer = MAGICCData(ch4_conc_df)
ch4_conc_filename = "HIST_CONSTANT_CH4_CONC.IN"
ch4_conc_writer.metadata = {
"header": "Constant pre-industrial CH4 concentrations"
}
ch4_conc_writer.write(join(self.run_dir, ch4_conc_filename), self.version)
fgas_conc_pi = 0
fgas_conc = fgas_conc_pi * np.ones(no_timesteps)
varname = "FGAS_CONC"
fgas_conc_df = pd.DataFrame(
{
"time": time,
"scenario": "idealised",
"model": "unspecified",
"climate_model": "unspecified",
"variable": varname,
"unit": "ppt",
"todo": "SET",
"region": "World",
"value": fgas_conc,
}
)
fgas_conc_writer = MAGICCData(fgas_conc_df)
fgas_conc_filename = "HIST_ZERO_{}.IN".format(varname)
fgas_conc_writer.metadata = {"header": "Zero concentrations"}
fgas_conc_writer.write(join(self.run_dir, fgas_conc_filename), self.version)
def_config = self.default_config
tmp_nml = f90nml.Namelist({"nml_allcfgs": {"fgas_files_conc": 1}})
fgas_files_conc_flag = list(
self._fix_legacy_keys(tmp_nml, conflict="ignore")["nml_allcfgs"].keys()
)[0]
fgas_conc_files = [fgas_conc_filename] * len(
def_config["nml_allcfgs"][fgas_files_conc_flag]
)
self.set_config(
conflict="ignore",
file_emisscen=self._scen_file_name,
rf_initialization_method="ZEROSTARTSHIFT",
rf_total_constantafteryr=10000,
file_co2i_emis="",
file_co2b_emis="",
file_co2_conc=co2_conc_filename,
co2_switchfromconc2emis_year=10000,
file_ch4i_emis="",
file_ch4b_emis="",
file_ch4n_emis="",
file_ch4_conc=ch4_conc_filename,
ch4_switchfromconc2emis_year=10000,
file_n2oi_emis="",
file_n2ob_emis="",
file_n2on_emis="",
file_n2o_conc="",
n2o_switchfromconc2emis_year=1750,
file_noxi_emis="",
file_noxb_emis="",
file_noxi_ot="",
file_noxb_ot="",
file_noxt_rf="",
file_soxnb_ot="",
file_soxi_ot="",
file_soxt_rf="",
file_soxi_emis="",
file_soxb_emis="",
file_soxn_emis="",
file_oci_emis="",
file_ocb_emis="",
file_oci_ot="",
file_ocb_ot="",
file_oci_rf="",
file_ocb_rf="",
file_bci_emis="",
file_bcb_emis="",
file_bci_ot="",
file_bcb_ot="",
file_bci_rf="",
file_bcb_rf="",
bcoc_switchfromrf2emis_year=1750,
file_nh3i_emis="",
file_nh3b_emis="",
file_nmvoci_emis="",
file_nmvocb_emis="",
file_coi_emis="",
file_cob_emis="",
file_mineraldust_rf="",
file_landuse_rf="",
file_bcsnow_rf="",
# rf_fgassum_scale=0, # this appears to do nothing, hence the next two lines
fgas_switchfromconc2emis_year=10000,
rf_mhalosum_scale=0,
stratoz_o3scale=0,
rf_volcanic_scale=0,
rf_solar_scale=0,
mhalo_switchfromconc2emis_year=1750,
fgas_files_conc=fgas_conc_files,
)
def _check_and_format_config(self, config_dict):
self._check_for_duplicate_keys(config_dict)
config_dict = self._convert_out_config_flags_to_integers(config_dict)
return config_dict
@staticmethod
def _check_for_duplicate_keys(config_dict):
keys_lower = [v.lower() for v in config_dict.keys()]
counts = Counter(keys_lower)
if any([v > 1 for v in counts.values()]):
duplicate_keys = [
[ck for ck in config_dict.keys() if ck.lower() == k.lower()]
for k, v in counts.items()
if v > 1
]
error_msg = (
"The following configuration keys clash because configs are "
"case insensitive: {}".format(
", ".join([str(v) for v in duplicate_keys])
)
)
raise ValueError(error_msg)
@staticmethod
def _convert_out_config_flags_to_integers(config_dict):
valid_out_flags = [
"out_emissions",
"out_gwpemissions",
"out_sum_gwpemissions",
"out_concentrations",
"out_carboncycle",
"out_forcing",
"out_forcing_subannual",
"out_temperature",
"out_temperature_subannual",
"out_sealevel",
"out_parameters",
"out_misc",
"out_lifetimes",
"out_timeseriesmix",
"out_rcpdata",
"out_summaryidx",
"out_tempoceanlayers",
"out_oceanarea",
"out_heatuptake",
"out_warnings",
"out_precipinput",
"out_aogcmtuning",
"out_ccycletuning",
"out_observationaltuning",
"out_keydata_1",
"out_keydata_2",
"out_inverseemis",
"out_surfaceforcing",
"out_permafrost",
"out_allowanydynamicvars",
]
for key in valid_out_flags:
if key in config_dict:
# MAGICC expects 1 and 0 instead of True/False
config_dict[key] = 1 if config_dict[key] else 0
return config_dict
def set_years(self, startyear=1765, endyear=2100):
"""
Set the start and end dates of the simulations.
Parameters
----------
startyear : int
Start year of the simulation
endyear : int
End year of the simulation
Returns
-------
dict
The contents of the namelist
"""
# TODO: test altering stepsperyear, I think 1, 2 and 24 should all work
return self.set_config(
"MAGCFG_NMLYEARS.CFG",
"nml_years",
endyear=endyear,
startyear=startyear,
stepsperyear=12,
)
def set_output_variables(self, write_ascii=True, write_binary=False, **kwargs):
"""Set the output configuration, minimising output as much as possible
There are a number of configuration parameters which control which variables
are written to file and in which format. Limiting the variables that are
written to file can greatly speed up the running of MAGICC. By default,
calling this function without specifying any variables will disable all output
by setting all of MAGICC's ``out_xx`` flags to ``0``.
This convenience function should not be confused with ``set_config`` or
``update_config`` which allow the user to set/update the configuration flags
directly, without the more convenient syntax and default behaviour provided by
this function.
Parameters
----------
write_ascii : bool
If true, MAGICC is configured to write output files as human readable ascii files.
write_binary : bool
If true, MAGICC is configured to write binary output files. These files are much faster
to process and write, but are not human readable.
**kwargs:
            List of variables to write out. A list of possible options is as follows. This
may not be a complete list.
'emissions',
'gwpemissions',
'sum_gwpemissions',
'concentrations',
'carboncycle',
'forcing',
'surfaceforcing',
'permafrost',
'temperature',
'sealevel',
'parameters',
'misc',
'lifetimes',
'timeseriesmix',
'rcpdata',
'summaryidx',
'inverseemis',
'tempoceanlayers',
'oceanarea',
'heatuptake',
'warnings',
'precipinput',
'aogcmtuning',
'ccycletuning',
'observationaltuning',
'keydata_1',
'keydata_2'
"""
if not (write_ascii or write_binary):
raise AssertionError("write_binary and/or write_ascii must be configured")
if write_binary and write_ascii:
ascii_binary = "BOTH"
elif write_ascii:
ascii_binary = "ASCII"
else:
ascii_binary = "BINARY"
# defaults
outconfig = {
"out_emissions": 0,
"out_gwpemissions": 0,
"out_sum_gwpemissions": 0,
"out_concentrations": 0,
"out_carboncycle": 0,
"out_forcing": 0,
"out_surfaceforcing": 0,
"out_permafrost": 0,
"out_temperature": 0,
"out_sealevel": 0,
"out_parameters": 0,
"out_misc": 0,
"out_timeseriesmix": 0,
"out_rcpdata": 0,
"out_summaryidx": 0,
"out_inverseemis": 0,
"out_tempoceanlayers": 0,
"out_heatuptake": 0,
"out_ascii_binary": ascii_binary,
"out_warnings": 0,
"out_precipinput": 0,
"out_aogcmtuning": 0,
"out_ccycletuning": 0,
"out_observationaltuning": 0,
"out_keydata_1": 0,
"out_keydata_2": 0,
}
if self.version == 7:
outconfig["out_oceanarea"] = 0
outconfig["out_lifetimes"] = 0
for kw in kwargs:
val = 1 if kwargs[kw] else 0 # convert values to 0/1 instead of booleans
outconfig["out_" + kw.lower()] = val
self.update_config(**outconfig)
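    # Illustrative sketch (not from the original source): keyword names are
    # lower-cased and prefixed with ``out_``, so a call such as
    #   set_output_variables(temperature=True, forcing=True, write_binary=True)
    # would leave every flag at 0 except out_temperature and out_forcing, and
    # would set out_ascii_binary to "BOTH" because write_ascii defaults to True.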
def get_executable(self):
"""
Get path to MAGICC executable being used
Returns
-------
str
Path to MAGICC executable being used
"""
return config["executable_{}".format(self.version)]
def diagnose_tcr_ecs_tcre(self, **kwargs):
"""
Diagnose TCR, ECS and TCRE
The transient climate response (TCR), is the global-mean temperature response
per unit cumulative |CO2| emissions at the time at which atmospheric |CO2|
concentrations double in an experiment where atmospheric |CO2| concentrations
are increased at 1% per year from pre-industrial levels (1pctCO2 experiment).
The equilibrium climate sensitivity (ECS), is the equilibrium global-mean
temperature response to an instantaneous doubling of atmospheric |CO2|
concentrations (abrupt-2xCO2 experiment).
The transient climate response to emissions (TCRE), is the global-mean
temperature response per unit cumulative |CO2| emissions at the time at which
atmospheric |CO2| concentrations double in the 1pctCO2 experiment.
Please note that sometimes the run length won't be long enough to allow
MAGICC's oceans to fully equilibrate and hence the ECS value might not be what
you expect (it should match the value of ``core_climatesensitivity``).
Parameters
----------
**kwargs
parameter values to use in the diagnosis e.g. ``core_climatesensitivity=4``
Returns
-------
dict
Dictionary with keys: "ecs" - the diagnosed ECS; "tcr" - the diagnosed
TCR; "tcre" - the diagnosed TCRE; "timeseries" - the relevant model input
and output timeseries used in the experiment i.e. atmospheric |CO2|
concentrations, inverse |CO2| emissions, total radiative forcing and
global-mean surface temperature
"""
ecs_res = self.diagnose_ecs(**kwargs)
tcr_tcre_res = self.diagnose_tcr_tcre(**kwargs)
out = {**ecs_res, **tcr_tcre_res}
out["timeseries"] = run_append(
[ecs_res["timeseries"], tcr_tcre_res["timeseries"]]
)
return out
def diagnose_ecs(self, **kwargs):
"""
Diagnose ECS
The equilibrium climate sensitivity (ECS), is the equilibrium global-mean
temperature response to an instantaneous doubling of atmospheric |CO2|
concentrations (abrupt-2xCO2 experiment).
Please note that sometimes the run length won't be long enough to allow
MAGICC's oceans to fully equilibrate and hence the ECS value might not be what
you expect (it should match the value of ``core_climatesensitivity``).
Parameters
----------
**kwargs
parameter values to use in the diagnosis e.g. ``core_climatesensitivity=4``
Returns
-------
dict
Dictionary with keys: "ecs" - the diagnosed ECS; "timeseries" - the
relevant model input and output timeseries used in the experiment i.e.
atmospheric |CO2| concentrations, inverse |CO2| emissions, total radiative
forcing and global-mean surface temperature
"""
self._diagnose_ecs_config_setup(**kwargs)
timeseries = self.run(
scenario=None,
only=[
"Atmospheric Concentrations|CO2",
"Radiative Forcing",
"Surface Temperature",
],
)
timeseries["scenario"] = "abrupt-2xCO2"
ecs = self.get_ecs_from_diagnosis_results(timeseries)
return {"ecs": ecs, "timeseries": timeseries}
def diagnose_tcr_tcre(self, **kwargs):
"""
Diagnose TCR and TCRE
The transient climate response (TCR), is the global-mean temperature response
per unit cumulative |CO2| emissions at the time at which atmospheric |CO2|
concentrations double in an experiment where atmospheric |CO2| concentrations
are increased at 1% per year from pre-industrial levels (1pctCO2 experiment).
The transient climate response to emissions (TCRE), is the global-mean
temperature response per unit cumulative |CO2| emissions at the time at which
atmospheric |CO2| concentrations double in the 1pctCO2 experiment.
Parameters
----------
**kwargs
parameter values to use in the diagnosis e.g. ``core_climatesensitivity=4``
Returns
-------
dict
Dictionary with keys: "tcr" - the diagnosed TCR; "tcre" - the diagnosed
TCRE; "timeseries" - the relevant model input and output timeseries used
in the experiment i.e. atmospheric |CO2| concentrations, inverse |CO2|
emissions, total radiative forcing and global-mean surface temperature
"""
self._diagnose_tcr_tcre_config_setup(**kwargs)
timeseries = self.run(
scenario=None,
only=[
"Atmospheric Concentrations|CO2",
"INVERSEEMIS",
"Radiative Forcing",
"Surface Temperature",
],
)
# drop all the irrelevant inverse emissions
timeseries = timeseries.filter(
variable="Inverse Emissions*", level=1, keep=False
)
        # drop the final year as concs stay constant for some reason,
# MAGICC bug...
timeseries = timeseries.filter(time=timeseries["time"].max(), keep=False)
timeseries["scenario"] = "1pctCO2"
tcr, tcre = self.get_tcr_tcre_from_diagnosis_results(timeseries)
return {"tcr": tcr, "tcre": tcre, "timeseries": timeseries}
def _diagnose_ecs_config_setup(self, **kwargs):
self.set_years(
startyear=1750, endyear=4200
        )  # 4200 seems to be the max I can push to without an error
self.update_config(
FILE_CO2_CONC="ABRUPT2XCO2_CO2_CONC.IN",
CO2_SWITCHFROMCONC2EMIS_YEAR=30000,
RF_TOTAL_RUNMODUS="CO2",
RF_TOTAL_CONSTANTAFTERYR=2000,
**kwargs,
)
def _diagnose_tcr_tcre_config_setup(self, **kwargs):
self.set_years(startyear=1750, endyear=2020)
self.update_config(
FILE_CO2_CONC="1PCTCO2_CO2_CONC.IN",
CO2_SWITCHFROMCONC2EMIS_YEAR=30000,
RF_TOTAL_RUNMODUS="CO2",
RF_TOTAL_CONSTANTAFTERYR=3000,
OUT_INVERSEEMIS=1,
**kwargs,
)
def get_ecs_from_diagnosis_results(self, results_ecs_run):
"""
Diagnose ECS from the results of the abrupt-2xCO2 experiment
Parameters
----------
results_ecs_run : :obj:`ScmRun`
Results of the abrupt-2xCO2 experiment, must contain atmospheric |CO2|
concentrations, total radiative forcing and surface temperature.
Returns
-------
ecs : :obj:`pint.quantity.Quantity`
ECS diagnosed from ``results_ecs_run``
"""
global_co2_concs = results_ecs_run.filter(
variable="Atmospheric Concentrations|CO2", region="World"
)
ecs_time, ecs_start_time = self._get_ecs_ecs_start_yr_from_CO2_concs(
global_co2_concs
)
global_total_rf = results_ecs_run.filter(
variable="Radiative Forcing", region="World"
)
self._check_ecs_total_RF(global_total_rf, jump_time=ecs_start_time)
global_temp = results_ecs_run.filter(
variable="Surface Temperature", region="World"
)
self._check_ecs_temp(global_temp)
ecs = float(global_temp.filter(time=ecs_time).values.squeeze())
unit = global_temp.get_unique_meta("unit", no_duplicates=True)
ecs = ecs * unit_registry(unit)
return ecs
def get_tcr_tcre_from_diagnosis_results(self, results_tcr_tcre_run):
"""
Diagnose TCR and TCRE from the results of the 1pctCO2 experiment
Parameters
----------
results_tcr_tcre_run : :obj:`ScmRun`
Results of the 1pctCO2 experiment, must contain atmospheric |CO2|
concentrations, inverse |CO2| emissions, total radiative forcing and
surface temperature.
Returns
-------
tcr, tcre : :obj:`pint.quantity.Quantity`, :obj:`pint.quantity.Quantity`
TCR and TCRE diagnosed from ``results_tcr_tcre_run``
"""
global_co2_concs = results_tcr_tcre_run.filter(
variable="Atmospheric Concentrations|CO2", region="World"
)
(tcr_time, tcr_start_time,) = self._get_tcr_tcr_start_yr_from_CO2_concs(
global_co2_concs
)
if tcr_time.year != tcr_start_time.year + 70: # pragma: no cover # emergency
raise AssertionError("Has the definition of TCR and TCRE changed?")
global_inverse_co2_emms = results_tcr_tcre_run.filter(
variable="Inverse Emissions|CO2|MAGICC Fossil and Industrial",
region="World",
)
global_total_rf = results_tcr_tcre_run.filter(
variable="Radiative Forcing", region="World"
)
self._check_tcr_tcre_total_RF(global_total_rf, tcr_time=tcr_time)
global_temp = results_tcr_tcre_run.filter(
variable="Surface Temperature", region="World"
)
self._check_tcr_tcre_temp(global_temp)
tcr = float(global_temp.filter(time=tcr_time).values.squeeze())
tcr_unit = global_temp.get_unique_meta("unit", no_duplicates=True)
tcr = tcr * unit_registry(tcr_unit)
tcre_cumulative_emms = float(
global_inverse_co2_emms.filter(
year=range(tcr_start_time.year, tcr_time.year)
).values.sum()
)
emms_unit = global_inverse_co2_emms.get_unique_meta("unit", no_duplicates=True)
years = global_inverse_co2_emms["year"].values.squeeze()
if not np.all((years[1:] - years[:-1]) == 1): # pragma: no cover
raise AssertionError(
"TCR/TCRE diagnosis assumed to be on annual timestep. Please "
"raise an issue at "
"https://github.com/openscm/pymagicc/issues to discuss "
"your use case"
)
# can now safely assume that our simple sum has done the right thing
tcre_cumulative_emms_unit = unit_registry(emms_unit) * unit_registry("yr")
tcre_cumulative_emms = tcre_cumulative_emms * tcre_cumulative_emms_unit
tcre = tcr / tcre_cumulative_emms
return tcr, tcre
def _get_ecs_ecs_start_yr_from_CO2_concs(self, df_co2_concs):
co2_concs = df_co2_concs.timeseries()
co2_conc_0 = co2_concs.iloc[0, 0]
t_start = co2_concs.columns.min()
t_end = co2_concs.columns.max()
ecs_start_time = co2_concs.iloc[
:, co2_concs.values.squeeze() > co2_conc_0
].columns[0]
spin_up_co2_concs = (
_filter_time_range(df_co2_concs, lambda x: t_start <= x < ecs_start_time)
.timeseries()
.values.squeeze()
)
if not (spin_up_co2_concs == co2_conc_0).all():
raise ValueError(
"The ECS CO2 concs look wrong, they are not constant before they start rising"
)
co2_conc_final = 2 * co2_conc_0
eqm_co2_concs = (
_filter_time_range(df_co2_concs, lambda x: ecs_start_time <= x <= t_end)
.timeseries()
.values.squeeze()
)
if not np.isclose(eqm_co2_concs, co2_conc_final).all():
raise ValueError(
"The ECS CO2 concs look wrong, they are not constant after doubling"
)
ecs_time = df_co2_concs["time"].iloc[-1]
return ecs_time, ecs_start_time
def _get_tcr_tcr_start_yr_from_CO2_concs(self, df_co2_concs):
co2_concs = df_co2_concs.timeseries()
co2_conc_0 = co2_concs.iloc[0, 0]
t_start = co2_concs.columns.min()
t_end = co2_concs.columns.max()
tcr_start_time = co2_concs.iloc[
:, co2_concs.values.squeeze() > co2_conc_0
].columns[0] - relativedelta(years=1)
tcr_time = tcr_start_time + relativedelta(years=70)
spin_up_co2_concs = (
_filter_time_range(df_co2_concs, lambda x: t_start <= x <= tcr_start_time)
.timeseries()
.values.squeeze()
)
if not (spin_up_co2_concs == co2_conc_0).all():
raise ValueError(
"The TCR/TCRE CO2 concs look wrong, they are not constant before they start rising"
)
actual_rise_co2_concs = (
_filter_time_range(df_co2_concs, lambda x: tcr_start_time <= x <= t_end)
.timeseries()
.values.squeeze()
)
# this will blow up if we switch to diagnose tcr/ecs with a monthly run...
expected_rise_co2_concs = co2_conc_0 * 1.01 ** np.arange(
len(actual_rise_co2_concs)
)
rise_co2_concs_correct = np.isclose(
actual_rise_co2_concs, expected_rise_co2_concs
).all()
if not rise_co2_concs_correct:
raise ValueError("The TCR/TCRE CO2 concs look wrong during the rise period")
return tcr_time, tcr_start_time
def _check_ecs_total_RF(self, df_total_rf, jump_time):
total_rf = df_total_rf.timeseries()
total_rf_max = total_rf.values.squeeze().max()
t_start = total_rf.columns.min()
t_end = total_rf.columns.max()
spin_up_rf = (
_filter_time_range(df_total_rf, lambda x: t_start <= x < jump_time)
.timeseries()
.values.squeeze()
)
if not (spin_up_rf == 0).all():
raise ValueError(
"The ECS total radiative forcing looks wrong, it is not all zero before concentrations start rising"
)
eqm_rf = (
_filter_time_range(df_total_rf, lambda x: jump_time <= x <= t_end)
.timeseries()
.values.squeeze()
)
if not (eqm_rf == total_rf_max).all():
raise ValueError(
"The ECS total radiative forcing looks wrong, it is not constant after concentrations double"
)
def _check_tcr_tcre_total_RF(self, df_total_rf, tcr_time):
total_rf = df_total_rf.timeseries()
t_start = total_rf.columns.min()
tcr_start_time = tcr_time - relativedelta(years=70)
spin_up_rf = (
_filter_time_range(df_total_rf, lambda x: t_start <= x <= tcr_start_time)
.timeseries()
.values.squeeze()
)
if not (spin_up_rf == 0).all():
raise ValueError(
"The TCR/TCRE total radiative forcing looks wrong, it is not all zero before concentrations start rising"
)
rf_vls = total_rf.values.squeeze()
rf_minus_previous_yr = rf_vls[1:] - rf_vls[:-1]
if not np.all(rf_minus_previous_yr >= 0):
raise ValueError(
"The TCR/TCRE total radiative forcing looks wrong, it is not rising after concentrations start rising"
)
def _check_ecs_temp(self, df_temp):
self._check_tcr_ecs_tcre_temp(
df_temp, "The ECS surface temperature looks wrong, it decreases"
)
def _check_tcr_tcre_temp(self, df_temp):
self._check_tcr_ecs_tcre_temp(
df_temp, "The TCR/TCRE surface temperature looks wrong, it decreases"
)
def _check_tcr_ecs_tcre_temp(self, df_temp, message):
tmp_vls = df_temp.timeseries().values.squeeze()
tmp_minus_previous_yr = tmp_vls[1:] - tmp_vls[:-1]
if not np.all(tmp_minus_previous_yr >= 0):
raise ValueError(message)
def set_emission_scenario_setup(self, scenario, config_dict):
"""Set the emissions flags correctly.
Parameters
----------
scenario : :obj:`pymagicc.io.MAGICCData`
Scenario to run.
config_dict : dict
Dictionary with current input configurations which is to be validated and
updated where necessary.
Returns
-------
dict
Updated configuration
"""
self.write(scenario, self._scen_file_name)
emis_flag = list(
self._fix_legacy_keys(
f90nml.Namelist({"nml_allcfgs": {"file_emisscen": "junk"}}),
conflict="ignore",
)["nml_allcfgs"].keys()
)[0]
config_dict[emis_flag] = self._scen_file_name
return config_dict
def _check_config(self):
"""
Check config above and beyond those checked by ``self.check_config``
"""
pass
class MAGICC6(MAGICCBase):
version = 6
_scen_file_name = "SCENARIO.SCEN"
_config_renamings = {
"file_emisscen": "file_emissionscenario",
"fgas_files_conc": "file_fgas_conc",
"mhalo_switchfromconc2emis_year": "mhalo_switch_conc2emis_yr",
}
@property
def default_config(self):
"""
Default configuration to use in a run
"""
base = f90nml.read(join(self.run_dir, "MAGCFG_DEFAULTALL_69.CFG"))
user = f90nml.read(join(self.run_dir, "MAGCFG_USER.CFG"))
self._default_config = deepcopy(base)
self._default_config.update(user)
return self._default_config
def _check_tcr_ecs_tcre_total_RF(self, df_total_rf, tcr_time, ecs_time):
super()._check_tcr_ecs_tcre_total_RF(df_total_rf, tcr_time, ecs_time)
        # can be more careful with checks here as MAGICC6 only has logarithmic CO2 forcing
# i.e. linear rise in forcing
total_rf = df_total_rf.timeseries()
total_rf_max = total_rf.values.squeeze().max()
tcre_start_time = tcr_time - relativedelta(years=70)
actual_rise_rf = (
_filter_time_range(df_total_rf, lambda x: tcre_start_time <= x <= tcr_time)
.timeseries()
.values.squeeze()
)
# this will blow up if we switch to diagnose tcr/ecs with a monthly run...
expected_rise_rf = total_rf_max / 70.0 * np.arange(71)
rise_rf_correct = np.isclose(actual_rise_rf, expected_rise_rf).all()
if not rise_rf_correct:
raise ValueError(
"The TCR/ECS/TCRE total radiative forcing looks wrong during the rise period"
)
def _check_config(self):
cfg = self.update_config()
if "file_emissionscenario" in cfg["nml_allcfgs"]:
if cfg["nml_allcfgs"]["file_emissionscenario"].endswith("SCEN7"):
self._check_failed("MAGICC6 cannot run SCEN7 files")
class MAGICC7(MAGICCBase):
version = 7
_config_renamings = {
"file_emissionscenario": "file_emisscen",
"file_fgas_conc": "fgas_files_conc",
"mhalo_switch_conc2emis_yr": "mhalo_switchfromconc2emis_year",
}
def create_copy(self):
"""
Initialises a temporary directory structure and copy of MAGICC
configuration files and binary.
This will also overwrite the value of all ``file_tuningmodel_x`` flags to
ensure that Pymagicc's configurations will be read. If ``self.strict``, this
will also overwrite the value of all ``file_emisscen_x`` flags to ensure that
only Pymagicc's scenario input is used. This overwrite behaviour can be
removed once the MAGICC7 binary is publicly released as we can then create a
Pymagicc specific MAGCFG_USER.CFG rather than relying on whatever is in the
user's current copy.
"""
super(MAGICC7, self).create_copy()
self.update_config(
"MAGCFG_USER.CFG",
**{
"file_tuningmodel_1": "PYMAGICC",
"file_tuningmodel_2": "USER",
"file_tuningmodel_3": "USER",
"file_tuningmodel_4": "USER",
"file_tuningmodel_5": "USER",
"file_tuningmodel_6": "USER",
"file_tuningmodel_7": "USER",
"file_tuningmodel_8": "USER",
"file_tuningmodel_9": "USER",
"file_tuningmodel_10": "USER",
},
)
if self.strict:
self.update_config(
"MAGCFG_USER.CFG",
**{
"file_emisscen_2": "NONE",
"file_emisscen_3": "NONE",
"file_emisscen_4": "NONE",
"file_emisscen_5": "NONE",
"file_emisscen_6": "NONE",
"file_emisscen_7": "NONE",
"file_emisscen_8": "NONE",
},
)
def _diagnose_tcr_ecs_tcre_config_setup(self, **kwargs):
super()._diagnose_tcr_ecs_tcre_config_setup(**kwargs)
# also need to lock CH4 and N2O in case OLBL forcing mode is being used
self.update_config(
FILE_CH4_CONC="TCRECS_CH4_CONC.IN",
CH4_SWITCHFROMCONC2EMIS_YEAR=30000,
FILE_N2O_CONC="TCRECS_N2O_CONC.IN",
N2O_SWITCHFROMCONC2EMIS_YEAR=30000,
)
def _check_config(self):
pass
def _filter_time_range(scmdf, filter_func):
# TODO: move into openscm
tdf = scmdf.timeseries()
tdf = tdf.iloc[:, tdf.columns.map(filter_func)]
return MAGICCData(tdf)
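if __name__ == "__main__":
    # Minimal usage sketch, not part of the original module. It assumes a
    # MAGICC6 binary is available to pymagicc and that MAGICCBase provides the
    # context-manager support defined earlier in this file.
    with MAGICC6() as magicc:
        diagnosis = magicc.diagnose_tcr_ecs_tcre(core_climatesensitivity=3)
        print(diagnosis["tcr"], diagnosis["ecs"], diagnosis["tcre"])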
|
51918
|
from lib.plugins import Driver
import os
from paramiko import SSHClient, RSAKey, AutoAddPolicy
from io import StringIO
class Ssh(Driver):
DEFAULT_KEY_PATH = "~/.ssh/id_rsa"
    def __init__(self, host, username='root', password=None, key=None, port=22, path="/proc"):
Driver.__init__(self)
self._host = host
self._username = username
self._password = password
self._port = port
self._path = path
self._client = None
self._ftp = None
if not password or key:
self._key = RSAKey.from_private_key_file(os.path.expanduser(key or Ssh.DEFAULT_KEY_PATH))
else:
self._key = None
def readProc(self, path):
sftp = self._connectFtp()
o = StringIO()
for line in sftp.open(os.path.join(self._path, path)):
o.write(line)
return o.getvalue()
def sh(self, cmd):
client = self._connect()
stdin, stdout, stderr = client.exec_command(cmd)
return {
"stdout": stdout.read().decode('utf-8'),
"stderr": stderr.read().decode('utf-8'),
"status": stdout.channel.recv_exit_status()
}
def _connect(self):
if not self._client:
client = SSHClient()
client.set_missing_host_key_policy(AutoAddPolicy())
client.connect(hostname = self._host, username=self._username, password=self._password, pkey=self._key, port=self._port, look_for_keys=False)
self._client = client
return self._client
def _connectFtp(self):
if not self._ftp:
client = self._connect()
self._ftp = client.open_sftp()
return self._ftp
def getHost(self):
return self._host
def create(args):
return Ssh(**args)
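if __name__ == "__main__":
    # Minimal usage sketch, not part of the original driver. The host and
    # credentials are placeholders; a reachable SSH server is required, and the
    # default key at ~/.ssh/id_rsa is used when no password is supplied.
    driver = create({"host": "192.0.2.10", "username": "root"})
    print(driver.sh("uptime")["stdout"])
    print(driver.readProc("loadavg"))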
|
51941
|
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Dense, LSTM, Embedding
from tensorflow.keras.optimizers import RMSprop
from datagen import *
# Defining the layers to be used in the model
transfer_values_input = Input(shape=(2048,), name='transfer_values_input')
decoder_transfer_map = Dense(256, activation='tanh')
decoder_input = Input(shape=(None, ), name='decoder_input')
decoder_embedding = Embedding(input_dim=5000, output_dim=128, name='decoder_embedding')
decoderlstm = LSTM(256, return_sequences=True)
decoder_dense = Dense(5000, activation='softmax', name='decoder_output')
# Function to get the output of the decoder, given output of encoder
def connect_decoder(transfer_values):
state = decoder_transfer_map(transfer_values)
initial_state = [state, state]
# Start the decoder-network with its input-layer.
net = decoder_input
net = decoder_embedding(net)
net = decoderlstm(net, initial_state=initial_state)
decoder_output1 = decoder_dense(net)
return decoder_output1
decoder_output = connect_decoder(transfer_values=transfer_values_input)
# Defining, compiling, training, saving the model
decoder_model = Model(inputs=[transfer_values_input, decoder_input], outputs=[decoder_output])
decoder_model.compile(optimizer=RMSprop(lr=1e-3), loss='sparse_categorical_crossentropy')
decoder_model.fit(generator, steps_per_epoch=1700, epochs=25)
# Enter the path prefix where the model weights should be saved
output_dir = './'
decoder_model.save_weights(output_dir)
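# Illustrative note (not part of the original script): at inference time the
# model expects a float batch of shape (batch, 2048) for transfer_values_input
# and an integer token batch of shape (batch, seq_len) for decoder_input, and
# it returns per-token softmax scores of shape (batch, seq_len, 5000), e.g.
#   probs = decoder_model.predict([transfer_values_batch, token_batch])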
|
51956
|
import snap
Graph = snap.GenFull(snap.PNEANet, 10)
Src = 1
Dst = 2
EI = Graph.GetEI(Src,Dst)
EId = EI.GetId()
print(EId, Graph.GetEI(Src,Dst).GetId())
print(Graph.GetEI(Src,Dst).GetSrcNId(), Graph.GetEI(Src,Dst).GetDstNId())
print(Graph.GetEI(EId).GetSrcNId(), Graph.GetEI(EId).GetDstNId())
if EId != Graph.GetEI(Src,Dst).GetId():
print("*** error1")
if Graph.GetEI(Src,Dst).GetSrcNId() != Graph.GetEI(EId).GetSrcNId():
print("*** error2")
if Graph.GetEI(Src,Dst).GetDstNId() != Graph.GetEI(EId).GetDstNId():
print("*** error3")
|
51971
|
from typer.testing import CliRunner
from indic_transliteration.sanscript_cli import app
runner = CliRunner()
test_input = "rAmAyaNa"
expected_output = "rāmāyaṇa"
def test_argument_input():
result = runner.invoke(app, ["--from", "hk", "--to", "iast", test_input])
assert result.exit_code == 0
assert expected_output in result.stdout
def test_stdin_input():
result = runner.invoke(
app, ["--from", "hk", "--to", "iast", "--input-file", "-"], input=test_input
)
assert result.exit_code == 0
assert expected_output in result.stdout
def test_file_input(tmp_path):
test_input_file = tmp_path / "test_input_file.txt"
test_input_file.write_text(test_input)
result = runner.invoke(
app, ["--from", "hk", "--to", "iast", "--input-file", test_input_file]
)
assert result.exit_code == 0
assert expected_output in result.stdout
def test_file_output(tmp_path):
test_output_file = tmp_path / "test_file_output.txt"
result = runner.invoke(
app,
[
"--from",
"hk",
"--to",
"iast",
"--output-file",
test_output_file,
test_input,
],
)
assert result.exit_code == 0
assert f"Output written to: {test_output_file}" in result.stdout
assert test_output_file.read_text() == expected_output
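# A further sketch, not part of the original suite: it assumes the CLI accepts
# --input-file and --output-file together, which is not exercised above.
def test_stdin_input_with_file_output(tmp_path):
    test_output_file = tmp_path / "test_stdin_file_output.txt"
    result = runner.invoke(
        app,
        ["--from", "hk", "--to", "iast", "--input-file", "-", "--output-file", test_output_file],
        input=test_input,
    )
    assert result.exit_code == 0
    assert test_output_file.read_text() == expected_output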
|
51976
|
from mission.constants.missions import Gate, Path
from conf.vehicle import is_mainsub
HYDROPHONES_PINGER_DEPTH = 3.0
NONSURFACE_MIN_DEPTH = 0.6
# gate = Gate(
# depth=1.0,
# initial_approach_target_percent_of_screen=.45,
# gate_width_threshold=0.4,
# pre_spin_charge_dist=16 if is_mainsub else 12,
# spin_charge_dist=16 if is_mainsub else 12,
# post_spin_charge_dist=16 if is_mainsub else 12
# )
path = Path(
depth=1.0,
search_forward=6 if is_mainsub else 2,
    search_stride=10 if is_mainsub else 8,
search_right_first=True,
search_speed=0.1,
post_dist=2.5,
failure_back_up_dist=0.5 if is_mainsub else 0.1,
failure_back_up_speed=0.2 if is_mainsub else 0.1,
)
#dice = Dice(
# depth=3.3,
# max_depth=4,
# search_forward=3,
# search_stride=8,
# search_speed=0.1,
# min_dot_radius=0.03,
# ram_dist=1.0,
# ram_speed=0.1,
# rammed_back_up_timeout=20,
# lost_sight_back_up_timeout=5,
# search_default_zero_timeout=60,
#)
#
#highway = Highway(
# high_depth=1.0,
# low_depth=1.2,
# dist=6 if is_mainsub else 2,
# speed=0.4 if is_mainsub else 0.2,
#)
#
#track = Track(
# depth=1.6,
# slow_down_dist=5,
# max_speed=0.3 if is_mainsub else 0.2,
# min_speed=0.1,
# vision_frame_period=0.5,
#)
#
#roulette = Roulette(
# depth_search=1.0,
# depth_realign=2.5,
# depth_drop=3.0,
# heading_offset=30,
#)
#
#cash_in = CashIn(
# approach_funnel_depth=0.5,
# drop_approach_dist=0.2,
# # (right, left)
# drop_dvl_forward_correct_dist=(0.1, -0.13),
# drop_heading_correct=(0, -7),
# pick_up_both_depth=1.0,
# pick_up_search_depth_1=2.0,
# pick_up_search_depth_2=2.25,
# pick_up_search_depth_3=2.5,
# pick_up_start_follow_depth=3.2,
# attempt_surface_depth=-1,
# attempt_funnel_depth=0,
#)
|
51995
|
from astra import models
import redis
db = redis.StrictRedis(host='127.0.0.1', decode_responses=True)
class SiteColorModel(models.Model):
color = models.CharField()
def get_db(self):
return db
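if __name__ == "__main__":
    # Illustrative usage sketch, not part of the original module; it assumes the
    # usual astra pattern of addressing an object by primary key and that the
    # Redis server configured above is reachable.
    site = SiteColorModel(pk="main")
    site.color = "#336699"
    print(site.color)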
|
51998
|
from abc import ABCMeta, abstractmethod
import json
import logging
import copy
import boto3
import botocore
from botocore.exceptions import ClientError
from endgame.shared.response_message import ResponseMessage
from endgame.shared.list_resources_response import ListResourcesResponse
from endgame.shared.response_message import ResponseGetRbp
logger = logging.getLogger(__name__)
class ResourceType(object):
__meta_class__ = ABCMeta
def __init__(
self,
name: str,
resource_type: str,
service: str,
region: str,
client: boto3.Session.client,
current_account_id: str,
override_action: str = None,
include_resource_block: bool = True,
override_resource_block: str = None,
override_account_id_instead_of_principal: bool = False
):
self.name = name
self.resource_type = resource_type
self.client = client
self.current_account_id = current_account_id
self.service = service
self.region = region
self.include_resource_block = include_resource_block # Override for IAM
self.override_action = override_action # Override for IAM
self.override_resource_block = override_resource_block # Override for EFS
self.override_account_id_instead_of_principal = override_account_id_instead_of_principal # Override for logs, sns, sqs, and lambda
self.policy_document = self._get_rbp().policy_document
# Store an original copy of the policy so we can compare it later.
self.original_policy = copy.deepcopy(json.loads(json.dumps(self.policy_document.original_policy)))
def __str__(self):
return '%s' % (json.dumps(json.loads(self.policy_document.__str__())))
@abstractmethod
def _get_rbp(self) -> ResponseGetRbp:
raise NotImplementedError("Must override _get_rbp")
@property
@abstractmethod
def arn(self) -> str:
raise NotImplementedError("Must override arn")
@abstractmethod
def set_rbp(self, evil_policy: dict) -> ResponseMessage:
raise NotImplementedError("Must override set_rbp")
def add_myself(self, evil_principal: str, dry_run: bool = False) -> ResponseMessage:
"""Add your rogue principal to the AWS resource"""
logger.debug(f"Adding {evil_principal} to {self.arn}")
evil_policy = self.policy_document.policy_plus_evil_principal(
victim_account_id=self.current_account_id,
evil_principal=evil_principal,
resource_arn=self.arn
)
if not dry_run:
set_rbp_response = self.set_rbp(evil_policy=evil_policy)
operation = "ADD_MYSELF"
message = set_rbp_response.message
success = set_rbp_response.success
else:
# new_policy = evil_policy
operation = "DRY_RUN_ADD_MYSELF"
message = "DRY_RUN_ADD_MYSELF"
try:
tmp = self._get_rbp()
success = tmp.success
except botocore.exceptions.ClientError as error:
message = str(error)
success = False
response_message = ResponseMessage(message=message, operation=operation, success=success,
evil_principal=evil_principal, victim_resource_arn=self.arn,
original_policy=self.original_policy, updated_policy=evil_policy,
resource_type=self.resource_type, resource_name=self.name,
service=self.service)
return response_message
def undo(self, evil_principal: str, dry_run: bool = False) -> ResponseMessage:
"""Remove all traces"""
logger.debug(f"Removing {evil_principal} from {self.arn}")
policy_stripped = self.policy_document.policy_minus_evil_principal(
victim_account_id=self.current_account_id,
evil_principal=evil_principal,
resource_arn=self.arn
)
if not dry_run:
operation = "UNDO"
set_rbp_response = self.set_rbp(evil_policy=policy_stripped)
message = set_rbp_response.message
success = set_rbp_response.success
else:
operation = "DRY_RUN_UNDO"
message = "DRY_RUN_UNDO"
success = True
response_message = ResponseMessage(message=message, operation=operation, success=success,
evil_principal=evil_principal, victim_resource_arn=self.arn,
original_policy=self.original_policy, updated_policy=policy_stripped,
resource_type=self.resource_type, resource_name=self.name,
service=self.service)
return response_message
class ResourceTypes(object):
__meta_class__ = ABCMeta
def __init__(self, client: boto3.Session.client, current_account_id: str, region: str):
self.client = client
self.current_account_id = current_account_id
self.region = region
def __str__(self):
return '%s' % (json.dumps(self.resources.arn))
@property
@abstractmethod
def resources(self) -> [ListResourcesResponse]:
raise NotImplementedError("Must override property 'resources'")
|
52024
|
import numpy as np
from scipy.spatial.distance import pdist, squareform
import scipy.cluster.hierarchy as hy
import matplotlib.pyplot as plt
# Creating a cluster of clusters function
def clusters(number=20, cnumber=5, csize=10):
    # Note that the cluster centres are positioned with Gaussian randomness.
rnum = np.random.rand(cnumber, 2)
rn = rnum[:, 0] * number
rn = rn.astype(int)
rn[np.where(rn < 5)] = 5
rn[np.where(rn > number / 2.)] = round(number / 2., 0)
ra = rnum[:, 1] * 2.9
ra[np.where(ra < 1.5)] = 1.5
cls = np.random.randn(number, 3) * csize
# Random multipliers for central point of cluster
rxyz = np.random.randn(cnumber - 1, 3)
    for i in range(cnumber - 1):
tmp = np.random.randn(rn[i + 1], 3)
x = tmp[:, 0] + (rxyz[i, 0] * csize)
y = tmp[:, 1] + (rxyz[i, 1] * csize)
z = tmp[:, 2] + (rxyz[i, 2] * csize)
tmp = np.column_stack([x, y, z])
cls = np.vstack([cls, tmp])
return cls
# Generate a cluster of clusters and distance matrix.
cls = clusters()
D = pdist(cls[:, 0:2])
D = squareform(D)
# Compute and plot first dendrogram.
fig = plt.figure(figsize=(8, 8))
ax1 = fig.add_axes([0.09, 0.1, 0.2, 0.6])
Y1 = hy.linkage(D, method='complete')
cutoff = 0.3 * np.max(Y1[:, 2])
Z1 = hy.dendrogram(Y1, orientation='right', color_threshold=cutoff)
ax1.xaxis.set_visible(False)
ax1.yaxis.set_visible(False)
# Compute and plot second dendrogram.
ax2 = fig.add_axes([0.3, 0.71, 0.6, 0.2])
Y2 = hy.linkage(D, method='average')
cutoff = 0.3 * np.max(Y2[:, 2])
Z2 = hy.dendrogram(Y2, color_threshold=cutoff)
ax2.xaxis.set_visible(False)
ax2.yaxis.set_visible(False)
# Plot distance matrix.
ax3 = fig.add_axes([0.3, 0.1, 0.6, 0.6])
idx1 = Z1['leaves']
idx2 = Z2['leaves']
D = D[idx1, :]
D = D[:, idx2]
ax3.matshow(D, aspect='auto', origin='lower', cmap=plt.cm.YlGnBu)
ax3.xaxis.set_visible(False)
ax3.yaxis.set_visible(False)
# Save the figure.
fig.savefig('scipy_352_ex1.pdf', bbox_inches='tight')
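# A possible follow-up (not in the original example): flat cluster labels can
# be extracted from a linkage matrix using the distance cutoff computed above.
labels = hy.fcluster(Y2, cutoff, criterion='distance')
print('number of flat clusters found:', len(np.unique(labels)))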
|
52030
|
from tkinter import *
# the game data for the initial game state
def init():
data.playerX = 250
data.playerY = 550
data.circleX = 250
data.circleY = 0
data.gameOver = False
# events updating the game data
def keyPressed(event):
if event.keysym == "Right" and data.playerX < 550:
data.playerX += 5
elif event.keysym == "Left" and data.playerX > 0:
data.playerX -= 5
def moveCircle():
if not data.gameOver:
data.circleY += 10
# the game data updating the game state
def timerFired():
moveCircle()
if checkCollision(data.playerX, data.playerY,
data.circleX, data.circleY,
10, 50):
data.gameOver = True
if data.circleY > 600:
data.gameOver = True
def checkCollision(x1, y1, x2, y2, r1, r2):
distance = ((x2 - x1) ** 2 + (y2 - y1) ** 2) ** 0.5
return distance <= r1 + r2
# the game state updating what is drawn
def redrawAll(canvas):
canvas.create_oval(data.playerX - 10, data.playerY - 10,
data.playerX + 10, data.playerY + 10,
fill="red")
canvas.create_oval(data.circleX - 50, data.circleY - 50,
data.circleX + 50, data.circleY + 50, \
fill="yellow")
if data.gameOver:
        canvas.create_text(300, 250, text="Game Over", font="Arial 20")
# Challenge 2.1 - Make it so that at the top of the screen it says "Score: __"
# where the __ is a number that increases every time timerFired() happens.
# Challenge 2.2 - Make it so that every third time timerFired() happens a new
# circle is added to the top of the screen with random x position, color,
# and size.
# Suggested way to do this:
# 1. Make a data.timer variable that starts at 0 and increases by 1 every
# timerFired() and a data.circles variable that starts as [].
# 2. When data.timer gets to 3, reset it to 0 and call a new function
# createNewCircle().
# 3. Write createNewCircle() to append a tuple to data.circles of the
# format:
# (xCoordinate, yCoordinate, radiusSize, colorString)
# 4. In redrawAll(), loop over the data.circles list and draw each circle.
# 5. In timerFired(), every second, move each circle's yPosition down by
# 10 pixels.
# BONUS Challenge 2.3 - Make the game better with your own ideas!
# The coding world is now yours for the exploring :)
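# One possible sketch for Challenge 2.1 (kept in comments so the starter code
# above is unchanged): give data a score field in init(), e.g.
#   data.score = 0
# increment it each time timerFired() runs with
#   data.score += 1
# and draw it at the top of the screen in redrawAll() with
#   canvas.create_text(300, 20, text="Score: " + str(data.score), font="Arial 20")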
# ***** DO NOT MODIFY BELOW HERE ***** #
# animation setup code
class Struct(object): pass
data = Struct()
def run(width=600, height=600):
def redrawAllWrapper(canvas):
canvas.delete(ALL)
redrawAll(canvas)
canvas.update()
def keyPressedWrapper(event, canvas):
keyPressed(event)
redrawAllWrapper(canvas)
def timerFiredWrapper(canvas):
timerFired()
redrawAllWrapper(canvas)
# pause, then call timerFired again
canvas.after(data.timerDelay, timerFiredWrapper, canvas)
# Set up data and call init
data.width = width
data.height = height
data.timerDelay = 200 # milliseconds
init()
# create the root and the canvas
root = Tk()
canvas = Canvas(root, width=data.width, height=data.height)
canvas.pack()
# set up events
root.bind("<Key>", lambda event:
keyPressedWrapper(event, canvas))
timerFiredWrapper(canvas)
# and launch the app
root.mainloop() # blocks until window is closed
print("bye!")
run()
|
52049
|
from perfrunner.helpers.local import restart_memcached, run_ycsb
from perfrunner.settings import PhaseSettings, TargetSettings
def ycsb_data_load(workload_settings: PhaseSettings,
target: TargetSettings,
timer: int,
instance: int):
soe_params = None
if workload_settings.recorded_load_cache_size:
restart_memcached()
soe_params = {
'insertstart': (instance + 1) * workload_settings.inserts_per_workerinstance,
'recorded_load_cache_size': workload_settings.recorded_load_cache_size,
}
phase_params = None
if workload_settings.phase:
phase_params = {
'insertstart': instance * workload_settings.inserts_per_workerinstance +
workload_settings.insertstart,
'inserts_per_workerinstance': workload_settings.inserts_per_workerinstance,
}
host = target.node
if target.cloud:
host = target.cloud['cluster_svc']
run_ycsb(host=host,
bucket=target.bucket,
password=<PASSWORD>,
action='load',
ycsb_client=workload_settings.ycsb_client,
workload=workload_settings.workload_path,
items=workload_settings.items,
workers=workload_settings.workers,
target=int(workload_settings.target),
soe_params=soe_params,
instance=instance,
epoll=workload_settings.epoll,
boost=workload_settings.boost,
persist_to=workload_settings.persist_to,
replicate_to=workload_settings.replicate_to,
fieldlength=workload_settings.field_length,
fieldcount=workload_settings.field_count,
durability=workload_settings.durability,
kv_endpoints=workload_settings.kv_endpoints,
enable_mutation_token=workload_settings.enable_mutation_token,
transactionsenabled=workload_settings.transactionsenabled,
documentsintransaction=workload_settings.documentsintransaction,
transactionreadproportion=workload_settings.transactionreadproportion,
transactionupdateproportion=workload_settings.transactionupdateproportion,
transactioninsertproportion=workload_settings.transactioninsertproportion,
requestdistribution=workload_settings.requestdistribution,
num_atrs=workload_settings.num_atrs,
ycsb_jvm_args=workload_settings.ycsb_jvm_args,
collections_map=workload_settings.collections,
timeseries=workload_settings.timeseries,
phase_params=phase_params,
cloud=target.cloud)
def ycsb_workload(workload_settings: PhaseSettings,
target: TargetSettings,
timer: int,
instance: int):
soe_params = None
if workload_settings.recorded_load_cache_size:
soe_params = {
'insertstart': (instance + 1) * workload_settings.inserts_per_workerinstance,
'recorded_load_cache_size': workload_settings.recorded_load_cache_size,
}
if workload_settings.ycsb_split_workload:
split_instance = workload_settings.workload_instances // 2
if instance < split_instance:
workload_settings.workload_path = workload_settings.workload_path.split(",")[0]
elif instance >= split_instance:
workload_settings.workload_path = workload_settings.workload_path.split(",")[1]
insert_test_params = None
if workload_settings.insert_test_flag:
insert_test_params = {
'insertstart': int(instance * workload_settings.inserts_per_workerinstance +
workload_settings.items),
'recordcount': int((instance+1) * workload_settings.inserts_per_workerinstance +
workload_settings.items),
}
host = target.node
if target.cloud:
host = target.cloud['cluster_svc']
run_ycsb(host=host,
bucket=target.bucket,
password=<PASSWORD>,
action='run',
ycsb_client=workload_settings.ycsb_client,
workload=workload_settings.workload_path,
items=workload_settings.items,
workers=workload_settings.workers,
target=int(workload_settings.target),
soe_params=soe_params,
ops=int(workload_settings.ops),
instance=instance,
epoll=workload_settings.epoll,
boost=workload_settings.boost,
persist_to=workload_settings.persist_to,
replicate_to=workload_settings.replicate_to,
execution_time=workload_settings.time,
ssl_keystore_file=workload_settings.ssl_keystore_file,
ssl_keystore_password=workload_settings.ssl_keystore_password,
ssl_mode=workload_settings.ssl_mode,
certificate_file=workload_settings.certificate_file,
timeseries=workload_settings.timeseries,
cbcollect=workload_settings.cbcollect,
fieldlength=workload_settings.field_length,
fieldcount=workload_settings.field_count,
durability=workload_settings.durability,
kv_endpoints=workload_settings.kv_endpoints,
enable_mutation_token=workload_settings.enable_mutation_token,
retry_strategy=workload_settings.retry_strategy,
retry_lower=workload_settings.retry_lower,
retry_upper=workload_settings.retry_upper,
retry_factor=workload_settings.retry_factor,
transactionsenabled=workload_settings.transactionsenabled,
documentsintransaction=workload_settings.documentsintransaction,
transactionreadproportion=workload_settings.transactionreadproportion,
transactionupdateproportion=workload_settings.transactionupdateproportion,
transactioninsertproportion=workload_settings.transactioninsertproportion,
requestdistribution=workload_settings.requestdistribution,
num_atrs=workload_settings.num_atrs,
ycsb_jvm_args=workload_settings.ycsb_jvm_args,
collections_map=workload_settings.collections,
out_of_order=workload_settings.ycsb_out_of_order,
insert_test_params=insert_test_params,
cloud=target.cloud)
|
52058
|
import pytest
import os
import toml
from tempfile import gettempdir
import neo.libs.login
from neo.libs import login
class TestLogin:
def test_check_env(self, fs):
home = os.path.expanduser("~")
fs.create_file(os.path.join(home, ".neo", "config.toml"))
assert login.check_env()
def fake_load_env_file(self):
pass
def fake_check_env(self):
return True
def dummy_config_toml(self):
config = ""
config += "[auth]\n"
config += "os_username = 'john'\n"
config += "os_password = '<PASSWORD>'\n"
config += "\n"
config += "[region.wjv]\n"
config += "os_auth_url = 'https://foo.id:443/v1'\n"
config += "os_project_id = 'g7ia30trlk'\n"
config += "os_user_domain_name = 'foo.id'\n"
config += "status = 'ACTIVE'\n"
config += "[region.jkt]\n"
config += "os_auth_url = 'https://bar.id:443/v1'\n"
config += "os_project_id = 'iqn1a69tolj'\n"
config += "os_user_domain_name = 'bar.id'\n"
config += "status = 'IDLE'\n"
config += "\n"
return toml.loads(config)
def test_get_env_values(self, monkeypatch):
monkeypatch.setattr(neo.libs.login, "load_env_file", self.dummy_config_toml)
monkeypatch.setattr(neo.libs.login, "check_env", self.fake_check_env)
assert login.get_env_values()
def fake_get_env_values(self):
env = [
{
"username": "john",
"password": "<PASSWORD>",
"region": "zone-1",
"auth_url": "https://foo.id:443/v1",
"project_id": "g7ia30trlk",
"user_domain_name": "foo.id",
"status": "ACTIVE",
},
{
"username": "john",
"password": "<PASSWORD>",
"region": "zone-2",
"auth_url": "https://bar.id:443/v1",
"project_id": "iqn1a69tolj",
"user_domain_name": "bar.id",
"status": "IDLE",
},
]
return env
def test_is_current_env(self, monkeypatch):
monkeypatch.setattr(neo.libs.login, "get_env_values", self.fake_get_env_values)
assert login.is_current_env("https://foo.id:443/v1", "foo.id", "john")
def test_is_current_env_false(self, monkeypatch):
monkeypatch.setattr(neo.libs.login, "get_env_values", self.fake_get_env_values)
assert login.is_current_env("https://bar.id:443/v1", "bar.id", "merry") is None
def fake_check_session(self):
return True
def test_do_logout(self, monkeypatch, fs):
monkeypatch.setattr(neo.libs.login, "check_session", self.fake_check_session)
home = os.path.expanduser("~")
tmp_dir = os.path.join(gettempdir(), ".neo")
fs.create_file(tmp_dir + "/session.pkl")
fs.create_file(os.path.join(home, ".neo", "config.toml"))
assert os.path.exists(tmp_dir + "/session.pkl")
assert os.path.exists(os.path.join(home, ".neo", "config.toml"))
login.do_logout()
assert os.path.exists(tmp_dir + "/session.pkl") is False
assert os.path.exists(os.path.join(home, ".neo", "config.toml")) is False
def test_check_session(self, fs):
tmp_dir = os.path.join(gettempdir(), ".neo")
fs.create_file(tmp_dir + "/session.pkl")
assert login.check_session()
|
52106
|
import logging
import os
import tensorflow as tf
import csv
import time
from datetime import timedelta
import sys
import numpy as np
from tensorflow.python.training import training_util
from tensorflow.contrib import slim
from tensorflow.python.ops import variables as tf_variables
from ..configuration import *
from .. import trainer, evaluator, metrics
from ..task_spec import get_task_spec
from .text_classification_dataset import TextClassificationDataset
def _load_embeddings(vocabulary_size, embeddings_size,
filename_prefix='embeddings', from_dir=DIR_DATA_WORD2VEC):
embeddings = []
embeddings_file = '{}_{}_{}'.format(filename_prefix, vocabulary_size, embeddings_size)
with open(os.path.join(from_dir, embeddings_file), 'r') as file:
reader = csv.reader(file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
for row in reader:
embeddings.append([float(r) for r in row])
return embeddings
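# The embeddings file read above is expected to be a plain CSV named
# '<filename_prefix>_<vocabulary_size>_<embeddings_size>' inside from_dir, with
# one embedding vector (embeddings_size floats) per row.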
class TextClassificationTrainer(trainer.Trainer):
"""
Helper class to run the training and create the model for the training. See trainer.Trainer for
more details.
"""
def __init__(self, dataset, text_classification_model, log_dir=DIR_TC_LOGDIR,
use_end_sequence=False, task_spec=None, max_steps=None):
self.text_classification_model = text_classification_model
self.use_end_sequence = use_end_sequence
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
super(TextClassificationTrainer, self).__init__(log_dir=log_dir, dataset=dataset,
task_spec=task_spec, max_steps=max_steps,
monitored_training_session_config=config)
def model(self, input_text_begin, input_text_end, gene, variation, expected_labels, batch_size,
vocabulary_size=VOCABULARY_SIZE, embeddings_size=EMBEDDINGS_SIZE, output_classes=9):
# embeddings
embeddings = _load_embeddings(vocabulary_size, embeddings_size)
# global step
self.global_step = training_util.get_or_create_global_step()
# model
with slim.arg_scope(self.text_classification_model.model_arg_scope()):
outputs = self.text_classification_model.model(input_text_begin, input_text_end,
gene, variation, output_classes,
embeddings=embeddings,
batch_size=batch_size)
# loss
targets = self.text_classification_model.targets(expected_labels, output_classes)
self.loss = self.text_classification_model.loss(targets, outputs)
tf.summary.scalar('loss', self.loss)
# learning rate
self.optimizer, self.learning_rate = \
self.text_classification_model.optimize(self.loss, self.global_step)
if self.learning_rate is not None:
tf.summary.scalar('learning_rate', self.learning_rate)
# metrics
self.metrics = metrics.single_label(outputs['prediction'], targets)
# saver to save the model
self.saver = tf.train.Saver()
# check a nan value in the loss
self.loss = tf.check_numerics(self.loss, 'loss is nan')
return None
def create_graph(self, dataset_tensor, batch_size):
input_text_begin, input_text_end, gene, variation, expected_labels = dataset_tensor
if not self.use_end_sequence:
input_text_end = None
return self.model(input_text_begin, input_text_end, gene, variation, expected_labels, batch_size)
def step(self, session, graph_data):
lr, _, loss, step, metrics = \
session.run([self.learning_rate, self.optimizer, self.loss, self.global_step,
self.metrics])
if self.is_chief and time.time() > self.print_timestamp + 5 * 60:
self.print_timestamp = time.time()
elapsed_time = str(timedelta(seconds=time.time() - self.init_time))
m = 'step: {} loss: {:0.4f} learning_rate = {:0.6f} elapsed seconds: {} ' \
'precision: {} recall: {} accuracy: {}'
logging.info(m.format(step, loss, lr, elapsed_time,
metrics['precision'], metrics['recall'], metrics['accuracy']))
def after_create_session(self, session, coord):
self.init_time = time.time()
self.print_timestamp = time.time()
class TextClassificationTest(evaluator.Evaluator):
"""Evaluator for distributed training"""
def __init__(self, dataset, text_classification_model, output_path, log_dir=DIR_TC_LOGDIR,
use_end_sequence=False,max_steps=None):
self.use_end_sequence = use_end_sequence
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
super(TextClassificationTest, self).__init__(checkpoints_dir=log_dir, dataset=dataset,
output_path=output_path, max_steps=max_steps,
singular_monitored_session_config=config)
self.text_classification_model = text_classification_model
self.eval_writer = tf.summary.FileWriter(log_dir)
def model(self, input_text_begin, input_text_end, gene, variation, expected_labels, batch_size,
vocabulary_size=VOCABULARY_SIZE, embeddings_size=EMBEDDINGS_SIZE, output_classes=9):
# embeddings
embeddings = _load_embeddings(vocabulary_size, embeddings_size)
# model
with slim.arg_scope(self.text_classification_model.model_arg_scope()):
outputs = self.text_classification_model.model(input_text_begin, input_text_end,
gene, variation, output_classes,
embeddings=embeddings,
batch_size=batch_size,
training=False)
# loss
targets = self.text_classification_model.targets(expected_labels, output_classes)
loss = self.text_classification_model.loss(targets, outputs)
self.accumulated_loss = tf.Variable(0.0, dtype=tf.float32, name='accumulated_loss',
trainable=False)
self.accumulated_loss = tf.assign_add(self.accumulated_loss, loss)
step = tf.Variable(0, dtype=tf.int32, name='eval_step', trainable=False)
step_increase = tf.assign_add(step, 1)
self.loss = self.accumulated_loss / tf.cast(step_increase, dtype=tf.float32)
tf.summary.scalar('loss', self.loss)
# metrics
self.metrics = metrics.single_label(outputs['prediction'], targets, moving_average=False)
return None
def create_graph(self, dataset_tensor, batch_size):
input_text_begin, input_text_end, gene, variation, expected_labels = dataset_tensor
if not self.use_end_sequence:
input_text_end = None
graph_data = self.model(input_text_begin, input_text_end, gene, variation,
expected_labels, batch_size)
return graph_data
def step(self, session, graph_data, summary_op):
summary, self.loss_result, self.metrics_results = \
session.run([summary_op, self.loss, self.metrics])
return summary
def end(self, session):
super(TextClassificationTest, self).end(session)
chk_step = int(self.lastest_checkpoint.split('-')[-1])
m = 'step: {} loss: {:0.4f} precision: {} recall: {} accuracy: {}'
logging.info(m.format(chk_step, self.loss_result, self.metrics_results['precision'],
self.metrics_results['recall'], self.metrics_results['accuracy']))
def after_create_session(self, session, coord):
# checkpoints_file = os.path.join(self.checkpoints_dir, 'checkpoint')
# alt_checkpoints_dir = '{}_tp'.format(self.checkpoints_dir)
# import glob
# files = glob.glob('{}/model.ckpt-*.data-*'.format(alt_checkpoints_dir))
# chk_step = 0
# for f in files:
# num = f.split('model.ckpt-')[1].split('.')[0]
# num = int(num)
# if chk_step == 0 or num < chk_step:
# chk_step = num
# if chk_step != 0:
# ckpt_files = glob.glob('{}/model.ckpt-{}.data-*'.format(alt_checkpoints_dir, chk_step))
# ckpt_files = [x.split('/')[-1] for x in ckpt_files]
# for f in ckpt_files + ['model.ckpt-{}.index', 'model.ckpt-{}.meta']:
# f = f.format(chk_step)
# os.rename(os.path.join(alt_checkpoints_dir, f), os.path.join(self.checkpoints_dir, f))
# with open(checkpoints_file, 'wb') as f:
# f.write('model_checkpoint_path: "./model.ckpt-{}"\n'.format(chk_step))
# f.write('all_model_checkpoint_paths: "./model.ckpt-{}"\n'.format(chk_step))
super(TextClassificationTest, self).after_create_session(session, coord)
# with open(checkpoints_file, 'wb') as f:
# f.write('model_checkpoint_path: "./model.ckpt-"\n')
# f.write('all_model_checkpoint_paths: "./model.ckpt-"\n')
class TextClassificationEval(evaluator.Evaluator):
"""Evaluator for text classification"""
def __init__(self, dataset, text_classification_model, output_path, log_dir=DIR_TC_LOGDIR,
use_end_sequence=False):
self.use_end_sequence = use_end_sequence
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
super(TextClassificationEval, self).__init__(checkpoints_dir=log_dir,
output_path=output_path,
infinite_loop=False,
singular_monitored_session_config=config)
self.dataset = dataset
self.text_classification_model = text_classification_model
def model(self, input_text_begin, input_text_end, gene, variation, batch_size,
vocabulary_size=VOCABULARY_SIZE, embeddings_size=EMBEDDINGS_SIZE, output_classes=9):
# embeddings
embeddings = _load_embeddings(vocabulary_size, embeddings_size)
# global step
self.global_step = training_util.get_or_create_global_step()
self.global_step = tf.assign_add(self.global_step, 1)
# model
with tf.control_dependencies([self.global_step]):
with slim.arg_scope(self.text_classification_model.model_arg_scope()):
self.outputs = self.text_classification_model.model(input_text_begin, input_text_end,
gene, variation, output_classes,
embeddings=embeddings,
batch_size=batch_size,
training=False)
# restore only the trainable variables
self.saver = tf.train.Saver(var_list=tf_variables.trainable_variables())
return self.outputs
def create_graph(self, dataset_tensor, batch_size):
input_text_begin, input_text_end, gene, variation = dataset_tensor
if not self.use_end_sequence:
input_text_end = None
return self.model(input_text_begin, input_text_end, gene, variation, batch_size)
def after_create_session(self, session, coord):
super(TextClassificationEval, self).after_create_session(session, coord)
print('ID,class1,class2,class3,class4,class5,class6,class7,class8,class9')
def step(self, session, graph_data, summary_op):
step, predictions = session.run([self.global_step, self.outputs['prediction']])
predictions = predictions[0]
        predictions = [p + 0.01 for p in predictions]  # smooth the scores so mistakes are penalized less
sum = np.sum(predictions)
predictions = [p / sum for p in predictions]
print('{},{}'.format(step, ','.join(['{:.3f}'.format(x) for x in predictions])))
return None
import logging
def main(model, name, sentence_split=False, end_sequence=USE_END_SEQUENCE, batch_size=TC_BATCH_SIZE):
"""
Main method to execute the text_classification models
:param ModelSimple model: object model based on ModelSimple
:param str name: name of the model
    :param bool sentence_split: whether to split the dataset in sentences or not,
only used for hatt model
:param bool end_sequence: whether to use or not the end of the sequences in the models
:param int batch_size: batch size of the models
"""
logging.getLogger().setLevel(logging.INFO)
log_dir = '{}_{}'.format(DIR_TC_LOGDIR, name)
if len(sys.argv) > 1 and sys.argv[1] == 'test':
# execute the test with the train dataset
dataset = TextClassificationDataset(type='train', sentence_split=sentence_split)
tester = TextClassificationTest(dataset=dataset, text_classification_model=model,
log_dir=log_dir,
output_path=os.path.join(log_dir, 'test_trainset'),
use_end_sequence=end_sequence)
tester.run()
elif len(sys.argv) > 1 and sys.argv[1] == 'validate':
dataset = TextClassificationDataset(type='val', sentence_split=sentence_split)
tester = TextClassificationTest(dataset=dataset, text_classification_model=model,
log_dir=log_dir,
output_path=os.path.join(log_dir, 'validate'),
use_end_sequence=end_sequence)
tester.run()
elif len(sys.argv) > 1 and sys.argv[1] == 'eval':
# evaluate the data of the test dataset. We submit this output to kaggle
dataset = TextClassificationDataset(type='test', sentence_split=sentence_split)
evaluator = TextClassificationEval(dataset=dataset, text_classification_model=model,
log_dir=log_dir,
output_path=os.path.join(log_dir, 'test'),
use_end_sequence=end_sequence)
evaluator.run()
elif len(sys.argv) > 1 and sys.argv[1] == 'eval_stage2':
        # evaluate the data of the stage 2 test dataset. We submit this output to kaggle
dataset = TextClassificationDataset(type='stage2_test', sentence_split=sentence_split)
evaluator = TextClassificationEval(dataset=dataset, text_classification_model=model,
log_dir=log_dir,
output_path=os.path.join(log_dir, 'test_stage2'),
use_end_sequence=end_sequence)
evaluator.run()
else:
# training
task_spec = get_task_spec(with_evaluator=USE_LAST_WORKER_FOR_VALIDATION)
if task_spec.join_if_ps():
# join if it is a parameters server and do nothing else
return
with(tf.gfile.Open(os.path.join(DIR_DATA_TEXT_CLASSIFICATION, 'train_set'))) as f:
max_steps = int(TC_EPOCHS * len(f.readlines()) / batch_size)
if task_spec.is_evaluator():
dataset = TextClassificationDataset(type='val', sentence_split=sentence_split)
# evaluator running in the last worker
tester = TextClassificationTest(dataset=dataset, text_classification_model=model,
log_dir=log_dir,
output_path=os.path.join(log_dir, 'val'),
use_end_sequence=end_sequence,
max_steps=max_steps)
tester.run()
else:
dataset = TextClassificationDataset(type='train', sentence_split=sentence_split)
trainer = TextClassificationTrainer(dataset=dataset, text_classification_model=model,
log_dir=log_dir, use_end_sequence=end_sequence,
task_spec=task_spec, max_steps=max_steps)
trainer.run(epochs=TC_EPOCHS, batch_size=batch_size)
|
52108
|
import click
from retrieval.elastic_retriever import ElasticRetriever
import os
from requests_aws4auth import AWS4Auth  # needed for the AWS-signed Elasticsearch branch below
@click.command()
@click.option('--sections-parquet', type=str, help='', default='')
@click.option('--documents-parquet', type=str, help='', default='')
@click.option('--tables-parquet', type=str, help='', default='')
@click.option('--figures-parquet', type=str, help='', default='')
@click.option('--equations-parquet', type=str, help='', default='')
@click.option('--entities-parquet', type=str, help='', default='')
@click.option('--aws-host', type=str, help='', default='')
@click.option('--host', type=str, help='', default='localhost')
def run(sections_parquet, documents_parquet, tables_parquet, figures_parquet, equations_parquet, entities_parquet, aws_host, host):
if aws_host != '':
auth = AWS4Auth(os.environ.get('AWS_ACCESS_KEY_ID'), os.environ.get('AWS_SECRET_ACCESS_KEY'), os.environ.get('AWS_DEFAULT_REGION'), 'es', session_token=os.environ.get('AWS_SESSION_TOKEN'))
ret = ElasticRetriever(hosts=[{'host':aws_host, 'port':443}], awsauth=auth)
else:
ret = ElasticRetriever(hosts=[host])
print('Connected to retriever, building indices')
ret.build_index(documents_parquet, entities_parquet, sections_parquet, tables_parquet, figures_parquet, equations_parquet)
print('Done building index')
if __name__ == '__main__':
run()
|
52113
|
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import numpy as np
import tqdm
from data import dataset as dset
import torchvision.models as tmodels
from models import models
import os
import itertools
import glob
from utils import utils
import torch.backends.cudnn as cudnn
cudnn.benchmark = True
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', default='mitstates', help='mitstates|zappos')
parser.add_argument('--data_dir', default='data/mit-states/', help='data root dir')
parser.add_argument('--cv_dir', default='cv/tmp/', help='dir to save checkpoints to')
parser.add_argument('--load', default=None, help='path to checkpoint to load from')
# model parameters
parser.add_argument('--model', default='visprodNN', help='visprodNN|redwine|labelembed+|attributeop')
parser.add_argument('--emb_dim', type=int, default=300, help='dimension of common embedding space')
parser.add_argument('--nlayers', type=int, default=2, help='number of layers for labelembed+')
parser.add_argument('--glove_init', action='store_true', default=False, help='initialize inputs with word vectors')
parser.add_argument('--clf_init', action='store_true', default=False, help='initialize inputs with SVM weights')
parser.add_argument('--static_inp', action='store_true', default=False, help='do not optimize input representations')
# regularizers
parser.add_argument('--lambda_aux', type=float, default=0.0)
parser.add_argument('--lambda_inv', type=float, default=0.0)
parser.add_argument('--lambda_comm', type=float, default=0.0)
parser.add_argument('--lambda_ant', type=float, default=0.0)
# optimization
parser.add_argument('--batch_size', type=int, default=512)
parser.add_argument('--lr', type=float, default=1e-4)
parser.add_argument('--wd', type=float, default=5e-5)
parser.add_argument('--save_every', type=int, default=100)
parser.add_argument('--eval_val_every', type=int, default=20)
parser.add_argument('--max_epochs', type=int, default=1000)
args = parser.parse_args()
def test(epoch):
model.eval()
accuracies = []
for idx, data in tqdm.tqdm(enumerate(testloader), total=len(testloader)):
data = [d.cuda() for d in data]
_, predictions = model(data)
attr_truth, obj_truth = data[1], data[2]
results = evaluator.score_model(predictions, obj_truth)
match_stats = evaluator.evaluate_predictions(results, attr_truth, obj_truth)
accuracies.append(match_stats)
accuracies = zip(*accuracies)
accuracies = map(torch.mean, map(torch.cat, accuracies))
attr_acc, obj_acc, closed_acc, open_acc, objoracle_acc = accuracies
print ('(test) E: %d | A: %.3f | O: %.3f | Cl: %.3f | Op: %.4f | OrO: %.4f'%(epoch, attr_acc, obj_acc, closed_acc, open_acc, objoracle_acc))
#----------------------------------------------------------------#
testset = dset.CompositionDatasetActivations(root=args.data_dir, phase='test', split='compositional-split')
testloader = torch.utils.data.DataLoader(testset, batch_size=args.batch_size, shuffle=False, num_workers=2)
if args.model == 'visprodNN':
model = models.VisualProductNN(testset, args)
elif args.model == 'redwine':
model = models.RedWine(testset, args)
elif args.model == 'labelembed+':
    model = models.LabelEmbedPlus(testset, args)
elif args.model == 'attributeop':
model = models.AttributeOperator(testset, args)
model.cuda()
evaluator = models.Evaluator(testset, model)
checkpoint = torch.load(args.load)
model.load_state_dict(checkpoint['net'])
start_epoch = checkpoint['epoch']
print ('loaded model from', os.path.basename(args.load))
with torch.no_grad():
test(0)
|
52215
|
import numpy.testing as test
import numpy as np
from unittest import TestCase
from PyFVCOM.ocean import *
class OceanToolsTest(TestCase):
def setUp(self):
""" Make a set of data for the various ocean tools functions """
self.lat = 30
self.z = np.array(9712.02)
self.t = np.array(40)
self.s = np.array(40)
self.p = np.array(10000)
self.pr = np.array(0)
self.c = np.array(1.888091)
self.td = np.array(20) # for dens_jackett
self.sd = np.array(20) # for dens_jackett
self.pd = np.array(1000) # for dens_jackett
self.cond = np.array(53000) # for cond2salt
self.h = np.array((10, 20, 30, 100)) # depths for stokes
self.U = 0.25 # U for stokes and dissipation
self.omega = 1 / 44714.1647021416 # omega for stokes
self.z0 = np.array((0.0025)) # z0 for stokes
self.rho = 1025
self.temp = np.arange(-20, 50, 10)
self.dew = np.linspace(0, 20, len(self.temp))
# Use some of the Fofonoff and Millard (1983) checks.
def test_sw_svan(self):
""" Specific volume anomaly """
test_svan = 9.8130210e-6
res_svan = sw_svan(self.t, self.s, self.p)
test.assert_almost_equal(res_svan, test_svan, decimal=1)
def test_res_z(self):
""" Pressure to depth """
test_z = 9712.02
res_z = pressure2depth(self.p, self.lat)
# Hmmm, not very accurate!
test.assert_almost_equal(res_z, test_z, decimal=-1)
# The return to depth is a bit inaccurate, not sure why.
def test_depth2pressure(self):
""" Depth to pressure """
test_p = 9712.653
res_pres = depth2pressure(self.z, self.lat)
# Hmmm, horribly inaccurate!
test.assert_almost_equal(res_pres, test_p, decimal=-4)
def test_cp_sw(self):
""" Specific heat of seawater """
test_cp = 3849.5
res_cp = cp_sw(self.t, self.s, self.p)
test.assert_almost_equal(res_cp, test_cp, decimal=1)
def test_dT_adiab_sw(self):
""" Adiabatic temperature gradient """
test_atg = 0.0003255976
res_atg = dT_adiab_sw(self.t, self.s, self.p)
test.assert_almost_equal(res_atg, test_atg, decimal=6)
def test_theta_sw(self):
""" Potential temperature for sea water """
test_theta = 36.89073
res_theta = theta_sw(self.t, self.s, self.p, self.pr)
test.assert_almost_equal(res_theta, test_theta, decimal=2)
def test_sw_sal78(self):
""" Salinity from conductivity, temperature and pressure (sw_sal78) """
test_salinity = 40
res_sal78 = sw_sal78(self.c, self.t, self.p)
test.assert_almost_equal(res_sal78, test_salinity, decimal=5)
def test_dens_jackett(self):
""" Density from temperature, salinity and pressure """
test_dens = 1017.728868019642
res_dens = dens_jackett(self.td, self.sd, self.pd)
test.assert_equal(res_dens, test_dens)
def test_cond2salt(self):
""" Conductivity to salinity """
test_salt = 34.935173507811783
res_salt = cond2salt(self.cond)
test.assert_equal(res_salt, test_salt)
# def test_stokes(self):
# """ Stokes number """
# test_stokes, test_u_star, test_delta = np.nan, np.nan, np.nan
# res_stokes, res_u_star, res_delta = stokes(self.h, self.U, self.omega, self.z0, U_star=True, delta=True)
# test.assert_equal(res_stokes, test_stokes)
# test.assert_equal(res_u_star, test_u_star)
# test.assert_equal(res_delta, test_delta)
def test_dissipation(self):
""" Tidal dissipation for a given tidal harmonic """
test_dissipation = 0.0400390625
res_dissipation = dissipation(self.rho, self.U)
test.assert_equal(res_dissipation, test_dissipation)
def test_rhum(self):
""" Relative humidity from dew temperature and air temperature """
test_rhum = np.array((487.36529085, 270.83391406, 160.16590946, 100.0, 65.47545095, 44.70251971, 31.67003471))
res_rhum = rhum(self.dew, self.temp)
test.assert_almost_equal(res_rhum, test_rhum)
|
52237
|
from __future__ import print_function
from src import cli
from os import environ as ENV
PROFILE=False
if PROFILE:
print("PROFILING")
import cProfile
cProfile.run("cli.main()", "restats")
import pstats
p = pstats.Stats('restats')
p.strip_dirs().sort_stats('cumulative').print_stats(50)
else:
cli.main()
|
52244
|
import math
import numpy as np
class Strategy:
"""Options strategy class.
Takes in a number of `StrategyLeg`'s (option contracts), and filters that determine
entry and exit conditions.
"""
def __init__(self, schema):
self.schema = schema
self.legs = []
self.conditions = []
self.exit_thresholds = (math.inf, math.inf)
def add_leg(self, leg):
"""Adds leg to the strategy"""
assert self.schema == leg.schema
leg.name = "leg_{}".format(len(self.legs) + 1)
self.legs.append(leg)
return self
def add_legs(self, legs):
"""Adds legs to the strategy"""
for leg in legs:
self.add_leg(leg)
return self
def remove_leg(self, leg_number):
"""Removes leg from the strategy"""
self.legs.pop(leg_number)
return self
def clear_legs(self):
"""Removes *all* legs from the strategy"""
self.legs = []
return self
def add_exit_thresholds(self, profit_pct=math.inf, loss_pct=math.inf):
"""Adds maximum profit/loss thresholds. Both **must** be >= 0.0
Args:
profit_pct (float, optional): Max profit level. Defaults to math.inf
loss_pct (float, optional): Max loss level. Defaults to math.inf
"""
assert profit_pct >= 0
assert loss_pct >= 0
self.exit_thresholds = (profit_pct, loss_pct)
def filter_thresholds(self, entry_cost, current_cost):
"""Returns a `pd.Series` of booleans indicating where profit (loss) levels
exceed the given thresholds.
Args:
entry_cost (pd.Series): Total _entry_ cost of inventory row.
current_cost (pd.Series): Present cost of inventory row.
Returns:
pd.Series: Indicator series with `True` for every row that
exceeds the specified profit/loss thresholds.
"""
profit_pct, loss_pct = self.exit_thresholds
excess_return = (current_cost / entry_cost + 1) * -np.sign(entry_cost)
return (excess_return >= profit_pct) | (excess_return <= -loss_pct)
def __repr__(self):
return "Strategy(legs={}, exit_thresholds={})".format(self.legs, self.exit_thresholds)
|
52262
|
import os
EXAMPLE_SVG_PATH = os.path.join(os.path.dirname(__file__), 'example.svg')
IDS_IN_EXAMPLE_SVG = {'red', 'yellow', 'blue', 'green'}
IDS_IN_EXAMPLE2_SVG = {'punainen', 'keltainen', 'sininen', 'vihrea'}
with open(EXAMPLE_SVG_PATH, 'rb') as infp:
EXAMPLE_SVG_DATA = infp.read()
EXAMPLE2_SVG_DATA = (
EXAMPLE_SVG_DATA
.replace(b'"red"', b'"punainen"')
.replace(b'"green"', b'"vihrea"')
.replace(b'"blue"', b'"sininen"')
.replace(b'"yellow"', b'"keltainen"')
)
|
52305
|
import bleach
import bleach_whitelist
from django.conf import settings
from rest_framework.pagination import PageNumberPagination
def sanitize(string):
# bleach doesn't handle None so let's not pass it
if string and getattr(settings, "RESPONSE_SANITIZE_USER_INPUT", True):
return bleach.clean(
string,
tags=bleach_whitelist.markdown_tags,
attributes=bleach_whitelist.markdown_attrs,
styles=bleach_whitelist.all_styles,
)
return string
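# --- Usage sketch (illustrative only) ---
# With RESPONSE_SANITIZE_USER_INPUT left at its default (True) and Django
# settings configured, disallowed markup is escaped while the whitelisted
# markdown tags survive (assumed behaviour of bleach's defaults), e.g.:
#
#     sanitize('<script>alert(1)</script><em>hi</em>')
#     # -> the <script> markup comes back escaped, <em>hi</em> is kept as-is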
class LargeResultsSetPagination(PageNumberPagination):
page_size = 500
max_page_size = 1000
page_size_query_param = "page_size"
|
52306
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import sys
import tempfile
from observations.r.lost_letter import lost_letter
def test_lost_letter():
"""Test module lost_letter.py by downloading
lost_letter.csv and testing shape of
extracted data has 140 rows and 8 columns
"""
test_path = tempfile.mkdtemp()
x_train, metadata = lost_letter(test_path)
try:
assert x_train.shape == (140, 8)
  except Exception:
    shutil.rmtree(test_path)
    raise
|
52316
|
import os
from aztk.models.plugins.plugin_configuration import PluginConfiguration, PluginPort, PluginTargetRole
from aztk.models.plugins.plugin_file import PluginFile
dir_path = os.path.dirname(os.path.realpath(__file__))
class SparkUIProxyPlugin(PluginConfiguration):
def __init__(self):
super().__init__(
name="spark_ui_proxy",
ports=[PluginPort(internal=9999, public=True)],
target_role=PluginTargetRole.Master,
execute="spark_ui_proxy.sh",
args=["localhost:8080", "9999"],
files=[
PluginFile("spark_ui_proxy.sh", os.path.join(dir_path, "spark_ui_proxy.sh")),
PluginFile("spark_ui_proxy.py", os.path.join(dir_path, "spark_ui_proxy.py")),
],
)
|
52328
|
import torch
import random
import torch.nn as nn
from abc import abstractmethod
from abc import ABCMeta
from torch import Tensor
from typing import Any
from typing import Dict
from typing import List
from typing import Tuple
from typing import Optional
from .losses import GANLoss
from .losses import GANTarget
from .discriminators import DiscriminatorBase
from ..protocol import GaussianGeneratorMixin
from ....data import CVLoader
from ....types import tensor_dict_type
from ....protocol import StepOutputs
from ....protocol import TrainerState
from ....protocol import MetricsOutputs
from ....protocol import ModelWithCustomSteps
from ....constants import LOSS_KEY
from ....constants import INPUT_KEY
from ....constants import LABEL_KEY
from ....constants import PREDICTIONS_KEY
from ....misc.toolkit import to_device
from ....misc.toolkit import mode_context
from ....misc.toolkit import toggle_optimizer
class GANMixin(ModelWithCustomSteps, GaussianGeneratorMixin, metaclass=ABCMeta):
def __init__(
self,
*,
num_classes: Optional[int] = None,
gan_mode: str = "vanilla",
gan_loss_config: Optional[Dict[str, Any]] = None,
):
super().__init__()
self.num_classes = num_classes
self.gan_mode = gan_mode
self.gan_loss = GANLoss(gan_mode)
if gan_loss_config is None:
gan_loss_config = {}
self.lambda_gp = gan_loss_config.get("lambda_gp", 10.0)
@property
@abstractmethod
def g_parameters(self) -> List[nn.Parameter]:
pass
@property
@abstractmethod
def d_parameters(self) -> List[nn.Parameter]:
pass
@abstractmethod
def _g_losses(
self,
batch: tensor_dict_type,
forward_kwargs: Dict[str, Any],
) -> Tuple[tensor_dict_type, tensor_dict_type, Optional[Tensor]]:
# g_losses, sampled, labels
pass
@abstractmethod
def _d_losses(
self,
batch: tensor_dict_type,
sampled: tensor_dict_type,
labels: Optional[Tensor],
) -> tensor_dict_type:
# d_losses
pass
# utilities
@property
def can_reconstruct(self) -> bool:
return False
def forward(
self,
batch_idx: int,
batch: tensor_dict_type,
state: Optional[TrainerState] = None,
**kwargs: Any,
) -> tensor_dict_type:
z = torch.randn(len(batch[INPUT_KEY]), self.latent_dim, device=self.device)
return {PREDICTIONS_KEY: self.decode(z, labels=batch[LABEL_KEY], **kwargs)}
def summary_forward(self, batch_idx: int, batch: tensor_dict_type) -> None:
self._g_losses(batch, {})
class OneStageGANMixin(GANMixin, metaclass=ABCMeta):
def train_step(
self,
batch_idx: int,
batch: tensor_dict_type,
trainer: Any,
forward_kwargs: Dict[str, Any],
loss_kwargs: Dict[str, Any],
) -> StepOutputs:
opt_g = trainer.optimizers["g_parameters"]
opt_d = trainer.optimizers["d_parameters"]
# generator step
toggle_optimizer(self, opt_g)
with torch.cuda.amp.autocast(enabled=trainer.use_amp):
g_losses, sampled, labels = self._g_losses(batch, forward_kwargs)
g_loss = g_losses.pop(LOSS_KEY)
trainer.grad_scaler.scale(g_loss).backward()
if trainer.clip_norm > 0.0:
trainer._clip_norm_step()
trainer.grad_scaler.step(opt_g)
trainer.grad_scaler.update()
opt_g.zero_grad()
# discriminator step
toggle_optimizer(self, opt_d)
with torch.no_grad():
sampled = {k: v.detach().clone() for k, v in sampled.items()}
with torch.cuda.amp.autocast(enabled=trainer.use_amp):
d_losses = self._d_losses(batch, sampled, labels)
d_loss = d_losses.pop(LOSS_KEY)
trainer.grad_scaler.scale(d_loss).backward()
if trainer.clip_norm > 0.0:
trainer._clip_norm_step()
trainer.grad_scaler.step(opt_d)
trainer.grad_scaler.update()
opt_d.zero_grad()
# finalize
trainer._scheduler_step()
forward_results = {PREDICTIONS_KEY: sampled}
loss_dict = {"g": g_loss.item(), "d": d_loss.item()}
loss_dict.update({k: v.item() for k, v in g_losses.items()})
loss_dict.update({k: v.item() for k, v in d_losses.items()})
return StepOutputs(forward_results, loss_dict)
def evaluate_step( # type: ignore
self,
loader: CVLoader,
portion: float,
trainer: Any,
) -> MetricsOutputs:
loss_items: Dict[str, List[float]] = {}
for i, batch in enumerate(loader):
if i / len(loader) >= portion:
break
batch = to_device(batch, self.device)
g_losses, sampled, labels = self._g_losses(batch, {})
d_losses = self._d_losses(batch, sampled, labels)
g_loss = g_losses.pop(LOSS_KEY)
d_loss = d_losses.pop(LOSS_KEY)
loss_dict = {"g": g_loss.item(), "d": d_loss.item()}
loss_dict.update({k: v.item() for k, v in g_losses.items()})
loss_dict.update({k: v.item() for k, v in d_losses.items()})
for k, v in loss_dict.items():
loss_items.setdefault(k, []).append(v)
# gather
mean_loss_items = {k: sum(v) / len(v) for k, v in loss_items.items()}
mean_loss_items[LOSS_KEY] = sum(mean_loss_items.values())
score = trainer._weighted_loss_score(mean_loss_items)
return MetricsOutputs(score, mean_loss_items)
class VanillaGANMixin(OneStageGANMixin, metaclass=ABCMeta):
def __init__(
self,
in_channels: int,
*,
discriminator: str = "basic",
discriminator_config: Optional[Dict[str, Any]] = None,
num_classes: Optional[int] = None,
gan_mode: str = "vanilla",
gan_loss_config: Optional[Dict[str, Any]] = None,
):
super().__init__(
num_classes=num_classes,
gan_mode=gan_mode,
gan_loss_config=gan_loss_config,
)
if discriminator_config is None:
discriminator_config = {}
discriminator_config["in_channels"] = in_channels
discriminator_config["num_classes"] = num_classes
self.discriminator = DiscriminatorBase.make(
discriminator,
config=discriminator_config,
)
@property
def d_parameters(self) -> List[nn.Parameter]:
return list(self.discriminator.parameters())
def _g_losses(
self,
batch: tensor_dict_type,
forward_kwargs: Dict[str, Any],
) -> Tuple[tensor_dict_type, tensor_dict_type, Optional[Tensor]]:
labels = batch.get(LABEL_KEY)
if labels is not None:
labels = labels.view(-1)
sampled = self.sample(len(batch[INPUT_KEY]), labels=labels, **forward_kwargs)
pred_fake = self.discriminator(sampled)
loss_g = self.gan_loss(pred_fake, GANTarget(True, labels))
return {LOSS_KEY: loss_g}, {"sampled": sampled}, labels
def _d_losses(
self,
batch: tensor_dict_type,
sampled: tensor_dict_type,
labels: Optional[Tensor],
) -> tensor_dict_type:
net = batch[INPUT_KEY]
sampled_tensor = sampled["sampled"]
pred_real = self.discriminator(net)
loss_d_real = self.gan_loss(pred_real, GANTarget(True, labels))
pred_fake = self.discriminator(sampled_tensor)
loss_d_fake = self.gan_loss(pred_fake, GANTarget(False, labels))
d_loss = 0.5 * (loss_d_fake + loss_d_real)
losses = {"d_fake": loss_d_fake, "d_real": loss_d_real}
if self.gan_mode == "wgangp":
eps = random.random()
merged = eps * net + (1.0 - eps) * sampled_tensor
with mode_context(self.discriminator, to_train=None, use_grad=True):
pred_merged = self.discriminator(merged.requires_grad_(True)).output # type: ignore
loss_gp = self.gan_loss.loss(merged, pred_merged)
d_loss = d_loss + self.lambda_gp * loss_gp
losses["d_gp"] = loss_gp
losses[LOSS_KEY] = d_loss
return losses
__all__ = [
"GANMixin",
"OneStageGANMixin",
"VanillaGANMixin",
]
|
52330
|
import os
from dynaconf import Dynaconf # type: ignore
current_directory = os.path.dirname(os.path.realpath(__file__))
settings = Dynaconf(
envvar_prefix="PROMED",
settings_files=[
f"{current_directory}/settings.toml",
],
)
settings["DEBUG"] = True if settings.LOG_LEVEL == "DEBUG" else False
s = settings
|
52358
|
import timeit
import os
def timeFunction(function, setup):
    print('timing', function)
t = timeit.Timer(stmt=function, setup=setup)
times = []
for i in range(0,3):
os.system('sudo sh -c "sync; echo 3 > /proc/sys/vm/drop_caches"')
times.append(str(t.timeit(number=1)))
return min(times)
ints1 = timeFunction('blazeopt.loadtxt("ints1", dtype="u4,u4,u4,u4,u4", delimiter=",")', 'import blazeopt')
ints2 = timeFunction('blazeopt.loadtxt("ints2", dtype="u4,u4,u4,u4,u4", delimiter=",")', 'import blazeopt')
ints3 = timeFunction('blazeopt.loadtxt("ints3", dtype="u4,u4,u4,u4,u4", delimiter=",")', 'import blazeopt')
print(ints1, ints2, ints3)
floats1 = timeFunction('blazeopt.loadtxt("floats1", dtype="f8,f8,f8,f8,f8", delimiter=",")', 'import blazeopt')
floats2 = timeFunction('blazeopt.loadtxt("floats2", dtype="f8,f8,f8,f8,f8", delimiter=",")', 'import blazeopt')
floats3 = timeFunction('blazeopt.loadtxt("floats3", dtype="f8,f8,f8,f8,f8", delimiter=",")', 'import blazeopt')
print(floats1, floats2, floats3)
ints1 = timeFunction('blazeopt.genfromtxt("ints1", dtype="u4,u4,u4,u4,u4", delimiter=",")', 'import blazeopt')
ints2 = timeFunction('blazeopt.genfromtxt("ints2", dtype="u4,u4,u4,u4,u4", delimiter=",")', 'import blazeopt')
ints3 = timeFunction('blazeopt.genfromtxt("ints3", dtype="u4,u4,u4,u4,u4", delimiter=",")', 'import blazeopt')
print(ints1, ints2, ints3)
floats1 = timeFunction('blazeopt.genfromtxt("floats1", dtype="f8,f8,f8,f8,f8", delimiter=",")', 'import blazeopt')
floats2 = timeFunction('blazeopt.genfromtxt("floats2", dtype="f8,f8,f8,f8,f8", delimiter=",")', 'import blazeopt')
floats3 = timeFunction('blazeopt.genfromtxt("floats3", dtype="f8,f8,f8,f8,f8", delimiter=",")', 'import blazeopt')
print(floats1, floats2, floats3)
missingValues1 = timeFunction('blazeopt.genfromtxt("missingvalues1", dtype="u4,u4,u4,u4,u4", delimiter=",", missing_values={0:["NA","NaN"], 1:["xx","inf"]}, filling_values="999")', 'import blazeopt')
missingValues2 = timeFunction('blazeopt.genfromtxt("missingvalues2", dtype="u4,u4,u4,u4,u4", delimiter=",", missing_values={0:["NA","NaN"], 1:["xx","inf"]}, filling_values="999")', 'import blazeopt')
missingValues3 = timeFunction('blazeopt.genfromtxt("missingvalues3", dtype="u4,u4,u4,u4,u4", delimiter=",", missing_values={0:["NA","NaN"], 1:["xx","inf"]}, filling_values="999")', 'import blazeopt')
print(missingValues1, missingValues2, missingValues3)
fixedwidth1 = timeFunction('blazeopt.genfromtxt("fixedwidth1", dtype="u4,u4,u4,u4,u4", delimiter=[2,3,4,5,6])', 'import blazeopt')
fixedwidth2 = timeFunction('blazeopt.genfromtxt("fixedwidth2", dtype="u4,u4,u4,u4,u4", delimiter=[2,3,4,5,6])', 'import blazeopt')
fixedwidth3 = timeFunction('blazeopt.genfromtxt("fixedwidth3", dtype="u4,u4,u4,u4,u4", delimiter=[2,3,4,5,6])', 'import blazeopt')
print(fixedwidth1, fixedwidth2, fixedwidth3)
|
52381
|
import os
from shutil import SameFileError, copyfile
from urllib.request import Request, urlopen
import markdown
from bs4 import BeautifulSoup as BS
from blogger_cli.converter.extractor import (
extract_and_write_static,
extract_main_and_meta_from_md,
get_summary_limit,
extract_topic,
replace_ext,
)
def convert_and_copy_to_blog(ctx, md_file):
md_file_path = os.path.abspath(os.path.expanduser(md_file))
html_body, meta = convert(ctx, md_file_path)
html_filename_meta = write_html_and_md(ctx, html_body, md_file_path, meta)
return html_filename_meta
def convert(ctx, md_file_path):
with open(md_file_path, "r", encoding="utf8") as rf:
md_data = rf.read()
ctx.vlog(":: Extracting meta info")
main_md, metadata = extract_main_and_meta_from_md(ctx, md_data)
extensions = ["extra", "smarty"]
html = markdown.markdown(main_md, extensions=extensions, output_format="html5")
char_limit = get_summary_limit(ctx, file_type="md")
metadata["_summary_"] = main_md[:char_limit]
ctx.vlog(":: Extracted summary")
return html, metadata
def write_html_and_md(ctx, html_body, md_file_path, meta):
md_filename = os.path.basename(md_file_path)
destination_dir = ctx.conversion["destination_dir"]
topic = extract_topic(ctx, meta)
md_filename = os.path.join(topic, md_filename)
html_filename = replace_ext(md_filename, ".html")
html_file_path = os.path.join(destination_dir, html_filename)
new_md_file_path = os.path.join(destination_dir, md_filename)
new_blog_post_dir = os.path.dirname(html_file_path)
ctx.vlog(":: New blog_posts_dir finalized", new_blog_post_dir)
if not os.path.exists(new_blog_post_dir):
os.mkdir(new_blog_post_dir)
extract_static = ctx.conversion["extract_static"]
if extract_static:
html_body = extract_and_write_static(
ctx, html_body, new_blog_post_dir, md_filename
)
with open(html_file_path, "w", encoding="utf8") as wf:
wf.write(html_body)
ctx.log(":: Converted basic html to", html_file_path)
# skip copying md file if converting to and from same folder.
if md_file_path != new_md_file_path:
try:
copyfile(md_file_path, new_md_file_path)
ctx.log(":: Copied md file to", new_md_file_path)
except Exception as E:
os.remove(new_md_file_path)
copyfile(md_file_path, new_md_file_path)
ctx.log(":: ERROR", E, "Overwriting md file", new_md_file_path)
return (html_filename, meta)
|
52385
|
def consume_rec(xs, fn, n=0):
if n <= 0:
fn(xs)
else:
for x in xs:
consume_rec(x, fn, n=n - 1)
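# --- Usage sketch (illustrative only) ---
# `n` controls how many levels of nesting are walked before `fn` is applied:
# n=0 applies `fn` to `xs` itself, n=1 applies it to each element, and so on.
if __name__ == "__main__":
    collected = []
    consume_rec([[1, 2], [3]], collected.extend, n=1)  # fn sees [1, 2] then [3]
    print(collected)  # [1, 2, 3]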
|
52434
|
from __future__ import absolute_import
from . import evaluation
from . import normalization
from . import extractors
from . import generators
from . import layers
from . import model
from . import utils
from . import sequence
# Also importable from root
from .generators import Generator, MultiGenerator
from .model import ModelWrapper
from .sequence import SeqIntervalDl, StringSeqIntervalDl
__version__ = '0.0.17'
|
52453
|
import matplotlib.widgets as mwidgets
class Slider(mwidgets.Slider):
"""Slider widget to select a value from a floating point range.
Parameters
----------
ax : :class:`~matplotlib.axes.Axes` instance
The parent axes for the widget
value_range : (float, float)
(min, max) value allowed for value.
label : str
The slider label.
value : float
Initial value. If None, set to value in middle of value range.
on_slide : function
Callback function for slide event. Function should expect slider value.
value_fmt : str
Format string for formatting the slider text.
slidermin, slidermax : float
        Used to constrain the value of this slider to the values
of other sliders.
dragging : bool
If True, slider is responsive to mouse.
pad : float
Padding (in axes coordinates) between `label`/`value_fmt` and slider.
Attributes
----------
value : float
Current slider value.
"""
def __init__(self, ax, value_range, label='', value=None, on_slide=None,
value_fmt='%1.2f', slidermin=None, slidermax=None,
dragging=True, pad=0.02):
mwidgets.AxesWidget.__init__(self, ax)
self.valmin, self.valmax = value_range
if value is None:
value = 0.5 * (self.valmin + self.valmax)
self.val = value
self.valinit = value
self.valfmt = value_fmt
y0 = 0.5
x_low = [self.valmin, value]
x_high = [value, self.valmax]
self.line_low, = ax.plot(x_low, [y0, y0], color='0.5', lw=2)
self.line_high, = ax.plot(x_high, [y0, y0], color='0.7', lw=2)
self.val_handle, = ax.plot(value, y0, 'o',
mec='0.4', mfc='0.6', markersize=8)
ax.set_xlim(value_range)
ax.set_navigate(False)
ax.set_axis_off()
self.connect_event('button_press_event', self._update)
self.connect_event('button_release_event', self._update)
if dragging:
self.connect_event('motion_notify_event', self._update)
self.label = ax.text(-pad, y0, label, transform=ax.transAxes,
verticalalignment='center',
horizontalalignment='right')
        self.show_value = value_fmt is not None
if self.show_value:
self.valtext = ax.text(1 + pad, y0, value_fmt % value,
transform=ax.transAxes,
verticalalignment='center',
horizontalalignment='left')
self.slidermin = slidermin
self.slidermax = slidermax
self.drag_active = False
self.cnt = 0
self.observers = {}
if on_slide is not None:
self.on_changed(on_slide)
# Attributes for matplotlib.widgets.Slider compatibility
self.closedmin = self.closedmax = True
@property
def value(self):
return self.val
@value.setter
def value(self, value):
self.val = value
self.line_low.set_xdata([self.valmin, value])
self.line_high.set_xdata([value, self.valmax])
self.val_handle.set_xdata([value])
if self.show_value:
self.valtext.set_text(self.valfmt % value)
def set_val(self, value):
"""Set value of slider."""
# Override matplotlib.widgets.Slider to update graphics objects.
self.value = value
if self.drawon:
self.ax.figure.canvas.draw()
if not self.eventson:
return
for cid, func in self.observers.items():
func(value)
if __name__ == '__main__':
import numpy as np
import matplotlib.pyplot as plt
ax = plt.subplot2grid((10, 1), (0, 0), rowspan=8)
ax_slider = plt.subplot2grid((10, 1), (9, 0))
a0 = 5
x = np.arange(0.0, 1.0, 0.001)
y = np.sin(6 * np.pi * x)
line, = ax.plot(x, a0 * y, lw=2, color='red')
ax.axis([x.min(), x.max(), -10, 10])
def update(val):
amp = samp.value
line.set_ydata(amp * y)
samp = Slider(ax_slider, (0.1, 10.0), on_slide=update,
label='Amplitude:', value=a0)
plt.show()
|
52473
|
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('ggplot')
m_f = np.load('objects/simulation_model_freq.npy')[:50]
m_p = np.load('objects/simulation_model_power.npy')[:50]
eeg_f = np.load('objects/real_eeg_freq.npy0.npy')[:50]
eeg_p = np.load('objects/real_eeg_power_0.npy')[:50]
plt.figure()
plt.semilogy(eeg_f, eeg_p, linewidth=2.0, c='b')
plt.xlabel('frequency [Hz]')
plt.ylabel('Linear spectrum [V RMS]')
plt.title('Power spectrum (scipy.signal.welch)')
plt.show()
|
52546
|
class PID(object):
def __init__(self, kp, ki, kd):
self.kp = kp
self.ki = ki
self.kd = kd
self.error_int = 0
self.error_prev = None
def control(self, error):
self.error_int += error
if self.error_prev is None:
self.error_prev = error
error_deriv = error - self.error_prev
self.error_prev = error
return self.kp*error + self.ki*self.error_int + self.kd*error_deriv
def reset(self):
self.error_prev = None
self.error_int = 0
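# --- Usage sketch (illustrative only; gains and plant are made up) ---
# Drives a toy first-order plant toward a setpoint with hand-picked gains.
if __name__ == "__main__":
    pid = PID(kp=0.8, ki=0.05, kd=0.1)
    state, setpoint = 0.0, 1.0
    for _ in range(100):
        u = pid.control(setpoint - state)
        state += 0.5 * u  # toy plant: the state moves by half the control signal
    print(round(state, 2))  # should settle very close to the setpoint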
|
52611
|
from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
from django.forms import ModelForm
from .models import Course, Submission, Assignment
class SignUpForm(UserCreationForm):
def clean_email(self):
email = self.cleaned_data.get('email')
username = self.cleaned_data.get('username')
if email and User.objects.filter(email=email).exclude(username=username).exists():
raise forms.ValidationError(u'Email addresses must be unique.')
return email
class Meta:
model = User
fields = ('username', 'first_name', 'last_name', 'email' , '<PASSWORD>', '<PASSWORD>', )
class EnrollForm(forms.Form):
secret_key = forms.CharField(
widget=forms.TextInput(attrs={'placeholder': '<KEY>'}),
label='Secret Key',
required=False)
class Meta:
        fields = ('secret_key',)
class ChangeEmailForm(forms.Form):
email = forms.EmailField()
def clean_email(self):
email = self.cleaned_data.get('email')
if email and User.objects.filter(email=email).exists():
raise forms.ValidationError(u'That email is already used.')
return email
class Meta:
        fields = ('email',)
|
52624
|
from tempfile import mkstemp
import cProfile
import pstats
from artemis.general.display import surround_with_header
import os
def what_are_we_waiting_for(command, sort_by ='time', max_len = 20, print_here = True):
"""
An easy way to show what is taking all the time when you run something.
Taken from docs: https://docs.python.org/2/library/profile.html#module-cProfile
:param command: A string python command
:param sort_by: How to sort results. {'time', 'cumtime', 'calls', ...}.
See https://docs.python.org/2/library/profile.html#pstats.Stats.sort_stats
:param max_len: Maximum number of things to show in profile.
:param print_here: Print the results here (instead of returning them).
:return: A pstats.Stats object containing the profiling results.
"""
_, filepath = mkstemp()
try:
cProfile.run(command, filepath)
finally:
p = pstats.Stats(filepath)
os.remove(filepath)
p.strip_dirs()
p.sort_stats(sort_by)
if print_here:
print(surround_with_header('Profile for "{}"'.format(command), width=100, char='='))
p.print_stats(max_len)
print('='*100)
return p
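# --- Usage sketch (illustrative only) ---
# Profiles a throwaway expression and prints the 20 most expensive entries,
# sorted by cumulative time:
#
#     what_are_we_waiting_for("sum(i * i for i in range(10 ** 6))", sort_by='cumtime')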
|
52650
|
import pytest
from django.db import transaction
from django.utils import timezone
from ..models import Message, FOIRequest, Esic, PublicBody
@pytest.fixture
def public_body(esic):
return PublicBody(
name='example',
esic=esic
)
@pytest.fixture
def esic():
return Esic(
url='http://example.com'
)
@pytest.fixture
def foi_request():
return FOIRequest()
@pytest.fixture
def message(foi_request):
return Message(
foi_request=foi_request
)
@pytest.fixture
def foi_request_with_sent_user_message(foi_request, message_from_user):
with transaction.atomic():
message_from_user.approve()
message_from_user.foi_request = foi_request
message_from_user.sent_at = timezone.now()
save_message(message_from_user)
foi_request.refresh_from_db()
return foi_request
@pytest.fixture
def message_from_user(public_body):
return Message(
sender=None,
receiver=public_body
)
@pytest.fixture
def message_from_government(public_body):
return Message(
sender=public_body,
sent_at=timezone.now(),
receiver=None
)
def save_message(message):
# FIXME: Ideally a simple message.save() would save everything, but I
# couldn't find out how to do so in Django. Not yet.
with transaction.atomic():
if message.sender:
save_public_body(message.sender)
message.sender_id = message.sender.id
if message.receiver:
save_public_body(message.receiver)
message.receiver_id = message.receiver.id
message.foi_request.save()
message.foi_request_id = message.foi_request.id
message.save()
def save_public_body(public_body):
with transaction.atomic():
if public_body.esic:
public_body.esic.save()
public_body.esic_id = public_body.esic.id
public_body.save()
return public_body
|
52667
|
import requests
import zipfile
import os
import errno
import nltk
from nltk.tokenize import sent_tokenize
ALICE_URL = 'https://ota.bodleian.ox.ac.uk/repository/xmlui/bitstream/handle/20.500.12024/1476/alice28-1476.txt'
WIZARD_URL = 'https://ota.bodleian.ox.ac.uk/repository/xmlui/bitstream/handle/20.500.12024/1740/wizoz10-1740.txt'
def download_text(url, localfolder='texts'):
localfile = os.path.split(url)[-1]
try:
os.mkdir(f'{localfolder}')
except OSError as e:
if e.errno != errno.EEXIST:
raise
try:
r = requests.get(url, allow_redirects=True)
open(os.path.join(localfolder, localfile), 'wb').write(r.content)
except Exception as e:
print(f'Error downloading file: {str(e)}')
def sentence_tokenize(source, quote_char='\\', sep_char=',',
include_header=True, include_source=True,
                      extensions=('txt',), **kwargs):
nltk.download('punkt')
# If source is a folder, goes through all files inside it
# that match the desired extensions ('txt' by default)
if os.path.isdir(source):
filenames = [f for f in os.listdir(source)
if os.path.isfile(os.path.join(source, f)) and
os.path.splitext(f)[1][1:] in extensions]
elif isinstance(source, str):
filenames = [source]
# If there is a configuration file, builds a dictionary with
# the corresponding start and end lines of each text file
config_file = os.path.join(source, 'lines.cfg')
config = {}
if os.path.exists(config_file):
with open(config_file, 'r') as f:
rows = f.readlines()
for r in rows[1:]:
fname, start, end = r.strip().split(',')
config.update({fname: (int(start), int(end))})
new_fnames = []
# For each file of text
for fname in filenames:
# If there's a start and end line for that file, use it
try:
start, end = config[fname]
except KeyError:
start = None
end = None
# Opens the file, slices the configures lines (if any)
# cleans line breaks and uses the sentence tokenizer
with open(os.path.join(source, fname), 'r') as f:
contents = (''.join(f.readlines()[slice(start, end, None)])
.replace('\n', ' ').replace('\r', ''))
corpus = sent_tokenize(contents, **kwargs)
# Builds a CSV file containing tokenized sentences
base = os.path.splitext(fname)[0]
new_fname = f'{base}.sent.csv'
new_fname = os.path.join(source, new_fname)
with open(new_fname, 'w') as f:
# Header of the file
if include_header:
if include_source:
f.write('sentence,source\n')
else:
f.write('sentence\n')
# Writes one line for each sentence
for sentence in corpus:
if include_source:
f.write(f'{quote_char}{sentence}{quote_char}{sep_char}{fname}\n')
else:
f.write(f'{quote_char}{sentence}{quote_char}\n')
new_fnames.append(new_fname)
# Returns list of the newly generated CSV files
return sorted(new_fnames)
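# --- Usage sketch (illustrative only; the folder name is an assumption) ---
# After fetching the two books into ./texts, this writes one `<name>.sent.csv`
# per text file, each row holding a quoted sentence plus its source filename:
#
#     download_text(ALICE_URL)
#     download_text(WIZARD_URL)
#     csv_files = sentence_tokenize('texts')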
|
52682
|
import numpy as np
import xarray as xr
from numpy import asarray
import scipy.sparse
from itertools import product
from .util import get_shape_of_data
from .grid_stretching_transforms import scs_transform
from .constants import R_EARTH_m
def get_troposphere_mask(ds):
"""
Returns a mask array for picking out the tropospheric grid boxes.
Args:
ds: xarray Dataset
Dataset containing certain met field variables (i.e.
Met_TropLev, Met_BXHEIGHT).
Returns:
tropmask: numpy ndarray
Tropospheric mask. False denotes grid boxes that are
in the troposphere and True in the stratosphere
(as per Python masking logic).
"""
# ==================================================================
# Initialization
# ==================================================================
# Make sure ds is an xarray Dataset object
if not isinstance(ds, xr.Dataset):
raise TypeError("The ds argument must be an xarray Dataset!")
# Make sure certain variables are found
if "Met_BXHEIGHT" not in ds.data_vars.keys():
raise ValueError("Met_BXHEIGHT could not be found!")
if "Met_TropLev" not in ds.data_vars.keys():
raise ValueError("Met_TropLev could not be found!")
# Mask of tropospheric grid boxes in the Ref dataset
shape = get_shape_of_data(np.squeeze(ds["Met_BXHEIGHT"]))
# Determine if this is GCHP data
is_gchp = "nf" in ds["Met_BXHEIGHT"].dims
# ==================================================================
# Create the mask arrays for the troposphere
#
# Convert the Met_TropLev DataArray objects to numpy ndarrays of
# integer. Also subtract 1 to convert from Fortran to Python
# array index notation.
# ==================================================================
multi_time_slices = (is_gchp and len(shape) == 5) or \
(not is_gchp and len(shape) == 4)
if multi_time_slices:
# --------------------------------------------------------------
# GCC: There are multiple time slices
# --------------------------------------------------------------
# Create the tropmask array with dims
# (time, lev, nf*lat*lon) for GCHP, or
# (time, lev, lat*lon ) for GCC
tropmask = np.ones((shape[0], shape[1],
np.prod(np.array(shape[2:]))), bool)
# Loop over each time
for t in range(tropmask.shape[0]):
# Pick the tropopause level and make a 1-D array
values = ds["Met_TropLev"].isel(time=t).values
lev = np.int_(np.squeeze(values) - 1)
lev_1d = lev.flatten()
# Create the tropospheric mask array
for x in range(tropmask.shape[2]):
tropmask[t, 0: lev_1d[x], x] = False
else:
# --------------------------------------------------------------
# There is only one time slice
# --------------------------------------------------------------
# Create the tropmask array with dims (lev, lat*lon)
tropmask = np.ones((shape[0], np.prod(np.array(shape[1:]))), bool)
# Pick the tropopause level and make a 1-D array
values = ds["Met_TropLev"].values
lev = np.int_(np.squeeze(values) - 1)
lev_1d = lev.flatten()
# Create the tropospheric mask array
for x in range(tropmask.shape[1]):
tropmask[0: lev_1d[x], x] = False
# Reshape into the same shape as Met_BxHeight
return tropmask.reshape(shape)
def get_input_res(data):
"""
Returns resolution of dataset passed to compare_single_level or compare_zonal_means
Args:
data: xarray Dataset
Input GEOS-Chem dataset
Returns:
res: str or int
Lat/lon res of the form 'latresxlonres' or cubed-sphere resolution
gridtype: str
'll' for lat/lon or 'cs' for cubed-sphere
"""
vdims = data.dims
if "lat" in vdims and "lon" in vdims:
lat = data["lat"].values
lon = data["lon"].values
if lat.size / 6 == lon.size:
return lon.size, "cs"
else:
lat.sort()
lon.sort()
# use increment of second and third coordinates
# to avoid polar mischief
lat_res = np.abs(lat[2] - lat[1])
lon_res = np.abs(lon[2] - lon[1])
return str(lat_res) + "x" + str(lon_res), "ll"
else:
#print("grid is cs: ", vdims)
# GCHP data using MAPL v1.0.0+ has dims time, lev, nf, Ydim, and Xdim
if isinstance(data.dims, tuple):
return len(data["Xdim"].values), "cs"
else:
return data.dims["Xdim"], "cs"
def call_make_grid(res, gridtype, in_extent=[-180, 180, -90, 90],
out_extent=[-180, 180, -90, 90], sg_params=[1, 170, -90]):
"""
Create a mask with NaN values removed from an input array
Args:
res: str or int
Resolution of grid (format 'latxlon' or csres)
gridtype: str
'll' for lat/lon or 'cs' for cubed-sphere
Keyword Args (optional):
in_extent: list[float, float, float, float]
Describes minimum and maximum latitude and longitude of input data
in the format [minlon, maxlon, minlat, maxlat]
Default value: [-180, 180, -90, 90]
out_extent: list[float, float, float, float]
Desired minimum and maximum latitude and longitude of output grid
in the format [minlon, maxlon, minlat, maxlat]
Default value: [-180, 180, -90, 90]
sg_params: list[float, float, float] (stretch_factor, target_longitude, target_latitude)
Desired stretched-grid parameters in the format
[stretch_factor, target_longitude, target_latitude].
Will trigger stretched-grid creation if not default values.
Default value: [1, 170, -90] (no stretching)
Returns:
[grid, grid_list]: list(dict, list(dict))
Returns the created grid.
grid_list is a list of grids if gridtype is 'cs', else it is None
"""
# call appropriate make_grid function and return new grid
if gridtype == "ll":
return [make_grid_LL(res, in_extent, out_extent), None]
elif sg_params == [1, 170, -90]:
# standard CS
return make_grid_CS(res)
else:
return make_grid_SG(res, *sg_params)
def get_grid_extents(data, edges=True):
"""
Get min and max lat and lon from an input GEOS-Chem xarray dataset or grid dict
Args:
data: xarray Dataset or dict
A GEOS-Chem dataset or a grid dict
edges (optional): bool
Whether grid extents should use cell edges instead of centers
Default value: True
Returns:
minlon: float
Minimum longitude of data grid
maxlon: float
Maximum longitude of data grid
minlat: float
Minimum latitude of data grid
maxlat: float
Maximum latitude of data grid
"""
if isinstance(data, dict):
if "lon_b" in data and edges:
return np.min(
data["lon_b"]), np.max(
data["lon_b"]), np.min(
data["lat_b"]), np.max(
data["lat_b"])
elif not edges:
return np.min(
data["lon"]), np.max(
data["lon"]), np.min(
data["lat"]), np.max(
data["lat"])
else:
return -180, 180, -90, 90
elif "lat" in data.dims and "lon" in data.dims:
lat = data["lat"].values
lon = data["lon"].values
if lat.size / 6 == lon.size:
# No extents for CS plots right now
return -180, 180, -90, 90
else:
lat = np.sort(lat)
minlat = np.min(lat)
if abs(abs(lat[1]) - abs(lat[0])
) != abs(abs(lat[2]) - abs(lat[1])):
#pole is cutoff
minlat = minlat - 1
maxlat = np.max(lat)
if abs(abs(lat[-1]) - abs(lat[-2])
) != abs(abs(lat[-2]) - abs(lat[-3])):
maxlat = maxlat + 1
# add longitude res to max longitude
lon = np.sort(lon)
minlon = np.min(lon)
maxlon = np.max(lon) + abs(abs(lon[-1]) - abs(lon[-2]))
return minlon, maxlon, minlat, maxlat
else:
# GCHP data using MAPL v1.0.0+ has dims time, lev, nf, Ydim, and Xdim
return -180, 180, -90, 90
def get_vert_grid(dataset, AP=[], BP=[]):
"""
Determine vertical grid of input dataset
Args:
dataset: xarray Dataset
A GEOS-Chem output dataset
Keyword Args (optional):
AP: list-like type
Hybrid grid parameter A in hPa
Default value: []
BP: list-like type
Hybrid grid parameter B (unitless)
Default value: []
Returns:
p_edge: numpy array
Edge pressure values for vertical grid
p_mid: numpy array
Midpoint pressure values for vertical grid
nlev: int
Number of levels in vertical grid
"""
if dataset.sizes["lev"] in (72, 73):
return GEOS_72L_grid.p_edge(), GEOS_72L_grid.p_mid(), 72
elif dataset.sizes["lev"] in (47, 48):
return GEOS_47L_grid.p_edge(), GEOS_47L_grid.p_mid(), 47
elif AP == [] or BP == []:
if dataset.sizes["lev"] == 1:
AP = [1, 1]
BP = [1]
new_grid = vert_grid(AP, BP)
return new_grid.p_edge(), new_grid.p_mid(), np.size(AP)
else:
            raise ValueError(
                "Only 72/73 or 47/48 level vertical grids are automatically determined "
                "from input dataset by get_vert_grid(), please pass grid parameters AP and BP "
                "as keyword arguments")
else:
new_grid = vert_grid(AP, BP)
return new_grid.p_edge(), new_grid.p_mid(), np.size(AP)
def get_pressure_indices(pedge, pres_range):
"""
Get indices where edge pressure values are within a given pressure range
Args:
pedge: numpy array
A GEOS-Chem output dataset
pres_range: list(float, float)
Contains minimum and maximum pressure
Returns:
numpy array
Indices where edge pressure values are within a given pressure range
"""
return np.where(
(pedge <= np.max(pres_range)) & (
pedge >= np.min(pres_range)))[0]
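# --- Worked example (illustrative only) ---
# A quick sanity check of the pressure-range filter above:
#
#     get_pressure_indices(np.array([1000.0, 500.0, 100.0, 10.0]), [100, 800])
#     # -> array([1, 2]); only the 500 hPa and 100 hPa edges fall in the range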
def pad_pressure_edges(pedge_ind, max_ind, pmid_len):
"""
Add outer indices to edge pressure index list
Args:
pedge_ind: list
List of edge pressure indices
max_ind: int
Maximum index
pmid_len: int
Length of pmid which should not be exceeded by indices
Returns:
pedge_ind: list
List of edge pressure indices, possibly with new minimum and maximum indices
"""
if max_ind > pmid_len:
# don't overstep array bounds for full array
max_ind = max_ind - 1
if min(pedge_ind) != 0:
pedge_ind = np.append(min(pedge_ind) - 1, pedge_ind)
if max(pedge_ind) != max_ind:
pedge_ind = np.append(pedge_ind, max(pedge_ind) + 1)
return pedge_ind
def get_ind_of_pres(dataset, pres):
"""
Get index of pressure level that contains the requested pressure value.
Args:
dataset: xarray Dataset
GEOS-Chem dataset
pres: int or float
Desired pressure value
Returns:
index: int
Index of level in dataset that corresponds to requested pressure
"""
pedge, pmid, _ = get_vert_grid(dataset)
converted_dataset = convert_lev_to_pres(dataset, pmid, pedge)
return np.argmin(np.abs(converted_dataset['lev'] - pres).values)
def convert_lev_to_pres(dataset, pmid, pedge, lev_type='pmid'):
"""
Convert lev dimension to pressure in a GEOS-Chem dataset
Args:
dataset: xarray Dataset
GEOS-Chem dataset
pmid: np.array
Midpoint pressure values
pedge: np.array
Edge pressure values
lev_type (optional): str
Denote whether lev is 'pedge' or 'pmid' if grid is not 72/73 or 47/48 levels
Default value: 'pmid'
Returns:
dataset: xarray Dataset
Input dataset with "lev" dimension values replaced with pressure values
"""
if dataset.sizes["lev"] in (72, 47):
dataset["lev"] = pmid
elif dataset.sizes["lev"] in (73, 48):
dataset["lev"] = pedge
elif lev_type == 'pmid':
print('Warning: Assuming levels correspond with midpoint pressures')
dataset["lev"] = pmid
else:
dataset["lev"] = pedge
dataset["lev"].attrs["unit"] = "hPa"
dataset["lev"].attrs["long_name"] = "level pressure"
return dataset
class vert_grid:
def __init__(self, AP=None, BP=None, p_sfc=1013.25):
        if AP is None or BP is None or len(AP) != len(BP):
# Throw error?
print('Inconsistent vertical grid specification')
self.AP = np.array(AP)
self.BP = np.array(BP)
self.p_sfc = p_sfc
def p_edge(self):
# Calculate pressure edges using eta coordinate
return self.AP + self.BP * self.p_sfc
def p_mid(self):
p_edge = self.p_edge()
return (p_edge[1:] + p_edge[:-1]) / 2.0
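# --- Worked example (illustrative only) ---
# A toy three-edge grid with the default surface pressure of 1013.25 hPa:
#
#     g = vert_grid(AP=[0.0, 500.0, 1000.0], BP=[1.0, 0.5, 0.0])
#     g.p_edge()  # -> array([1013.25, 1006.625, 1000.0])
#     g.p_mid()   # -> array([1009.9375, 1003.3125])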
# Standard vertical grids
_GEOS_72L_AP = np.array([0.000000e+00,
4.804826e-02,
6.593752e+00,
1.313480e+01,
1.961311e+01,
2.609201e+01,
3.257081e+01,
3.898201e+01,
4.533901e+01,
5.169611e+01,
5.805321e+01,
6.436264e+01,
7.062198e+01,
7.883422e+01,
8.909992e+01,
9.936521e+01,
1.091817e+02,
1.189586e+02,
1.286959e+02,
1.429100e+02,
1.562600e+02,
1.696090e+02,
1.816190e+02,
1.930970e+02,
2.032590e+02,
2.121500e+02,
2.187760e+02,
2.238980e+02,
2.243630e+02,
2.168650e+02,
2.011920e+02,
1.769300e+02,
1.503930e+02,
1.278370e+02,
1.086630e+02,
9.236572e+01,
7.851231e+01,
6.660341e+01,
5.638791e+01,
4.764391e+01,
4.017541e+01,
3.381001e+01,
2.836781e+01,
2.373041e+01,
1.979160e+01,
1.645710e+01,
1.364340e+01,
1.127690e+01,
9.292942e+00,
7.619842e+00,
6.216801e+00,
5.046801e+00,
4.076571e+00,
3.276431e+00,
2.620211e+00,
2.084970e+00,
1.650790e+00,
1.300510e+00,
1.019440e+00,
7.951341e-01,
6.167791e-01,
4.758061e-01,
3.650411e-01,
2.785261e-01,
2.113490e-01,
1.594950e-01,
1.197030e-01,
8.934502e-02,
6.600001e-02,
4.758501e-02,
3.270000e-02,
2.000000e-02,
1.000000e-02])
_GEOS_72L_BP = np.array([1.000000e+00,
9.849520e-01,
9.634060e-01,
9.418650e-01,
9.203870e-01,
8.989080e-01,
8.774290e-01,
8.560180e-01,
8.346609e-01,
8.133039e-01,
7.919469e-01,
7.706375e-01,
7.493782e-01,
7.211660e-01,
6.858999e-01,
6.506349e-01,
6.158184e-01,
5.810415e-01,
5.463042e-01,
4.945902e-01,
4.437402e-01,
3.928911e-01,
3.433811e-01,
2.944031e-01,
2.467411e-01,
2.003501e-01,
1.562241e-01,
1.136021e-01,
6.372006e-02,
2.801004e-02,
6.960025e-03,
8.175413e-09,
0.000000e+00,
0.000000e+00,
0.000000e+00,
0.000000e+00,
0.000000e+00,
0.000000e+00,
0.000000e+00,
0.000000e+00,
0.000000e+00,
0.000000e+00,
0.000000e+00,
0.000000e+00,
0.000000e+00,
0.000000e+00,
0.000000e+00,
0.000000e+00,
0.000000e+00,
0.000000e+00,
0.000000e+00,
0.000000e+00,
0.000000e+00,
0.000000e+00,
0.000000e+00,
0.000000e+00,
0.000000e+00,
0.000000e+00,
0.000000e+00,
0.000000e+00,
0.000000e+00,
0.000000e+00,
0.000000e+00,
0.000000e+00,
0.000000e+00,
0.000000e+00,
0.000000e+00,
0.000000e+00,
0.000000e+00,
0.000000e+00,
0.000000e+00,
0.000000e+00,
0.000000e+00])
GEOS_72L_grid = vert_grid(_GEOS_72L_AP, _GEOS_72L_BP)
# Reduced grid
_GEOS_47L_AP = np.zeros(48)
_GEOS_47L_BP = np.zeros(48)
# Fill in the values for the surface
_GEOS_47L_AP[0] = _GEOS_72L_AP[0]
_GEOS_47L_BP[0] = _GEOS_72L_BP[0]
# Build the GEOS 72-layer to 47-layer mapping matrix at the same time
_xmat_i = np.zeros((72))
_xmat_j = np.zeros((72))
_xmat_s = np.zeros((72))
# Index here is the 1-indexed layer number
for _i_lev in range(1, 37):
# Map from 1-indexing to 0-indexing
_x_lev = _i_lev - 1
# Sparse matrix for regridding
# Below layer 37, it's 1:1
_xct = _x_lev
_xmat_i[_xct] = _x_lev
_xmat_j[_xct] = _x_lev
_xmat_s[_xct] = 1.0
# Copy over the pressure edge for the top of the grid cell
_GEOS_47L_AP[_i_lev] = _GEOS_72L_AP[_i_lev]
_GEOS_47L_BP[_i_lev] = _GEOS_72L_BP[_i_lev]
# Now deal with the lumped layers
_skip_size_vec = [2, 4]
_number_lumped = [4, 7]
# Initialize
_i_lev = 36
_i_lev_72 = 36
for _lump_seg in range(2):
_skip_size = _skip_size_vec[_lump_seg]
# 1-indexed starting point in the 47-layer grid
_first_lev_47 = _i_lev + 1
_first_lev_72 = _i_lev_72 + 1
# Loop over the coarse vertical levels (47-layer grid)
for _i_lev_offset in range(_number_lumped[_lump_seg]):
# i_lev is the index for the current level on the 47-level grid
_i_lev = _first_lev_47 + _i_lev_offset
# Map from 1-indexing to 0-indexing
_x_lev = _i_lev - 1
# Get the 1-indexed location of the last layer in the 72-layer grid
# which is below the start of the current lumping region
_i_lev_72_base = _first_lev_72 + (_i_lev_offset * _skip_size) - 1
# Get the 1-indexed location of the uppermost level in the 72-layer
# grid which is within the target layer on the 47-layer grid
_i_lev_72 = _i_lev_72_base + _skip_size
# Do the pressure edges first
# These are the 0-indexed locations of the upper edge for the
# target layers in 47- and 72-layer grids
_GEOS_47L_AP[_i_lev] = _GEOS_72L_AP[_i_lev_72]
_GEOS_47L_BP[_i_lev] = _GEOS_72L_BP[_i_lev_72]
# Get the total pressure delta across the layer on the lumped grid
# We are within the fixed pressure levels so don't need to account
# for variations in surface pressure
_dp_total = _GEOS_47L_AP[_i_lev - 1] - _GEOS_47L_AP[_i_lev]
# Now figure out the mapping
for _i_lev_offset_72 in range(_skip_size):
# Source layer in the 72 layer grid (0-indexed)
_x_lev_72 = _i_lev_72_base + _i_lev_offset_72
_xct = _x_lev_72
_xmat_i[_xct] = _x_lev_72
# Target in the 47 layer grid
_xmat_j[_xct] = _x_lev
# Proportion of 72-layer grid cell, by pressure, within expanded
# layer
_xmat_s[_xct] = (_GEOS_72L_AP[_x_lev_72] -
_GEOS_72L_AP[_x_lev_72 + 1]) / _dp_total
_start_pt = _i_lev
# Do last entry separately (no layer to go with it)
_xmat_72to47 = scipy.sparse.coo_matrix(
(_xmat_s, (_xmat_i, _xmat_j)), shape=(72, 47))
GEOS_47L_grid = vert_grid(_GEOS_47L_AP, _GEOS_47L_BP)
# CAM 26-layer grid
_CAM_26L_AP = np.flip(np.array([219.4067, 489.5209, 988.2418, 1805.201,
2983.724, 4462.334, 6160.587, 7851.243,
7731.271, 7590.131, 7424.086, 7228.744,
6998.933, 6728.574, 6410.509, 6036.322,
5596.111, 5078.225, 4468.96, 3752.191,
2908.949, 2084.739, 1334.443, 708.499,
252.136, 0., 0.]), axis=0) * 0.01
_CAM_26L_BP = np.flip(np.array([0., 0., 0., 0.,
0., 0., 0., 0.,
0.01505309, 0.03276228, 0.05359622, 0.07810627,
0.1069411, 0.14086370, 0.180772, 0.227722,
0.2829562, 0.3479364, 0.4243822, 0.5143168,
0.6201202, 0.7235355, 0.8176768, 0.8962153,
0.9534761, 0.9851122, 1.]), axis=0)
CAM_26L_grid = vert_grid(_CAM_26L_AP, _CAM_26L_BP)
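# Illustrative sketch (not part of the original module): the hybrid sigma-pressure
# coefficients above define pressure edges via p_edge = AP + BP * p_surface, with
# AP in hPa and BP dimensionless. Guarded so it only runs as a script.
if __name__ == "__main__":
    _psfc_hPa = 1013.25
    _p_edge_47 = _GEOS_47L_AP + _GEOS_47L_BP * _psfc_hPa
    print(_p_edge_47[:3])  # near-surface pressure edges (hPa), decreasing upward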
def make_grid_LL(llres, in_extent=[-180, 180, -90, 90], out_extent=[]):
"""
Creates a lat/lon grid description.
Args:
llres: str
lat/lon resolution in 'latxlon' format (e.g. '4x5')
Keyword Args (optional):
in_extent: list[float, float, float, float]
Describes minimum and maximum latitude and longitude of initial grid
in the format [minlon, maxlon, minlat, maxlat]
Default value: [-180, 180, -90, 90]
out_extent: list[float, float, float, float]
Describes minimum and maximum latitude and longitude of target grid
in the format [minlon, maxlon, minlat, maxlat]. Needed when intending
to use grid to trim extent of input data
Default value: [] (assumes value of in_extent)
Returns:
llgrid: dict
dict grid description of format {'lat' : lat midpoints,
'lon' : lon midpoints,
'lat_b' : lat edges,
'lon_b' : lon edges}
"""
# get initial bounds of grid
[minlon, maxlon, minlat, maxlat] = in_extent
[dlat, dlon] = list(map(float, llres.split('x')))
lon_b = np.linspace(minlon - dlon / 2, maxlon - dlon /
2, int((maxlon - minlon) / dlon) + 1)
lat_b = np.linspace(minlat - dlat / 2, maxlat + dlat / 2,
int((maxlat - minlat) / dlat) + 2)
if minlat <= -90:
lat_b = lat_b.clip(-90, None)
if maxlat >= 90:
lat_b = lat_b.clip(None, 90)
lat = (lat_b[1:] + lat_b[:-1]) / 2
lon = (lon_b[1:] + lon_b[:-1]) / 2
# trim grid bounds when your desired extent is not the same as your
# initial grid extent
if out_extent == []:
out_extent = in_extent
if out_extent != in_extent:
[minlon, maxlon, minlat, maxlat] = out_extent
minlon_ind = np.nonzero(lon >= minlon)
maxlon_ind = np.nonzero(lon <= maxlon)
lon_inds = np.intersect1d(minlon_ind, maxlon_ind)
lon = lon[lon_inds]
# make sure to get edges of grid correctly
lon_inds = np.append(lon_inds, np.max(lon_inds) + 1)
lon_b = lon_b[lon_inds]
minlat_ind = np.nonzero(lat >= minlat)
maxlat_ind = np.nonzero(lat <= maxlat)
lat_inds = np.intersect1d(minlat_ind, maxlat_ind)
lat = lat[lat_inds]
# make sure to get edges of grid correctly
lat_inds = np.append(lat_inds, np.max(lat_inds) + 1)
lat_b = lat_b[lat_inds]
llgrid = {'lat': lat,
'lon': lon,
'lat_b': lat_b,
'lon_b': lon_b}
return llgrid
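# Illustrative sketch (not part of the original module): a standard GEOS-Chem
# 4x5 global grid has 46 latitude and 72 longitude centers, with the polar
# edges clipped to +/-90. Guarded so it only runs as a script.
if __name__ == "__main__":
    _demo_ll = make_grid_LL('4x5')
    print(_demo_ll['lat'].shape, _demo_ll['lon'].shape)  # (46,) (72,)
    print(_demo_ll['lat_b'][0], _demo_ll['lat_b'][-1])   # -90.0 90.0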
def make_grid_CS(csres):
"""
Creates a cubed-sphere grid description.
Args:
csres: int
cubed-sphere resolution of target grid
Returns:
[csgrid, csgrid_list]: list[dict, list[dict]]
csgrid is a dict of format {'lat' : lat midpoints,
'lon' : lon midpoints,
'lat_b' : lat edges,
'lon_b' : lon edges}
where each value has an extra face dimension of length 6.
csgrid_list is a list of dicts separated by face index
"""
csgrid = csgrid_GMAO(csres)
csgrid_list = [None] * 6
for i in range(6):
csgrid_list[i] = {'lat': csgrid['lat'][i],
'lon': csgrid['lon'][i],
'lat_b': csgrid['lat_b'][i],
'lon_b': csgrid['lon_b'][i]}
return [csgrid, csgrid_list]
def make_grid_SG(csres, stretch_factor, target_lon, target_lat):
"""
Creates a stretched-grid grid description.
Args:
csres: int
cubed-sphere resolution of target grid
stretch_factor: float
stretch factor of target grid
target_lon: float
target stretching longitude of target grid
target_lon: float
target stretching latitude of target grid
Returns:
[csgrid, csgrid_list]: list[dict, list[dict]]
csgrid is a dict of format {'lat' : lat midpoints,
'lon' : lon midpoints,
'lat_b' : lat edges,
'lon_b' : lon edges}
where each value has an extra face dimension of length 6.
csgrid_list is a list of dicts separated by face index
"""
csgrid = csgrid_GMAO(csres, offset=0)
csgrid_list = [None] * 6
for i in range(6):
lat = csgrid['lat'][i].flatten()
lon = csgrid['lon'][i].flatten()
lon, lat = scs_transform(
lon, lat, stretch_factor, target_lon, target_lat)
lat = lat.reshape((csres, csres))
lon = lon.reshape((csres, csres))
lat_b = csgrid['lat_b'][i].flatten()
lon_b = csgrid['lon_b'][i].flatten()
lon_b, lat_b = scs_transform(
lon_b, lat_b, stretch_factor, target_lon, target_lat)
lat_b = lat_b.reshape((csres + 1, csres + 1))
lon_b = lon_b.reshape((csres + 1, csres + 1))
csgrid_list[i] = {'lat': lat,
'lon': lon,
'lat_b': lat_b,
'lon_b': lon_b}
for i in range(6):
csgrid['lat'][i] = csgrid_list[i]['lat']
csgrid['lon'][i] = csgrid_list[i]['lon']
csgrid['lat_b'][i] = csgrid_list[i]['lat_b']
csgrid['lon_b'][i] = csgrid_list[i]['lon_b']
return [csgrid, csgrid_list]
def calc_rectilinear_lon_edge(lon_stride, center_at_180):
""" Compute longitude edge vector for a rectilinear grid.
Parameters
----------
lon_stride: float
Stride length in degrees. For example, for a standard GEOS-Chem Classic
4x5 grid, lon_stride would be 5.
center_at_180: bool
Whether or not the grid should have a cell center at 180 degrees (i.e.
on the date line). If true, the first grid cell is centered on the date
line; if false, the first grid edge is on the date line.
Returns
-------
Longitudes of cell edges in degrees East.
Notes
-----
All values are forced to be between [-180,180]. For a grid with N cells in
each band, N+1 edges will be returned, with the first and last value being
duplicates.
Examples
--------
>>> from gcpy.grid.horiz import calc_rectilinear_lon_edge
    >>> calc_rectilinear_lon_edge(5.0,True)
np.array([177.5,-177.5,-172.5,...,177.5])
See Also
--------
[NONE]
"""
    n_lon = int(np.round(360.0 / lon_stride))
    lon_edge = np.linspace(-180.0, 180.0, num=n_lon + 1)
if center_at_180:
lon_edge = lon_edge - (lon_stride / 2.0)
lon_edge[lon_edge < -180.0] = lon_edge[lon_edge < -180] + 360.0
lon_edge[lon_edge > 180.0] = lon_edge[lon_edge > 180.0] - 360.0
return lon_edge
def calc_rectilinear_lat_edge(lat_stride, half_polar_grid):
""" Compute latitude edge vector for a rectilinear grid.
Parameters
----------
lat_stride: float
Stride length in degrees. For example, for a standard GEOS-Chem Classic
4x5 grid, lat_stride would be 4.
half_polar_grid: bool
Whether or not the grid should be "half-polar" (i.e. bands at poles are
half the size). In either case the grid will start and end at -/+ 90,
but when half_polar_grid is True, the first and last bands will have a
width of 1/2 the normal lat_stride.
Returns
-------
Latitudes of cell edges in degrees North.
Notes
-----
All values are forced to be between [-90,90]. For a grid with N cells in
each band, N+1 edges will be returned, with the first and last value being
duplicates.
Examples
--------
>>> from gcpy.grid.horiz import calc_rectilinear_lat_edge
    >>> calc_rectilinear_lat_edge(4.0,True)
np.array([-90,-88,-84,-80,...,84,88,90])
See Also
--------
[NONE]
"""
if half_polar_grid:
start_pt = 90.0 + (lat_stride / 2.0)
else:
start_pt = 90.0
    lat_edge = np.linspace(-1.0 * start_pt, start_pt,
                           num=1 + int(np.round(2.0 * start_pt / lat_stride)))
# Force back onto +/- 90
lat_edge[lat_edge > 90.0] = 90.0
lat_edge[lat_edge < -90.0] = -90.0
return lat_edge
def calc_rectilinear_grid_area(lon_edge, lat_edge):
""" Compute grid cell areas (in m2) for a rectilinear grid.
Parameters
----------
#TODO
Returns
-------
#TODO
Notes
-----
#TODO
Examples
--------
#TODO
See Also
--------
[NONE]
"""
# Convert from km to m
_radius_earth_m = R_EARTH_m
    lon_edge = np.asarray(lon_edge, dtype=float)
    lat_edge = np.asarray(lat_edge, dtype=float)
n_lon = (lon_edge.size) - 1
n_lat = (lat_edge.size) - 1
grid_area = np.zeros((n_lat, n_lon))
sfc_area_const = 2.0 * np.pi * _radius_earth_m * _radius_earth_m
# Longitudes loop, so need to be careful
lon_delta = calc_delta_lon(lon_edge)
# Convert into weights relative to the total circle
lon_delta = lon_delta / 360.0
# Precalculate this
sin_lat_edge = np.sin(np.deg2rad(lat_edge))
for i_lat in range(0, n_lat):
sin_diff = sin_lat_edge[i_lat + 1] - sin_lat_edge[i_lat]
grid_area[i_lat, :] = sin_diff * sfc_area_const * lon_delta
return grid_area
def calc_delta_lon(lon_edge):
""" Compute grid cell longitude widths from an edge vector.
Parameters
----------
lon_edge: float
Vector of longitude edges, in degrees East.
Returns
-------
Width of each cell, degrees East
Notes
-----
Accounts for looping over the domain.
Examples
--------
#TODO
"""
n_lon = (lon_edge.size) - 1
    lon_edge = np.asarray(lon_edge)
# Set up output array
lon_delta = np.zeros((n_lon))
offset = 0.0
next_lon = lon_edge[0]
for i_lon in range(0, n_lon):
last_lon = next_lon
next_lon = lon_edge[i_lon + 1] + offset
while next_lon < last_lon:
offset = offset + 360.0
next_lon = next_lon + 360.0
lon_delta[i_lon] = next_lon - last_lon
return lon_delta
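# Illustrative sketch (not part of the original module): cell areas on a 4x5
# half-polar grid should sum to the surface of a sphere, 4*pi*R^2. Assumes the
# module-level constant R_EARTH_m used above is defined (Earth radius in m).
# Guarded so it only runs as a script.
if __name__ == "__main__":
    _lon_e = calc_rectilinear_lon_edge(5.0, True)
    _lat_e = calc_rectilinear_lat_edge(4.0, True)
    _area = calc_rectilinear_grid_area(_lon_e, _lat_e)
    print(_area.sum() / (4.0 * np.pi * R_EARTH_m ** 2))  # ~1.0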
def csgrid_GMAO(res, offset=-10):
"""
Return cubedsphere coordinates with GMAO face orientation
Parameters
----------
res: cubed-sphere Resolution
This function was originally written by <NAME> and included
in package cubedsphere: https://github.com/JiaweiZhuang/cubedsphere
"""
CS = CSGrid(res, offset=offset)
lon = CS.lon_center.transpose(2, 0, 1)
lon_b = CS.lon_edge.transpose(2, 0, 1)
lat = CS.lat_center.transpose(2, 0, 1)
lat_b = CS.lat_edge.transpose(2, 0, 1)
lon[lon < 0] += 360
lon_b[lon_b < 0] += 360
for a in [lon, lon_b, lat, lat_b]:
for tile in [0, 1, 3, 4]:
a[tile] = a[tile].T
for tile in [3, 4]:
a[tile] = np.flip(a[tile], 1)
for tile in [3, 4, 2, 5]:
a[tile] = np.flip(a[tile], 0)
a[2], a[5] = a[5].copy(), a[2].copy() # swap north&south pole
return {'lon': lon, 'lat': lat, 'lon_b': lon_b, 'lat_b': lat_b}
_INV_SQRT_3 = 1.0 / np.sqrt(3.0)
_ASIN_INV_SQRT_3 = np.arcsin(_INV_SQRT_3)
class CSGrid(object):
"""Generator for cubed-sphere grid geometries.
    CSGrid computes the latitude and longitudes of cell centers and edges
on a cubed-sphere grid, providing a way to retrieve these geometries
on-the-fly if your model output data does not include them.
Attributes
----------
{lon,lat}_center: np.ndarray
lat/lon coordinates for each cell center along the cubed-sphere mesh
{lon,lat}_edge: np.ndarray
lat/lon coordinates for the midpoint of the edges separating each
element on the cubed-sphere mesh.
xyz_{center,edge}: np.ndarray
As above, except coordinates are projected into a 3D cartesian space
with common origin to the original lat/lon coordinate system, assuming
a unit sphere.
This class was originally written by <NAME> and included
in package cubedsphere: https://github.com/JiaweiZhuang/cubedsphere
"""
def __init__(self, c, offset=None):
"""
Parameters
----------
c: int
Number edges along each cubed-sphere edge.
======= ====================
C Lat/Lon Resolution
------- --------------------
24 4 deg x 5 deg
48,45 2 deg x 2.5 deg
96,90 1 deg x 1.25 deg
192,180 0.5 deg x 0.625 deg
384,360 0.25 deg x 0.3125 deg
        720     0.125 deg x 0.15625 deg
offset: float (optional)
Degrees to offset the first faces' edge in the latitudinal
direction. If not passed, then the western edge of the first face
will align with the prime meridian.
This function was originally written by <NAME> and included
in package cubedsphere: https://github.com/JiaweiZhuang/cubedsphere
"""
self.c = c
self.delta_y = 2. * _ASIN_INV_SQRT_3 / c
self.nx = self.ny = c + 1
self.offset = offset
self._initialize()
def _initialize(self):
c = self.c
nx, ny = self.nx, self.ny
lambda_rad = np.zeros((nx, ny))
lambda_rad[0, :] = 3. * np.pi / 4. # West edge
lambda_rad[-1, :] = 5. * np.pi / 4. # East edge
theta_rad = np.zeros((nx, ny))
theta_rad[0, :] = -_ASIN_INV_SQRT_3 + \
(self.delta_y * np.arange(c + 1)) # West edge
theta_rad[-1, :] = theta_rad[0, :] # East edge
# Cache the reflection points - our upper-left and lower-right corners
lonMir1, lonMir2 = lambda_rad[0, 0], lambda_rad[-1, -1]
latMir1, latMir2 = theta_rad[0, 0], theta_rad[-1, -1]
xyzMir1 = latlon_to_cartesian(lonMir1, latMir1)
xyzMir2 = latlon_to_cartesian(lonMir2, latMir2)
xyzCross = np.cross(xyzMir1, xyzMir2)
norm = np.sqrt(np.sum(xyzCross**2))
xyzCross /= norm
for i in range(1, c):
lonRef, latRef = lambda_rad[0, i], theta_rad[0, i]
xyzRef = np.asarray(latlon_to_cartesian(lonRef, latRef, ))
xyzDot = np.sum(xyzCross * xyzRef)
xyzImg = xyzRef - (2. * xyzDot * xyzCross)
xsImg, ysImg, zsImg = xyzImg
lonImg, latImg = cartesian_to_latlon(xsImg, ysImg, zsImg)
lambda_rad[i, 0] = lonImg
lambda_rad[i, -1] = lonImg
theta_rad[i, 0] = latImg
theta_rad[i, -1] = -latImg
pp = np.zeros([3, c + 1, c + 1])
# Set the four corners
# print("CORNERS")
for i, j in product([0, -1], [0, -1]):
# print(i, j)
pp[:, i, j] = latlon_to_cartesian(
lambda_rad[i, j], theta_rad[i, j])
# Map the edges on the sphere back to the cube.
#Note that all intersections are at x = -rsq3
# print("EDGES")
for ij in range(1, c + 1):
# print(ij)
pp[:, 0, ij] = latlon_to_cartesian(
lambda_rad[0, ij], theta_rad[0, ij])
pp[1, 0, ij] = -pp[1, 0, ij] * _INV_SQRT_3 / pp[0, 0, ij]
pp[2, 0, ij] = -pp[2, 0, ij] * _INV_SQRT_3 / pp[0, 0, ij]
pp[:, ij, 0] = latlon_to_cartesian(
lambda_rad[ij, 0], theta_rad[ij, 0])
pp[1, ij, 0] = -pp[1, ij, 0] * _INV_SQRT_3 / pp[0, ij, 0]
pp[2, ij, 0] = -pp[2, ij, 0] * _INV_SQRT_3 / pp[0, ij, 0]
# # Map interiors
pp[0, :, :] = -_INV_SQRT_3
# print("INTERIOR")
for i in range(1, c + 1):
for j in range(1, c + 1):
# Copy y-z face of the cube along j=1
pp[1, i, j] = pp[1, i, 0]
# Copy along i=1
pp[2, i, j] = pp[2, 0, j]
_pp = pp.copy()
llr, ttr = vec_cartesian_to_latlon(_pp[0], _pp[1], _pp[2])
lambda_rad, theta_rad = llr.copy(), ttr.copy()
# Make grid symmetrical to i = im/2 + 1
for j in range(1, c + 1):
for i in range(1, c + 1):
# print("({}, {}) -> ({}, {})".format(i, 0, i, j))
lambda_rad[i, j] = lambda_rad[i, 0]
for j in range(c + 1):
for i in range(c // 2):
isymm = c - i
# print(isymm)
avgPt = 0.5 * (lambda_rad[i, j] - lambda_rad[isymm, j])
# print(lambda_rad[i, j], lambda_rad[isymm, j], avgPt)
lambda_rad[i, j] = avgPt + np.pi
lambda_rad[isymm, j] = np.pi - avgPt
avgPt = 0.5 * (theta_rad[i, j] + theta_rad[isymm, j])
theta_rad[i, j] = avgPt
theta_rad[isymm, j] = avgPt
# Make grid symmetrical to j = im/2 + 1
for j in range(c // 2):
jsymm = c - j
for i in range(1, c + 1):
avgPt = 0.5 * (lambda_rad[i, j] + lambda_rad[i, jsymm])
lambda_rad[i, j] = avgPt
lambda_rad[i, jsymm] = avgPt
avgPt = 0.5 * (theta_rad[i, j] - theta_rad[i, jsymm])
theta_rad[i, j] = avgPt
theta_rad[i, jsymm] = -avgPt
# Final correction
lambda_rad -= np.pi
llr, ttr = lambda_rad.copy(), theta_rad.copy()
#######################################################################
# MIRROR GRIDS
#######################################################################
new_xgrid = np.zeros((c + 1, c + 1, 6))
new_ygrid = np.zeros((c + 1, c + 1, 6))
xgrid = llr.copy()
ygrid = ttr.copy()
new_xgrid[..., 0] = xgrid.copy()
new_ygrid[..., 0] = ygrid.copy()
# radius = 6370.0e3
radius = 1.
for face in range(1, 6):
for j in range(c + 1):
for i in range(c + 1):
x = xgrid[i, j]
y = ygrid[i, j]
z = radius
if face == 1:
# Rotate about z only
new_xyz = rotate_sphere_3D(x, y, z, -np.pi / 2., 'z')
elif face == 2:
# Rotate about z, then x
temp_xyz = rotate_sphere_3D(x, y, z, -np.pi / 2., 'z')
x, y, z = temp_xyz[:]
new_xyz = rotate_sphere_3D(x, y, z, np.pi / 2., 'x')
elif face == 3:
temp_xyz = rotate_sphere_3D(x, y, z, np.pi, 'z')
x, y, z = temp_xyz[:]
new_xyz = rotate_sphere_3D(x, y, z, np.pi / 2., 'x')
if ((c % 2) != 0) and (j == c // 2 - 1):
print(i, j, face)
new_xyz = (np.pi, *new_xyz)
elif face == 4:
temp_xyz = rotate_sphere_3D(x, y, z, np.pi / 2., 'z')
x, y, z = temp_xyz[:]
new_xyz = rotate_sphere_3D(x, y, z, np.pi / 2., 'y')
elif face == 5:
temp_xyz = rotate_sphere_3D(x, y, z, np.pi / 2., 'y')
x, y, z = temp_xyz[:]
new_xyz = rotate_sphere_3D(x, y, z, 0., 'z')
# print((x, y, z), "\n", new_xyz, "\n" + "--"*40)
new_x, new_y, _ = new_xyz
new_xgrid[i, j, face] = new_x
new_ygrid[i, j, face] = new_y
lon_edge, lat_edge = new_xgrid.copy(), new_ygrid.copy()
#######################################################################
# CLEANUP GRID
#######################################################################
for i, j, f in product(range(c + 1), range(c + 1), range(6)):
new_lon = lon_edge[i, j, f]
if new_lon < 0:
new_lon += 2 * np.pi
if np.abs(new_lon) < 1e-10:
new_lon = 0.
lon_edge[i, j, f] = new_lon
if np.abs(lat_edge[i, j, f]) < 1e-10:
lat_edge[i, j, f] = 0.
lon_edge_deg = np.rad2deg(lon_edge)
lat_edge_deg = np.rad2deg(lat_edge)
#######################################################################
# COMPUTE CELL CENTROIDS
#######################################################################
lon_ctr = np.zeros((c, c, 6))
lat_ctr = np.zeros((c, c, 6))
xyz_ctr = np.zeros((3, c, c, 6))
xyz_edge = np.zeros((3, c + 1, c + 1, 6))
for f in range(6):
for i in range(c):
last_x = (i == (c - 1))
for j in range(c):
last_y = (j == (c - 1))
# Get the four corners
lat_corner = [
lat_edge[i, j, f],
lat_edge[i + 1, j, f],
lat_edge[i + 1, j + 1, f],
lat_edge[i, j + 1, f]]
lon_corner = [
lon_edge[i, j, f],
lon_edge[i + 1, j, f],
lon_edge[i + 1, j + 1, f],
lon_edge[i, j + 1, f]]
# Convert from lat-lon back to cartesian
xyz_corner = np.asarray(
vec_latlon_to_cartesian(
lon_corner, lat_corner))
# Store the edge information
xyz_edge[:, i, j, f] = xyz_corner[:, 0]
if last_x:
xyz_edge[:, i + 1, j, f] = xyz_corner[:, 1]
if last_x or last_y:
xyz_edge[:, i + 1, j + 1, f] = xyz_corner[:, 2]
if last_y:
xyz_edge[:, i, j + 1, f] = xyz_corner[:, 3]
e_mid = np.sum(xyz_corner, axis=1)
e_abs = np.sqrt(np.sum(e_mid * e_mid))
if e_abs > 0:
e_mid = e_mid / e_abs
xyz_ctr[:, i, j, f] = e_mid
_lon, _lat = cartesian_to_latlon(*e_mid)
lon_ctr[i, j, f] = _lon
lat_ctr[i, j, f] = _lat
lon_ctr_deg = np.rad2deg(lon_ctr)
lat_ctr_deg = np.rad2deg(lat_ctr)
if self.offset is not None:
lon_edge_deg += self.offset
lon_ctr_deg += self.offset
#######################################################################
# CACHE
#######################################################################
self.lon_center = lon_ctr_deg
self.lat_center = lat_ctr_deg
self.lon_edge = lon_edge_deg
self.lat_edge = lat_edge_deg
self.xyz_center = xyz_ctr
self.xyz_edge = xyz_edge
def latlon_to_cartesian(lon, lat):
""" Convert latitude/longitude coordinates along the unit sphere to cartesian
coordinates defined by a vector pointing from the sphere's center to its
surface.
This function was originally written by <NAME> and included
in package cubedsphere: https://github.com/JiaweiZhuang/cubedsphere
"""
x = np.cos(lat) * np.cos(lon)
y = np.cos(lat) * np.sin(lon)
z = np.sin(lat)
return x, y, z
vec_latlon_to_cartesian = np.vectorize(latlon_to_cartesian)
def cartesian_to_latlon(x, y, z, ret_xyz=False):
""" Convert a cartesian coordinate to latitude/longitude coordinates.
Optionally return the original cartesian coordinate as a tuple.
This function was originally written by <NAME> and included
in package cubedsphere: https://github.com/JiaweiZhuang/cubedsphere
"""
xyz = np.array([x, y, z])
vector_length = np.sqrt(np.sum(xyz * xyz, axis=0))
xyz /= vector_length
x, y, z = xyz
if (np.abs(x) + np.abs(y)) < 1e-20:
lon = 0.
else:
lon = np.arctan2(y, x)
if lon < 0.:
lon += 2 * np.pi
lat = np.arcsin(z)
# If not normalizing vector, take lat = np.arcsin(z/vector_length)
if ret_xyz:
return lon, lat, xyz
else:
return lon, lat
vec_cartesian_to_latlon = np.vectorize(cartesian_to_latlon)
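# Illustrative sketch (not part of the original module): latlon_to_cartesian and
# cartesian_to_latlon are inverses for points on the unit sphere (angles in
# radians, longitude in [0, 2*pi)). Guarded so it only runs as a script.
if __name__ == "__main__":
    _x, _y, _z = latlon_to_cartesian(1.0, 0.5)
    _lon, _lat = cartesian_to_latlon(_x, _y, _z)
    print(abs(_lon - 1.0) < 1e-12, abs(_lat - 0.5) < 1e-12)  # True True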
def spherical_to_cartesian(theta, phi, r=1):
""" Convert spherical coordinates in the form (theta, phi[, r]) to
cartesian, with the origin at the center of the original spherical
coordinate system.
This function was originally written by <NAME> and included
in package cubedsphere: https://github.com/JiaweiZhuang/cubedsphere
"""
x = r * np.cos(phi) * np.cos(theta)
y = r * np.cos(phi) * np.sin(theta)
z = r * np.sin(phi)
return x, y, z
vec_spherical_to_cartesian = np.vectorize(spherical_to_cartesian)
def cartesian_to_spherical(x, y, z):
""" Convert cartesian coordinates to spherical in the form
(theta, phi[, r]) with the origin remaining at the center of the
original spherical coordinate system.
This function was originally written by <NAME> and included
in package cubedsphere: https://github.com/JiaweiZhuang/cubedsphere
"""
r = np.sqrt(x**2 + y**2 + z**2)
#theta = np.arccos(z / r)
theta = np.arctan2(y, x)
phi = np.arctan2(z, np.sqrt(x**2 + y**2))
# if np.abs(x) < 1e-16:
# phi = np.pi
# else:
# phi = np.arctan(y / x)
return theta, phi, r
vec_cartesian_to_spherical = np.vectorize(cartesian_to_spherical)
def rotate_sphere_3D(theta, phi, r, rot_ang, rot_axis='x'):
""" Rotate a spherical coordinate in the form (theta, phi[, r])
about the indicating axis, 'rot_axis'.
This method accomplishes the rotation by projecting to a
cartesian coordinate system and performing a solid body rotation
around the requested axis.
This function was originally written by <NAME> and included
in package cubedsphere: https://github.com/JiaweiZhuang/cubedsphere
"""
cos_ang = np.cos(rot_ang)
sin_ang = np.sin(rot_ang)
x, y, z = spherical_to_cartesian(theta, phi, r)
if rot_axis == 'x':
x_new = x
y_new = cos_ang * y + sin_ang * z
z_new = -sin_ang * y + cos_ang * z
elif rot_axis == 'y':
x_new = cos_ang * x - sin_ang * z
y_new = y
z_new = sin_ang * x + cos_ang * z
elif rot_axis == 'z':
x_new = cos_ang * x + sin_ang * y
y_new = -sin_ang * x + cos_ang * y
z_new = z
theta_new, phi_new, r_new = cartesian_to_spherical(x_new, y_new, z_new)
return theta_new, phi_new, r_new
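# Illustrative sketch (not part of the original module): build a small C24
# cubed-sphere grid and inspect the face-major shapes returned by csgrid_GMAO.
# Guarded so it only runs as a script.
if __name__ == "__main__":
    _cs = csgrid_GMAO(24)
    print(_cs['lat'].shape, _cs['lat_b'].shape)  # (6, 24, 24) (6, 25, 25)
    print(float(_cs['lon'].min()) >= 0.0)        # longitudes wrapped to [0, 360)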
|
52765
|
import pytest
import sys
sys.path.append(".")
sys.path.append("../.")
from boxdetect import config
from boxdetect import pipelines
def test_save_load_config(capsys):
cfg = config.PipelinesConfig()
cfg.morph_kernels_thickness = 10
cfg.save_yaml('test_cfg.yaml')
cfg2 = config.PipelinesConfig('test_cfg.yaml')
assert(cfg.__dict__ == cfg2.__dict__)
cfg.new_var = 10
cfg.save_yaml('test_cfg.yaml')
cfg2.load_yaml('test_cfg.yaml')
captured = capsys.readouterr()
assert("WARNING" in captured.out)
def test_update_num_iterations():
cfg = config.PipelinesConfig()
cfg.height_range = (5, 5)
cfg.width_range = [(10, 10), (20, 20)]
cfg.update_num_iterations()
assert(cfg.num_iterations == 2)
assert(len(cfg.height_range) == 2)
assert(len(cfg.width_range) == 2)
def test_autoconfig_simple():
box_sizes = [(42, 44), (41, 47), (41, 44), (41, 44), (125, 54), (92, 103)]
file_path = "tests/data/autoconfig_simple/dummy_example.png"
cfg = config.PipelinesConfig()
cfg.autoconfigure(box_sizes)
checkboxes = pipelines.get_checkboxes(
file_path, cfg=cfg, px_threshold=0.01, plot=False, verbose=False)
assert(len(checkboxes) == 12)
cfg = config.PipelinesConfig()
cfg.autoconfigure(box_sizes)
rects, groups, _, _ = pipelines.get_boxes(
file_path, cfg=cfg, plot=False)
assert(len(rects) == 23)
assert(len(groups) == 14)
def test_autoconfig_from_vott_simple():
vott_dir = "tests/data/autoconfig_simple"
file_path = "tests/data/autoconfig_simple/dummy_example.png"
cfg = config.PipelinesConfig()
cfg.autoconfigure_from_vott(vott_dir, class_tags=['box'])
checkboxes = pipelines.get_checkboxes(
file_path, cfg=cfg, px_threshold=0.01, plot=False, verbose=False)
assert(len(checkboxes) == 12)
cfg = config.PipelinesConfig()
cfg.autoconfigure_from_vott(vott_dir, class_tags=['box'])
rects, groups, _, _ = pipelines.get_boxes(
file_path, cfg=cfg, plot=False)
assert(len(rects) == 23)
assert(len(groups) == 14)
|
52782
|
import torch
import torch.nn as nn
def channel_split(z, dim=1, odd=False):
C = z.size(dim)
z0, z1 = torch.split(z, C // 2, dim=dim)
if odd:
z0, z1 = z1, z0
return z0, z1
def channel_merge(z0, z1, dim=1, odd=False):
if odd:
z0, z1 = z1, z0
z = torch.cat([z0, z1], dim=dim)
return z
def get_checker_mask(H, W, odd=False, device=None):
ix = torch.arange(W).to(device).long()
iy = torch.arange(H).to(device).long()
iy, ix = torch.meshgrid([iy, ix])
mod = 0 if odd else 1
mask = ((ix + iy) % 2 == mod).float()
mask = mask.view(1, 1, H, W)
return mask
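# Minimal sketch (not part of the original module): the checkerboard mask for a
# 4x4 feature map alternates 0s and 1s; with odd=False the top-left entry is 0.
# Guarded so it only runs as a script.
if __name__ == "__main__":
    print(get_checker_mask(4, 4, device=torch.device("cpu"))[0, 0])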
def checker_split(z, odd=False):
assert z.dim() == 4
B, C, H, W = z.size()
z = z.view(B, C, H // 2, 2, W // 2, 2) # (B, C, sH, 2, sW, 2)
z = z.permute(0, 1, 3, 5, 2, 4).contiguous() # (B, C, 2, 2, sH, sW)
z = z.view(B, C * 4, H // 2, W // 2) # (B, C * 4, sH, sW)
za, zb, zc, zd = torch.split(z, C, dim=1)
z0 = torch.cat([za, zd], dim=1)
z1 = torch.cat([zb, zc], dim=1)
if odd:
z0, z1 = z1, z0
return z0, z1
def checker_merge(z0, z1, odd=False):
assert z0.dim() == 4 and z1.dim() == 4
B, C2, sH, sW = z0.size()
C = C2 // 2
if odd:
z0, z1 = z1, z0
za, zd = torch.split(z0, C, dim=1)
zb, zc = torch.split(z1, C, dim=1)
z = torch.cat([za, zb, zc, zd], dim=1)
z = z.view(B, C, 2, 2, sH, sW).permute(0, 1, 4, 2, 5, 3).contiguous()
z = z.view(B, C, sH * 2, sW * 2)
return z
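# Minimal sanity sketch (not part of the original module): checker_split and
# checker_merge are exact inverses when called with the same `odd` flag.
# Guarded so it only runs as a script.
if __name__ == "__main__":
    torch.manual_seed(0)
    _z = torch.randn(2, 3, 4, 4)
    _z0, _z1 = checker_split(_z, odd=True)
    print(_z0.shape, _z1.shape)  # torch.Size([2, 6, 2, 2]) twice
    assert torch.equal(checker_merge(_z0, _z1, odd=True), _z)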
def squeeze1d(z, odd=False):
assert z.dim() == 2
B, C = z.size()
z = z.view(B, C // 2, 2)
z0 = z[:, :, 0]
z1 = z[:, :, 1]
if odd:
z0, z1 = z1, z0
return z0, z1
def unsqueeze1d(z0, z1, odd=False):
assert z0.dim() == 2 and z1.dim() == 2
B, hC = z0.size()
if odd:
z0, z1 = z1, z0
z = torch.stack([z0, z1], dim=-1)
z = z.view(B, -1).contiguous()
return z
def squeeze2d(z, odd=False):
assert z.dim() == 4
B, C, H, W = z.size()
z = z.view(B, C, H // 2, 2, W // 2, 2) # (B, C, sH, 2, sW, 2)
z = z.permute(0, 1, 3, 5, 2, 4).contiguous() # (B, C, 2, 2, sH, sW)
z = z.view(B, C * 4, H // 2, W // 2) # (B, C * 4, sH, sW)
z0, z1 = torch.split(z, C * 2, dim=1)
if odd:
z0, z1 = z1, z0
return z0, z1
def unsqueeze2d(z0, z1, odd=False):
assert z0.dim() == 4 and z1.dim() == 4
B, C2, sH, sW = z0.size()
C = C2 // 2
if odd:
z0, z1 = z1, z0
z = torch.cat([z0, z1], dim=1)
z = z.view(B, C, 2, 2, sH, sW).permute(0, 1, 4, 2, 5, 3).contiguous()
z = z.view(B, C, sH * 2, sW * 2)
return z
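# Minimal sanity sketch (not part of the original module): squeeze2d halves the
# spatial size and quadruples the channels, and unsqueeze2d undoes it exactly;
# squeeze1d/unsqueeze1d behave the same way for flat vectors. Guarded so it
# only runs as a script.
if __name__ == "__main__":
    torch.manual_seed(0)
    _z = torch.randn(2, 3, 8, 8)
    _z0, _z1 = squeeze2d(_z)
    print(_z0.shape)  # torch.Size([2, 6, 4, 4])
    assert torch.equal(unsqueeze2d(_z0, _z1), _z)
    _v = torch.randn(4, 10)
    _v0, _v1 = squeeze1d(_v)
    assert torch.equal(unsqueeze1d(_v0, _v1), _v)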
class Squeeze1d(nn.Module):
"""
split 1D vector into two half-size vectors
by extracting entries alternatingly
"""
def __init__(self, odd=False):
super(Squeeze1d, self).__init__()
self.odd = odd
def forward(self, z, log_df_dz):
z0, z1 = squeeze1d(z, self.odd)
z = torch.cat([z0, z1], dim=1)
return z, log_df_dz
def backward(self, z, log_df_dz):
z0, z1 = torch.split(z, z.size(1) // 2, dim=1)
z = unsqueeze1d(z0, z1, self.odd)
return z, log_df_dz
class Unsqueeze1d(nn.Module):
"""
merge 1D vectors given by Squeeze1d
"""
def __init__(self, odd=False):
super(Unsqueeze1d, self).__init__()
self.odd = odd
def forward(self, z, log_df_dz):
z0, z1 = torch.split(z, z.size(1) // 2, dim=1)
z = unsqueeze1d(z0, z1, self.odd)
return z, log_df_dz
def backward(self, z, log_df_dz):
z0, z1 = squeeze1d(z, self.odd)
z = torch.cat([z0, z1], dim=1)
return z, log_df_dz
class Squeeze2d(nn.Module):
"""
split an 2D feature map into two maps by
extracting pixels using checkerboard pattern
"""
def __init__(self, odd=False):
super(Squeeze2d, self).__init__()
self.odd = odd
def forward(self, z, log_df_dz):
z0, z1 = squeeze2d(z, self.odd)
z = torch.cat([z0, z1], dim=1)
return z, log_df_dz
def backward(self, z, log_df_dz):
z0, z1 = torch.split(z, z.size(1) // 2, dim=1)
z = unsqueeze2d(z0, z1, self.odd)
return z, log_df_dz
class Unsqueeze2d(nn.Module):
"""
Merge two 2D feature maps given by Squeeze2d
"""
def __init__(self, odd=False):
super(Unsqueeze2d, self).__init__()
self.odd = odd
def forward(self, z, log_df_dz):
z0, z1 = torch.split(z, z.size(1) // 2, dim=1)
z = unsqueeze2d(z0, z1, self.odd)
return z, log_df_dz
def backward(self, z, log_df_dz):
z0, z1 = squeeze2d(z, self.odd)
z = torch.cat([z0, z1], dim=1)
return z, log_df_dz
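# Minimal sketch (not part of the original module): Squeeze2d is a pure
# reordering, so backward(forward(z)) recovers z and leaves the log-det term
# unchanged. Guarded so it only runs as a script.
if __name__ == "__main__":
    torch.manual_seed(0)
    _layer = Squeeze2d()
    _z = torch.randn(2, 3, 8, 8)
    _ldj = torch.zeros(2)
    _zf, _ldjf = _layer(_z, _ldj)
    _zb, _ldjb = _layer.backward(_zf, _ldjf)
    print(_zf.shape)  # torch.Size([2, 12, 4, 4])
    assert torch.equal(_zb, _z) and torch.equal(_ldjb, _ldj)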
|
52792
|
from __future__ import absolute_import
import os.path
import argparse
import logging
import json
from six import iteritems
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.externals import joblib
from keras.models import load_model
from tensorflow.python.client import device_lib
from utils import load_data, Embeds, Logger, Params, embed_aggregate, similarity
from features import catboost_features
from preprocessing import clean_text, convert_text2seq, split_data, parse_seq
from models import cnn, dense, rnn, TFIDF, CatBoost, save_predictions
from train import train
from metrics import get_metrics, print_metrics
def get_kwargs(kwargs):
parser = argparse.ArgumentParser(description='-f TRAIN_FILE -t TEST_FILE -o OUTPUT_FILE -e EMBEDS_FILE [-l LOGGER_FILE] [--swear-words SWEAR_FILE] [--wrong-words WRONG_WORDS_FILE] [--format-embeds FALSE]')
parser.add_argument('-f', '--train', dest='train', action='store', help='/path/to/trian_file', type=str)
parser.add_argument('-t', '--test', dest='test', action='store', help='/path/to/test_file', type=str)
parser.add_argument('-o', '--output', dest='output', action='store', help='/path/to/output_file', type=str)
parser.add_argument('-we', '--word_embeds', dest='word_embeds', action='store', help='/path/to/embeds_file', type=str)
parser.add_argument('-ce', '--char_embeds', dest='char_embeds', action='store', help='/path/to/embeds_file', type=str)
parser.add_argument('-c','--config', dest='config', action='store', help='/path/to/config.json', type=str)
parser.add_argument('-l', '--logger', dest='logger', action='store', help='/path/to/log_file', type=str, default=None)
parser.add_argument('--mode', dest='mode', action='store', help='preprocess / train / validate / all', type=str, default='all')
parser.add_argument('--max-words', dest='max_words', action='store', type=int, default=300000)
parser.add_argument('--use-only-exists-words', dest='use_only_exists_words', action='store_true')
parser.add_argument('--swear-words', dest='swear_words', action='store', help='/path/to/swear_words_file', type=str, default=None)
parser.add_argument('--wrong-words', dest='wrong_words', action='store', help='/path/to/wrong_words_file', type=str, default=None)
parser.add_argument('--format-embeds', dest='format_embeds', action='store', help='file | json | pickle | binary', type=str, default='raw')
parser.add_argument('--output-dir', dest='output_dir', action='store', help='/path/to/dir', type=str, default='.')
parser.add_argument('--norm-prob', dest='norm_prob', action='store_true')
parser.add_argument('--norm-prob-koef', dest='norm_prob_koef', action='store', type=float, default=1)
parser.add_argument('--gpus', dest='gpus', action='store', help='count GPUs', type=int, default=0)
for key, value in iteritems(parser.parse_args().__dict__):
kwargs[key] = value
def main(*kargs, **kwargs):
get_kwargs(kwargs)
train_fname = kwargs['train']
test_fname = kwargs['test']
result_fname = kwargs['output']
word_embeds_fname = kwargs['word_embeds']
char_embeds_fname = kwargs['char_embeds']
logger_fname = kwargs['logger']
mode = kwargs['mode']
max_words = kwargs['max_words']
use_only_exists_words = kwargs['use_only_exists_words']
swear_words_fname = kwargs['swear_words']
wrong_words_fname = kwargs['wrong_words']
embeds_format = kwargs['format_embeds']
config = kwargs['config']
output_dir = kwargs['output_dir']
norm_prob = kwargs['norm_prob']
norm_prob_koef = kwargs['norm_prob_koef']
gpus = kwargs['gpus']
seq_col_name_words = 'comment_seq_lw_use_exist{}_{}k'.format(int(use_only_exists_words), int(max_words/1000))
seq_col_name_ll3 = 'comment_seq_ll3_use_exist{}_{}k'.format(int(use_only_exists_words), int(max_words/1000))
model_file = {
'dense': os.path.join(output_dir, 'dense.h5'),
'cnn': os.path.join(output_dir, 'cnn.h5'),
'lstm': os.path.join(output_dir, 'lstm.h5'),
'lr': os.path.join(output_dir, '{}_logreg.bin'),
'catboost': os.path.join(output_dir, '{}_catboost.bin')
}
# ====Create logger====
logger = Logger(logging.getLogger(), logger_fname)
# ====Detect GPUs====
logger.debug(device_lib.list_local_devices())
# ====Load data====
logger.info('Loading data...')
train_df = load_data(train_fname)
test_df = load_data(test_fname)
target_labels = ['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']
num_classes = len(target_labels)
# ====Load additional data====
logger.info('Loading additional data...')
swear_words = load_data(swear_words_fname, func=lambda x: set(x.T[0]), header=None)
wrong_words_dict = load_data(wrong_words_fname, func=lambda x: {val[0] : val[1] for val in x})
# ====Load word vectors====
logger.info('Loading embeddings...')
embeds_word = Embeds().load(word_embeds_fname, embeds_format)
embeds_ll3 = Embeds().load(char_embeds_fname, embeds_format)
# ====Clean texts====
if mode in ('preprocess', 'all'):
logger.info('Cleaning text...')
train_df['comment_text_clear'] = clean_text(train_df['comment_text'], wrong_words_dict, autocorrect=True)
test_df['comment_text_clear'] = clean_text(test_df['comment_text'], wrong_words_dict, autocorrect=True)
train_df.to_csv(os.path.join(output_dir, 'train_clear.csv'), index=False)
test_df.to_csv(os.path.join(output_dir, 'test_clear.csv'), index=False)
# ====Calculate maximum seq length====
logger.info('Calc text length...')
train_df.fillna('__NA__', inplace=True)
test_df.fillna('__NA__', inplace=True)
train_df['text_len'] = train_df['comment_text_clear'].apply(lambda words: len(words.split()))
test_df['text_len'] = test_df['comment_text_clear'].apply(lambda words: len(words.split()))
max_seq_len = np.round(train_df['text_len'].mean() + 3*train_df['text_len'].std()).astype(int)
max_char_seq_len = 2000 # empirical
logger.debug('Max seq length = {}'.format(max_seq_len))
# ====Prepare data to NN====
logger.info('Converting texts to sequences...')
if mode in ('preprocess', 'all'):
train_df[seq_col_name_words], test_df[seq_col_name_words], word_index, train_df[seq_col_name_ll3], test_df[seq_col_name_ll3], ll3_index = convert_text2seq(
train_df['comment_text_clear'].tolist(),
test_df['comment_text_clear'].tolist(),
max_words,
max_seq_len,
max_char_seq_len,
embeds_word,
lower=True,
oov_token='__<PASSWORD>',
uniq=False,
use_only_exists_words=use_only_exists_words)
logger.debug('Dictionary size use_exist{} = {}'.format(int(use_only_exists_words), len(word_index)))
logger.debug('Char dict size use_exist{} = {}'.format(int(use_only_exists_words), len(ll3_index)))
logger.info('Preparing embedding matrix...')
words_not_found = embeds_word.set_matrix(max_words, word_index)
embeds_ll3.matrix = np.random.normal(size=(len(ll3_index), embeds_word.shape[1]))
embeds_ll3.word_index = ll3_index
embeds_ll3.word_index_reverse = {val: key for key, val in ll3_index.items()}
embeds_ll3.shape = np.shape(embeds_ll3.matrix)
embeds_word.save(os.path.join(output_dir, 'wiki.embeds_lw.{}k'.format(int(max_words/1000))))
embeds_ll3.save(os.path.join(output_dir, 'wiki.embeds_ll3.{}k'.format(int(max_words/1000))))
# ====Get text vector====
pooling = {
'max': {'func': np.max},
'avg': {'func': np.sum, 'normalize': True},
'sum': {'func': np.sum, 'normalize': False}
}
for p in ['max', 'avg', 'sum']:
train_df['comment_vec_{}'.format(p)] = train_df[seq_col_name_words].apply(lambda x: embed_aggregate(x, embeds_word, **pooling[p]))
test_df['comment_vec_{}'.format(p)] = test_df[seq_col_name_words].apply(lambda x: embed_aggregate(x, embeds_word, **pooling[p]))
train_df.to_csv(os.path.join(output_dir, 'train_clear1.csv'), index=False)
test_df.to_csv(os.path.join(output_dir, 'test_clear1.csv'), index=False)
else:
for col in train_df.columns:
if col.startswith('comment_seq'):
train_df[col] = train_df[col].apply(lambda x: parse_seq(x, int))
test_df[col] = test_df[col].apply(lambda x: parse_seq(x, int))
elif col.startswith('comment_vec'):
train_df[col] = train_df[col].apply(lambda x: parse_seq(x, float))
test_df[col] = test_df[col].apply(lambda x: parse_seq(x, float))
logger.debug('Embedding matrix shape = {}'.format(embeds_word.shape))
logger.debug('Number of null word embeddings = {}'.format(np.sum(np.sum(embeds_word.matrix, axis=1) == 0)))
# ====END OF `PREPROCESS`====
if mode == 'preprocess':
return True
# ====Train/test split data====
x = np.array(train_df[seq_col_name_words].values.tolist())
y = np.array(train_df[target_labels].values.tolist())
x_train_nn, x_val_nn, y_train, y_val, train_idxs, val_idxs = split_data(x, y, test_size=0.2, shuffle=True, random_state=42)
x_test_nn = np.array(test_df[seq_col_name_words].values.tolist())
x_char = np.array(train_df[seq_col_name_ll3].values.tolist())
x_char_train_nn = x_char[train_idxs]
x_char_val_nn = x_char[val_idxs]
x_char_test_nn = np.array(test_df[seq_col_name_ll3].values.tolist())
x_train_tfidf = train_df['comment_text_clear'].values[train_idxs]
x_val_tfidf = train_df['comment_text_clear'].values[val_idxs]
x_test_tfidf = test_df['comment_text_clear'].values
catboost_cols = catboost_features(train_df, test_df)
x_train_cb = train_df[catboost_cols].values[train_idxs].T
x_val_cb = train_df[catboost_cols].values[val_idxs].T
x_test_cb = test_df[catboost_cols].values.T
# ====Train models====
nn_models = {
'cnn': cnn,
'dense': dense,
'rnn': rnn
}
params = Params(config)
metrics = {}
predictions = {}
    for i, param in enumerate(params['models']):
for model_label, model_params in param.items():
if model_params.get('common', {}).get('warm_start', False) and os.path.exists(model_params.get('common', {}).get('model_file', '')):
logger.info('{} warm starting...'.format(model_label))
model = load_model(model_params.get('common', {}).get('model_file', None))
elif model_label in nn_models:
model = nn_models[model_label](
embeds_word.matrix,
embeds_ll3.matrix,
num_classes,
max_seq_len,
max_char_seq_len,
gpus=gpus,
**model_params['init'])
model_alias = model_params.get('common', {}).get('alias', None)
if model_alias is None or not model_alias:
model_alias = '{}_{}'.format(model_label, i)
logger.info("training {} ...".format(model_label))
if model_label == 'dense':
x_tr = [x_train_nn, x_char_train_nn]
x_val = [x_val_nn, x_char_val_nn]
x_test = [x_test_nn, x_char_test_nn]
else:
x_tr = x_train_nn
x_val = x_val_nn
x_test = x_test_nn
hist = train(x_tr,
y_train,
model,
logger=logger,
**model_params['train'])
predictions[model_alias] = model.predict(x_val)
save_predictions(test_df, model.predict(x_test), target_labels, model_alias)
elif model_label == 'tfidf':
model = TFIDF(target_labels, **model_params['init'])
model.fit(x_train_tfidf, y_train, **model_params['train'])
predictions[model_alias] = model.predict(x_val_tfidf)
save_predictions(test_df, model.predict(x_test_tfidf), target_labels, model_alias)
elif model_label == 'catboost':
model = CatBoost(target_labels, **model_params['init'])
model.fit(x_train_cb, y_train, eval_set=(x_val_cb, y_val), use_best_model=True)
predictions[model_alias] = model.predict_proba(x_val_cb)
save_predictions(test_df, model.predict_proba(x_test_cb), target_labels, model_alias)
metrics[model_alias] = get_metrics(y_val, predictions[model_alias], target_labels)
logger.debug('{} params:\n{}'.format(model_alias, model_params))
logger.debug('{} metrics:\n{}'.format(model_alias, print_metrics(metrics[model_alias])))
model.save(os.path.join(output_dir, model_params['common']['model_file']))
logger.info('Saving metrics...')
with open(os.path.join(output_dir, 'metrics.json'), 'w') as f:
f.write(json.dumps(metrics))
# ====END OF `VALIDATE`====
if mode == 'validate':
return True
# Meta catboost
logger.info('training catboost as metamodel...')
x_meta = [predictions[model_alias] for model_alias in sorted(predictions.keys())]
    # stack per-model class probabilities into a single meta-feature matrix
    x_meta = np.concatenate(x_meta, axis=1)
x_train_meta, x_val_meta, y_train_meta, y_val_meta = train_test_split(x_meta, y_val, test_size=0.20, random_state=42)
meta_model = CatBoost(target_labels,
loss_function='Logloss',
iterations=1000,
depth=6,
learning_rate=0.03,
rsm=1
)
meta_model.fit(x_train_meta, y_train_meta, eval_set=(x_val_meta, y_val_meta), use_best_model=True)
y_hat_meta = meta_model.predict_proba(x_val_meta)
metrics_meta = get_metrics(y_val_meta, y_hat_meta, target_labels)
    # meta_model.save(os.path.join(output_dir, 'meta.catboost'))
logger.debug('{} metrics:\n{}'.format('META', print_metrics(metrics_meta)))
# ====Predict====
logger.info('Applying models...')
test_cols = []
for model_alias in sorted(predictions.keys()):
for label in target_labels:
test_cols.append('{}_{}'.format(model_alias, label))
x_test = test_df[test_cols].values
preds = meta_model.predict_proba(x_test)
for i, label in enumerate(target_labels):
test_df[label] = preds[:, i]
# ====Normalize probabilities====
if norm_prob:
for label in target_labels:
test_df[label] = norm_prob_koef * test_df[label]
# ====Save results====
logger.info('Saving results...')
test_df[['id', 'toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']].to_csv(result_fname, index=False, header=True)
test_df.to_csv('{}_tmp'.format(result_fname), index=False, header=True)
if __name__=='__main__':
main()
|
52793
|
from django.conf.urls import url
from . import views
app_name = "restapi"
timestamp_regex = '\\d{4}[-]?\\d{1,2}[-]?\\d{1,2} \\d{1,2}:\\d{1,2}:\\d{1,2}'
urlpatterns = [
url(r'^about/$', views.AboutView.as_view(), name='about'),
url(r'^centralized-oracles/$', views.CentralizedOracleListView.as_view(), name='centralized-oracles'),
url(r'^centralized-oracles/(0x)?(?P<oracle_address>[a-fA-F0-9]+)/$', views.CentralizedOracleFetchView.as_view(), name='centralized-oracles-by-address'),
url(r'^events/$', views.EventListView.as_view(), name='events'),
url(r'^events/(0x)?(?P<event_address>[a-fA-F0-9]+)/$', views.EventFetchView.as_view(), name='events-by-address'),
url(r'^markets/$', views.MarketListView.as_view(), name='markets'),
url(r'^markets/(0x)?(?P<market_address>[a-fA-F0-9]+)/$', views.MarketFetchView.as_view(), name='markets-by-name'),
url(r'^markets/(0x)?(?P<market_address>[a-fA-F0-9]+)/shares/$', views.AllMarketSharesView.as_view(), name='all-shares'),
url(r'^markets/(0x)?(?P<market_address>[a-fA-F0-9]+)/shares/(0x)?(?P<owner_address>[a-fA-F0-9]+)/$', views.MarketSharesView.as_view(), name='shares-by-owner'),
url(r'^markets/(0x)?(?P<market_address>[a-fA-F0-9]+)/trades/$', views.MarketTradesView.as_view(), name='trades-by-market'),
url(r'^markets/(0x)?(?P<market_address>[a-fA-F0-9]+)/trades/(0x)?(?P<owner_address>[a-fA-F0-9]+)/$', views.MarketParticipantTradesView.as_view(), name='trades-by-owner'),
url(r'^account/(0x)?(?P<account_address>[a-fA-F0-9]+)/trades/$', views.AccountTradesView.as_view(), name='trades-by-account'),
url(r'^account/(0x)?(?P<account_address>[a-fA-F0-9]+)/shares/$', views.AccountSharesView.as_view(), name='shares-by-account'),
url(r'^factories/$', views.factories_view, name='factories'),
url(r'^scoreboard/$', views.ScoreboardView.as_view(), name='scoreboard'),
url(r'^scoreboard/(0x)?(?P<account_address>[a-fA-F0-9]+)$', views.ScoreboardUserView.as_view(), name='scoreboard'),
]
|
52808
|
from gekko import GEKKO
import numpy as np
import matplotlib.pyplot as plt
# generate training data
x = np.linspace(0.0,2*np.pi,20)
y = np.sin(x)
# option for fitting function
select = True # True / False
if select:
# Size with cosine function
nin = 1 # inputs
n1 = 1 # hidden layer 1 (linear)
n2 = 1 # hidden layer 2 (nonlinear)
n3 = 1 # hidden layer 3 (linear)
nout = 1 # outputs
else:
# Size with hyperbolic tangent function
nin = 1 # inputs
n1 = 2 # hidden layer 1 (linear)
n2 = 2 # hidden layer 2 (nonlinear)
n3 = 2 # hidden layer 3 (linear)
nout = 1 # outputs
# Initialize gekko
train = GEKKO()
test = GEKKO()
model = [train,test]
for m in model:
# input(s)
m.inpt = m.Param()
# layer 1
m.w1 = m.Array(m.FV, (nin,n1))
m.l1 = [m.Intermediate(m.w1[0,i]*m.inpt) for i in range(n1)]
# layer 2
m.w2a = m.Array(m.FV, (n1,n2))
m.w2b = m.Array(m.FV, (n1,n2))
if select:
m.l2 = [m.Intermediate(sum([m.cos(m.w2a[j,i]+m.w2b[j,i]*m.l1[j]) \
for j in range(n1)])) for i in range(n2)]
else:
m.l2 = [m.Intermediate(sum([m.tanh(m.w2a[j,i]+m.w2b[j,i]*m.l1[j]) \
for j in range(n1)])) for i in range(n2)]
# layer 3
m.w3 = m.Array(m.FV, (n2,n3))
m.l3 = [m.Intermediate(sum([m.w3[j,i]*m.l2[j] \
for j in range(n2)])) for i in range(n3)]
# output(s)
m.outpt = m.CV()
m.Equation(m.outpt==sum([m.l3[i] for i in range(n3)]))
# flatten matrices
m.w1 = m.w1.flatten()
m.w2a = m.w2a.flatten()
m.w2b = m.w2b.flatten()
m.w3 = m.w3.flatten()
# Fit parameter weights
m = train
m.inpt.value=x
m.outpt.value=y
m.outpt.FSTATUS = 1
for i in range(len(m.w1)):
m.w1[i].FSTATUS=1
m.w1[i].STATUS=1
m.w1[i].MEAS=1.0
for i in range(len(m.w2a)):
m.w2a[i].STATUS=1
m.w2b[i].STATUS=1
m.w2a[i].FSTATUS=1
m.w2b[i].FSTATUS=1
m.w2a[i].MEAS=1.0
m.w2b[i].MEAS=0.5
for i in range(len(m.w3)):
m.w3[i].FSTATUS=1
m.w3[i].STATUS=1
m.w3[i].MEAS=1.0
m.options.IMODE = 2
m.options.SOLVER = 3
m.options.EV_TYPE = 2
m.solve(disp=False)
# Test sample points
m = test
for i in range(len(m.w1)):
m.w1[i].MEAS=train.w1[i].NEWVAL
m.w1[i].FSTATUS = 1
print('w1['+str(i)+']: '+str(m.w1[i].MEAS))
for i in range(len(m.w2a)):
m.w2a[i].MEAS=train.w2a[i].NEWVAL
m.w2b[i].MEAS=train.w2b[i].NEWVAL
m.w2a[i].FSTATUS = 1
m.w2b[i].FSTATUS = 1
print('w2a['+str(i)+']: '+str(m.w2a[i].MEAS))
print('w2b['+str(i)+']: '+str(m.w2b[i].MEAS))
for i in range(len(m.w3)):
m.w3[i].MEAS=train.w3[i].NEWVAL
m.w3[i].FSTATUS = 1
print('w3['+str(i)+']: '+str(m.w3[i].MEAS))
m.inpt.value=np.linspace(-2*np.pi,4*np.pi,100)
m.options.IMODE = 2
m.options.SOLVER = 3
m.solve(disp=False)
plt.figure()
plt.plot(x,y,'bo',label='data')
plt.plot(test.inpt.value,test.outpt.value,'r-',label='predict')
plt.legend(loc='best')
plt.ylabel('y')
plt.xlabel('x')
plt.show()
|
52905
|
from setuptools import find_packages, setup
import re
# parse dyneusr/_version.py
try:
version_fn = 'dyneusr/_version.py'
with open(version_fn) as version_fd:
version = version_fd.read()
version_re = r"^__version__ = ['\"]([^'\"]*)['\"]"
version = re.findall(version_re, version, re.M)[0]
except:
raise RuntimeError("Unable to read version in {}.".format(version_fn))
# parse requirements.txt
with open('requirements.txt') as f:
install_requires = [_ for _ in f.read().split('\n')
if len(_) and _[0].isalpha()]
# parse README.md
with open('README.md') as f:
long_description = f.read()
# run setup
setup(
name='dyneusr',
version=version,
description='Dynamical Neural Spatiotemporal Representations.',
long_description=long_description,
long_description_content_type="text/markdown",
author='<NAME>',
author_email='<EMAIL>',
url='https://braindynamicslab.github.io/dyneusr',
license='BSD-3',
packages=find_packages(),
include_package_data=True,
install_requires=install_requires,
python_requires='>=3.6',
classifiers=[
"Intended Audience :: Science/Research",
"Topic :: Scientific/Engineering :: Information Analysis",
"Topic :: Scientific/Engineering :: Visualization",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
],
keywords='brain dynamics, topology data analysis, neuroimaging, brain networks, mapper, visualization',
)
|