from typer.testing import CliRunner
from cipher_typer.typer_cli import app
runner = CliRunner()
def test_app():
result = runner.invoke(app, ["encrypt", "Meet me at 6pm", "23"])
assert result.exit_code == 0
assert "+BBQwJBwxQwsMJ" in result.stdout
def test_app_decrypt():
result = runner.invoke(app, ["decrypt", "+BBQwJBwxQwsMJ", "23"])
assert result.exit_code == 0
assert "Meet me at 6pm" in result.stdout
def test_app_seed():
result = runner.invoke(
app, ["encrypt", "6000 feet underwater !?", "67", "--method", "seed"]
)
assert result.exit_code == 0
assert "%666}I##p}mw)#&Njp#&};r" in result.stdout
def test_app_seed_decrypt():
result = runner.invoke(
app, ["decrypt", "%666}I##p}mw)#&Njp#&};r", "67", "--method", "seed"]
)
assert result.exit_code == 0
assert "6000 feet underwater !?" in result.stdout
|
import os
import re
import requests
import datetime
import json
from joblib import Parallel, delayed
DATA_PATH = "/media/jerem/DATA/Eric/OpenScience/unpaywall_test/"
UNPAYWALL_SNAPSHOT_BASE_URL = \
"https://s3-us-west-2.amazonaws.com/unpaywall-data-snapshots/"
UNPAYWALL_SNAPSHOT_FILE = \
"unpaywall_snapshot_2018-06-21T164548_with_versions.jsonl.gz"
UNPAYWALL_SNAPSHOT_URL = UNPAYWALL_SNAPSHOT_BASE_URL + UNPAYWALL_SNAPSHOT_FILE
DATAESR_UNPAYWALL_DUMP = "http://0.0.0.0:5000/publications/dumps_unpaywall"
YEAR_START = 2017
YEAR_END = 2017
NB_JOBS = 4  # number of jobs to run in parallel
def chunks(l, n):
"""Yield successive n-sized chunks from l."""
for i in range(0, len(l), n):
yield l[i:i + n]
def normalize_doi(doi):
""" Remove / at the end of the doi and lower it """
doi_normalized = re.sub("(/){1,}$", "", doi.lower())
return doi_normalized
def post_unpaywall_data(d):
d['doi'] = normalize_doi(d['doi'])
d['treated'] = False
r = requests.post(DATAESR_UNPAYWALL_DUMP, json=d)
if r.ok is False:
print(r.text)
download_snapshot = False
# download and unzip the unpaywall snapshot
if download_snapshot:
os.system("mkdir -p {}".format(DATA_PATH))
print("start downloading and unzipping data ")
os.system("cd {0} && wget {1} && gunzip {2}".format(
DATA_PATH, UNPAYWALL_SNAPSHOT_URL, UNPAYWALL_SNAPSHOT_FILE))
print(" done !")
print()
split_snapshot = False
# split by year
if split_snapshot:
print("start filtering and splitting data ")
for year in range(YEAR_START, YEAR_END + 1):
os.system("mkdir -p {}{}".format(DATA_PATH, year))
fgrep_cmd = 'fgrep "\\"year\\": {}" {}'.format(
year, UNPAYWALL_SNAPSHOT_FILE.replace('.gz', ''))
# keeping lines with the wanted year
grep_year_cmd = "cd {} && ".format(DATA_PATH) + fgrep_cmd
grep_year_cmd += " > {0}{1}/unpaywall_{1}.json".format(DATA_PATH, year)
os.system(grep_year_cmd)
# splitting the file
split_cmd = "cd {0}{1} && split -l 100000 unpaywall_{1}.json".format(
DATA_PATH, year)
os.system(split_cmd)
print(" done !")
print()
# now loading the dump unpaywall data
for year in range(YEAR_START, YEAR_END + 1):
files = []
for x in os.listdir('{}{}'.format(DATA_PATH, year)):
if len(x) == 3 and x[0] == 'x':
files.append(x)
files = sorted(files)
for file_x in files:
start_time = datetime.datetime.now()
elts = []
with open('{}{}/{}'.format(DATA_PATH, year, file_x)) as f:
for line in f:
elts.append(json.loads(line))
elts_chunks = list(chunks(elts, 1000))
for i, sub_list in enumerate(elts_chunks):
Parallel(n_jobs=NB_JOBS)(
delayed(
post_unpaywall_data)(elt) for elt in sub_list)
end_time = datetime.datetime.now()
print("{} {}: {}".format(year, file_x, end_time - start_time))
|
x1 = int(input("Enter x1 value bellow: "))
x2 = int(input("Enter x2 value bellow: "))
y1 = int(input("Enter y1 value bellow: "))
y2 = int(input("Enter y2 value bellow: "))
def computeDistance(x1, x2, y1, y2):
#return math.sqrt((math.pow((x2 - x1), 2)) + (math.pow((y2 - y1), 2)))
return ((x2 - x1)**2 + (y2 - y1)**2)**0.5
print("Distance between points is: " + str(computeDistance(x1, x2, y1, y2))) |
import h5py
import numpy
import glob
import cinemasci.cdb
import os.path
class ascent():
def __init__(self):
self.outcdb = None
self.basename = None
self.dirname = None
self.cycles = []
self.minmax = {}
self.variables = []
#
# convert an ascent hdf5 data set of float images to a cinema database
#
# this function looks in the "indir" directory for files matching the pattern:
# <something>.cycle_<0 padded number>
#
def convert(self, indir, outcdb):
self.indir = indir
self.outcdb = outcdb
# initialize the cinema database
cdb = cinemasci.new("cdb", {"path": self.outcdb})
cdb.initialize()
self.__get_cycles()
self.__get_variable_ranges()
curextract = 0
data = {}
for c in self.cycles:
hfile = os.path.join(self.dirname, "{}.cycle_{}/domain_000000.hdf5".format(self.basename, c))
with h5py.File(hfile, "r") as bpf:
# shape
w = bpf["coordsets/coords/dims/i"][0] - 1
h = bpf["coordsets/coords/dims/j"][0] - 1
for v in self.variables:
# write the compressed data
data[v] = bpf.get("fields/{}/values".format(v))[...].reshape((w,h))
numpy.savez_compressed("{}/{}".format(self.outcdb, str(curextract).zfill(6)), data=data[v])
channel = v
if channel == 'depth':
channel = 'CISDepth'
# insert an entry in to the database
id = cdb.add_entry({'cycle': c,
'CISImage': 'cycle_{}'.format(c.zfill(6)),
'CISVersion': '1.0',
'CISOrigin': 'UL',
'CISImageWidth': w,
'CISImageHeight': h,
'CISLayer': 'layer0',
'CISLayerOffsetX': 0,
'CISLayerOffsetY': 0,
'CISLayerWidth': w,
'CISLayerHeight': h,
'CISChannel': channel,
'CISChannelVar': v,
'CISChannelVarType': 'float',
'CISChannelVarMin': self.minmax[v][0],
'CISChannelVarMax': self.minmax[v][1],
'FILE': '{}.npz'.format(str(curextract).zfill(6))
})
curextract = curextract + 1
cdb.finalize()
#
# run through the data, computing the min/max of all variables
#
def __get_variable_ranges(self):
for c in self.cycles:
hfile = os.path.join(self.dirname, "{}.cycle_{}/domain_000000.hdf5".format(self.basename, c))
with h5py.File(hfile, "r") as bpf:
w = bpf["coordsets/coords/dims/i"][0] - 1
h = bpf["coordsets/coords/dims/j"][0] - 1
# get the variable names
fields = bpf["fields"]
for val in fields.keys():
if not val == "ascent_ghosts":
if not val in self.variables:
self.variables.append(val)
# get the min/max variables
for v in self.variables:
data = bpf.get("fields/{}/values".format(v))[...].reshape((w,h))
if v in self.minmax:
vmin = numpy.nanmin(data)
vmax = numpy.nanmax(data)
if vmin < self.minmax[v][0]:
self.minmax[v][0] = vmin
if vmax > self.minmax[v][1]:
self.minmax[v][1] = vmax
else:
self.minmax[v] = [numpy.nanmin(data), numpy.nanmax(data)]
def __get_cycles(self):
cycles = []
hpaths = glob.glob("{}/*.cycle_[0-9]*.root".format(self.indir))
self.dirname = os.path.dirname(hpaths[0])
self.basename = os.path.basename(hpaths[0]).split('.')[0]
for p in hpaths:
split_path = p.split('_')
cycle = split_path[-1].split('.')
self.cycles.append(cycle[0])
self.cycles.sort()
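
# A minimal usage sketch (hedged): the input directory and output database path
# below are placeholders; the converter only assumes the input directory holds
# files named <something>.cycle_<number>.root as described above.
if __name__ == "__main__":
    converter = ascent()
    converter.convert("ascent_output", "ascent_output.cdb")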
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 CENATIC: Centro Nacional de Referencia de
# Aplicacion de las TIC basadas en Fuentes Abiertas, Spain.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# Neither the name of the CENATIC nor the names of its contributors
# may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# You may contact the copyright holder at: Fundacion CENATIC, Edificio
# de Servicios Sociales: C/ Vistahermosa, 1, 3ra planta, 06200
# Almendralejo (Badajoz), Spain
import CTK
import Auth
import Bookmark
import Asset
APPEND_JS = """
$("#%s").val($.trim($("#%s").val() +' '+ $("#%s").val()));
"""
OPTIONS_JS = """
var renders = %s;
$('#%s').show().html(renders[$('#%s').val()]);
"""
class ComboTextField (CTK.Container):
def __init__ (self, props={}, options=[], option_renders={}):
CTK.Container.__init__ (self)
self.props = props.copy()
# Option View
box = CTK.Box({'id': 'info_%s' % self.id, 'class': 'no-see part_info'})
# TextField widget
text_props = self.props.copy()
text_props['class'] = '%s two_fields'%(text_props.get('class',''))
text = CTK.TextField(text_props)
        # Combobox widget
combo_props = self.props.copy()
combo_props['name'] = "%s_combo"%(props['name'])
combo = CTK.Combobox (combo_props, options)
combo.bind ('change', APPEND_JS % (text.id, text.id, combo.id))
combo.bind ('change', OPTIONS_JS % (str(option_renders), box.id, combo.id))
# Chooser
table = CTK.Table()
table[(1,1)] = text
table[(1,2)] = CTK.RawHTML (" ")
table[(1,3)] = combo
self += table
self += box
class PartChooser (ComboTextField):
def __init__ (self, props={}):
user_id = Auth.get_user_id()
bookmarks = Bookmark.get_user_bookmarks (user_id)
options = [('','--')]+[(b,'#%s'%b) for b in bookmarks]
props['selected'] = ''
renders = self._render_options (bookmarks)
ComboTextField.__init__ (self, props, options, renders)
def _render_options (self, bookmarks):
renders = {}
for asset_id in bookmarks:
try:
asset = Asset.Asset (asset_id)
diz = asset.get_diz()
diz['id'] = asset_id
info = CTK.RawHTML('<p><img src="%(thumb)s" /></p>'
'<p>Activo: #%(id)d</p>'
'<p>Título: %(title)s</p>'
'<p>Autor: %(creator)s</p>'
'<p>Licencia: %(license)s</p>'% (diz))
renders[asset_id] = info.Render().html
except IndexError:
pass
return renders
|
import asyncio
from nats.aio.client import Client as NATS
from nats.aio.errors import ErrTimeout, ErrNoServers
class Handler:
__instance = None
    def __init__(self, servers, loop):
        self.nc = NATS()
        self.loop = loop
        # Fall back to a local NATS server when no servers are provided.
        if servers is None:
            servers = ["nats://127.0.0.1:4222"]
        self.servers = servers
        self.queue = asyncio.Queue()
async def error_cb(self, e):
print("Error:", e)
async def closed_cb(self):
print("Connection to NATS is closed.")
async def reconnected_cb(self):
print(f"Connected to NATS at {self.nc.connected_url.netloc}...")
async def connect(self):
        if self.nc is not None:
# Options to the nats connection
options = {
"loop": self.loop,
"error_cb": self.error_cb,
"closed_cb": self.closed_cb,
"reconnected_cb": self.reconnected_cb,
"servers": self.servers
}
# connect to nats server
await self.nc.connect(**options)
print("Connected to: " + self.servers)
async def disconnect(self):
        if self.loop is not None:
await self.nc.flush()
await self.nc.close()
self.loop.stop()
print("Connection to NATS is closed.")
async def subscribe(self, subject, cb):
        if self.nc is not None:
await self.nc.subscribe(subject, cb=cb)
print("Subscribed to: " + subject)
async def publish(self, subject, message):
        if self.nc is not None:
            await self.nc.publish(subject, message)
            print("Message:", message, "published to:", subject)
|
import argparse
import os
from itertools import chain
from datasets import load_dataset
from tokenizers import Tokenizer
from tokenizers.models import WordLevel
from tokenizers.pre_tokenizers import Whitespace
from tokenizers.processors import TemplateProcessing
from tokenizers.trainers import WordLevelTrainer
from process_data.utils import CURRENT_DATA_BASE, ORIGINAL_DATA_BASE, read_file
BASE_PATH = "/home/ming/malware/inst2vec_bert/bert/"
def parse_args():
parser = argparse.ArgumentParser(
description="Train a word level tokenizer for ASM_BERT"
)
parser.add_argument(
"--vocab_size",
type=int,
default=1016,
help="The size of vocabulary used to train the tokenizer.",
)
parser.add_argument(
"--padding_length",
type=int,
default=3,
help="The length will be padded to by the tokenizer.",
)
args = parser.parse_args()
return args
def train_tokenizer(args, dataset):
tokenizer = Tokenizer(WordLevel(unk_token="[UNK]"))
tokenizer.pre_tokenizer = Whitespace()
trainer = WordLevelTrainer(
vocab_size=args.vocab_size,
special_tokens=["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]"],
)
tokenizer.train_from_iterator(dataset, trainer)
return tokenizer
def save_tokenizer(tokenizer, tokenizer_file):
tokenizer.save(tokenizer_file)
def load_tokenizer(tokenizer_file):
if not os.path.exists(tokenizer_file):
print("{} doesn't exist, will be retrained...".format(tokenizer_file))
return None
print("The tokenizer has already been trained.")
return Tokenizer.from_file(tokenizer_file)
def post_process(tokenizer):
tokenizer.post_processor = TemplateProcessing(
single="$A",
# pair="[CLS] $A [SEP] $B:1 [SEP]:1",
# special_tokens=[
# ("[CLS]", tokenizer.token_to_id("[CLS]")),
# ("[SEP]", tokenizer.token_to_id("[SEP]")),
# ],
)
return tokenizer
def tokenizer_encode(tokenizer, data):
return tokenizer.encode_batch(data)
def main(tokenizer_file=""):
args = parse_args()
tokenizer = load_tokenizer(tokenizer_file)
if tokenizer is not None:
return
# json_files = [
# os.path.join(CURRENT_DATA_BASE, "inst.1.{}.json".format(i)) for i in range(128)
# ]
# dataset = load_dataset("json", data_files=json_files, field="data")
text_files = [
os.path.join(CURRENT_DATA_BASE, "inst_of_block.{}".format(i)) for i in range(59)
]
dataset = []
for f in text_files:
tmp = read_file(f)
dataset += [block[:-1].split("\t") for block in tmp]
print("Get {} instructions".format(len(dataset)))
print("Trainging tokenizer...")
tokenizer = train_tokenizer(args, chain.from_iterable(dataset))
tokenizer = post_process(tokenizer)
tokenizer.enable_padding(
pad_id=tokenizer.token_to_id("[PAD]"),
pad_token="[PAD]",
length=args.padding_length,
)
save_tokenizer(tokenizer, tokenizer_file)
if __name__ == "__main__":
main(os.path.join(CURRENT_DATA_BASE, "tokenizer-inst.all.json"))
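    # Illustrative follow-up (hedged): reload the saved tokenizer and encode a
    # sample whitespace-separated instruction; the instruction text below is a
    # made-up placeholder, not taken from the real data.
    tok = Tokenizer.from_file(
        os.path.join(CURRENT_DATA_BASE, "tokenizer-inst.all.json")
    )
    encoding = tok.encode("mov eax ebx")
    print("tokens:", encoding.tokens, "ids:", encoding.ids)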
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from itertools import permutations
import six
import numpy as np
import tensorflow as tf
from zhusuan.model.utils import *
from zhusuan.model.utils import Context
from tests.model._div_op import regular_div, floor_div
from tests.model._true_div_op import true_div
class TestContext(tf.test.TestCase):
def test_Context(self):
self.assertEqual(Context.get_contexts(), [])
with self.assertRaisesRegexp(RuntimeError, "No contexts on the stack"):
Context.get_context()
with Context() as context:
self.assertEqual(Context.get_contexts(), [context])
self.assertEqual(Context.get_context(), context)
with Context() as context_inner:
self.assertEqual(Context.get_contexts(),
[context, context_inner])
self.assertEqual(Context.get_context(), context_inner)
self.assertEqual(Context.get_contexts(), [context])
self.assertEqual(Context.get_context(), context)
self.assertEqual(Context.get_contexts(), [])
with self.assertRaisesRegexp(RuntimeError, "No contexts on the stack"):
Context.get_context()
class TestGetBackwardTensors(tf.test.TestCase):
def testGetBackwardOpsChain(self):
# a -> b -> c
a = tf.placeholder(tf.float32)
b = tf.sqrt(a)
c = tf.square(b)
for n in range(4):
for seed_tensors in permutations([a, b, c], n):
if c in seed_tensors:
truth = [a.op, b.op, c.op]
elif b in seed_tensors:
truth = [a.op, b.op]
elif a in seed_tensors:
truth = [a.op]
else:
truth = []
self.assertEqual(get_backward_ops(seed_tensors), truth)
self.assertEqual(get_backward_ops([c], treat_as_inputs=[b]), [c.op])
self.assertEqual(
get_backward_ops([b, c], treat_as_inputs=[b]), [c.op])
self.assertEqual(
get_backward_ops([a, c], treat_as_inputs=[b]), [a.op, c.op])
def testGetBackwardOpsSplit(self):
# a -> b -> c
# \-> d
a = tf.placeholder(tf.float32)
b = tf.exp(a)
c = tf.log(b)
d = tf.negative(b)
self.assertEqual(get_backward_ops([d]), [a.op, b.op, d.op])
self.assertEqual(get_backward_ops([c]), [a.op, b.op, c.op])
self.assertEqual(
get_backward_ops([c, d]), [a.op, b.op, c.op, d.op])
self.assertEqual(get_backward_ops([b, d]), [a.op, b.op, d.op])
self.assertEqual(get_backward_ops([a, d]), [a.op, b.op, d.op])
self.assertEqual(
get_backward_ops([c, d], treat_as_inputs=[b]), [c.op, d.op])
self.assertEqual(
get_backward_ops([c], treat_as_inputs=[d]), [a.op, b.op, c.op])
def testGetBackwardOpsMerge(self):
# a -> c -> d
# b ->/
a = tf.placeholder(tf.float32)
b = tf.constant(0, dtype=tf.int32)
c = tf.reduce_sum(a, reduction_indices=b)
d = tf.stop_gradient(c)
self.assertEqual(
get_backward_ops([d]), [a.op, b.op, c.op, d.op])
self.assertEqual(get_backward_ops([d], treat_as_inputs=[c]), [d.op])
self.assertEqual(
get_backward_ops([d], treat_as_inputs=[a]), [b.op, c.op, d.op])
def testGetBackwardOpsBridge(self):
# a -> b -> c -> d -> e
# \ --- /
a = tf.placeholder(tf.int32)
b = tf.identity(a)
c = tf.cast(b, tf.float32)
d = tf.tile(c, b)
e = tf.tanh(d)
self.assertEqual(
get_backward_ops([e]), [a.op, b.op, c.op, d.op, e.op])
self.assertEqual(get_backward_ops([c]), [a.op, b.op, c.op])
self.assertEqual(get_backward_ops([e], treat_as_inputs=[c]),
[a.op, b.op, d.op, e.op])
def testGetBackwardOpsControlDeps(self):
# a -> b - \
# c -> d - e
# \ /
# f
a = tf.placeholder(tf.float32, name='a')
b = tf.identity(a, name='b')
c = tf.placeholder(tf.float32, name='c')
d = tf.identity(c, name='d')
with tf.control_dependencies([b, d]):
e = tf.placeholder(tf.float32, name='e')
with tf.control_dependencies([e, d]):
f = tf.placeholder(tf.float32, name='f')
self.assertEqual(get_backward_ops([f]),
[a.op, b.op, c.op, d.op, e.op, f.op])
self.assertEqual(get_backward_ops([d, f]),
[c.op, d.op, a.op, b.op, e.op, f.op])
self.assertEqual(get_backward_ops([f], treat_as_inputs=[b]),
[a.op, b.op, c.op, d.op, e.op, f.op])
self.assertEqual(get_backward_ops([f], treat_as_inputs=[b, c]),
[a.op, b.op, d.op, e.op, f.op])
self.assertEqual(get_backward_ops([f], treat_as_inputs=[d, e]),
[a.op, b.op, c.op, d.op, e.op, f.op])
self.assertEqual(get_backward_ops([d, f], treat_as_inputs=[b]),
[c.op, d.op, a.op, b.op, e.op, f.op])
def test_get_backward_ops_control_flow(self):
# while_loop, scan, TensorArray
pass
class _SimpleTensor(TensorArithmeticMixin):
def __init__(self, value):
self.value = tf.convert_to_tensor(value)
@property
def dtype(self):
return self.value.dtype
def _to_tensor(value, dtype=None, name=None, as_ref=False):
if dtype and not dtype.is_compatible_with(value.dtype):
raise ValueError('Incompatible type conversion requested to type '
'%s for variable of type %s' %
(dtype.name, value.dtype.name))
if as_ref:
raise ValueError('%r: Ref type not supported.' % value)
return value.value
tf.register_tensor_conversion_function(_SimpleTensor, _to_tensor)
class ArithMixinTestCase(tf.test.TestCase):
def test_prerequisite(self):
if six.PY2:
self.assertAlmostEqual(regular_div(3, 2), 1)
self.assertAlmostEqual(regular_div(3.3, 1.6), 2.0625)
else:
self.assertAlmostEqual(regular_div(3, 2), 1.5)
self.assertAlmostEqual(regular_div(3.3, 1.6), 2.0625)
self.assertAlmostEqual(true_div(3, 2), 1.5)
self.assertAlmostEqual(true_div(3.3, 1.6), 2.0625)
self.assertAlmostEqual(floor_div(3, 2), 1)
self.assertAlmostEqual(floor_div(3.3, 1.6), 2.0)
def test_unary_op(self):
def check_op(name, func, x):
x_tensor = tf.convert_to_tensor(x)
ans = func(x_tensor)
res = tf.convert_to_tensor(func(_SimpleTensor(x_tensor)))
self.assertEqual(
res.dtype, ans.dtype,
msg='Result dtype does not match answer after unary operator '
'%s is applied: %r vs %r (x is %r).' %
(name, res.dtype, ans.dtype, x)
)
res_val = res.eval()
ans_val = ans.eval()
np.testing.assert_equal(
res_val, ans_val,
err_msg='Result value does not match answer after unary '
'operator %s is applied: %r vs %r (x is %r).' %
(name, res_val, ans_val, x)
)
with tf.Graph().as_default(), self.test_session(use_gpu=True):
int_data = np.asarray([1, -2, 3], dtype=np.int32)
float_data = np.asarray([1.1, -2.2, 3.3], dtype=np.float32)
bool_data = np.asarray([True, False, True], dtype=np.bool)
check_op('abs', abs, int_data)
check_op('abs', abs, float_data)
check_op('neg', (lambda v: -v), int_data)
check_op('neg', (lambda v: -v), float_data)
check_op('invert', (lambda v: ~v), bool_data)
def test_binary_op(self):
def check_op(name, func, x, y):
x_tensor = tf.convert_to_tensor(x)
y_tensor = tf.convert_to_tensor(y)
ans = func(x_tensor, y_tensor)
res_1 = tf.convert_to_tensor(
func(_SimpleTensor(x_tensor), y_tensor))
res_2 = tf.convert_to_tensor(
func(x_tensor, _SimpleTensor(y_tensor)))
res_3 = tf.convert_to_tensor(
func(_SimpleTensor(x_tensor), _SimpleTensor(y_tensor)))
for tag, res in [('left', res_1), ('right', res_2),
('both', res_3)]:
self.assertEqual(
res.dtype, ans.dtype,
msg='Result dtype does not match answer after %s binary '
'operator %s is applied: %r vs %r (x is %r, y is %r).'
% (tag, name, res.dtype, ans.dtype, x, y)
)
res_val = res.eval()
ans_val = ans.eval()
np.testing.assert_equal(
res_val, ans_val,
err_msg='Result value does not match answer after %s '
'binary operator %s is applied: %r vs %r '
'(x is %r, y is %r).' %
(tag, name, res_val, ans_val, x, y)
)
def run_ops(x, y, ops):
for name, func in six.iteritems(ops):
check_op(name, func, x, y)
arith_ops = {
'add': lambda x, y: x + y,
'sub': lambda x, y: x - y,
'mul': lambda x, y: x * y,
'div': regular_div,
'truediv': true_div,
'floordiv': floor_div,
'mod': lambda x, y: x % y,
}
logical_ops = {
'and': lambda x, y: x & y,
'or': lambda x, y: x | y,
'xor': lambda x, y: x ^ y,
}
relation_ops = {
'lt': lambda x, y: x < y,
'le': lambda x, y: x <= y,
'gt': lambda x, y: x > y,
'ge': lambda x, y: x >= y,
}
with tf.Graph().as_default(), self.test_session(use_gpu=True):
# arithmetic operators
run_ops(np.asarray([-4, 5, 6], dtype=np.int32),
np.asarray([1, -2, 3], dtype=np.int32),
arith_ops)
run_ops(np.asarray([-4.4, 5.5, 6.6], dtype=np.float32),
np.asarray([1.1, -2.2, 3.3], dtype=np.float32),
arith_ops)
# it seems that tf.pow(x, y) does not support negative integers
# yet, so we individually test this operator here.
check_op('pow',
(lambda x, y: x ** y),
np.asarray([-4, 5, 6], dtype=np.int32),
np.asarray([1, 2, 3], dtype=np.int32))
check_op('pow',
(lambda x, y: x ** y),
np.asarray([-4.4, 5.5, 6.6], dtype=np.float32),
np.asarray([1.1, -2.2, 3.3], dtype=np.float32))
# logical operators
run_ops(np.asarray([True, False, True, False], dtype=np.bool),
np.asarray([True, True, False, False], dtype=np.bool),
logical_ops)
# relation operators
run_ops(np.asarray([1, -2, 3, -4, 5, 6, -4, 5, 6], dtype=np.int32),
np.asarray([1, -2, 3, 1, -2, 3, -4, 5, 6], dtype=np.int32),
relation_ops)
run_ops(
np.asarray([1.1, -2.2, 3.3, -4.4, 5.5, 6.6, -4.4, 5.5, 6.6],
dtype=np.float32),
np.asarray([1.1, -2.2, 3.3, 1.1, -2.2, 3.3, -4.4, 5.5, 6.6],
dtype=np.float32),
relation_ops
)
def test_getitem(self):
def check_getitem(x, y, xx, yy):
ans = tf.convert_to_tensor(x[y])
res = xx[yy]
self.assertEqual(
res.dtype, ans.dtype,
msg='Result dtype does not match answer after getitem '
'is applied: %r vs %r (x is %r, y is %r, xx is %r, '
'yy is %r).' % (res.dtype, ans.dtype, x, y, xx, yy)
)
res_val = res.eval()
ans_val = ans.eval()
np.testing.assert_equal(
res_val, ans_val,
err_msg='Result value does not match answer after '
'getitem is applied: %r vs %r (x is %r, y is %r, '
'xx is %r, yy is %r).' %
(res_val, ans_val, x, y, xx, yy)
)
class _SliceGenerator(object):
def __getitem__(self, item):
return item
sg = _SliceGenerator()
with tf.Graph().as_default(), self.test_session(use_gpu=True):
data = np.asarray([1, 2, 3, 4, 5, 6, 7, 8], dtype=np.int32)
indices_or_slices = [
0,
-1,
# TensorFlow has not supported array index yet.
# np.asarray([0, 3, 2, 6], dtype=np.int32),
# np.asarray([-1, -2, -3], dtype=np.int32),
sg[0:],
sg[:1],
sg[:: 2],
sg[-1:],
sg[: -1],
sg[:: -1],
]
for s in indices_or_slices:
x_tensor = tf.convert_to_tensor(data)
x_simple_tensor = _SimpleTensor(x_tensor)
check_getitem(data, s, x_simple_tensor, s)
if not isinstance(s, slice):
y_tensor = tf.convert_to_tensor(s)
y_simple_tensor = _SimpleTensor(y_tensor)
check_getitem(data, s, x_simple_tensor, y_tensor)
check_getitem(data, s, x_simple_tensor, y_simple_tensor)
check_getitem(data, s, x_tensor, y_simple_tensor)
|
# This is an automatically generated installer script for FontLab
# Generated on %(timeStamp)s
# scriptVersion: %(scriptVersion)s
p = "%(appLogPath)s"
log(p,"-" * 50)
log(p,"hello, this is application: %(appName)s")
log(p,"Running script version: %(scriptVersion)s")
log(p,"Platform:"+platform.platform())
resultData = []
resultData.append("application:\t%(appName)s")
print "I will log to", "%(appLogPath)s"
print "I will save my results to", "%(resultsPath)s"
print "I will test for these modules:", "%(moduleData)s"
from FL import *
log(p,"FontLab version "+fl.version)
resultData.append("version:\t"+fl.version)
installedAlready = False
for moduleName, data in %(moduleData)s.items():
print "---", moduleName, data
try:
print "---", __import__(moduleName, globals(), locals(), [], -1)
resultData.append("found:\t"+moduleName)
except ImportError:
resultData.append("mustLoad:\t"+moduleName)
sitePackagesCandidates = findSitePackages()
for candidate in findSitePackages():
resultData.append("path:\t"+candidate)
log(p,"site-packages found at: "+candidate)
f = open("%(resultsPath)s", 'wb')
f.write("\n".join(resultData))
f.close()
log(p,'done!') |
__author__ = 'guorongxu'
import os
import re
## Parse the correlation file and return a hash mapping each node to its list of edges.
def parse_correlation_file(input_file):
node_hash = {}
if os.path.exists(input_file):
with open(input_file) as fp:
lines = fp.readlines()
for line in lines:
fields = re.split(r'\t', line)
node_0 = fields[0] + "+g"
node_1 = fields[1] + "+g"
if node_0 in node_hash:
node_list = node_hash.get(node_0)
node_list.append([fields[1], fields[2], fields[3][:-1], "g"])
else:
node_list = []
node_list.append([fields[1], fields[2], fields[3][:-1], "g"])
node_hash.update({node_0:node_list})
if node_1 in node_hash:
node_list = node_hash.get(node_1)
node_list.append([fields[0], fields[2], fields[3][:-1], "g"])
else:
node_list = []
node_list.append([fields[0], fields[2], fields[3][:-1], "g"])
node_hash.update({node_1:node_list})
    return node_hash
## Main entry
if __name__ == "__main__":
    workspace = "/Users/guorongxu/Desktop/SearchEngine"
    data_set = "TCGA"
    tumor_type = "LIHC"
    # parse_correlation_file takes a single file path; the filename below is a
    # hypothetical placeholder for the correlation file under the workspace.
    input_file = os.path.join(workspace, data_set, tumor_type, "correlations.txt")
    parse_correlation_file(input_file) |
# Copyright 2018 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of the resource propagation aspect."""
load(
"@bazel_skylib//lib:paths.bzl",
"paths",
)
load(
"@bazel_skylib//lib:partial.bzl",
"partial",
)
load(
"@build_bazel_rules_apple//common:define_utils.bzl",
"define_utils",
)
load(
"@build_bazel_rules_apple//common:path_utils.bzl",
"path_utils",
)
load(
"@build_bazel_rules_apple//apple/internal:experimental.bzl",
"is_experimental_bundling_enabled",
)
load(
"@build_bazel_rules_apple//apple/internal:resources.bzl",
"NewAppleResourceInfo",
"resources",
)
load(
"@build_bazel_rules_swift//swift:swift.bzl",
"SwiftInfo",
)
# List of native resource attributes to collect from by default. This list should disappear in the
# long term; objc_library will remove the resource-specific attributes and the native rules (that
# have these attributes) will disappear. The new resource rules will either have specific attributes
# or use data, but in either case this list won't be used: if there are specific attributes, we will
# not merge them only to split them again.
_NATIVE_RESOURCE_ATTRS = [
"asset_catalogs",
"datamodels",
"resources",
"storyboards",
"strings",
"xibs",
]
def _structured_resources_parent_dir(resource, parent_dir):
"""Returns the package relative path for the parent directory of a resource.
    Args:
        resource: The resource for which to calculate the package relative path.
        parent_dir: Optional parent directory to prepend to the computed path.
    Returns:
        The package relative path to the parent directory of the resource.
"""
package_relative = path_utils.owner_relative_path(resource)
path = paths.dirname(package_relative).rstrip("/")
return paths.join(parent_dir or "", path or "") or None
def _bundle_relative_parent_dir(resource, extension):
"""Returns the bundle relative path to the resource rooted at the bundle.
Looks for the first instance of a folder with the suffix specified by `extension`, and then
returns the directory path to the file within the bundle. For example, for a resource with path
my/package/Contents.bundle/directory/foo.txt and `extension` equal to `"bundle"`, it would
return Contents.bundle/directory.
Args:
resource: The resource for which to calculate the bundle relative path.
extension: The bundle extension to use when finding the relative path.
Returns:
The bundle relative path, rooted at the outermost bundle.
"""
bundle_path = path_utils.farthest_directory_matching(resource.short_path, extension)
bundle_relative_path = paths.relativize(resource.short_path, bundle_path)
parent_dir = paths.basename(bundle_path)
bundle_relative_dir = paths.dirname(bundle_relative_path).strip("/")
if bundle_relative_dir:
parent_dir = paths.join(parent_dir, bundle_relative_dir)
return parent_dir
def _apple_resource_aspect_impl(target, ctx):
"""Implementation of the resource propation aspect."""
# Kill switch to disable the aspect unless explicitly required.
if not is_experimental_bundling_enabled(ctx):
return []
# If the target already propagates a NewAppleResourceInfo, do nothing.
if NewAppleResourceInfo in target:
return []
providers = []
bucketize_args = {}
collect_args = {}
# Owner to attach to the resources as they're being bucketed.
owner = None
if ctx.rule.kind == "objc_bundle":
bucketize_args["parent_dir_param"] = partial.make(
_bundle_relative_parent_dir,
extension = "bundle",
)
collect_args["res_attrs"] = ["bundle_imports"]
elif ctx.rule.kind == "objc_bundle_library":
parent_dir_param = "%s.bundle" % ctx.label.name
bucketize_args["parent_dir_param"] = parent_dir_param
collect_args["res_attrs"] = _NATIVE_RESOURCE_ATTRS
# Collect the specified infoplists that should be merged together. The replacement for
# objc_bundle_library should handle it within its implementation.
plists = resources.collect(ctx.rule.attr, res_attrs = ["infoplist", "infoplists"])
plist_provider = resources.bucketize_typed(
plists,
bucket_type = "infoplists",
parent_dir_param = parent_dir_param,
)
providers.append(plist_provider)
# Nest bundles added through the bundles attribute in objc_bundle_library.
if ctx.rule.attr.bundles:
bundle_merged_provider = resources.merge_providers(
[x[NewAppleResourceInfo] for x in ctx.rule.attr.bundles],
)
providers.append(resources.nest_bundles(bundle_merged_provider, parent_dir_param))
elif ctx.rule.kind == "objc_library":
collect_args["res_attrs"] = _NATIVE_RESOURCE_ATTRS
# Only set objc_library targets as owners if they have srcs, non_arc_srcs or deps. This
# treats objc_library targets without sources as resource aggregators.
if ctx.rule.attr.srcs or ctx.rule.attr.non_arc_srcs or ctx.rule.attr.deps:
owner = str(ctx.label)
# Collect objc_library's bundles dependencies and propagate them.
providers.extend([
x[NewAppleResourceInfo]
for x in ctx.rule.attr.bundles
])
elif ctx.rule.kind == "swift_library":
bucketize_args["swift_module"] = target[SwiftInfo].module_name
collect_args["res_attrs"] = ["resources"]
owner = str(ctx.label)
elif ctx.rule.kind == "apple_binary" or ctx.rule.kind == "apple_stub_binary":
        # Set the binary targets as the default_owner to avoid losing ownership information when
        # aggregating dependency resources that have an owner on one branch and no owner on
        # another branch. When rules_apple stops using apple_binary intermediaries, this should be
        # removed as there would not be an intermediate aggregator.
owner = str(ctx.label)
elif apple_common.Objc in target:
# TODO(kaipi): Clean up usages of the ObjcProvider as means to propagate resources, then
# remove this case.
if hasattr(target[apple_common.Objc], "merge_zip"):
merge_zips = target[apple_common.Objc].merge_zip.to_list()
merge_zips_provider = resources.bucketize_typed(
merge_zips,
bucket_type = "resource_zips",
)
providers.append(merge_zips_provider)
# Collect all resource files related to this target.
files = resources.collect(ctx.rule.attr, **collect_args)
if files:
providers.append(
resources.bucketize(files, owner = owner, **bucketize_args),
)
# If the target has structured_resources, we need to process them with a different
# parent_dir_param
if hasattr(ctx.rule.attr, "structured_resources"):
if ctx.rule.attr.structured_resources:
# TODO(kaipi): Validate that structured_resources doesn't have processable resources,
# e.g. we shouldn't accept xib files that should be compiled before bundling.
structured_files = resources.collect(
ctx.rule.attr,
res_attrs = ["structured_resources"],
)
if ctx.rule.kind == "objc_bundle_library":
# TODO(kaipi): Once we remove the native objc_bundle_library, there won't be a need
# for repeating the bundle name here.
structured_parent_dir = "%s.bundle" % ctx.label.name
else:
structured_parent_dir = None
# Avoid processing PNG files that are referenced through the structured_resources
# attribute. This is mostly for legacy reasons and should get cleaned up in the future.
providers.append(
resources.bucketize(
structured_files,
owner = owner,
parent_dir_param = partial.make(
_structured_resources_parent_dir,
parent_dir = structured_parent_dir,
),
avoid_buckets = ["pngs"],
),
)
# Get the providers from dependencies.
# TODO(kaipi): Add data here once we propagate resources through that attribute.
for attr in ["deps"]:
if hasattr(ctx.rule.attr, attr):
providers.extend([
x[NewAppleResourceInfo]
for x in getattr(ctx.rule.attr, attr)
if NewAppleResourceInfo in x
])
if providers:
# If any providers were collected, merge them.
return [resources.merge_providers(providers, default_owner = owner)]
return []
apple_resource_aspect = aspect(
implementation = _apple_resource_aspect_impl,
# TODO(kaipi): The aspect should also propagate through the data attribute.
attr_aspects = ["bundles", "deps"],
doc = """Aspect that collects and propagates resource information to be bundled by a top-level
bundling rule.""",
)
|
"""CNN-based text classification on SageMaker with TensorFlow and Keras"""
# Python Built-Ins:
import argparse
import os
# External Dependencies:
import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Conv1D, Dense, Dropout, Embedding, Flatten, MaxPooling1D
from tensorflow.keras.models import Sequential
###### Helper functions ############
def load_training_data(base_dir):
X_train = np.load(os.path.join(base_dir, "train_X.npy"))
y_train = np.load(os.path.join(base_dir, "train_Y.npy"))
return X_train, y_train
def load_testing_data(base_dir):
X_test = np.load(os.path.join(base_dir, "test_X.npy"))
y_test = np.load(os.path.join(base_dir, "test_Y.npy"))
return X_test, y_test
def load_embeddings(base_dir):
embedding_matrix = np.load(os.path.join(base_dir, "docs-embedding-matrix.npy"))
return embedding_matrix
def parse_args():
"""Acquire hyperparameters and directory locations passed by SageMaker"""
parser = argparse.ArgumentParser()
# Hyperparameters sent by the client are passed as command-line arguments to the script.
parser.add_argument("--epochs", type=int, default=1)
parser.add_argument("--learning_rate", type=float, default=0.001)
parser.add_argument("--num_classes", type=int, default=4)
parser.add_argument("--max_seq_len", type=int, default=40)
# Data, model, and output directories
parser.add_argument("--output-data-dir", type=str, default=os.environ.get("SM_OUTPUT_DATA_DIR"))
parser.add_argument("--model-dir", type=str, default=os.environ.get("SM_MODEL_DIR"))
parser.add_argument("--train", type=str, default=os.environ.get("SM_CHANNEL_TRAIN"))
parser.add_argument("--test", type=str, default=os.environ.get("SM_CHANNEL_TEST"))
parser.add_argument("--embeddings", type=str, default=os.environ.get("SM_CHANNEL_EMBEDDINGS"))
return parser.parse_known_args()
###### Main application ############
if __name__ == "__main__":
###### Parse input arguments ############
args, unknown = parse_args()
print(args)
###### Load data from input channels ############
X_train, y_train = load_training_data(args.train)
X_test, y_test = load_testing_data(args.test)
embedding_matrix = load_embeddings(args.embeddings)
###### Setup model architecture ############
model = Sequential()
model.add(Embedding(
embedding_matrix.shape[0], # Final vocabulary size
embedding_matrix.shape[1], # Word vector dimensions
weights=[embedding_matrix],
input_length=args.max_seq_len,
trainable=False,
name="embed",
))
model.add(Conv1D(filters=128, kernel_size=3, activation="relu", name="conv_1"))
model.add(MaxPooling1D(pool_size=5, name="maxpool_1"))
model.add(Flatten(name="flat_1"))
model.add(Dropout(0.3, name="dropout_1"))
model.add(Dense(128, activation="relu", name="dense_1"))
model.add(Dense(args.num_classes, activation="softmax", name="out_1"))
###### Compile the model ############
optimizer = tf.keras.optimizers.RMSprop(learning_rate=args.learning_rate)
model.compile(optimizer=optimizer, loss="binary_crossentropy", metrics=["acc"])
model.summary()
print("Training model")
model.fit(X_train, y_train, batch_size=16, epochs=args.epochs, verbose=2)
print("Evaluating model")
# TODO: Better differentiate train vs val loss in logs
scores = model.evaluate(X_test, y_test, verbose=2)
print(
"Validation results: "
+ "; ".join(map(
lambda i: f"{model.metrics_names[i]}={scores[i]:.5f}", range(len(model.metrics_names))
))
)
###### Save Keras model for TensorFlow Serving ############
print(f"------ save model to {os.path.join(args.model_dir, 'model/1/')}")
model.save(os.path.join(args.model_dir, "model/1"))
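
# How this script is typically launched (hedged sketch): a SageMaker TensorFlow
# estimator passes the hyperparameters parsed above as command-line arguments
# and maps the "train", "test" and "embeddings" channels to the SM_CHANNEL_*
# environment variables read in parse_args(). Role, S3 URIs, instance type and
# framework versions below are placeholders, not values from this project.
#
#   from sagemaker.tensorflow import TensorFlow
#   estimator = TensorFlow(
#       entry_point="train.py",
#       role="<execution-role-arn>",
#       instance_count=1,
#       instance_type="ml.m5.xlarge",
#       framework_version="2.3",
#       py_version="py37",
#       hyperparameters={"epochs": 5, "learning_rate": 0.001},
#   )
#   estimator.fit({
#       "train": "s3://<bucket>/train/",
#       "test": "s3://<bucket>/test/",
#       "embeddings": "s3://<bucket>/embeddings/",
#   })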
|
"""
We have a collection of stones, each stone has a positive integer weight.
Each turn, we choose the two heaviest stones and smash them together. Suppose the stones have weights x and y
with x <= y. The result of this smash is:
If x == y, both stones are totally destroyed;
If x != y, the stone of weight x is totally destroyed, and the stone of weight y has new weight y-x.
At the end, there is at most 1 stone left. Return the weight of this stone (or 0 if there are no stones left.)
Example 1:
Input: [2,7,4,1,8,1]
Output: 1
Explanation:
We combine 7 and 8 to get 1 so the array converts to [2,4,1,1,1] then,
we combine 2 and 4 to get 2 so the array converts to [2,1,1,1] then,
we combine 2 and 1 to get 1 so the array converts to [1,1,1] then,
we combine 1 and 1 to get 0 so the array converts to [1] then that's the value of last stone.
Note:
1. 1 <= stones.length <= 30
2. 1 <= stones[i] <= 1000
"""
from heapq import heappush, heappop
class Solution:
def lastStoneWeight(self, stones) -> int:
if len(stones) == 1:
return stones[0]
minheap = []
for x in stones:
heappush(minheap, -x)
while len(minheap) > 1:
heappush(minheap, heappop(minheap) - heappop(minheap))
return -minheap[0] if minheap else 0
def lastStoneWeight2(self, stones) -> int:
"""
without the module heapq
"""
while stones:
if len(stones) == 1:
return stones[0]
stones.sort()
stones.append(stones.pop() - stones.pop())
return 0
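
if __name__ == "__main__":
    # Quick self-check using the example from the problem statement above.
    solver = Solution()
    assert solver.lastStoneWeight([2, 7, 4, 1, 8, 1]) == 1
    assert solver.lastStoneWeight2([2, 7, 4, 1, 8, 1]) == 1
    print("last stone weight:", solver.lastStoneWeight([2, 7, 4, 1, 8, 1]))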
|
"""
Aula - 20
- Interactive Help
- docstrings
- Argumentos/ Parametros opcionais
- Escopo de variáveis
- Retorno de resultados
"""
# Interactive Help
# help(print)
# help(int)
# print(input.__doc__)
######################################################
# Docstrings
def contador(i, f, p):
"""
-> Faz uma contagem e mostra na tela
:param i: inicio da contagem
:param f: fim da contagem
:param p: passos da contagem
:return: sem retorno
"""
c = i
while c <= f:
print(f'{c} ', end='')
c = c + p
print('FIM!')
contador(1, 10, 1)
######################################################
# Optional arguments/parameters
def somar(a=0, b=0, c=0):
"""
-> Faz a soma de três valores e mostra o resultado na tela.
:param a: primeiro valor
:param b: segundo valor
:param c: terceiro valor
:return: sem retorno
"""
s = a + b + c
print(f'A soma vale {s}')
somar(3, 2, 5)
somar(3, 2)
somar()
######################################################
# Variable scope (local or global)
def teste():
x = 8
print(f'Na função teste, n vale {n}')
print(f'Na função teste, x vale {x}')
n = 2
print(f'No programa principal, n vale {n}')
teste()
# print(f'No programa principal, x vale {x}')  -> NameError: x is not defined here; it was created locally inside the function
######################################################
# Returning results
def soma(a=0, b=0, c=0):
s = a + b + c
return s
resp1 = soma(3, 2, 5)
resp2 = soma(1, 7)
resp3 = soma(4)
print(f'Meus cálculos deram {resp1}, {resp2}, {resp3}.')
|
from setuptools import setup, find_packages
import pyTGA
if __name__ == "__main__":
setup(
name="pyTGA",
version=pyTGA.VERSION,
description='A pure Python module to manage TGA images',
long_description='A pure Python module to manage TGA images',
# Author details
author='Mirco Tracolli',
author_email='[email protected]',
url='https://github.com/MircoT/pyTGA',
download_url='https://github.com/MircoT/pyTGA/releases/latest',
# Choose your license
license='MIT',
# https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
],
keywords='tga image development',
packages=find_packages(exclude=['contrib', 'docs', 'tests']),
install_requires=[],
extras_require={},
package_data={},
data_files=[],
entry_points={}
)
|
"""Class to extract metadata from a Data Table"""
import getpass
import psycopg2
import psycopg2.extras
from psycopg2 import sql
from . import settings
from . import extract_metadata_helper
class ExtractMetadata():
"""Class to extract metadata from a Data Table."""
def __init__(self, data_table_id):
"""Set Data Table ID and connect to database.
Args:
data_table_id (int): ID associated with this Data Table.
"""
self.data_table_id = data_table_id
self.metabase_connection_string = settings.metabase_connection_string
self.data_conn = psycopg2.connect(settings.data_connection_string)
self.data_conn.autocommit = True
self.data_cur = self.data_conn.cursor()
def process_table(self, categorical_threshold=10, type_overrides={},
date_format_dict={}):
"""Update the metabase with metadata from this Data Table."""
with psycopg2.connect(self.metabase_connection_string) as conn:
with conn.cursor() as cursor:
schema_name, table_name = self.__get_table_name(cursor)
self._get_table_level_metadata(cursor, schema_name, table_name)
self._get_column_level_metadata(
cursor,
schema_name,
table_name,
categorical_threshold,
type_overrides,
date_format_dict,
)
self.data_cur.close()
self.data_conn.close()
def _get_table_level_metadata(self, metabase_cur, schema_name, table_name):
"""Extract table level metadata and store it in the metabase.
Extract table level metadata (number of rows, number of columns and
file size (table size)) and store it in DataTable. Also set updated by
and date last updated.
Size is in bytes
"""
self.data_cur.execute(
sql.SQL('SELECT COUNT(*) as n_rows FROM {}.{};').format(
sql.Identifier(schema_name),
sql.Identifier(table_name),
)
)
n_rows = self.data_cur.fetchone()[0]
self.data_cur.execute(
sql.SQL("""
SELECT COUNT(*)
FROM INFORMATION_SCHEMA.COLUMNS
WHERE
TABLE_SCHEMA = %s
AND TABLE_NAME = %s
"""),
[schema_name, table_name]
)
n_cols = self.data_cur.fetchone()[0]
self.data_cur.execute(
sql.SQL('SELECT PG_RELATION_SIZE(%s);'),
[schema_name + '.' + table_name],
)
table_size = self.data_cur.fetchone()[0]
if n_rows == 0:
raise ValueError('Selected data table has 0 rows.')
# This will also capture n_cols == 0 and size == 0.
metabase_cur.execute(
"""
UPDATE metabase.data_table
SET
number_rows = %(n_rows)s,
number_columns = %(n_cols)s,
size = %(table_size)s,
updated_by = %(user_name)s,
date_last_updated = (SELECT CURRENT_TIMESTAMP)
WHERE data_table_id = %(data_table_id)s
;
""",
{
'n_rows': n_rows,
'n_cols': n_cols,
'table_size': table_size,
'user_name': getpass.getuser(),
'data_table_id': self.data_table_id,
}
)
# TODO: Update create_by and date_created
# https://github.com/chapinhall/adrf-metabase/pull/8#discussion_r265339190
def _get_column_level_metadata(
self, metabase_cur, schema_name, table_name, categorical_threshold,
type_overrides, date_format_dict):
"""Extract column level metadata and store it in the metabase.
Process columns one by one, identify or infer type, update Column Info
and corresponding column table.
"""
column_names = self.__get_column_names(schema_name, table_name)
for col_name in column_names:
column_results = self.__get_column_type(
schema_name,
table_name,
col_name,
categorical_threshold,
date_format_dict,
)
if col_name in type_overrides:
column_type = type_overrides[col_name]
if column_type in ['numeric', 'date']:
msg = ('Invalid type override. Column {} cannot be '
'converted to type {}').format(
col_name,
column_type)
raise ValueError(msg)
if column_type == 'text':
column_data = [str(i) for i in column_results.data]
else:
column_data = column_results.data
else:
column_type = column_results.type
column_data = column_results.data
if column_type == 'numeric':
self.__update_numeric_metadata(
metabase_cur,
col_name, column_data)
elif column_type == 'text':
self.__update_text_metadata(
metabase_cur,
col_name,
column_data)
elif column_type == 'date':
self.__update_date_metadata(
metabase_cur,
col_name,
column_data)
elif column_type == 'code':
self.__update_code_metadata(
metabase_cur,
col_name,
column_data)
else:
raise ValueError('Unknown column type')
def __get_column_names(self, schema_name, table_name):
"""Returns the names of the columns in the data table.
Returns:
            (list of str): Column names.
"""
self.data_cur.execute(
"""
SELECT column_name FROM INFORMATION_SCHEMA.COLUMNS
WHERE table_schema = %(schema)s
AND table_name = %(table)s;
""",
{
'schema': schema_name,
'table': table_name
},
)
columns = self.data_cur.fetchall()
return([c[0] for c in columns])
def __get_table_name(self, metabase_cur):
"""Return the the table schema and name using the Data Table ID.
Returns table name and schema name by looking up the Data Table ID in
the metabase. The table name and schema name will be used to query the
table itself.
Returns:
(str, str): (schema name, table name)
"""
metabase_cur.execute(
"""
SELECT file_table_name
FROM metabase.data_table
WHERE data_table_id = %(data_table_id)s;
""",
{'data_table_id': self.data_table_id},
)
result = metabase_cur.fetchone()
if result is None:
raise ValueError('data_table_id not found in metabase.data_table')
schema_name_table_name_tp = result[0].split('.')
if len(schema_name_table_name_tp) != 2:
raise ValueError('file_table_name is not in <schema>.<table> '
'format')
return schema_name_table_name_tp
def __get_column_type(self, schema_name, table_name, col,
categorical_threshold, date_format_dict):
"""Identify or infer column type.
Infers the column type.
Returns:
str: 'numeric', 'text', 'date' or 'code'
"""
column_data = extract_metadata_helper.get_column_type(
self.data_cur,
col,
categorical_threshold,
schema_name,
table_name,
date_format_dict,
)
return column_data
def __update_numeric_metadata(self, metabase_cur, col_name, col_data):
"""Extract metadata from a numeric column.
Extract metadata from a numeric column and store metadata in Column
Info and Numeric Column. Update relevant audit fields.
"""
extract_metadata_helper.update_numeric(
metabase_cur,
col_name,
col_data,
self.data_table_id,
)
def __update_text_metadata(self, metabase_cur, col_name, col_data):
"""Extract metadata from a text column.
Extract metadata from a text column and store metadata in Column Info
and Text Column. Update relevant audit fields.
"""
extract_metadata_helper.update_text(
metabase_cur,
col_name,
col_data,
self.data_table_id,
)
def __update_date_metadata(self, metabase_cur, col_name, col_data):
"""Extract metadata from a date column.
        Extract metadata from a date column and store metadata in Column Info and
Date Column. Update relevant audit fields.
"""
extract_metadata_helper.update_date(
metabase_cur,
col_name,
col_data,
self.data_table_id,
)
def __update_code_metadata(self, metabase_cur, col_name, col_data):
"""Extract metadata from a categorial column.
Extract metadata from a categorial columns and store metadata in Column
Info and Code Frequency. Update relevant audit fields.
"""
# TODO: modify categorical_threshold to take percentage arguments.
extract_metadata_helper.update_code(
metabase_cur,
col_name,
col_data,
self.data_table_id,
)
def export_table_metadata(self, output_filepath):
"""
Export GMETA (metadata in JSON format) for a processed table given
data_table_id.
"""
with psycopg2.connect(
self.metabase_connection_string
) as metabase_conn:
with metabase_conn.cursor(
cursor_factory=psycopg2.extras.DictCursor
) as metabase_cur:
table_gmeta_fields_dict = extract_metadata_helper.\
select_table_level_gmeta_fields(
metabase_cur,
self.data_table_id,
)
column_gmeta_fields_dict = extract_metadata_helper.\
select_column_level_gmeta_fields(
metabase_cur,
self.data_table_id,
)
extract_metadata_helper.export_gmeta_in_json(
table_gmeta_fields_dict,
column_gmeta_fields_dict,
output_filepath,
)
print('Exported GMETA to', output_filepath)
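
# A minimal usage sketch (hedged): it assumes the metabase and data-database
# connection strings in settings are configured and that the given
# data_table_id exists in metabase.data_table; the output path is a placeholder.
#
#   extractor = ExtractMetadata(data_table_id=1)
#   extractor.process_table(categorical_threshold=10)
#   extractor.export_table_metadata("table_1_gmeta.json")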
|
# Module to test Variables
#-------------------------------------------------------------------------------
import pytest
import math
import numpy as np
import scipy.stats
import sympy
import probayes as pb
from probayes import NEARLY_POSITIVE_INF as inf
from probayes import NEARLY_POSITIVE_ZERO as zero
#-------------------------------------------------------------------------------
LOG_TESTS = [(math.exp(1.),1.)]
INC_TESTS = [(3,4), (np.linspace(-3, 3, 7), np.linspace(-2, 4, 7))]
RAN_LOG_TESTS = [([(1,), (100,)], {100}), ([1, 100], {-100})]
#-------------------------------------------------------------------------------
def ismatch(x, y):
close = np.isclose(x, y)
if isinstance(x, np.ndarray) or isinstance(y, np.ndarray):
return np.all(close)
else:
return close
#-------------------------------------------------------------------------------
@pytest.mark.parametrize("inp,out", LOG_TESTS)
def test_log(inp, out):
# Sympy
x = pb.Variable('x', vtype=float, vset=[zero, inf])
x.set_ufun(sympy.log(~x))
output = x.ufun[0](inp)
assert ismatch(out, output), \
"Observed/expected match {}/{}".format(output, out)
output = x.ufun[-1](output)
assert ismatch(inp, output), \
"Observed/expected match {}/{}".format(output, inp)
# Numpy
y = pb.Variable('y', vtype=float, vset=[zero, inf])
y.set_ufun((np.log, np.exp))
output = y.ufun[0](inp)
assert ismatch(out, output), \
"Observed/expected match {}/{}".format(output, out)
output = y.ufun[-1](output)
assert ismatch(inp, output), \
"Observed/expected match {}/{}".format(output, inp)
#-------------------------------------------------------------------------------
@pytest.mark.parametrize("inp,out", INC_TESTS)
def test_inc(inp, out):
# Sympy
x = pb.Variable('x', vtype=float, vset=[zero, inf])
x.set_ufun(x[:]+1.)
output = x.ufun[0](inp)
assert ismatch(out, output), \
"Observed/expected match {}/{}".format(output, out)
output = x.ufun[-1](output)
assert ismatch(inp, output), \
"Observed/expected match {}/{}".format(output, inp)
# Numpy
y = pb.Variable('y', vtype=float, vset=[zero, inf])
y.set_ufun((lambda z: z+1, lambda z: z-1))
output = y.ufun[0](inp)
assert ismatch(out, output), \
"Observed/expected match {}/{}".format(output, out)
output = y.ufun[-1](output)
assert ismatch(inp, output), \
"Observed/expected match {}/{}".format(output, inp)
#-------------------------------------------------------------------------------
@pytest.mark.parametrize("ran,val", RAN_LOG_TESTS)
def test_ran(ran, val):
# Sympy
x = pb.Variable('x', vtype=float, vset=ran)
x.set_ufun(sympy.log(x[:]))
vals = x(val)[x.name]
assert np.max(vals) <= x.vlims[1] and np.min(vals) >= x.vlims[0]
# Numpy
y = pb.Variable('y', vtype=float, vset=ran)
y.set_ufun((np.log, np.exp))
vals = y(val)[y.name]
assert np.max(vals) <= x.vlims[1] and np.min(vals) >= x.vlims[0]
# Delta
steps = int(abs(list(val)[0]))
value = np.mean(vals)
y.set_delta([1], bound=True)
vals = np.empty(steps, dtype=float)
for i in range(steps):
vals[i] = y.apply_delta(value)
value = vals[i]
assert np.max(vals) <= x.vlims[1] and np.min(vals) >= x.vlims[0]
#-------------------------------------------------------------------------------
|
#!/usr/bin/env python
import unittest
import os
import json
from lib.plan import plan_index, plan_filepath
from test.lib.corrigible_test import CorrigibleTest
import lib.plan
script_dirpath = os.path.join(os.path.dirname(lib.plan.__file__), '..', 'test')
system_config_dirpath = os.path.join(script_dirpath,'resources','systems')
plans_config_dirpath = os.path.join(script_dirpath,'resources','plans')
files_config_dirpath = os.path.join(script_dirpath,'resources','files')
corrigible_exec_filepath = os.path.join(script_dirpath, '..', 'corrigible')
os.environ['CORRIGIBLE_SYSTEMS'] = system_config_dirpath
os.environ['CORRIGIBLE_PLANS'] = plans_config_dirpath
os.environ['CORRIGIBLE_FILES'] = files_config_dirpath
PLAYBOOK_FILEPATH__SYSTEMCONF_TEST = "/tmp/corrigible-test-output.yml"
HOSTS_FILEPATH__SYSTEMCONF_TEST = "/tmp/corrigible-test-hosts-output.hosts"
class TestSimpleSystemConfig(CorrigibleTest):
def setUp(self):
self.output_playbook_filepath = PLAYBOOK_FILEPATH__SYSTEMCONF_TEST
self.output_hostsfile_filepath = HOSTS_FILEPATH__SYSTEMCONF_TEST
self.corrigible_exec_filepath = corrigible_exec_filepath
def regen_test_hostsfile_gen_files(self, **kwargs):
"""re-run corrigible for the hostsfile generation test config"""
self.rerun_corrigible(system_config="test_hostsfile_generation",
generate_files_only=True)
def regen_test_simple_plans(self, **kwargs):
"""re-run corrigible for the simple plan test config"""
self.rerun_corrigible(system_config="test_simple_plans",
generate_files_only=True)
def test_system_config_output_files_exist(self):
"""test that the output files exist after rerunning corrigible using the hostsfile generation test system
config"""
self.regen_test_simple_plans()
self.assertTrue(os.path.isfile(self.output_playbook_filepath))
self.assertTrue(os.path.isfile(self.output_hostsfile_filepath))
def test_system_config_hosts_file_accurate(self):
"""test that the generated hosts file is accurate after rerunning corrigible using the hostsfile generation
test system config"""
self.regen_test_hostsfile_gen_files()
hostgroups = self.hosts_groups_from_file(self.output_hostsfile_filepath)
self.assertTrue('all' in hostgroups)
self.assertTrue(len(hostgroups) == 1)
lines = self.hostgroup_lines(self.output_hostsfile_filepath,'all')
self.assertTrue(len(lines) == 1)
self.assertTrue(lines[0] == "testhost ansible_ssh_host=1.2.3.4")
def test_plan_index(self):
"""test that the plan index method in the provision files lib is properly returning the index specified in
the plan filename"""
dt_index = plan_index('plans_test')
self.assertTrue(dt_index == 57)
d_index = plan_index('apt_upgrade')
self.assertTrue(d_index == 19)
def test_plan_filepath(self):
"""test that the plan filepath method in the provision files lib is properly returning the filepath to the
plan file"""
        dt_filepath = plan_filepath('plans_test')
computed_filepath = os.path.abspath(os.path.join(script_dirpath,'resources','plans','57_plans_test.plan.yml'))
self.assertTrue(dt_filepath == computed_filepath)
def test_parameter_substitution(self):
"""after re-running corrigible on the simple plans test system config, test that basic parameter
substitution is working"""
self.regen_test_simple_plans()
self.assertTrue(os.path.isfile(self.output_playbook_filepath))
self.assertTrue(os.path.isfile(self.output_hostsfile_filepath))
s = self.playbook_as_struct()
self.assertTrue(s[1]['user'] == os.environ['USER'])
self.assertTrue(s[1]['sudo'] == True)
def test_plan_ordering_by_index(self):
"""after re-running corrigible on the simple plans test system config, test that the plans are ordered
        as per the index indicated in each plan's filename"""
self.regen_test_simple_plans()
self.assertTrue(os.path.isfile(self.output_playbook_filepath))
self.assertTrue(os.path.isfile(self.output_hostsfile_filepath))
s = self.playbook_as_struct()
self.assertTrue('user' in s[2]['tasks'][0])
self.assertFalse('user' in s[4]['tasks'][0])
self.assertFalse('user' in s[6]['tasks'][0])
self.assertTrue('cron' in s[4]['tasks'][0])
self.assertFalse('cron' in s[2]['tasks'][0])
self.assertFalse('cron' in s[6]['tasks'][0])
self.assertTrue('apt' in s[6]['tasks'][0])
self.assertFalse('apt' in s[4]['tasks'][0])
self.assertFalse('apt' in s[2]['tasks'][0])
# files
self.assertTrue('copy' in s[1]['tasks'][0])
if __name__ == '__main__':
unittest.main()
|
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from . import imagedata_pb2 as imagedata__pb2
class PredictorStub(object):
"""python -m grpc_tools.protoc -I. --python_out=. --grpc_python_out=. imagedata.proto
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.GetPrediction = channel.unary_unary(
'/Predictor/GetPrediction',
request_serializer=imagedata__pb2.ImageData.SerializeToString,
response_deserializer=imagedata__pb2.PredictionClass.FromString,
)
class PredictorServicer(object):
"""python -m grpc_tools.protoc -I. --python_out=. --grpc_python_out=. imagedata.proto
"""
def GetPrediction(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_PredictorServicer_to_server(servicer, server):
rpc_method_handlers = {
'GetPrediction': grpc.unary_unary_rpc_method_handler(
servicer.GetPrediction,
request_deserializer=imagedata__pb2.ImageData.FromString,
response_serializer=imagedata__pb2.PredictionClass.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'Predictor', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
|
from asyncio import Future
import pytest
from pytest_mock import MockerFixture
from conftest import FooCommand
from protostar.cli.argument_parser_facade import ArgumentParserFacade
from protostar.cli.cli_app import CLIApp
from protostar.cli.command import Command
@pytest.mark.asyncio
async def test_command_run_method_was_called(
foo_command: FooCommand, mocker: MockerFixture
):
foo_command.run = mocker.MagicMock()
foo_command.run.return_value = Future()
foo_command.run.return_value.set_result(None)
cli = CLIApp(commands=[foo_command])
parser = ArgumentParserFacade(cli)
await cli.run(parser.parse([foo_command.name]))
foo_command.run.assert_called_once()
@pytest.mark.asyncio
async def test_fail_when_no_command_was_found(foo_command: FooCommand):
cli = CLIApp(
commands=[foo_command],
root_args=[Command.Argument(name="version", type="bool", description="...")],
)
parser = ArgumentParserFacade(cli)
await cli.run(parser.parse(["FOO"]))
with pytest.raises(CLIApp.CommandNotFoundError):
await cli.run(None)
|
# Copyright (c) 2017-2021 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
from concurrent.futures.thread import ThreadPoolExecutor
from typing import Generator
from dazl import testing
import pytest
DEFAULT_SDK_VERSION = "1.17.0"
@pytest.fixture(scope="session")
def sandbox() -> "Generator[str, None, None]":
"""
Run an instance of the Sandbox, or use one configured through environment variables.
Some environment variables change the nature of the Sandbox used in these tests:
* DAZL_TEST_DAML_LEDGER_URL: If set, it is assumed to be an already-running ledger, and we
merely return that URL instead of starting up our own sandbox. This is the way that the tests
run in CI.
* DAML_SDK_VERSION: If set AND DAZL_TEST_DAML_LEDGER_URL is not specified, this controls the
version of the Sandbox that is launched through this wrapper. This value can be overridden
to test dazl against newer (or older) versions of the SDK without making code changes:
```
DAML_SDK_VERSION=1.0.0 make test
```
"""
with testing.sandbox(project_root=None) as sb:
yield sb.url
@pytest.fixture()
def executor() -> "Generator[ThreadPoolExecutor, None, None]":
with ThreadPoolExecutor(3) as executor:
yield executor
|
import numpy as np
import pyqtgraph as pg
from acconeer.exptool import configs, utils
from acconeer.exptool.clients import SocketClient, SPIClient, UARTClient
from acconeer.exptool.pg_process import PGProccessDiedException, PGProcess
def main():
args = utils.ExampleArgumentParser(num_sens=1).parse_args()
utils.config_logging(args)
if args.socket_addr:
client = SocketClient(args.socket_addr)
elif args.spi:
client = SPIClient()
else:
port = args.serial_port or utils.autodetect_serial_port()
client = UARTClient(port)
sensor_config = get_sensor_config()
sensor_config.sensor = args.sensors
session_info = client.setup_session(sensor_config)
pg_updater = PGUpdater(sensor_config, None, session_info)
pg_process = PGProcess(pg_updater)
pg_process.start()
client.start_session()
interrupt_handler = utils.ExampleInterruptHandler()
print("Press Ctrl-C to end session")
while not interrupt_handler.got_signal:
info, data = client.get_next()
try:
pg_process.put_data(data)
except PGProccessDiedException:
break
print("Disconnecting...")
pg_process.close()
client.disconnect()
def get_sensor_config():
return configs.PowerBinServiceConfig()
class PGUpdater:
def __init__(self, sensor_config, processing_config, session_info):
self.session_info = session_info
self.smooth_max = utils.SmoothMax(sensor_config.update_rate)
def setup(self, win):
num_depths = self.session_info["bin_count"]
start = self.session_info["range_start_m"]
length = self.session_info["range_length_m"]
end = start + length
xs = np.linspace(start, end, num_depths * 2 + 1)[1::2]
bin_width = 0.8 * length / num_depths
self.plot = win.addPlot()
self.plot.setMenuEnabled(False)
self.plot.showGrid(x=True, y=True)
self.plot.setLabel("bottom", "Depth (m)")
self.plot.setLabel("left", "Amplitude")
self.plot.setXRange(start, end)
self.plot.setYRange(0, 1)
self.bar_graph = pg.BarGraphItem(
x=xs,
height=np.zeros_like(xs),
width=bin_width,
brush=pg.mkBrush(utils.color_cycler()),
)
self.plot.addItem(self.bar_graph)
def update(self, data):
self.bar_graph.setOpts(height=data)
self.plot.setYRange(0, self.smooth_max.update(data))
if __name__ == "__main__":
main()
|
# Provides a web interface for ipcampy
# Andrea Masi 2014 [email protected]
import datetime
from flask import Flask, render_template, send_from_directory, redirect, session, request, url_for
from utils import list_snapshots_days, list_snapshots_hours, list_snapshots_for_a_minute
app = Flask("ipcamweb")
USERNAME = "watcher"
def is_authenticated():
user = session.get('username')
paswd = session.get('password')
if user == USERNAME and paswd == app.PASSWORD:
is_authenticated = True
else:
is_authenticated = False
return is_authenticated
@app.route("/")
def main():
if is_authenticated():
return render_template("main.html", cams=app.cams)
else:
return redirect(url_for('login'))
@app.route("/cam/<int:cam_index>")
def cam_detail(cam_index):
days = list_snapshots_days(app.store_path, app.cams[cam_index - 1].cam_id)
return render_template("cam_detail.html", cam_index=cam_index, days=days, cam=app.cams[cam_index - 1])
@app.route("/screenshots/<int:cam_index>/<day>")
def screenshots_details(cam_index, day):
hours = list_snapshots_hours(app.store_path, app.cams[cam_index - 1].cam_id, day)
return render_template(
"screenshots_details.html",
day=(datetime.datetime.strptime(day, "%d%m%Y").strftime('%d/%m/%y'), day),
cam_index=cam_index,
hours=hours,
cam=app.cams[cam_index - 1],
)
@app.route("/view-screenshots/<int:cam_index>/<day>/<hourm>")
def view_screenshots(cam_index, day, hourm):
screenshots = list_snapshots_for_a_minute(
app.store_path,
app.cams[cam_index - 1].cam_id,
day,
hourm,
)
return render_template(
"view_screenshots.html",
cam_index=cam_index,
day=day,
date=datetime.datetime.strptime(day, "%d%m%Y").strftime('%d/%m/%y'),
hourm=hourm,
screenshots=screenshots,
cam=app.cams[cam_index - 1],
)
@app.route("/view-screenshot/<int:cam_index>/<day>/<hourm>/<screenshot>")
def view_screenshot(cam_index, day, hourm, screenshot):
return render_template(
"view_screenshot.html",
cam_index=cam_index,
day=day,
hourm=hourm,
screenshot=screenshot,
cam=app.cams[cam_index - 1],
)
@app.route("/get-image/<int:cam_index>/<day>/<hour>/<image>")
def get_image(cam_index, image, day, hour):
serve_path = app.store_path+"/"+app.cams[cam_index - 1].cam_id+"/"+day+"/"+hour
return send_from_directory(serve_path, image)
@app.route("/login", methods=['GET', 'POST'])
def login():
if is_authenticated():
return redirect(url_for('main'))
if request.method == 'POST':
session['username'] = request.form['username']
session['password'] = request.form['password']
if is_authenticated():
return redirect(url_for('main'))
return render_template("login.html")
@app.route("/logout")
def logout():
session.pop('username', None)
session.pop('password', None)
return redirect(url_for('login'))
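# Launch sketch (illustrative only; the attribute names below are the ones the
# views above rely on, but the real wiring is done by the ipcampy package):
#
#     app.PASSWORD = "secret"
#     app.cams = [...]                    # ipcampy camera objects with a cam_id
#     app.store_path = "/path/to/snapshots"
#     app.secret_key = "change-me"        # required for Flask sessions
#     app.run(host="0.0.0.0", port=5000)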
|
#!/usr/bin/env python
"""
This runs a command and compares output to a known file over
a given line range.
"""
from __future__ import print_function
import subprocess
import argparse
import os
#pylint: disable=invalid-name
parser = argparse.ArgumentParser(
    description='Run a command and compare its output to a known file over a given line range.')
parser.add_argument('--command', required=True)
parser.add_argument('--stdin', required=True)
parser.add_argument('--stdout', required=True)
parser.add_argument('--check', required=True)
parser.add_argument('--start', default=0)
parser.add_argument('--stop', default=-1)
args = parser.parse_args()
d = os.path.dirname(args.stdout)
if not os.path.exists(d):
os.makedirs(d)
with open(args.stdout, 'w') as outfile:
with open(args.stdin, 'r') as infile:
proc = subprocess.Popen(
args.command, stdout=outfile, stdin=infile)
proc.communicate()
i_start = int(args.start)
i_stop = int(args.stop)
with open(args.stdout, 'r') as outfile:
    out_contents = outfile.readlines()
out_contents = "".join(out_contents[i_start:i_stop])
with open(args.check, 'r') as checkfile:
    check_contents = checkfile.readlines()
check_contents = "".join(check_contents[i_start:i_stop])
if (out_contents != check_contents):
print("output:\n", out_contents)
print("check:\n", check_contents)
exit(1)
exit(0)
# vim: set et ft=python fenc= ff=unix sts=4 sw=4 ts=4 :
|
import pandas as pd
import os
import matplotlib.pyplot as plt
import numpy as np
def extract_single_network_edgelist(network_path):
print(network_path)
ep_network = pd.read_csv(network_path)
ep_network.columns.values[0] = "Plant"
ep_network_edgelist = ep_network.melt(id_vars=["Plant"],
var_name="Pollinator",
value_name="Value")
ep_network_edgelist["Value"] = pd.to_numeric(ep_network_edgelist['Value'], errors='coerce')
ep_network_edgelist = ep_network_edgelist[ep_network_edgelist["Value"] > 0]
return ep_network_edgelist
def extract_folder_networks(network_folder,source,network_type):
networks_edgelist = pd.DataFrame()
networks_files = [os.path.join(network_folder, f) for f in os.listdir(network_folder) if
os.path.isfile(os.path.join(network_folder, f))]
for i,network_path in enumerate(networks_files):
if not "reference" in network_path:
curr_network_edgelist = extract_single_network_edgelist(network_path)
curr_network_edgelist["source"] = source
curr_network_edgelist["network_type"] = network_type
curr_network_edgelist["network_path"] = network_path
networks_edgelist = pd.concat([networks_edgelist,curr_network_edgelist], ignore_index= True)
return networks_edgelist
def main():
networks_folder_path = '/Users/noa/Workspace/networks'
name_resolution_file = os.path.join(networks_folder_path,'processed_resolved_names.csv')
name_resolution = pd.read_csv(name_resolution_file)
networks_edgelists_path = "All_networks_edgelists.csv"
if not os.path.exists(networks_edgelists_path):
network_sources_folders = {o:os.path.join(networks_folder_path, o) for o in os.listdir(networks_folder_path) if os.path.isdir(os.path.join(networks_folder_path, o))}
print(network_sources_folders)
all_networks_edgelists = pd.DataFrame()
for source in network_sources_folders:
binary_networks_folder = os.path.join(network_sources_folders[source],'binary')
binary_network_edgelists = extract_folder_networks(binary_networks_folder,source, network_type = "binary")
weighted_networks_folder = os.path.join(network_sources_folders[source],'weighted')
weighted_network_edgelists = extract_folder_networks(weighted_networks_folder,source, network_type = "weighted")
all_networks_edgelists = pd.concat([all_networks_edgelists,binary_network_edgelists,weighted_network_edgelists], ignore_index= True)
print(all_networks_edgelists)
all_networks_edgelists.to_csv(networks_edgelists_path)
else:
all_networks_edgelists = pd.read_csv(networks_edgelists_path)
all_networks_edgelists["Plant"] = all_networks_edgelists["Plant"].str.lower().str.replace("_"," ")
all_networks_edgelists = pd.merge(all_networks_edgelists, name_resolution, how='left', left_on=['Plant'],
right_on=['query'])
all_networks_edgelists = all_networks_edgelists[["source","network_type","network_path","Plant", "matched_name", "genus", "family", "Pollinator", "Value"]]
all_networks_edgelists = all_networks_edgelists.astype(
{'matched_name': 'string', 'genus': 'string', 'family': 'string', 'Pollinator': 'string', 'Plant':'string','network_path':'string', 'source':'string','network_type': 'string'})
all_networks_edgelists[["matched_name", "genus", "family", "Pollinator","Plant","network_path","network_type","source"]] = all_networks_edgelists[
["matched_name", "genus", "family", "Pollinator","Plant","network_path","network_type","source"]].fillna('unknown')
all_networks_edgelists.to_csv("All_networks_edgelists_named.csv")
#unique_plants = (all_networks_edgelists[["Plant","matched_name"]]).drop_duplicates()
#print(f"Percentage of known plants: {np.mean(all_networks_edgelists['matched_name']!='unknown')}")
# network_level_d = {'number_of_plants': len(np.unique(ep_network_edgelist["matched_name"])), 'number_of_pollinators': len(np.unique(ep_network_edgelist["Pollinator"])),
# 'total_number_of_visits': (np.sum(ep_network_edgelist["Value"])),
# 'total_number_of_interactions': len(ep_network_edgelist.index),
# 'pct_unknown_names' : np.mean(ep_network_edgelist["matched_name"]=="unknown") }
#
if __name__ == '__main__':
main()
|
import numpy as np
from osgeo import gdal
import matplotlib.pyplot as plt
import os
dpath = os.path.dirname(__file__)
class BiInterpolator:
'''Bilinear interpolation in 2D. The code is modified from mpl_toolkits.basemap.interp and scipy.interpolate'''
def __init__(self, xin, yin, datain):
'''Setting up the interpolator.
.. Args:
* xin -> Monotonic array of x coordinates
* yin -> Monotonic array of y coordinates
* datain -> 2D array corresponding to (y,x) '''
if xin.shape[0] != datain.shape[1]:
raise ValueError('Shapes of datain and x do not match')
if yin.shape[0] != datain.shape[0]:
raise ValueError('Shapes of datain and y do not match')
if xin[-1] < xin[0]:
raise ValueError('Array x not sorted')
if yin[-1] < yin[0]:
raise ValueError('Array y not sorted')
self.xin = xin.copy()
self.yin = yin.copy()
delx = xin[1:] - xin[0:-1]
dely = yin[1:] - yin[0:-1]
if max(delx)-min(delx) < 1.e-4 and max(dely)-min(dely) < 1.e-4:
self.regular = True
else:
self.regular = False
self.xinlist = self.xin.tolist()
self.yinlist = self.yin.tolist()
self.nx = len(self.xinlist)
self.ny = len(self.yinlist)
self.zin = datain
def __call__(self,xi,yi):
'''Function call to actually interpolate.'''
if xi.shape != yi.shape:
raise ValueError('xi and yi must have same shape.')
if self.regular:
xcoords = (self.nx-1)*(xi-self.xin[0])/(self.xin[-1]-self.xin[0])
ycoords = (self.ny-1)*(yi-self.yin[0])/(self.yin[-1]-self.yin[0])
else:
xiflat = xi.flatten()
yiflat = yi.flatten()
ix = (np.searchsorted(self.xin,xiflat)-1).tolist()
iy = (np.searchsorted(self.yin,yiflat)-1).tolist()
xiflat = xiflat.tolist()
yiflat = yiflat.tolist()
xin = self.xinlist
yin = self.yinlist
xcoords = []
ycoords = []
for n,i in enumerate(ix):
if i < 0:
xcoords.append(-1)
elif i >= self.nx-1:
xcoords.append(self.nx)
else:
xcoords.append(float(i)+(xiflat[n]-xin[i])/(xin[i+1]-xin[i]))
for m,j in enumerate(iy):
if j < 0:
ycoords.append(-1)
elif j >= self.ny-1:
ycoords.append(self.ny)
else:
ycoords.append(float(j)+(yiflat[m]-yin[j])/(yin[j+1]-yin[j]))
xcoords = np.reshape(xcoords, xi.shape)
ycoords = np.reshape(ycoords, yi.shape)
xcoords = np.clip(xcoords,0,self.nx-1)
ycoords = np.clip(ycoords,0,self.ny-1)
xint = xcoords.astype(np.int32)
yint = ycoords.astype(np.int32)
xip1 = np.clip(xint+1,0,self.nx-1)
yip1 = np.clip(yint+1,0,self.ny-1)
delx = xcoords - xint.astype(np.float32)
dely = ycoords - yint.astype(np.float32)
zin = self.zin
dataout = (1.-delx)*(1.-dely)*zin[yint,xint] + \
delx*dely*zin[yip1,xip1] + \
(1.-delx)*dely*zin[yip1,xint] + \
delx*(1.-dely)*zin[yint,xip1]
return dataout
class EGM96reader:
'''Create a default reader. Can use it with lon/ lat as inputs.'''
def __init__(self):
#####Read the metadata for the raster file
egm96full = gdal.Open('%s/egm96-5.pgm'%(dpath))
mdict = egm96full.GetMetadata_Dict()
        zscale = float(mdict['Scale'])   # builtin float; np.float was removed in NumPy 1.24+
        zoff = float(mdict['Offset'])
#########Get the geo-coordinates
(x0,delx,dum0,y0,dum1,dely) = egm96full.GetGeoTransform()
xsize = egm96full.RasterXSize
ysize = egm96full.RasterYSize
########Note that we set up data for (lon, -lat)
xin = x0 + delx*np.arange(xsize)
yin = y0 + dely*np.arange(ysize)
yin = -1.0*yin #Order reversed
zin = egm96full.ReadAsArray(0,0,xsize,ysize)
zin =zin*zscale + zoff
self.oper = BiInterpolator(xin, yin, zin)
def __call__(self, xi, yi):
#Call the object with lon and lat
yinp = -yi
xinp = xi.copy()
xinp[xinp<0] += 360.0
return self.oper(xinp,yinp)
def test(self):
Datin = np.loadtxt('%s/GeoidHeights.dat'%(dpath))
lat = -1.0*Datin[:,0] #Multiply by -1
lon = Datin[:,1]
lon[lon<0] +=360.0
testh = Datin[:,3]
res = self.oper(lon,lat)
err = res-testh
print('All errors are reported in meters.')
print('**********************************')
print('Maximum Error: ', np.max(err))
print('Minimum Error: ', np.min(err))
print('Standard deviation: ', np.std(err))
def plot(self):
plt.imshow(self.oper.zin,extent=[0,360,-90,90])
plt.colorbar()
plt.title('Geoid Offset in meters')
plt.xlabel('Longitude')
plt.ylabel('Latitude')
plt.show()
# res = reader(360.0+np.array([-118.0,-118.0]), -1.0*np.array([35.0,35.0]))
# print res
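# Minimal BiInterpolator usage sketch (added for illustration; the grid and
# values below are made up): bilinearly interpolate a 3x3 surface z = x + 10*y.
if __name__ == '__main__':
    xg = np.array([0.0, 1.0, 2.0])
    yg = np.array([0.0, 1.0, 2.0])
    zg = xg[None, :] + 10.0 * yg[:, None]          # datain indexed as (y, x)
    interp = BiInterpolator(xg, yg, zg)
    # Expect [5.5, 16.5] since the surface is linear in x and y.
    print(interp(np.array([0.5, 1.5]), np.array([0.5, 1.5])))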
|
#! /usr/bin/python2.2
# Copyright (C) 2002 by Martin Pool <[email protected]>
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version
# 2 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
# Populate a tree with pseudo-randomly distributed files to test
# rsync.
from __future__ import generators
import random, string, os, os.path
nfiles = 10000
depth = 5
n_children = 20
n_files = 20
n_symlinks = 10
name_chars = string.digits + string.letters
abuffer = 'a' * 1024
def random_name_chars():
a = ""
for i in range(10):
a = a + random.choice(name_chars)
return a
def generate_names():
n = 0
while 1:
yield "%05d_%s" % (n, random_name_chars())
n += 1
class TreeBuilder:
def __init__(self):
self.n_children = 20
self.n_files = 100
self.total_entries = 100000 # long(1e8)
self.actual_size = 0
self.name_gen = generate_names()
self.all_files = []
self.all_dirs = []
self.all_symlinks = []
def random_size(self):
return random.lognormvariate(4, 4)
def random_symlink_target(self):
what = random.choice(['directory', 'file', 'symlink', 'none'])
try:
if what == 'directory':
return random.choice(self.all_dirs)
elif what == 'file':
return random.choice(self.all_files)
elif what == 'symlink':
return random.choice(self.all_symlinks)
elif what == 'none':
return self.name_gen.next()
except IndexError:
return self.name_gen.next()
def can_continue(self):
self.total_entries -= 1
return self.total_entries > 0
def build_tree(self, prefix, depth):
"""Generate a breadth-first tree"""
for count, function in [[n_files, self.make_file],
[n_children, self.make_child_recurse],
[n_symlinks, self.make_symlink]]:
for i in range(count):
if not self.can_continue():
return
name = os.path.join(prefix, self.name_gen.next())
function(name, depth)
def print_summary(self):
print "total bytes: %d" % self.actual_size
def make_child_recurse(self, dname, depth):
if depth > 1:
self.make_dir(dname)
self.build_tree(dname, depth-1)
def make_dir(self, dname, depth='ignore'):
print "%s/" % (dname)
os.mkdir(dname)
self.all_dirs.append(dname)
def make_symlink(self, lname, depth='ignore'):
print "%s -> %s" % (lname, self.random_symlink_target())
def make_file(self, fname, depth='ignore'):
size = long(self.random_size())
print "%-70s %d" % (fname, size)
f = open(fname, 'w')
f.truncate(size)
self.fill_file(f, size)
self.all_files.append(fname)
self.actual_size += size
def fill_file(self, f, size):
while size > 0:
f.write(abuffer[:size])
size -= len(abuffer)
tb = TreeBuilder()
tb.build_tree('/tmp/foo', 3)
tb.print_summary()
|
from fjell.app import Application
app = Application(__name__, debug=True)
app.config.update(
{
"db": "sqlite:////tmp/db.sqlite3",
}
)
app.add_plugin("fjell.plugins.sqla")
app.add_plugin("fjell.plugins.jinja2")
app.add_routes(
[
("GET", "/", "example.views.index"),
("GET", "/template", "example.views.TemplateView"),
(("GET", "POST"), "/hello", "example.views.JsonViewSet"),
]
)
app.add_route_set("/users", "example.views.UsersViewSet")
|
# https://leetcode.com/problems/text-justification/
from typing import List


class Solution:
def fullJustify(self, words: List[str], maxWidth: int) -> List[str]:
return self.combineWords(self.groupWords(words, maxWidth), maxWidth)
def groupWords(self, words: List[str], maxWidth: int) -> List[str]:
groups = []
curSentenceLength = 0
curGroup = []
for i, word in enumerate(words):
length = len(word)
if (curSentenceLength + length + len(curGroup)) > maxWidth:
groups.append((curGroup, curSentenceLength))
curGroup = []
curSentenceLength = 0
curSentenceLength += length
curGroup.append(word)
if curGroup:
groups.append((curGroup, curSentenceLength))
return groups
def combineWords(self, groups, maxWidth):
sentences = []
for i in range(len(groups) - 1):
group, length = groups[i]
wordCount = len(group)
wordGaps = wordCount - 1
remainingSpaces = maxWidth - length
spaceAllocation = [0] * wordCount
spaceIndex = 0
if wordCount == 1:
spaceAllocation[0] = remainingSpaces
else:
while remainingSpaces > 0:
spaceAllocation[spaceIndex] += 1
remainingSpaces -= 1
spaceIndex = (spaceIndex + 1) % wordGaps
curSentence = ""
for i in range(wordCount):
curSentence += group[i] + " " * spaceAllocation[i]
sentences.append(curSentence)
lastSentence = " ".join(groups[-1][0])
lastSentence += " " * (maxWidth - len(lastSentence))
sentences.append(lastSentence)
return sentences
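# Usage sketch (added for illustration): the classic LeetCode example, which
# should yield three fully justified 16-character lines.
if __name__ == "__main__":
    words = ["This", "is", "an", "example", "of", "text", "justification."]
    for line in Solution().fullJustify(words, 16):
        print(repr(line))   # 'This    is    an', 'example  of text', 'justification.  '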
|
__version__ = "1.0.0"
from protocol_lib.collection import ICollection
from protocol_lib.container import IContainer
from protocol_lib.hashable import IHashable
from protocol_lib.iterable import IIterable, IIterator, IReversible
from protocol_lib.mapping import IMapping
from protocol_lib.sequence import IMutableSequence, ISequence
from protocol_lib.sized import ISized
__all__ = [
"ICollection",
"IContainer",
"IHashable",
"IIterable",
"IIterator",
"IMapping",
"IMutableSequence",
"IReversible",
"ISequence",
"ISized",
]
|
# https://oj.leetcode.com/problems/jump-game/
class Solution:
# @param A, a list of integers
# @return a boolean
def canJump(self, A):
if len(A) == 0:
return False
table = [False] * len(A)
# stand on the first piece
table[0] = True
for i in xrange(0, len(A)-1):
if not table[i]:
continue
for j in xrange(A[i], 0, -1):
next = i + j
if next >= len(A) - 1:
return True
# If table[next] == True,
# table[i] ... table[next] should be true and skip it
if table[next]:
break
table[next] = True
return table[-1]
s = Solution()
print s.canJump([2,3,1,1,4]), True
print s.canJump([3,2,1,0,4]), False
|
###########################################
# A structure for storing Finch configuration
###########################################
from Finch_constants import Finch_constants as FS
class Finch_config:
# How should we define the states?
# As integers assigned to constant names in an enum?
# Initializes a default config.
def __init__(self):
# Domain
self.dimension = 1
self.geometry = FS.SQUARE
self.mesh_type = FS.TREE
# FEM details
self.solver_type = FS.DG
self.basis_type = FS.NODAL
self.trial_function = FS.LEGENDRE
self.test_function = FS.LEGENDRE
self.basis_nodes = FS.LOBATTO
self.p_adaptive = True
self.basis_order_min = 4
self.basis_order_max = 8
# Variables
# Maybe info about variables and boundaries should be
# in problem specification rather than here.
# Other solver details
# These are some basic examples, there are more needed.
self.linear = True
self.t_adaptive = True
self.stepper = FS.RK4
self.linalg_matrixfree = False
self.linalg_backend = FS.PETSC
self.output_format = FS.VTK
|
#!/usr/bin/env python
import os
import time
import cv2
import numpy as np
import pybullet as p
import matplotlib.pyplot as plt
import tensorflow as tf; tf.compat.v1.enable_eager_execution()
from ravens.models import Attention, Regression
from ravens import cameras
from ravens import utils
class RegressionAgent:
def __init__(self, name, task):
self.name = name
self.task = task
self.total_iter = 0
self.pixel_size = 0.003125
self.input_shape = (320, 160, 6)
self.camera_config = cameras.RealSenseD415.CONFIG
self.bounds = np.array([[0.25, 0.75], [-0.5, 0.5], [0, 0.28]])
self.total_iter = 0
# A place to save pre-trained models.
self.models_dir = os.path.join('checkpoints', self.name)
if not os.path.exists(self.models_dir):
os.makedirs(self.models_dir)
# Set up model.
self.optim = tf.keras.optimizers.Adam(lr=1e-2)
self.metric = tf.keras.metrics.Mean(name='metric')
self.batch_size = 4
def show_images(self, colormap, heightmap):
import matplotlib.pyplot as plt
plt.imshow(colormap)
plt.show()
plt.imshow(heightmap)
plt.show()
def train(self, dataset, num_iter, writer):
"""Train on dataset for a specific number of iterations."""
@tf.function
def pick_train_step(model, optim, in_tensor, yxtheta, loss_criterion):
with tf.GradientTape() as tape:
output = model(in_tensor)
loss = loss_criterion(yxtheta, output)
grad = tape.gradient(loss, model.trainable_variables)
optim.apply_gradients(
zip(grad, model.trainable_variables))
return loss
@tf.function
def place_train_step(model, optim, in_tensor, yxtheta, loss_criterion):
with tf.GradientTape() as tape:
output = model(in_tensor)
loss = loss_criterion(yxtheta, output)
grad = tape.gradient(loss, model.trainable_variables)
optim.apply_gradients(
zip(grad, model.trainable_variables))
return loss
for i in range(num_iter):
start = time.time()
input_images, p0s, p0_thetas = [], [], []
p1s, p1_thetas = [], []
for _ in range(self.batch_size):
obs, act, info = dataset.random_sample()
# Get heightmap from RGB-D images.
configs = act['camera_config']
colormap, heightmap = self.get_heightmap(obs, configs)
#self.show_images(colormap, heightmap)
# Get training labels from data sample.
# (spatially distributed on object) get actions from oracle distribution
#pose0, pose1 = act['params']['pose0'], act['params']['pose1']
# (identical object location) get actions from object poses
l_object = info[4]
pose0 = l_object[0], l_object[1]
l_target = info[5]
pose1 = l_target[0], l_target[1]
p0_position, p0_rotation = pose0[0], pose0[1]
p0 = utils.position_to_pixel(p0_position, self.bounds, self.pixel_size)
p0_theta = -np.float32(p.getEulerFromQuaternion(p0_rotation)[2])
p1_position, p1_rotation = pose1[0], pose1[1]
p1 = utils.position_to_pixel(p1_position, self.bounds, self.pixel_size)
p1_theta = -np.float32(p.getEulerFromQuaternion(p1_rotation)[2])
# to make it relative
# p1_theta = p1_theta - p0_theta
# p0_theta = 0
p1_xytheta = np.array([p1_position[0], p1_position[1], p1_theta])
# Concatenate color with depth images.
# input_image = np.concatenate((colormap,
# heightmap[..., None],
# heightmap[..., None],
# heightmap[..., None]), axis=2)
input_image = colormap
input_images.append(input_image)
p0s.append(p0)
p0_thetas.append(p0_theta)
p1s.append(p1)
p1_thetas.append(p1_theta)
input_image = np.array(input_images)
p0 = np.array(p0s)
p0_theta = np.array(p0_thetas)
p1 = np.array(p1s)
p1_theta = np.array(p1_thetas)
            # Compute train loss - regression pick
loss0 = self.pick_regression_model.train_pick(input_image, p0, p0_theta, pick_train_step)
with writer.as_default():
tf.summary.scalar('pick_loss', self.pick_regression_model.metric.result(),
step=self.total_iter+i)
# Compute train loss - regression place
loss1 = self.place_regression_model.train_pick(input_image, p1, p1_theta, place_train_step)
with writer.as_default():
tf.summary.scalar('place_loss', self.place_regression_model.metric.result(),
step=self.total_iter+i)
#loss1 = 0.0
print(f'Train Iter: {self.total_iter + i} Loss: {loss0:.4f} {loss1:.4f} Iter time:', time.time() - start)
self.total_iter += num_iter
self.save()
def act(self, obs, info):
"""Run inference and return best action given visual observations."""
self.pick_regression_model.set_batch_size(1)
self.place_regression_model.set_batch_size(1)
act = {'camera_config': self.camera_config, 'primitive': None}
if not obs:
return act
# Get heightmap from RGB-D images.
colormap, heightmap = self.get_heightmap(obs, self.camera_config)
# Concatenate color with depth images.
# input_image = np.concatenate((colormap,
# heightmap[..., None],
# heightmap[..., None],
# heightmap[..., None]), axis=2)
input_image = colormap[None, ...]
# Regression pick model
p0_yxtheta = self.pick_regression_model.forward(input_image)[0] # unbatch
p0_pixel = [int(p0_yxtheta[0]), int(p0_yxtheta[1])]
p0_theta = p0_yxtheta[2]
# Regression place model
p1_yxtheta = self.place_regression_model.forward(input_image)[0] # unbatch
p1_pixel = [int(p1_yxtheta[0]), int(p1_yxtheta[1])]
p1_theta = p1_yxtheta[2]
# make sure safe:
if p1_pixel[0] < 0:
p1_pixel[0] = 0
if p1_pixel[0] > 319:
p1_pixel[0] = 319
if p1_pixel[1] < 0:
p1_pixel[1] = 0
if p1_pixel[1] > 159:
p1_pixel[1] = 159
# Pixels to end effector poses.
p0_position = utils.pixel_to_position(p0_pixel, heightmap, self.bounds, self.pixel_size)
p1_position = utils.pixel_to_position(p1_pixel, heightmap, self.bounds, self.pixel_size)
p0_rotation = p.getQuaternionFromEuler((0, 0, -p0_theta))
p1_rotation = p.getQuaternionFromEuler((0, 0, -p1_theta))
act['primitive'] = 'pick_place'
if self.task == 'sweeping':
act['primitive'] = 'sweep'
elif self.task == 'pushing':
act['primitive'] = 'push'
params = {'pose0': (p0_position, p0_rotation),
'pose1': (p1_position, p1_rotation)}
act['params'] = params
self.pick_regression_model.set_batch_size(self.batch_size)
self.place_regression_model.set_batch_size(self.batch_size)
return act
#-------------------------------------------------------------------------
# Helper Functions
#-------------------------------------------------------------------------
def preprocess(self, image):
"""Pre-process images (subtract mean, divide by std).
image shape: [B, H, W, C]
"""
color_mean = 0.18877631
depth_mean = 0.00509261
color_std = 0.07276466
depth_std = 0.00903967
image[:, :, :, :3] = (image[:, :, :, :3] / 255 - color_mean) / color_std
#image[:, :, :, 3:] = (image[:, :, :, 3:] - depth_mean) / depth_std
return image
def get_heightmap(self, obs, configs):
"""Reconstruct orthographic heightmaps with segmentation masks."""
heightmaps, colormaps = utils.reconstruct_heightmaps(
obs['color'], obs['depth'], configs, self.bounds, self.pixel_size)
colormaps = np.float32(colormaps)
heightmaps = np.float32(heightmaps)
# Fuse maps from different views.
valid = np.sum(colormaps, axis=3) > 0
repeat = np.sum(valid, axis=0)
repeat[repeat == 0] = 1
colormap = np.sum(colormaps, axis=0) / repeat[..., None]
colormap = np.uint8(np.round(colormap))
heightmap = np.max(heightmaps, axis=0)
return colormap, heightmap
def load(self, num_iter):
pass
def save(self):
pass
class PickThenPlaceRegressionAgent(RegressionAgent):
def __init__(self, name, task):
super().__init__(name, task)
self.pick_regression_model = Regression(input_shape=self.input_shape,
preprocess=self.preprocess)
self.pick_regression_model.set_batch_size(self.batch_size)
self.place_regression_model = Regression(input_shape=self.input_shape,
preprocess=self.preprocess)
self.place_regression_model.set_batch_size(self.batch_size) |
import pickle
import numpy as np
import matplotlib.pyplot as plt
import keras
from keras.models import load_model
import random
plt.switch_backend('agg')
f=open("./id_to_data","rb+")
data=pickle.load(f)
f=open("./id_to_box","rb+")
box=pickle.load(f)
f=open("./id_to_size","rb+")
size=pickle.load(f)
index=[i for i in range(11788)]
index=random.sample(index,100)
model=keras.models.load_model("./model.h5")
result=model.predict(data[index,:,:,:])
mean=[0.485,0.456,0.406]
std=[0.229,0.224,0.225]
j=0
for i in index:
print("Predicting "+str(i)+"th image.")
true_box=box[i]
image=data[i]
prediction=result[j]
j+=1
for channel in range(3):
image[:,:,channel]=image[:,:,channel]*std[channel]+mean[channel]
image=image*255
image=image.astype(np.uint8)
plt.imshow(image)
plt.gca().add_patch(plt.Rectangle((true_box[0],true_box[1]),true_box[2],true_box[3],fill=False,edgecolor='red',linewidth=2,alpha=0.5))
plt.gca().add_patch(plt.Rectangle((prediction[0]*224,prediction[1]*224),prediction[2]*224,prediction[3]*224,fill=False,edgecolor='green',linewidth=2,alpha=0.5))
plt.show()
plt.savefig("./prediction/"+str(i)+".png")
plt.cla()
|
from django.urls import path
from .views import (
list_hackathons,
create_hackathon,
update_hackathon,
delete_hackathon,
enroll_toggle,
judging,
check_projects_scores,
view_hackathon,
update_hackathon_status,
change_awards,
judge_teams,
assign_mentors,
view_hackathon_public,
)
from teams.views import change_teams
urlpatterns = [
path('', list_hackathons, name="hackathon-list"),
path("<int:hackathon_id>/team/<int:team_id>/judging/",
judging, name="judging"),
path("<int:hackathon_id>/final_score/", check_projects_scores,
name="final_score"),
path("<int:hackathon_id>/change_teams/", change_teams,
name="change_teams"),
path("<int:hackathon_id>/awards/", change_awards, name="awards"),
path("create_hackathon", create_hackathon, name='create_hackathon'),
path("<int:hackathon_id>/", view_hackathon,
name='view_hackathon'),
path("public/<int:hackathon_id>/", view_hackathon_public,
name='view_hackathon_public'),
path("<int:hackathon_id>/update/", update_hackathon,
name="update_hackathon"),
path("<int:hackathon_id>/update_hackathon_status/",
update_hackathon_status, name="update_hackathon_status"),
path("<int:hackathon_id>/delete/", delete_hackathon,
name="delete_hackathon"),
path('enroll/', enroll_toggle, name='enroll_toggle'),
path('<int:hackathon_id>/judge_teams/', judge_teams,
name="judge_teams"),
path('<int:hackathon_id>/assign_mentors/', assign_mentors,
name="assign_mentors"),
]
|
import json
import sys
import mso
import urllib3
import json
import pprint
try:
from credentials import MSO_IP, MSO_ADMIN, MSO_PASSWORD
except ImportError:
sys.exit("Error: please verify credentials file format.")
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
rc = mso.RestClient(MSO_IP, MSO_ADMIN, MSO_PASSWORD)
roles = rc.get('/roles')
rolesJson = json.loads(roles.text)
for role in rolesJson['roles']:
if role['name'] == 'powerUser':
roleId = role['id']
userJson = {"lastName":"Testing",
"username":"test",
"phoneNumber":"555-123-456",
"emailAddress":"[email protected]",
"id":"",
"firstName":"Subject",
"confirmPassword":"cisco123!!!!",
"password":"cisco123!!!!",
"roles":[ {"roleId":roleId,
"accessType":"readOnly"}],
"accountStatus":"active"}
resp = rc.post('/users', json_body=userJson)
respJson = json.loads(resp.text)
newUserId = respJson['id']
print("Created new user with id {}".format(newUserId))
discard = input("Take a look at MSO for the presence of the new user. Should we delete it? [Y/N] ")
if discard == "Y":
path = '/users/' + newUserId
resp = rc.delete(path)
if resp.status_code == 204:
print("User deleted")
|
# https://matplotlib.org/api/_as_gen/matplotlib.gridspec.GridSpec.html#matplotlib.gridspec.GridSpec
import os
import matplotlib.pyplot as plt
import numpy as np
from fears.utils import results_manager, plotter
data_folder = 'results_07202021_0000'
exp_info_file = 'experiment_info_07202021_0000.p'
fig,ax = plt.subplots(figsize=(3,7.75))
labelsize=12
exp_folders,exp_info = results_manager.get_experiment_results(data_folder,
exp_info_file)
max_cells = exp_info.populations[0].max_cells
n_sims = exp_info.n_sims
p_drop = exp_info.prob_drops
# make a dummy barchart
# p_drop = p_drop[2:]
x = np.arange(len(p_drop))
barchart_data = np.ones(len(p_drop))*100
rects = ax.barh(x,barchart_data,color='slategrey',facecolor='w')
tc_axes = []
drug_axes = []
pop_axes = []
exp_folders.reverse()
p_drop = np.flip(p_drop)
thresh = 1
# data_extinct = np.zeros((999,1))
for exp in exp_folders:
p_drop_t = exp[exp.find('=')+1:]
p_drop_t = p_drop_t.replace(',','.')
p_drop_t = float(p_drop_t)
num = np.argwhere(p_drop == p_drop_t)
num = num[0,0]
# generate timecourse axes
width = 100
height = rects.patches[num].get_height()
ypos = rects.patches[num].get_y()
xpos = 125
popax = ax.inset_axes([xpos,ypos,width,height],transform=ax.transData)
xpos = xpos + width + 30
tcax = ax.inset_axes([xpos,ypos,width,height],transform=ax.transData)
# da = tcax.twinx()
sim_files = os.listdir(path=exp)
sim_files = sorted(sim_files)
survive_count = 0
counts_total = None
k=0
while k < len(sim_files):
# while k < 10:
sim = sim_files[k]
sim = exp + os.sep + sim
data = results_manager.get_data(sim)
dc = data[:,-2]
data = data[:,0:-2]
# data = data/np.max(data)
data_t = data[-1,:]
        # check whether any genotype count is at or above the survival threshold
if any(data_t >= thresh):
survive_count += 1
if counts_total is None:
counts_total = data
else:
counts_total += data
# exp_info.populations[num].counts_log_scale = True
d = np.sum(data,axis=1)
popax = plotter.plot_population_count(exp_info.populations[num],
d,
popax,
thresh=thresh,
normalize=False,
logscale=True)
# popax.yaxis.tick_right()
data = data/max_cells
if k==0:
drug_kwargs = {'alpha':1,
'color':'black',
'linewidth':1,
'label':'Drug Concentration ($\u03BC$M)'
}
tcax,drug_ax = plotter.plot_timecourse_to_axes(exp_info.populations[num],
data,
tcax,
drug_curve=dc,
drug_curve_linestyle='-',
legend_size=3,
drug_ax_sci_notation=True,
drug_kwargs=drug_kwargs,
legend_labels=False,
grayscale=True,
color='gray',
linewidth=1,
labelsize=10,
alpha=0.7
)
drug_ax.set_ylabel('')
drug_axes.append( drug_ax )
else:
tcax,da = plotter.plot_timecourse_to_axes(exp_info.populations[num],
data,
tcax,
grayscale=True,
color='gray',
legend_size=3,
legend_labels=False,
linewidth=2,
labelsize=10,
alpha=0.2
)
# drug_ax.set_ylim(0,10**4)
k+=1
if survive_count > 0:
counts_avg = counts_total/survive_count
# counts_avg = counts_avg/np.max(counts_avg)
# counts_avg = counts_total
counts_avg = counts_avg/np.max(counts_avg)
tcax,temp = plotter.plot_timecourse_to_axes(exp_info.populations[num],
counts_avg,
tcax,
legend_size=3,
labelsize=10)
# t = np.arange(len(dc))
# t = t*exp_info.populations[0].timestep_scale/24
# da.plot(t,dc)
tc_axes.append( tcax )
pop_axes.append( popax )
barchart_data[num] = survive_count
# for a in tc_axes:
# a.set_yscale('log',base=2)
# a.set_ylim(10,max_cells)
# for da in drug_axes:
# da.ticklabel_format(style='sci',axis='y',scilimits=(0,4))
# da.set_yticks(da.get_yticks())
# yt = da.get_yticks
# yt = yt/10**3
# drug_axes[1].set_ylabel('Drug Concentration (uM)', color='gray',fontsize=labelsize)
# tc_axes[3].set_ylabel('Proportion of \nmax cell count',fontsize=labelsize)
# tc_axes[0].set_xlabel('Days',fontsize=labelsize)
pop_axes[0].set_xlabel('Days',fontsize=labelsize)
rects = ax.barh(x,barchart_data,color='slategrey')
ax.set_yticks(x)
ax.set_yticklabels(p_drop)
# ax.yaxis.set_major_formatter(ScalarFormatter())
# ax.ticklabel_format(style='sci',axis='y')
ax.set_xlabel('% Resistant',fontsize=12)
ax.set_ylabel('$p_{forget}$',fontsize=12)
ax.spines["right"].set_visible(False)
ax.spines["top"].set_visible(False)
# compute error bars
# rule of succession explanation: https://en.wikipedia.org/wiki/Rule_of_succession
p = (np.array(barchart_data) + 1)/(n_sims + 2) # uniform prior (rule of succession)
n = n_sims
q = 1-p
sd = 100*(p*q/n)**0.5 # standard deviation of the estimator of the parameter of a bernoulli distribution
# rects = bar_ax.barh(x, barchart_data,color='slategrey')
# errorbar_pos = x + rects[0].get_height()/2
# bar_ax.errorbar(x=barchart_data, y=errorbar_pos, xerr=sd,linewidth=0,elinewidth=2,capsize=5,color='tomato')
ax.errorbar(x=barchart_data, y=x, xerr=sd,linewidth=0,elinewidth=2,capsize=5,color='black')
tc_axes[0].legend(frameon=False,fontsize=11,loc='lower left',
bbox_to_anchor=(-0.25,-0.9),ncol=4)
drug_axes[0].legend(frameon=False,fontsize=11,loc='lower left',
bbox_to_anchor=(-0.8,-1.4),ncol=1)
# ax.annotate('Proportion of carrying capacity',rotation=90,
# fontsize=labelsize,xy=(97,0.65),
# ha='center') # xy in data points
# ax.annotate('Drug Concentration ($\u03BC$M)',rotation=90,
# fontsize=labelsize,xy=(242,0.75),
# ha='center',annotation_clip=False) # xy in data points
# ax.annotate('Cell count',rotation=90,
# fontsize=labelsize,xy=(380,1.15),
# ha='center',annotation_clip=False)
# ax.annotate('Cell count',rotation=90,
# fontsize=labelsize,xy=(97,1.15),
# ha='center',annotation_clip=False)
# for da in drug_axes:
# da.set_yticks([0,1e3,2e3])
# da.ticklabel_format(style='sci',axis='y',scilimits=(0,3))
for pa in pop_axes:
xl = pa.get_xlim()
xl = [0,xl[1]]
pa.set_xlim(xl)
# # xl = tc_axes[0].get_xlim()
# # xt = tc_axes[0].get_xticks()
# for pa in pop_axes:
# pa.set_xlim(xl)
# pa.set_xticks(xt)
# pa.set_ylim([10**0,10**12])
handles, labels = pop_axes[-1].get_legend_handles_labels()
r_index = labels.index('resistant')
e_index = labels.index('extinct')
pop_axes[-1].legend([handles[r_index],handles[e_index]],
['resistant','extinct'],frameon=False)
results_manager.save_fig(fig,'adherance_v_survival.pdf',bbox_inches='tight') |
from pydantic import BaseModel
class UtilsNotificationsLinksResponse(BaseModel):
notifications: bool
class UtilsNotificationsLinks(BaseModel):
response: UtilsNotificationsLinksResponse
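# Usage sketch (added for illustration): pydantic coerces the nested dict into
# UtilsNotificationsLinksResponse automatically.
if __name__ == "__main__":
    links = UtilsNotificationsLinks(response={"notifications": True})
    assert links.response.notifications is True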
|
# SPDX-License-Identifier: Apache-2.0
#
# Copyright (C) 2015, ARM Limited and contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import os
import re
import logging
import logging.config
class LisaLogging(object):
@classmethod
def setup(self, filepath='logging.conf', level=logging.INFO):
"""
Initialize logging used for all the LISA modules.
:param filepath: the relative or absolute path of the logging configuration to use.
                         Relative path uses the LISA_HOME environment variable
                         as base folder.
:type filepath: str
:param level: the default log level to enable, INFO by default
:type level: logging.<level> or int in [0..50]
"""
# Load the specified logfile using an absolute path
basepath = os.path.dirname(__file__).replace('/libs/utils', '')
filepath = os.path.join(basepath, filepath)
if not os.path.exists(filepath):
raise ValueError('Logging configuration file not found in: {}'\
.format(filepath))
logging.config.fileConfig(filepath)
logging.getLogger().setLevel(level)
logging.info('Using LISA logging configuration:')
logging.info(' %s', filepath)
class JsonConf(object):
"""
Class for parsing a JSON superset with comments.
Simply strips comments and then uses the standard JSON parser.
:param filename: Path to file to parse
:type filename: str
"""
def __init__(self, filename):
self.filename = filename
self.json = None
def load(self):
"""
Parse a JSON file
First remove comments and then use the json module package
Comments look like :
::
// ...
or
::
/*
...
*/
"""
# Setup logging
self._log = logging.getLogger('JsonConf')
if not os.path.isfile(self.filename):
raise RuntimeError(
'Missing configuration file: {}'.format(self.filename)
)
self._log.debug('loading JSON...')
with open(self.filename) as fh:
content = ''.join(fh.readlines())
## Looking for comments
match = JSON_COMMENTS_RE.search(content)
while match:
# single line comment
content = content[:match.start()] + content[match.end():]
match = JSON_COMMENTS_RE.search(content)
        # Allow trailing commas in dicts and lists in JSON
# Note that this simple implementation will mangle things like:
# {"config": ", }"}
content = re.sub(r',[ \t\r\n]+}', '}', content)
content = re.sub(r',[ \t\r\n]+\]', ']', content)
# Return json file
self.json = json.loads(content, parse_int=int)
self._log.debug('Loaded JSON configuration:')
self._log.debug(' %s', self.json)
return self.json
def show(self):
"""
Pretty-print content of parsed JSON
"""
        print(json.dumps(self.json, indent=4))
# Regular expression for comments
JSON_COMMENTS_RE = re.compile(
r'(^)?[^\S\n]*/(?:\*(.*?)\*/[^\S\n]*|/[^\n]*)($)?',
re.DOTALL | re.MULTILINE
)
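# Usage sketch (illustrative only; 'target.config' is a hypothetical path):
#
#     conf = JsonConf('target.config')
#     data = conf.load()    # comments and trailing commas are stripped first
#     conf.show()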
|
import socket
import sys
from threading import Thread, Lock
from PyQt5.QtWidgets import *
class MySocket(Thread):
def __init__(self, output, sock=None):
Thread.__init__(self)
self.interface = output
self.thread_active = True
if sock is None:
self.sock = socket.socket(
socket.AF_INET, socket.SOCK_DGRAM)
else:
self.sock = sock
def connect(self, host, port):
try:
self.sock.bind((host, port))
        except OSError:
return 1, "Address already in use: NET_Bind"
return 0, "Server is working on port " + str(port)
def run(self):
while self.thread_active:
data, addr = self.sock.recvfrom(1024)
self.interface.append(
str(addr) + " | size: " + str(len(data)) + " : " + data.decode())
self.sock.sendto(data, addr)
def myclose(self):
self.thread_active = False
self.sock.close()
class MainWidow(QWidget):
def __init__(self):
super().__init__()
self.output_text = QTextEdit()
self.socket_ins = MySocket(self.output_text)
grid = QGridLayout()
self.setLayout(grid)
grid.addWidget(QLabel("Listening port"), 0, 0)
self.port_text = QLineEdit("8000")
grid.addWidget(self.port_text, 0, 1)
self.start_button = QPushButton("Start")
self.start_button.clicked.connect(self.connect)
grid.addWidget(self.start_button, 0, 3)
grid.addWidget(self.output_text, 2, 0, 1, 6)
self.show()
def connect(self):
err_code, output = self.socket_ins.connect("localhost",
int(self.port_text.text()))
self.output_text.append(output)
if (err_code == 0):
self.output_text.append("Waiting for new message")
self.socket_ins.start()
def closeEvent(self, event):
self.socket_ins.myclose() # close socket on exit
if __name__ == "__main__":
app = QApplication(sys.argv)
ex = MainWidow()
sys.exit(app.exec_())
|
# -*- coding: utf-8 -*-
from girderformindlogger.api import access
from girderformindlogger.api.describe import Description, autoDescribeRoute
from girderformindlogger.api.rest import Resource
from girderformindlogger.models.collection import Collection
from girderformindlogger.models.folder import Folder
from girderformindlogger.models.setting import Setting
from . import constants
from .settings import PluginSettings
class Homepage(Resource):
def __init__(self):
super(Homepage, self).__init__()
self.resourceName = 'homepage'
self.route('GET', (), self.getSettings)
self.route('GET', ('assets',), self.getAssets)
@access.public
@autoDescribeRoute(
Description('Public url for getting the homepage properties.')
)
def getSettings(self):
settings = Setting()
return {
PluginSettings.MARKDOWN: settings.get(PluginSettings.MARKDOWN),
PluginSettings.HEADER: settings.get(PluginSettings.HEADER),
PluginSettings.SUBHEADER: settings.get(PluginSettings.SUBHEADER),
PluginSettings.WELCOME_TEXT: settings.get(PluginSettings.WELCOME_TEXT),
PluginSettings.LOGO: settings.get(PluginSettings.LOGO),
}
@access.admin
@autoDescribeRoute(
Description('Return the folder IDs for uploaded asset content.')
)
def getAssets(self):
return {
# Keep MARKDOWN folder as 'Homepage Assets', for compatibility
PluginSettings.MARKDOWN: self._getAssetsFolder('Homepage Assets')['_id'],
PluginSettings.WELCOME_TEXT: self._getAssetsFolder('Welcome Text')['_id'],
PluginSettings.LOGO: self._getAssetsFolder('Logo')['_id'],
}
def _getAssetsFolder(self, folderName):
"""
Get or create a public folder, in the private "Homepage Assets" collection.
This makes the folder effectively "unlisted" as it can't be browsed to by normal users, but
its content can still be downloaded directly.
:param folderName: The name of the folder to get or create.
:return: The new folder document.
"""
collection = Collection().createCollection(
constants.COLLECTION_NAME,
public=False,
reuseExisting=True
)
folder = Folder().createFolder(
collection,
folderName,
parentType='collection',
public=True,
reuseExisting=True
)
return folder
|
"""
Simple script which runs SNPE-A with one fixed observation.
"""
import numpy as np
import torch as to
from copy import deepcopy
from sbi import utils
import pyrado
from pyrado.algorithms.meta.bayessim import BayesSim
from pyrado.sampling.sbi_embeddings import BayesSimEmbedding
from pyrado.sampling.sbi_rollout_sampler import RolloutSamplerForSBI
from pyrado.environments.pysim.one_mass_oscillator import OneMassOscillatorSim
from pyrado.logger.experiment import setup_experiment, save_dicts_to_yaml
from pyrado.policies.special.dummy import IdlePolicy
from pyrado.policies.special.mdn import MDNPolicy
from pyrado.utils.argparser import get_argparser
if __name__ == "__main__":
# Parse command line arguments
args = get_argparser().parse_args()
# Experiment (set seed before creating the modules)
ex_dir = setup_experiment(OneMassOscillatorSim.name, f"{BayesSim.name}")
# Set seed if desired
pyrado.set_seed(args.seed, verbose=True)
# Define a mapping: index - domain parameter
dp_mapping = {0: "m", 1: "k", 2: "d"}
# Environments
env_hparams = dict(dt=1 / 50.0, max_steps=200)
env_sim = OneMassOscillatorSim(**env_hparams, task_args=dict(task_args=dict(state_des=np.array([0.5, 0]))))
# Create a fake ground truth target domain
env_real = deepcopy(env_sim)
env_real.domain_param = dict(m=0.8, k=33, d=0.3)
# Behavioral policy
policy = IdlePolicy(env_sim.spec)
# Prior
dp_nom = env_sim.get_nominal_domain_param() # m=1.0, k=30.0, d=0.5
prior_hparam = dict(
low=to.tensor([dp_nom["m"] * 0.5, dp_nom["k"] * 0.5, dp_nom["d"] * 0.5]),
high=to.tensor([dp_nom["m"] * 1.5, dp_nom["k"] * 1.5, dp_nom["d"] * 1.5]),
)
prior = utils.BoxUniform(**prior_hparam)
# Time series embedding
embedding_hparam = dict(downsampling_factor=1)
embedding = BayesSimEmbedding(env_sim.spec, RolloutSamplerForSBI.get_dim_data(env_sim.spec), **embedding_hparam)
# Posterior (mixture of Gaussians) created inside BayesSim
posterior_hparam = dict(num_comp=5, hidden_sizes=[42, 42], hidden_nonlin=to.relu, use_cuda=False)
# Generate real_world observations
num_real_obs = 1
num_segments = 1
# TODO delete below
# rollout_worker = SimRolloutSamplerForSBI(env_sim, policy, dp_mapping, embedding, num_segments=num_segments)
# dp_nom_to = to.tensor(list(dp_nom.values()))
# data_real = to.stack([rollout_worker(dp_nom_to).squeeze() for _ in range(num_real_obs)])
# Algorithm
algo_hparam = dict(
max_iter=1,
num_real_rollouts=num_real_obs,
num_sim_per_round=300,
num_segments=num_segments,
posterior_hparam=posterior_hparam,
num_sbi_rounds=1,
subrtn_sbi_training_hparam=dict(
max_iter=100,
num_eval_samples=20,
batch_size=50,
max_grad_norm=5.0,
lr=1e-3,
use_gaussian_proposal=False,
),
# num_workers=1,
)
algo = BayesSim(
ex_dir,
env_sim,
env_real,
policy,
dp_mapping,
prior,
embedding,
**algo_hparam,
)
# Save the hyper-parameters
save_dicts_to_yaml(
dict(env=env_hparams, seed=args.seed),
dict(policy_name=policy.name),
dict(prior=prior_hparam),
dict(embedding=embedding_hparam, embedding_name=embedding.name),
dict(posterior=posterior_hparam, posterior_name=MDNPolicy.name),
dict(algo=algo_hparam, algo_name=algo.name),
save_dir=ex_dir,
)
# Jeeeha
algo.train(seed=args.seed)
|
class ProfitBl(object):
pass
|
import cv2
import time
import numpy
import init_hand_tracking_module #initial file
import mediapipe
import math
from ctypes import cast, POINTER
from comtypes import CLSCTX_ALL
from pycaw.pycaw import AudioUtilities, IAudioEndpointVolume
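# Volume-control sketch (illustrative only; this file just gathers imports, the
# actual hand-tracking logic presumably lives elsewhere). Standard pycaw setup:
#
#     devices = AudioUtilities.GetSpeakers()
#     interface = devices.Activate(IAudioEndpointVolume._iid_, CLSCTX_ALL, None)
#     volume = cast(interface, POINTER(IAudioEndpointVolume))
#     min_db, max_db, _ = volume.GetVolumeRange()
#     volume.SetMasterVolumeLevel(max_db, None)   # full volume, in dB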
|
# Copyright (c) 2017, The University of Bristol, Senate House, Tyndall Avenue, Bristol, BS8 1TH, United Kingdom.
# Copyright (c) 2021, COSIC-KU Leuven, Kasteelpark Arenberg 10, bus 2452, B-3001 Leuven-Heverlee, Belgium.
import itertools
class chain(object):
def __init__(self, *args):
self.args = args
def __iter__(self):
return itertools.chain(*self.args)
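# Usage sketch (added for illustration): unlike a bare itertools.chain object,
# this wrapper can be iterated multiple times because __iter__ rebuilds it.
if __name__ == '__main__':
    c = chain([1, 2], (3, 4))
    assert list(c) == [1, 2, 3, 4]
    assert list(c) == [1, 2, 3, 4]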
|
import pytest
from ..dtype_helpers import EqualityMapping
def test_raises_on_distinct_eq_key():
with pytest.raises(ValueError):
EqualityMapping([(float("nan"), "value")])
def test_raises_on_indistinct_eq_keys():
class AlwaysEq:
def __init__(self, hash):
self._hash = hash
def __eq__(self, other):
return True
def __hash__(self):
return self._hash
with pytest.raises(ValueError):
EqualityMapping([(AlwaysEq(0), "value1"), (AlwaysEq(1), "value2")])
def test_key_error():
mapping = EqualityMapping([("key", "value")])
with pytest.raises(KeyError):
mapping["nonexistent key"]
def test_iter():
mapping = EqualityMapping([("key", "value")])
it = iter(mapping)
assert next(it) == "key"
with pytest.raises(StopIteration):
next(it)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tuesday June 1 12:45:05 2021
@author: cbadenes
"""
import worker as workers
import pysolr
import html
import time
import sys
import os
from datetime import datetime
if __name__ == '__main__':
# Create a client instance. The timeout and authentication options are not required.
server = 'http://localhost:8983/solr'
if (len(sys.argv) != 2):
print("usage: python index.py <input_directory>")
sys.exit(2)
directory = sys.argv[1]
# Load articles
print("loading files from:",directory)
files = [ os.path.join(directory, entry) for entry in os.listdir(directory) if os.path.isfile(os.path.join(directory, entry))]
total_files = len(files)
print("[",datetime.now(),"] files available: ",total_files)
solr_papers = pysolr.Solr(server+'/cord19-papers', always_commit=True, timeout=120)
solr_paragraphs = pysolr.Solr(server+'/cord19-paragraphs', always_commit=True, timeout=120)
window = 100
num_papers = 0
papers = []
num_paragraphs = 0
paragraphs = []
    # Record the start time so the elapsed-time report at the end has a reference point.
    t = time.time()
    for file in files:
try:
result = workers.parse_and_annotate(file)
if ('paper' in result):
papers.append(result['paper'])
num_papers += 1
if (len(papers) >= window):
print("[",datetime.now(),"] indexing papers: ", num_papers, "/", len(papers))
solr_papers.add(papers)
papers = []
if ('paragraphs' in result):
paragraphs.extend(result['paragraphs'])
num_paragraphs += len(result['paragraphs'])
if (len(paragraphs) >= window):
print("[",datetime.now(),"] indexing paragraphs: ", num_paragraphs,"/",len(paragraphs))
solr_paragraphs.add(paragraphs)
paragraphs = []
except Exception as e:
print("Error reading file:",file, " => ",e)
print("[",datetime.now(),"] indexing last papers: ", num_papers, "/", len(papers))
solr_papers.add(papers)
print("[",datetime.now(),"] indexing last paragraphs: ", num_paragraphs,"/",len(paragraphs))
solr_paragraphs.add(paragraphs)
print('Time to parse articles: {} mins'.format(round((time.time() - t) / 60, 2)))
print("Total Articles:",num_papers)
print("Total Paragraphs:",num_paragraphs)
|
#!/usr/bin/env python
#-*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
import csv
import argparse
import sys
# This allows inputs to be passed in from the terminal (i.e. when running python3 remover_puntos.py argv[0]....)
try:
parser=argparse.ArgumentParser();
parser.add_argument("porcentaje_rem", help="Coloque como primer argumento el porcentaje de puntos a remover", type=float);
parser.add_argument("tipo estrella: ",help="Coloque el tipo de estrella con el que se va a trabajar");
parser.add_argument("numero ID estrella: ", help="Coloque el número de la estrella a la que le va a remover puntos");
args=parser.parse_args();
porcentaje=float(sys.argv[1]);
tipo_estrella=sys.argv[2];
numero_estrella=sys.argv[3];
except:
e = sys.exc_info()[0];
print(e);
#end try
#Import the star ID numbers from the csv file:
ID_estrellas=np.loadtxt('numero_estrellas.csv',delimiter=',',dtype='str', skiprows=1);
vecCep=ID_estrellas[:,0];
vecRRLyr=ID_estrellas[:,1];
vecECL=ID_estrellas[:,2];
#-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
if tipo_estrella=='Cefeida' or tipo_estrella==0 or tipo_estrella=='0':
nombre_OGLE='OGLE-LMC-CEP-'
label_path='Datos/'+'1_Cefeidas'+'/I/'+nombre_OGLE;
#numero_estrella=vecCep;
elif tipo_estrella=='RR_Lyrae' or tipo_estrella==1 or tipo_estrella=='1':
nombre_OGLE='OGLE-LMC-RRLYR-'
label_path='Datos/'+'2_RR_Lyrae'+'/I/'+nombre_OGLE;
#numero_estrella=vecRRLyr;
else:
nombre_OGLE='OGLE-LMC-ECL-'
label_path='Datos/'+'3_BinariasEclipsantes'+'/I/'+nombre_OGLE;
#numero_estrella=vecECL;
#end if
extension='.dat';
#numero_estrella='02889';
elSeniorArchivo=label_path+numero_estrella+extension;
datos=np.genfromtxt(elSeniorArchivo,delimiter=' ');
N_datos=len(datos[:,0]);
N_remover=int(porcentaje*N_datos);
elegidos=np.random.choice(N_datos, size=N_remover,replace=False);
datos_nuevos=np.delete(datos,elegidos,0);
nombre_archivo=nombre_OGLE+numero_estrella+extension;
with open(nombre_archivo, 'w', encoding='UTF8', newline='') as f:
writer=csv.writer(f, delimiter=' ');
writer.writerows(datos_nuevos);
#end with
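#Example invocation (hypothetical star ID; the corresponding file must exist under Datos/):
#  python3 remover_puntos.py 0.1 Cefeida 0001
#removes about 10% of the points of OGLE-LMC-CEP-0001 and writes the thinned light curve
#to OGLE-LMC-CEP-0001.dat in the current directory.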
|
#!/usr/bin/python
# Example using a character LCD connected to a Raspberry Pi
from lcd import LCD
import time
lcd = LCD(16, 19, 25, 11, 23, 22)
lcd.set_text('Hello!')
time.sleep(2)
lcd.set_text("World!", clean=False)
time.sleep(2)
lcd.set_text("Center!", center=True)
time.sleep(2)
lcd.cursor_visible(True)
lcd.cursor_blink(True)
time.sleep(2)
lcd.clear()
lcd.cursor_position(1, 9)
lcd.set_text("Hupp", clean=False, center=False)
lcd.autoscroll(True)
time.sleep(2)
lcd.set_text("NANO", clean=False)
time.sleep(2)
lcd.clear()
lcd.autoscroll(False)
time.sleep(4)
lcd.roll("Hello World!") |
import pkgutil
from threading import Event, Thread
from typing import Callable, List
from PyQt5.QtCore import pyqtSignal
from qtpy import QtCore, QtWidgets
class classproperty(object):
def __init__(self, fget):
self.fget = fget
def __get__(self, owner_self, owner_cls):
return self.fget(owner_cls)
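# classproperty usage sketch (illustrative only, not part of the original module):
#
#   class Config:
#       _name = "default"
#       @classproperty
#       def name(cls):
#           return cls._name
#
#   Config.name  # -> "default", resolved on the class itself, no instance required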
def executeWaitTask(event: Event, call_after, call_after_args=[]):
"""
    Waits for the event to be set and then executes the call_after function.
    Keeps the long-running wait off the main (GUI) thread.
:param event: threading event
:param call_after: function to call after event finished
:param call_after_args: arguments for the call after function
:return: None
"""
wt = _WaitingThread(event)
wt.finished.connect(lambda: call_after(*call_after_args))
wt.start()
def executeTask(execution: Callable, args=[], call_after: Callable = None, call_after_args=[]):
"""
    Executes the function and afterwards calls the call_after function.
    The return value of the execution function is passed to call_after as the first parameter if it is not None.
    Keeps long-running work off the main (GUI) thread.
:param execution: function to execute
:param args: arguments for the execution function
:param call_after: function to call after execution finished
:param call_after_args: additional arguments for the call after function
:return: None
"""
thread = _Thread(execution, args)
if call_after:
thread.finished.connect(lambda x: call_after(x, *call_after_args) if x else call_after(*call_after_args))
thread.start()
def executeLongRunningTask(execution: Callable, args=[], message="", call_after: Callable = None, call_after_args=[]):
"""
    Shows a waiting indicator while the function executes; afterwards the call_after function is called.
    The return value of the execution function is passed to call_after as the first parameter if it is not None.
    Keeps long-running work off the main (GUI) thread.
:param execution: function to execute
:param args: arguments for the execution function
:param call_after: function to call after execution finished
:param call_after_args: additional arguments for the call after function
:return: None
"""
thread = _Thread(execution, args)
progress = QtWidgets.QProgressDialog(message, None, 0, 0, flags=QtCore.Qt.FramelessWindowHint)
progress.setWindowModality(QtCore.Qt.ApplicationModal)
bar = QtWidgets.QProgressBar()
bar.setRange(0, 0)
bar.setTextVisible(False)
progress.setBar(bar)
if call_after:
thread.finished.connect(lambda x: call_after(x, *call_after_args) if x else call_after(*call_after_args))
close_progress = lambda x, p=progress: p.close()
thread.finished.connect(close_progress)
progress.show()
thread.start()
def installMissingAndExecute(package_names: List[str], execution: Callable, args=[]):
"""
Checks for missing packages. The execution function is called when the requirements are fulfilled or the
missing packages could be installed (depending on the user decision).
:param package_names: required packages
:param execution: callable function
:param args: parameters of the execution
:return: None
"""
if _packagesInstalled(package_names):
execution(*args)
return
msg = "This action requires additional python modules. Do you want to install them now?"
reply = QtWidgets.QMessageBox.question(None, "Additional python package required", msg, QtWidgets.QMessageBox.Yes,
QtWidgets.QMessageBox.No)
if reply != QtWidgets.QMessageBox.Yes:
return
for pkg_name in package_names:
if not pkgutil.find_loader(pkg_name):
executeLongRunningTask(_install, [pkg_name], "Installing Packages", execution, args)
def checkPackages(package_names):
"""
Checks if the required python packages are installed.
:param package_names: python package names
:return: True if all packages are installed, False otherwise
"""
if _packagesInstalled(package_names):
return True
msg = "This action requires additional python modules. Do you want to install them now?"
reply = QtWidgets.QMessageBox.question(None, "Additional python package required", msg, QtWidgets.QMessageBox.Yes,
QtWidgets.QMessageBox.No)
if reply != QtWidgets.QMessageBox.Yes:
return False
for pkg_name in package_names:
if not pkgutil.find_loader(pkg_name):
executeLongRunningTask(_install, [pkg_name], "Downloading Packages")
return False
def _packagesInstalled(package_names):
for pkg_name in package_names:
pkg = pkgutil.find_loader(pkg_name)
if pkg is None:
return False
return True
def _install(pkg_name):
import pip
pip.main(['install', pkg_name])
class _Thread(QtCore.QObject, Thread):
finished = pyqtSignal(object)
def __init__(self, execution, args):
self.execution = execution
self.args = args
QtCore.QObject.__init__(self)
Thread.__init__(self)
def run(self):
result = self.execution(*self.args)
self.finished.emit(result)
class _WaitingThread(QtCore.QObject, Thread):
finished = pyqtSignal()
def __init__(self, event):
self.event = event
QtCore.QObject.__init__(self)
Thread.__init__(self)
def run(self):
self.event.wait()
self.finished.emit()
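# Usage sketch for the helpers above (illustrative only; requires a running QApplication
# event loop, and load_data/show_result/label are placeholders, not part of this module):
#
#   def load_data(path):
#       ...  # long-running work executed off the GUI thread
#
#   def show_result(result, label):
#       label.setText(str(result))
#
#   executeTask(load_data, args=["data.csv"], call_after=show_result, call_after_args=[label])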
|
# coding: utf-8
import os
print(os.name)
# print(getattr(os, 'uname'))
print(hasattr(os, 'uname'))
print(os.environ)
print(os.environ.get('path'))
print(os.path.realpath('.'))
print(os.path.join('/a/b', 'c'))
# print(os.mkdir(os.path.realpath(os.path.join(__file__,os.pardir, 'test_path'))))
# print(os.rmdir(os.path.realpath(os.path.join(__file__,os.pardir, 'test_path'))))
print(os.path.split('/a/b/c/d.txt'))
print(os.path.splitext('/a/b/c/d.txt'))
import shutil
# shutil.copyfile('test_chain.py', 'test_chain.txt')
r = [x for x in os.listdir('.') if os.path.isdir(x)]
print(r)
r = [x for x in os.listdir('.') if os.path.isfile(x) and os.path.splitext(x)[1] == '.py']
print(r)
|
"""Tools for manipulation of rational expressions. """
from __future__ import print_function, division
from sympy.core import Basic, Add, sympify
from sympy.core.compatibility import iterable
from sympy.core.exprtools import gcd_terms
from sympy.utilities import public
@public
def together(expr, deep=False):
"""
Denest and combine rational expressions using symbolic methods.
This function takes an expression or a container of expressions
and puts it (them) together by denesting and combining rational
subexpressions. No heroic measures are taken to minimize degree
of the resulting numerator and denominator. To obtain completely
reduced expression use :func:`cancel`. However, :func:`together`
can preserve as much as possible of the structure of the input
expression in the output (no expansion is performed).
A wide variety of objects can be put together including lists,
tuples, sets, relational objects, integrals and others. It is
also possible to transform interior of function applications,
by setting ``deep`` flag to ``True``.
By definition, :func:`together` is a complement to :func:`apart`,
so ``apart(together(expr))`` should return expr unchanged. Note
however, that :func:`together` uses only symbolic methods, so
it might be necessary to use :func:`cancel` to perform algebraic
simplification and minimise degree of the numerator and denominator.
Examples
========
>>> from sympy import together, exp
>>> from sympy.abc import x, y, z
>>> together(1/x + 1/y)
(x + y)/(x*y)
>>> together(1/x + 1/y + 1/z)
(x*y + x*z + y*z)/(x*y*z)
>>> together(1/(x*y) + 1/y**2)
(x + y)/(x*y**2)
>>> together(1/(1 + 1/x) + 1/(1 + 1/y))
(x*(y + 1) + y*(x + 1))/((x + 1)*(y + 1))
>>> together(exp(1/x + 1/y))
exp(1/y + 1/x)
>>> together(exp(1/x + 1/y), deep=True)
exp((x + y)/(x*y))
>>> together(1/exp(x) + 1/(x*exp(x)))
(x + 1)*exp(-x)/x
>>> together(1/exp(2*x) + 1/(x*exp(3*x)))
(x*exp(x) + 1)*exp(-3*x)/x
"""
def _together(expr):
if isinstance(expr, Basic):
if expr.is_Atom or (expr.is_Function and not deep):
return expr
elif expr.is_Add:
return gcd_terms(list(map(_together, Add.make_args(expr))))
elif expr.is_Pow:
base = _together(expr.base)
if deep:
exp = _together(expr.exp)
else:
exp = expr.exp
return expr.__class__(base, exp)
else:
return expr.__class__(*[ _together(arg) for arg in expr.args ])
elif iterable(expr):
return expr.__class__([ _together(ex) for ex in expr ])
return expr
return _together(sympify(expr))
|
class TestInflux:
params = {
'test_create': [{
'data': [{
'name': 'mongo',
'type': 'mongo',
'config': {
'configBean.mongoConfig.connectionString': 'mongodb://mongo:27017',
'configBean.mongoConfig.username': 'root',
'configBean.mongoConfig.password': 'root',
'configBean.mongoConfig.authSource': 'admin',
'configBean.mongoConfig.database': 'test',
'configBean.mongoConfig.collection': 'adtech',
'configBean.isCapped': False,
'configBean.initialOffset': '0',
'configBean.mongoConfig.offsetType': 'OBJECTID',
'configBean.offsetField': '_id',
'configBean.batchSize': 1000,
'configBean.maxBatchWaitTime': '10',
}
}],
'er': [{"config": {"configBean.batchSize": 1000, "configBean.initialOffset": "0",
"configBean.isCapped": False, "configBean.maxBatchWaitTime": "10",
"configBean.mongoConfig.authSource": "admin",
"configBean.mongoConfig.authenticationType": "USER_PASS",
"configBean.mongoConfig.collection": "adtech",
"configBean.mongoConfig.connectionString": "mongodb://mongo:27017",
"configBean.mongoConfig.database": "test",
"configBean.mongoConfig.offsetType": "OBJECTID",
"configBean.mongoConfig.password": "root", "configBean.mongoConfig.username": "root",
"configBean.offsetField": "_id"}, "name": "mongo", "type": "mongo"}]
}],
'test_edit': [{
'data': [{
'name': 'mongo',
'config': {
'configBean.mongoConfig.connectionString': 'mongodb://mongo:27017',
'configBean.mongoConfig.username': 'root',
'configBean.mongoConfig.password': 'root',
'configBean.mongoConfig.authSource': 'admin',
'configBean.mongoConfig.database': 'test',
'configBean.mongoConfig.collection': 'adtech',
'configBean.isCapped': True,
'configBean.initialOffset': '1',
'configBean.mongoConfig.offsetType': 'OBJECTID',
'configBean.offsetField': '_id',
'configBean.batchSize': 1001,
'configBean.maxBatchWaitTime': '11',
}
}],
'er': [{"config": {"configBean.batchSize": 1001, "configBean.initialOffset": "1",
"configBean.isCapped": True, "configBean.maxBatchWaitTime": "11",
"configBean.mongoConfig.authSource": "admin",
"configBean.mongoConfig.authenticationType": "USER_PASS",
"configBean.mongoConfig.collection": "adtech",
"configBean.mongoConfig.connectionString": "mongodb://mongo:27017",
"configBean.mongoConfig.database": "test",
"configBean.mongoConfig.offsetType": "OBJECTID",
"configBean.mongoConfig.password": "root", "configBean.mongoConfig.username": "root",
"configBean.offsetField": "_id"}, "name": "mongo", "type": "mongo"}]
}]
}
def test_create(self, api_client, data, er):
result = api_client.post('/sources', json=list(data))
assert result.json == er
def test_edit(self, api_client, data, er):
result = api_client.put('/sources', json=list(data))
assert result.json == er
def test_get(self, api_client):
result = api_client.get('/sources')
assert result.json == ["mongo"]
def test_delete(self, api_client):
api_client.delete('sources/mongo')
assert api_client.get('/sources').json == []
|
from .api_definition import api_list
from .project_definition import definition
|
from django.core.exceptions import PermissionDenied
from permissions import PermissionsRegistry
from permissions.exc import NoSuchPermissionError, PermissionsError
from .base import AnonymousUser, Model, TestCase, User, View
class TestRegistry(TestCase):
def test_register(self):
@self.registry.register
def can_do_things(user):
pass
self.assertTrue(hasattr(self.registry, 'can_do_things'))
@self.registry.can_do_things
def view(request):
pass
def test_register_with_args(self):
@self.registry.register(model=Model, allow_anonymous=True)
def can_do_things(user, instance):
self.assertIsInstance(instance, Model)
self.assertEqual(instance.model_id, 1)
return user.can_do_things
self.assertTrue(hasattr(self.registry, 'can_do_things'))
@self.registry.require('can_do_things', field='model_id')
def view(request, model_id):
pass
request = self.request_factory.get('/things/1')
request.user = User()
request.user.can_do_things = True
view(request, 1)
def test_cannot_use_register_as_perm_name(self):
self.assertRaises(
PermissionsError, self.registry.register, lambda u: None, name='register')
def test_get_unknown_permission(self):
with self.assertRaises(NoSuchPermissionError):
self.registry.pants
with self.assertRaises(NoSuchPermissionError):
self.registry.require('pants')
def test_bad_decoration(self):
self.registry.register(lambda u: None, name='perm')
self.assertRaises(PermissionsError, self.registry.perm, object())
def test_apply_to_class_based_view(self):
@self.registry.register(allow_anonymous=True)
def can_do_things(user):
return user.can_do_things
@self.registry.require('can_do_things')
class TestView(View):
pass
self.assertEqual(TestView.dispatch.__name__, 'dispatch')
request = self.request_factory.get('/things')
request.user = User()
request.user.can_do_things = True
view = TestView()
view.dispatch(request)
request.user.can_do_things = False
self.assertRaises(PermissionDenied, view.dispatch, request)
def test_apply_to_class_based_view_with_model(self):
@self.registry.register(model=Model, allow_anonymous=True)
def can_do_stuff(user, instance):
return user.can_do_stuff and instance is not None
@self.registry.require('can_do_stuff')
class TestView(View):
pass
request = self.request_factory.get('/stuff/1')
request.user = User()
request.user.can_do_stuff = True
view = TestView()
view.dispatch(request, 1)
request.user.can_do_stuff = False
self.assertRaises(PermissionDenied, view.dispatch, request, 1)
def test_view_args_are_passed_through_to_perm_func(self):
@self.registry.register
def perm(user, model_id, request=None, not_a_view_arg='XXX'):
self.assertEqual(model_id, 1)
self.assertIs(request, request_passed_to_view)
self.assertEqual(not_a_view_arg, 'XXX')
return True
@self.registry.perm
def view(request, model_id, view_arg_that_is_not_passed_through):
pass
request_passed_to_view = self.request_factory.get('/things/1')
request_passed_to_view.user = User()
view(request_passed_to_view, 1, 2)
def test_perm_func_is_not_called_when_user_is_staff_and_allow_staff_is_set(self):
registry = PermissionsRegistry(allow_staff=True)
@registry.register
def perm(user):
raise PermissionsError('Should not be raised')
@registry.perm
def view(request):
pass
request = self.request_factory.get('/things/1')
request.user = User(is_staff=True)
view(request)
def test_anon_is_required_to_login(self):
@self.registry.register
def perm(user):
return False
@self.registry.require('perm')
def view(request):
pass
request = self.request_factory.get('/things/1')
request.user = AnonymousUser()
response = view(request, 1)
self.assertEqual(response.status_code, 302)
def test_anon_is_required_to_login_when_perm_check_fails(self):
@self.registry.register(allow_anonymous=True)
def perm(user):
return False
@self.registry.require('perm')
def view(request):
pass
request = self.request_factory.get('/things/1')
request.user = AnonymousUser()
response = view(request, 1)
self.assertEqual(response.status_code, 302)
def test_ensure_custom_unauthenticated_handler_is_called(self):
def handler(request):
handler.called = True
handler.called = False
registry = PermissionsRegistry(unauthenticated_handler=handler)
@registry.register
def perm(user):
return False
@registry.require('perm')
def view(request):
pass
request = self.request_factory.get('/things/1')
request.user = AnonymousUser()
self.assertFalse(handler.called)
view(request, 1)
self.assertTrue(handler.called)
def test_ensure_view_perms(self):
perm_func = lambda user: True
perm_func.__name__ = 'perm'
self.registry.register(perm_func)
@self.registry.require('perm')
def view(request):
pass
entry = self.registry.entry_for_view(view, 'perm')
self.assertIsNotNone(entry)
self.assertIs(entry.perm_func, perm_func)
# try the same thing with a CBV
@self.registry.require('perm')
class AView(View):
pass
entry = self.registry.entry_for_view(AView, 'perm')
self.assertIsNotNone(entry)
self.assertIs(entry.perm_func, perm_func)
# same thing with the permission on a CBV method
class AnotherView(View):
@self.registry.require('perm')
def get(self, request):
pass
entry = self.registry.entry_for_view(AnotherView.get, 'perm')
self.assertIsNotNone(entry)
self.assertIs(entry.perm_func, perm_func)
def test_ensure_direct_call_respects_allow_staff_allow_superuser(self):
@self.registry.register(allow_staff=True, allow_superuser=True)
def perm(user):
return 'perm'
user = User(is_staff=True, is_superuser=False)
self.assertTrue(perm(user))
user = User(is_staff=False, is_superuser=True)
self.assertTrue(perm(user))
user = User(is_staff=False, is_superuser=False)
self.assertEqual(perm(user), 'perm')
|
def foo():
pass
class Model(object):
pass
|
# XY Nonlinear Kinematic MPC Module.
import time
import casadi
from controller import Controller
class KinMPCPathFollower(Controller):
def __init__(self,
N = 10, # timesteps in MPC Horizon
DT = 0.2, # discretization time between timesteps (s)
L_F = 1.5213, # distance from CoG to front axle (m)
L_R = 1.4987, # distance from CoG to rear axle (m)
V_MIN = 0.0, # min/max velocity constraint (m/s)
V_MAX = 20.0,
A_MIN = -3.0, # min/max acceleration constraint (m/s^2)
A_MAX = 2.0,
DF_MIN = -0.5, # min/max front steer angle constraint (rad)
DF_MAX = 0.5,
A_DOT_MIN = -1.5, # min/max jerk constraint (m/s^3)
A_DOT_MAX = 1.5,
DF_DOT_MIN = -0.5, # min/max front steer angle rate constraint (rad/s)
DF_DOT_MAX = 0.5,
Q = [1., 1., 10., 0.1], # weights on x, y, psi, and v.
R = [10., 100.]): # weights on jerk and slew rate (steering angle derivative)
for key in list(locals()):
if key == 'self':
pass
elif key == 'Q':
self.Q = casadi.diag(Q)
elif key == 'R':
self.R = casadi.diag(R)
else:
setattr(self, '%s' % key, locals()[key])
self.opti = casadi.Opti()
'''
(1) Parameters
'''
self.u_prev = self.opti.parameter(2) # previous input: [u_{acc, -1}, u_{df, -1}]
self.z_curr = self.opti.parameter(4) # current state: [x_0, y_0, psi_0, v_0]
# Reference trajectory we would like to follow.
# First index corresponds to our desired state at timestep k+1:
# i.e. z_ref[0,:] = z_{desired, 1}.
# Second index selects the state element from [x_k, y_k, psi_k, v_k].
self.z_ref = self.opti.parameter(self.N, 4)
self.x_ref = self.z_ref[:,0]
self.y_ref = self.z_ref[:,1]
self.psi_ref = self.z_ref[:,2]
self.v_ref = self.z_ref[:,3]
'''
(2) Decision Variables
'''
# Actual trajectory we will follow given the optimal solution.
# First index is the timestep k, i.e. self.z_dv[0,:] is z_0.
# It has self.N+1 timesteps since we go from z_0, ..., z_self.N.
# Second index is the state element, as detailed below.
self.z_dv = self.opti.variable(self.N+1, 4)
self.x_dv = self.z_dv[:, 0]
self.y_dv = self.z_dv[:, 1]
self.psi_dv = self.z_dv[:, 2]
self.v_dv = self.z_dv[:, 3]
# Control inputs used to achieve self.z_dv according to dynamics.
# First index is the timestep k, i.e. self.u_dv[0,:] is u_0.
# Second index is the input element as detailed below.
self.u_dv = self.opti.variable(self.N, 2)
self.acc_dv = self.u_dv[:,0]
self.df_dv = self.u_dv[:,1]
# Slack variables used to relax input rate constraints.
# Matches self.u_dv in structure but timesteps range from -1, ..., N-1.
self.sl_dv = self.opti.variable(self.N , 2)
self.sl_acc_dv = self.sl_dv[:,0]
self.sl_df_dv = self.sl_dv[:,1]
'''
(3) Problem Setup: Constraints, Cost, Initial Solve
'''
self._add_constraints()
self._add_cost()
self._update_initial_condition(0., 0., 0., 1.)
self._update_reference([self.DT * (x+1) for x in range(self.N)],
self.N*[0.],
self.N*[0.],
self.N*[1.])
self._update_previous_input(0., 0.)
# Ipopt with custom options: https://web.casadi.org/docs/ -> see sec 9.1 on Opti stack.
p_opts = {'expand': True}
s_opts = {'max_cpu_time': 0.1, 'print_level': 0}
self.opti.solver('ipopt', p_opts, s_opts)
sol = self.solve()
def _add_constraints(self):
# State Bound Constraints
self.opti.subject_to( self.opti.bounded(self.V_MIN, self.v_dv, self.V_MAX) )
# Initial State Constraint
self.opti.subject_to( self.x_dv[0] == self.z_curr[0] )
self.opti.subject_to( self.y_dv[0] == self.z_curr[1] )
self.opti.subject_to( self.psi_dv[0] == self.z_curr[2] )
self.opti.subject_to( self.v_dv[0] == self.z_curr[3] )
# State Dynamics Constraints
for i in range(self.N):
beta = casadi.atan( self.L_R / (self.L_F + self.L_R) * casadi.tan(self.df_dv[i]) )
self.opti.subject_to( self.x_dv[i+1] == self.x_dv[i] + self.DT * (self.v_dv[i] * casadi.cos(self.psi_dv[i] + beta)) )
self.opti.subject_to( self.y_dv[i+1] == self.y_dv[i] + self.DT * (self.v_dv[i] * casadi.sin(self.psi_dv[i] + beta)) )
self.opti.subject_to( self.psi_dv[i+1] == self.psi_dv[i] + self.DT * (self.v_dv[i] / self.L_R * casadi.sin(beta)) )
self.opti.subject_to( self.v_dv[i+1] == self.v_dv[i] + self.DT * (self.acc_dv[i]) )
# Input Bound Constraints
self.opti.subject_to( self.opti.bounded(self.A_MIN, self.acc_dv, self.A_MAX) )
self.opti.subject_to( self.opti.bounded(self.DF_MIN, self.df_dv, self.DF_MAX) )
# Input Rate Bound Constraints
self.opti.subject_to( self.opti.bounded( self.A_DOT_MIN*self.DT - self.sl_acc_dv[0],
self.acc_dv[0] - self.u_prev[0],
self.A_DOT_MAX*self.DT + self.sl_acc_dv[0]) )
self.opti.subject_to( self.opti.bounded( self.DF_DOT_MIN*self.DT - self.sl_df_dv[0],
self.df_dv[0] - self.u_prev[1],
self.DF_DOT_MAX*self.DT + self.sl_df_dv[0]) )
for i in range(self.N - 1):
self.opti.subject_to( self.opti.bounded( self.A_DOT_MIN*self.DT - self.sl_acc_dv[i+1],
self.acc_dv[i+1] - self.acc_dv[i],
self.A_DOT_MAX*self.DT + self.sl_acc_dv[i+1]) )
self.opti.subject_to( self.opti.bounded( self.DF_DOT_MIN*self.DT - self.sl_df_dv[i+1],
self.df_dv[i+1] - self.df_dv[i],
self.DF_DOT_MAX*self.DT + self.sl_df_dv[i+1]) )
# Other Constraints
self.opti.subject_to( 0 <= self.sl_df_dv )
self.opti.subject_to( 0 <= self.sl_acc_dv )
# e.g. things like collision avoidance or lateral acceleration bounds could go here.
def _add_cost(self):
def _quad_form(z, Q):
return casadi.mtimes(z, casadi.mtimes(Q, z.T))
cost = 0
for i in range(self.N):
cost += _quad_form(self.z_dv[i+1, :] - self.z_ref[i,:], self.Q) # tracking cost
for i in range(self.N - 1):
cost += _quad_form(self.u_dv[i+1, :] - self.u_dv[i,:], self.R) # input derivative cost
cost += (casadi.sum1(self.sl_df_dv) + casadi.sum1(self.sl_acc_dv)) # slack cost
self.opti.minimize( cost )
def solve(self):
st = time.time()
try:
sol = self.opti.solve()
# Optimal solution.
u_mpc = sol.value(self.u_dv)
z_mpc = sol.value(self.z_dv)
sl_mpc = sol.value(self.sl_dv)
z_ref = sol.value(self.z_ref)
is_opt = True
        except Exception:
# Suboptimal solution (e.g. timed out).
u_mpc = self.opti.debug.value(self.u_dv)
z_mpc = self.opti.debug.value(self.z_dv)
sl_mpc = self.opti.debug.value(self.sl_dv)
z_ref = self.opti.debug.value(self.z_ref)
is_opt = False
solve_time = time.time() - st
sol_dict = {}
sol_dict['u_control'] = u_mpc[0,:] # control input to apply based on solution
sol_dict['optimal'] = is_opt # whether the solution is optimal or not
sol_dict['solve_time'] = solve_time # how long the solver took in seconds
sol_dict['u_mpc'] = u_mpc # solution inputs (N by 2, see self.u_dv above)
sol_dict['z_mpc'] = z_mpc # solution states (N+1 by 4, see self.z_dv above)
sol_dict['sl_mpc'] = sl_mpc # solution slack vars (N by 2, see self.sl_dv above)
sol_dict['z_ref'] = z_ref # state reference (N by 4, see self.z_ref above)
return sol_dict
def update(self, update_dict):
self._update_initial_condition( *[update_dict[key] for key in ['x0', 'y0', 'psi0', 'v0']] )
self._update_reference( *[update_dict[key] for key in ['x_ref', 'y_ref', 'psi_ref', 'v_ref']] )
self._update_previous_input( *[update_dict[key] for key in ['acc_prev', 'df_prev']] )
if 'warm_start' in update_dict.keys():
# Warm Start used if provided. Else I believe the problem is solved from scratch with initial values of 0.
self.opti.set_initial(self.z_dv, update_dict['warm_start']['z_ws'])
self.opti.set_initial(self.u_dv, update_dict['warm_start']['u_ws'])
self.opti.set_initial(self.sl_dv, update_dict['warm_start']['sl_ws'])
def _update_initial_condition(self, x0, y0, psi0, vel0):
self.opti.set_value(self.z_curr, [x0, y0, psi0, vel0])
def _update_reference(self, x_ref, y_ref, psi_ref, v_ref):
self.opti.set_value(self.x_ref, x_ref)
self.opti.set_value(self.y_ref, y_ref)
self.opti.set_value(self.psi_ref, psi_ref)
self.opti.set_value(self.v_ref, v_ref)
def _update_previous_input(self, acc_prev, df_prev):
self.opti.set_value(self.u_prev, [acc_prev, df_prev])
if __name__ == '__main__':
kmpc = KinMPCPathFollower()
sol_dict = kmpc.solve()
for key in sol_dict:
print(key, sol_dict[key])
|
# -*- coding: utf-8 -*-
import models
import wizard
import controllers
import tests.test_mail_model
|
#-*- coding:utf-8 -*-
from django.shortcuts import render
from blog.models import Article, Tag, Classification,User #import the models defined in blog.models
from django import forms
from django.shortcuts import render_to_response,get_object_or_404
from django.core.paginator import Paginator,InvalidPage,EmptyPage,PageNotAnInteger
from django.template import RequestContext
from django.http import Http404
from haystack.forms import SearchForm
from django.contrib.auth.models import User
from django.contrib import auth
from django.db.models import Q
from django.template import loader,Context
from django.http import HttpResponse
#fetch all the blog, tag and classification objects from Article, Tag and Classification;
def blog_list(request):
blogs = Article.objects.all().order_by('-publish_time')
    #render_to_response is called with the template name as its first argument; the second argument is the dict used to build the Context for the template (i.e. the parameters passed to the template); RequestContext additionally puts some default variables into the template context, such as the HttpRequest object and information about the currently logged-in user.
return render_to_response('index.html', {"blogs": blogs}, context_instance=RequestContext(request))
def index(request):
articles = Article.objects.all()
page_size=2
paginator = Paginator(articles, page_size)
try:
page = int(request.GET.get('page','1'))
except ValueError:
page = 1
    print(page)
try:
posts = paginator.page(page)
except (EmptyPage, InvalidPage):
posts = paginator.page(paginator.num_pages)
classification = Classification.objects.order_by('-id')
return render_to_response('blog/index.html',
locals(),
context_instance=RequestContext(request))
def content(request, id):
article = get_object_or_404(Article, id=id)
classification = Classification.objects.order_by('-id')
# classification = Classification.objects.all()
return render_to_response('blog/content.html',
locals(),
context_instance=RequestContext(request))
#def classification(request,id):
# try:
# cate = Classification.objects.get(id=id) #click a category to show its article list
# except Classification.DoesNotExist:
# raise Http404
# articles_1 = Article.objects.filter(classification=cate) #match on the foreign key
# is_category = True #there are articles under this category
# classification = Classification.objects.all() #fetch all classifications
# return render_to_response('blog/index.html',
# locals(),
# context_instance=RequestContext(request))
# categorys = Classification.objects.order_by('-id') #number of articles per category
# articles = Article.objects.all() #number of articles per category
# count the number of articles in each category
# for category in categorys:
# category.article_num = 0
# for article in articles:
# if article.classification == category:
# category.article_num += 1
# category.save()
# return categorys
def classification(request,id):
try:
cate = Classification.objects.get(id=id)
except Classification.DoesNotExist:
raise Http404
articles = Article.objects.filter(classification=cate)
is_category = True
classification = Classification.objects.all()
return render_to_response('blog/index.html',
locals(),
context_instance=RequestContext(request))
#blog post index
def article_list(request):
categorys = classification(request)
keywords = request.GET.get('keywords')
cate = request.GET.get('category')
    #pick the articles according to the search keywords, the category, or the default ordering
if keywords:
articles = Article.objects.order_by('-id').filter(Q(caption__icontains=keywords) | Q(content__icontains=keywords))
elif cate:
articles = Article.objects.order_by('-id').filter(category__id=int(cate))
else:
articles = Article.objects.order_by('-id')
    #pagination
paginator = Paginator(articles,20)
    #work out which articles belong to the requested page
try:
page = int(request.GET.get('page',1))
if page < 1:
page = 1
except ValueError:
page = 1
try:
articlelist = paginator.page(page)
except(EmptyPage,InvalidPage,PageNotAnInteger):
articlelist = paginator.page(1)
    #when there are many pages, limit the displayed page range to a few pages around the current one
after_range_num = 3
before_range_num = 2
if page >= after_range_num:
page_range = paginator.page_range[page - after_range_num:page + before_range_num]
else:
page_range = paginator.page_range[0:page + before_range_num]
return render_to_response('blog/_article.html',locals())
def full_search(request):
"""全局搜索"""
keywords = request.GET['q']
sform = SearchForm(request.GET)
posts = sform.search()
return render(request, 'blog/post_search_list.html',
{'posts': posts, 'list_header': '关键字 \'{}\' 搜索结果'.format(keywords)})
class UserForm(forms.Form):
username = forms.CharField(label='用户名:',max_length=100)
passworld = forms.CharField(label='密码:',widget=forms.PasswordInput())
email = forms.EmailField(label='电子邮件:')
# Create your views here.
def register(request):
if request.method == "POST":
uf = UserForm(request.POST)
if uf.is_valid():
            #read the form data
username = uf.cleaned_data['username']
passworld = uf.cleaned_data['passworld']
email = uf.cleaned_data['email']
            #write the form data to the database
user = User()
user.username = username
user.passworld = passworld
user.email = email
user.save()
            #return the registration success page
return render_to_response('blog/success.html',{'username':username})
else:
uf = UserForm()
return render_to_response('blog/register.html',{'uf':uf})
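#Hypothetical urls.py wiring for the views above (old-style Django; the regexes and the
#module path are placeholders, not taken from the original project):
#
#   from django.conf.urls import url
#   from blog import views
#
#   urlpatterns = [
#       url(r'^$', views.index),
#       url(r'^content/(?P<id>\d+)/$', views.content),
#       url(r'^category/(?P<id>\d+)/$', views.classification),
#       url(r'^register/$', views.register),
#   ]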
|
class Solution(object):
def minimumTotal(self, triangle):
"""
:type triangle: List[List[int]]
:rtype: int
"""
total = [i for i in triangle[0]]
for i in range(1, len(triangle)):
            prev = float('inf')  # sys.maxint no longer exists in Python 3; +infinity is the sentinel here
for j in range(len(total)):
temp = total[j]
total[j] = triangle[i][j] + min(prev, total[j])
prev = temp
total.append(triangle[i][-1] + prev)
return min(total) |
from sqlalchemy import Column, Integer, String
from base import Base
class Qualifier(Base):
__tablename__ = 'Qualifiers'
id = Column('QualifierID', Integer, primary_key=True)
code = Column('QualifierCode', String, nullable=False)
description = Column('QualifierDescription', String, nullable=False)
def __repr__(self):
return "<Qualifier('%s', '%s', '%s')>" % (self.id, self.code, self.description)
|
import os
import numpy as np
from Sample import Sample
from RecordingArtifact import RecordingArtifact
from TranscriptArtifact import TranscriptArtifact
from AudioTextSample import AudioTextSample
from load_and_resample_if_necessary import load_and_resample_if_necessary
from power_split import power_split
class RecordingTranscriptionSample(Sample):
def __init__(self, _config, _afn, _tfn):
_key = (os.path.basename(_afn)[0:-4],)
_audio = load_and_resample_if_necessary(_config, _afn)
super().__init__((_config.language,)+_key,
RecordingArtifact(_config, _audio, _afn),
TranscriptArtifact(_config, _tfn))
def display(self):
print('KEY', self.key)
print('SOURCE')
self.source.display('10-MINUTE RECORDING')
print('TARGET')
self.target.display()
print()
def gold(self):
return self.target.gold()
def transcript_split(self):
x_np = self.source.value
C = self.source.C
speech=[(float(x[-3]), float(x[-2]), x[-1]) for x in self.target.value if len(x)==6]
speech_segments=[(int(a*C.sample_rate), int(b*C.sample_rate), words)
for (a,b,words) in speech
if 'IGNORE' not in words]
return [AudioTextSample(C, self.key+((lower,upper),), x_np[lower:upper], words.lower())
for i, (lower, upper, words) in enumerate(speech_segments)]
def split_on_silence(self, goal_length_in_seconds):
C = self.source.C
audio = self.source.value
clips = power_split(C, audio, goal_length_in_seconds)
        return [AudioTextSample(C, self.key+((clip.parent_start, clip.parent_end),), clip.clipped, ' ') for clip in clips]
|
import goose
from bs4 import BeautifulSoup
import random
import ctypes
import concurrent.futures
import os, sys, json
import readability
import lxml
import re
__version__ = '0.0.0'
def extract_clean_content(content):
global __version__
# I found out about goose and readability here:
# http://stackoverflow.com/questions/14164350/identifying-large-bodies-of-text-via-beautifulsoup-or-other-python-based-extract
# The poster seems to like goose more.
# One difference is that goose cleans up all the html, while readability
# usually just remove cruft that isn't related to the article text.
# There is a trade off between retaining links and formatting, and
# getting cleaner text.
# Readability seems to be better at finding the content in some cases
# so it is used for initial cleaning, then goose is used since its
# plain text output is easier to deal with downstream.
method = None
cleaned_content = ''
###### Readability code:
readability_error = None
try:
document = readability.readability.Document(content)
cleaner_content = document.summary().strip()
if len(cleaner_content) > 50:
content = cleaner_content
else:
readability_error = "Readability content too short: " + cleaner_content
except readability.readability.Unparseable as e:
readability_error = '\n'.join([str(i) for i in sys.exc_info()])
except (lxml.etree.XMLSyntaxError,
lxml.etree.DocumentInvalid,
lxml.etree.ParserError) as e:
readability_error = '\n'.join([str(i) for i in sys.exc_info()])
except (AttributeError, ValueError, TypeError) as e:
# This ought to be handled by readability.
readability_error = '\n'.join([str(i) for i in sys.exc_info()])
######
if not content.startswith('<html>'):
content = '<html><body>' + content + '</body></html>'
try:
cleaned_content = goose.Goose({
'parser_class':'soup',
'enable_image_fetching' : False,
}).extract(raw_html=content).cleaned_text
except ValueError:
cleaned_content = ''
if len(cleaned_content) < 1:
# Goose doesn't do well with foreign language content.
# If we can't find content with goose try extracting
# all the text with Beautiful soup.
# Beautiful soup doesn't attempt to extract the article,
# it just finds all the text in the html, which seems to be
# good enough since we've already used readability on the articles.
        content = re.sub(r'<br\s?/?>', '\n', content)
cleaned_content = BeautifulSoup(content).text
return {
        'cleanerVersion' : __version__,
'method' : method,
'content' : cleaned_content,
'readability_error' : readability_error,
# Malformed should be true whenever we can detect an issue with the
# content that was extracted.
'malformed' : len(cleaned_content) < 50
}
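# Usage sketch (illustrative only; goose and readability must be installed and the
# HTML snippet below is a placeholder):
#
#   html_doc = '<html><body><article><p>Some long article text ...</p></article></body></html>'
#   result = extract_clean_content(html_doc)
#   print(result['malformed'], result['content'][:80])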
|
#!/usr/bin/env python
# -*- coding: utf8
import json
from datetime import datetime
import codecs
def open_utf8(filename, *args, **kwargs):
logger.debug('open(%s, %s, %s)', filename, args, kwargs)
return codecs.open(filename, *args, encoding="utf-8-sig", **kwargs)
import os.path
import logging
logger = logging.getLogger('barnehagefakta.generate_html')
from jinja2 import Template
# This project
import update_osm
import osmapis_nsrid as osmapis
from utility_to_osm.kommunenummer import kommunenummer
from utility_to_osm.generate_html_history_chart import render_history_chart
# from difflib import HtmlDiff
# def my_htmldiff(a, b):
# d = HtmlDiff()
# t = d.make_table(a.encode('utf8'), b.encode('utf8'))
# return t
from htmldiff import htmldiff
def my_htmldiff(a, b):
try:
d = htmldiff.HTMLMatcher(a.encode('utf8'), b.encode('utf8'),
accurate_mode=True)
return d.htmlDiff(addStylesheet=False).decode('utf8')
except:
d = htmldiff.HTMLMatcher(a, b,
accurate_mode=True)
return d.htmlDiff(addStylesheet=False)
link_template = u'<a href="{href}"\ntitle="{title}">{text}</a>'
# base_url = 'http://obtitus.github.io/barnehagefakta_osm_data/'
# base_url = ''
def not_empty_file(filename, ignore_missing_file=False):
"""Return True if file does exist and is not empty"""
#if os.path.exists(filename):
try:
with open(filename, 'r') as f:
c = f.read()
if c.strip() != '':
return True
except Exception as e:
if not(ignore_missing_file):
logger.warning('file does not exists "%s", %s', filename, e)
return False
def get_lat_lon(osm, osm_data):
way = None
node = None
if isinstance(osm_data, osmapis.Relation):
try:
way = osm.ways[osm_data.members[0]['ref']]
except KeyError:
logger.warning('Not yet supported, unable to get lat/lon from Relations')
return None
elif isinstance(osm_data, osmapis.Way):
way = osm_data
elif isinstance(osm_data, osmapis.Node):
node = osm_data
else:
        raise ValueError('expected osmapis.Relation/Way/Node object, got %s' % type(osm_data))
if way is not None:
# not a node? just pick the first node in way
node = osm.nodes[way.nds[0]]
lat, lon = node.attribs['lat'], node.attribs['lon']
return lat, lon
def create_pre(dict1, dict_compare, mark_missing_key=True, ignore_keys=('ADDRESS', )):
tags = '<pre>'
for key, value in sorted(dict1.items()):
# if key in ignore_keys:
# continue
missing_key = key not in dict_compare
ignore = key in ignore_keys
diff_value = not(ignore) and not(missing_key) and dict_compare[key] != dict1[key]
        if key == 'capacity' and diff_value: # okay, maybe try not to be so strict with capacity
diff_value = not(update_osm.compare_capacity(dict_compare[key], dict1[key]))
if diff_value:
a, b = dict_compare[key], dict1[key]
value = my_htmldiff(a, b)
line = '%s = %s\n' % (key, value)
if diff_value:
line = '<diff_value>%s</diff_value>' % line
if mark_missing_key and missing_key and not(ignore):
line = '<missing_key>%s</missing_key>' % line
tags += line
tags += '</pre>'
return tags
def create_rows(osm, data):
table = list()
count_osm = 0
count_duplicate_osm = 0
for kindergarten in sorted(data, key=lambda x: int(x.tags.get('capacity', 0)), reverse=True):
# shorthands
nsrId = kindergarten.tags['no-barnehage:nsrid']
lat, lon = kindergarten.attribs['lat'], kindergarten.attribs['lon']
row = list()
# Name
name_column = kindergarten.tags['name']
# Tags from OSM
osm_data = []
if osm is not None:
osm_data = osm.nsrids.get(nsrId, [])
tags = ''
osm_url = None
osm_url_api = None
osm_data_tags = {}
# osm_xml = None
if len(osm_data) == 0:
tags = 'Fant ingen openstreetmap objekt med no-barnehage:nsrid = %s' % nsrId
elif len(osm_data) != 1:
count_duplicate_osm += 1
tags = 'FEIL: Flere openstreetmap objekter funnet med no-barnehage:nsrid = %s' % nsrId
else:
count_osm += 1
assert len(osm_data) == 1
osm_data = osm_data[0]
osm_data_tags = osm_data.tags
tags = create_pre(osm_data.tags, kindergarten.tags, mark_missing_key=False)
if isinstance(osm_data, osmapis.Node):
osm_type_str = 'node'
elif isinstance(osm_data, osmapis.Way):
osm_type_str = 'way'
elif isinstance(osm_data, osmapis.Relation):
osm_type_str = 'relation'
else:
raise ValueError('osm_data type not recognized, %s, %s', type(osm_data), osm_data)
osm_id = osm_data.attribs['id']
full = ''
            if osm_type_str != 'node':
full = '/full'
osm_url_api = '"https://www.openstreetmap.org/api/0.6/%s/%s%s"' % (osm_type_str, osm_id, full)
osm_url = 'http://www.openstreetmap.org/%s/%s' % (osm_type_str, osm_id)
try:
lat, lon = get_lat_lon(osm, osm_data)
except TypeError:
pass # hack
except Exception as e:
logger.exception('Unable to get lat/lon from %s %s', osm_data, e)
# Tags from nbr
mark_missing_key = True
if len(osm_data_tags) == 0: mark_missing_key=False # do not mark the 'not found'
tags_nbr = create_pre(kindergarten.tags, osm_data_tags, mark_missing_key=mark_missing_key)
row.append(name_column)
row.append(tags_nbr)
row.append(tags)
# Links
links = '<pre>'
href = 'http://barnehagefakta.no/barnehage/{0}'.format(nsrId)
title = u'Du blir sendt til barnehagens side på barnehagefakta.no'
text = u'Besøk på barnehagefakta.no'
links += link_template.format(href=href, title=title, text=text) + '\n'
if osm_url is None:
href = 'http://www.openstreetmap.org/#map=17/{lat}/{lon}'.format(lat=lat,lon=lon)
else:
href = osm_url
title = u'Se posisjonen i openstreetmap'
text = u'Besøk på openstreetmap.org'
links += link_template.format(href=href, title=title, text=text) + '\n'
href = 'https://nbr.udir.no/status/rapporterfeil/{0}'.format(nsrId)
title = u'Du blir sendt til nbr.uio.no hvor du kan melde om feil i data-settet. Vurder også å melde fra til kommunen.'
text = u'Meld feil til NBR'
links += link_template.format(href=href, title=title, text=text) + '\n'
href = 'http://www.openstreetmap.org/note/new#map=17/{lat}/{lon}'.format(lat=lat,lon=lon)
title = u'Gjør openstreetmap mappere oppmerksom på feil i kartet.'
text = 'Meld feil til OSM'
links += link_template.format(href=href, title=title, text=text) + '\n'
href = 'http://www.openstreetmap.org/edit?editor=id&lat={lat}&lon={lon}&zoom=17'.format(lat=lat,lon=lon)
title = u'Du blir sendt til openstreetmap sin web-editor'
text = 'Editer OSM'
links += link_template.format(href=href, title=title, text=text)
website_address = ''
if len(osm_data_tags) == 0:
website_address = kindergarten.tags.get('contact:website', '')
else:
# use website from osm
website_address = osm_data_tags.get('contact:website', '')
if website_address != '':
links += '\n' + link_template.format(href=website_address,
title='website',
text='Barnehagens webside')
links += '</pre>'
row.append(links)
# Map
if osm_url_api is None: osm_url_api = 'null'
# if osm_xml is None: osm_xml = 'null'
# _map = '<div id="wrapper" style="width:256px;">'
_map = '<div id="map{0}" style="width: 256px; height: 256px;position: relative"></div>'.format(nsrId)
_map += '<script>create_map(map{nsrId}, {lat}, {lon}, {osm_url_api})</script>'.format(nsrId=nsrId,
osm_url_api=osm_url_api,
lat=lat,
lon=lon)
# _map += '</div>'
row.append(_map)
table.append(row)
return table, count_osm, count_duplicate_osm
#yield row
def main(osm, data_dir='data', root_output='',
root=''):
index_template = os.path.join(root, 'templates', 'index_template.html')
template = os.path.join(root, 'templates', 'kommune_page_template.html')
with open_utf8(template) as f:
template = Template(f.read())
with open_utf8(index_template) as f:
index_template = Template(f.read())
kommune_nr2name, kommune_name2nr = kommunenummer(cache_dir=data_dir)
index_table = list()
# counters for bottom of main table (what a mess)
total_nbr = 0
total_osm = 0
for kommune_nr in os.listdir(data_dir):
folder = os.path.join(data_dir, kommune_nr)
if os.path.isdir(folder):
try:
kommune_name = kommune_nr2name[int(kommune_nr)] + ' kommune'
except KeyError as e:
logger.warning('Could not translate kommune_nr = %s to a name. Skipping', kommune_nr)
#kommune_name = 'ukjent'
continue
page_filename = os.path.join(root_output, kommune_nr + '.html')
warning_filename = os.path.join(root_output, 'data', kommune_nr, 'warnings.log')
discontinued_filename = os.path.join(root_output, 'data', kommune_nr, 'barnehagefakta_discontinued.csv')
last_update_stamp = os.path.getmtime(folder)
last_update_datetime = datetime.fromtimestamp(last_update_stamp)
last_update = last_update_datetime.strftime('%Y-%m-%d %H:%M')
logger.info('Kommune folder = %s', folder)
table = list()
info = ''
info_warning = ''
count_osm = 0
count_duplicate_osm = 0
for filename, data in update_osm.get_osm_files(folder):
t, c_osm, c_duplicate_osm = create_rows(osm, data)
table.extend(t)
count_osm += c_osm
count_duplicate_osm += c_duplicate_osm
filename_base = os.path.basename(filename)
if filename.endswith('barnehagefakta.osm'):
link = u'<a href="{href}"\ntitle="{title}">\n{text}</a>'.format(href=filename,
title=u"Trykk for å laste ned "+ filename_base,
text=filename_base)
info += u'<p>{link} inneholder data fra NBR som noder, denne kan åpnes i JOSM.</p>'.format(link=link)
if filename.endswith('barnehagefakta_familiebarnehager.osm'):
link = u'<a href="{href}"\ntitle="{title}">\n{text}</a>'.format(href=filename,
title=u"Trykk for å laste ned "+filename_base,
text=filename_base)
info += u'<p>Familiebarnehager er vanskeligere å kartlegge, disse ligger derfor i sin egen fil: {link}</p>'.format(link=link)
if not_empty_file(warning_filename):
link = u'<a href="{href}"\ntitle="{title}">\n{text}</a>'.format(href=warning_filename,
title=u"Sjekk gjerne warnings.log",
text='warnings.log')
info_warning += u'<p>Sjekk gjerne {0}</p>\n'.format(link)
if not_empty_file(discontinued_filename, ignore_missing_file=True):
link = u'<a href="{href}"\ntitle="{title}">\n{text}</a>'.format(href=discontinued_filename,
title=u"Sjekk gjerne discontinued.csv",
text='discontinued.csv')
info_warning += u'<p>Sjekk gjerne {0} for barnehager i nbr sitt register som ikke ligger i barnehagefakta.no</p>\n'.format(link)
if len(table) != 0:
total_nbr += len(table)
total_osm += count_osm
per = (100.*count_osm)/len(table)
progress = '<meter style="width:100%" value="{value}" min="{min}" max="{max}" optimum="{max}">{per} %</meter>'\
.format(value=count_osm,
min=0, max=len(table),
per=per)
index_table.append((page_filename, u'Vis kommune', [kommune_nr, kommune_name, len(table), count_osm, progress]))
page = template.render(kommune_name=kommune_name,
kommune_nr=kommune_nr,
table=table, info=info,
info_warning=info_warning,
last_update=last_update)
# Kommune-folder
with open_utf8(page_filename, 'w') as output:
output.write(page)
# total:
per = (100.*total_osm)/total_nbr
progress = '<meter style="width:100%" value="{value}" min="{min}" max="{max}" optimum="{max}">{per} %</meter>'\
.format(value=total_osm,
min=0, max=total_nbr,
per=per)
total = ['Sum', '', total_nbr, total_osm, progress]
# dump progress to csv
today = datetime.utcnow()
td = (today - datetime(1970, 1, 1))
td_s = td.total_seconds()
with open('history.csv', 'a') as f:
f.write('{0},{1},{2}\n'.format(td_s, total_nbr, total_osm))
info = u"""
<p>Data fra <a href=https://nbr.udir.no>https://nbr.udir.no</a> og <a href=http://openstreetmap.org> openstreetmap.org</a>.
Kun barnehager med taggen "no-barnehage:nsrid" blir gjenkjent.
</p>
<p>
Trykk på en av radene i tabellen under for å vise barnehage-data for kommunen.
</p>
"""
chart = render_history_chart(root)
page = index_template.render(info=info, table=index_table, bottom_row=total, chart=chart, now=td_s)
index = os.path.join(root_output, 'index.html')
with open_utf8(index, 'w') as output:
output.write(page)
def get_osm_data():
xml = update_osm.overpass_nsrid()
osm = osmapis.OSMnsrid.from_xml(xml)
# osm_elements = list(update_osm.find_all_nsrid_osm_elements(osm))
print('Overpass returned', len(osm.nsrids), 'objects')#, osm.nsrids
for item in osm:
if 'doNotImportAddress' in item.tags:
            err_msg = 'Found doNotImportAddress = %s on no-barnehage:nsrid=%s, remove key!' % (item.tags['doNotImportAddress'],
                                                                                               item.tags.get('no-barnehage:nsrid'))
logger.error(err_msg)
raise ValueError(err_msg)
return osm
if __name__ == '__main__':
from utility_to_osm import argparse_util
parser = argparse_util.get_parser('Looks for <data_dir>/<kommune_id>/*.osm files and generates html for http://obtitus.github.io/barnehagefakta_osm_data/. The site is generated in the current directory by default and assumes template.html and index_template.html exists in the template directory.')
parser.add_argument('--data_dir', default='data',
help='Specify directory for .osm files, defaults to data/')
parser.add_argument('--root', default='.',
help="Specify input/output directory, defaults to current directory. Expects a templates folder with required html and javascript templates")
parser.add_argument('--no-overpass', default=False, action='store_true',
help="Do not call the openstreetmap overpass api looking for no-barnehage:nsrid")
argparse_util.add_verbosity(parser, default=logging.WARNING)
args = parser.parse_args()
logging.basicConfig(level=args.loglevel)
if args.no_overpass:
osm = None
else:
osm = get_osm_data()
main(osm, args.data_dir,
root=args.root)
|
# vim: sw=4:ts=4:et:cc=120
import datetime
import logging
import pytz
import saq
from saq.error import report_exception
from saq.analysis import Analysis, Observable
from saq.modules import AnalysisModule
from saq.constants import *
from saq.util import abs_path, create_timedelta
from saq.qradar import QRadarAPIClient
#
# Requirements for QRadar queries
#
# to have the log event time added to extracted observables, add the following column to the SELECT
# DATEFORMAT(deviceTime, 'yyyy-MM-dd H:m:s.SSS Z') as "deviceTimeFormatted",
#
# <O_VALUE> is replaced by the value of the observable
# <O_TYPE> is replaced by the type of the observable
# <O_START> is replaced by the beginning time range
# <O_STOP> is replaced by the ending time range
#
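# A hypothetical AQL file illustrating the placeholders (not shipped with this module;
# the selected field names are examples only):
#
#   SELECT DATEFORMAT(deviceTime, 'yyyy-MM-dd H:m:s.SSS Z') as "deviceTimeFormatted",
#          sourceip, destinationip, username
#   FROM events
#   WHERE sourceip = '<O_VALUE>'
#   START '<O_START>'
#   STOP '<O_STOP>'
#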
KEY_QUERY = 'query'
KEY_QUERY_RESULTS = 'query_results'
KEY_QUERY_ERROR = 'query_error'
KEY_QUERY_SUMMARY = 'query_summary'
KEY_QUESTION = 'question'
class QRadarAPIAnalysis(Analysis):
def initialize_details(self):
self.details = {
KEY_QUERY: None,
KEY_QUERY_RESULTS: None,
KEY_QUERY_ERROR: None,
KEY_QUESTION: None,
KEY_QUERY_SUMMARY: None,
}
@property
def query(self):
"""Returns the AQL query that was executed."""
return self.details[KEY_QUERY]
@query.setter
def query(self, value):
self.details[KEY_QUERY] = value
@property
def query_results(self):
"""Returns the JSON result of the query if successful."""
return self.details[KEY_QUERY_RESULTS]
@query_results.setter
def query_results(self, value):
self.details[KEY_QUERY_RESULTS] = value
@property
def query_error(self):
"""Returns the error message returned by QRadar if there was one."""
return self.details[KEY_QUERY_ERROR]
@query_error.setter
def query_error(self, value):
self.details[KEY_QUERY_ERROR] = value
@property
def question(self):
"""Returns the question configuration item for this query."""
return self.details[KEY_QUESTION]
@question.setter
def question(self, value):
self.details[KEY_QUESTION] = value
@property
def query_summary(self):
"""Returns the summary configuration item for this query."""
return self.details[KEY_QUERY_SUMMARY]
@query_summary.setter
def query_summary(self, value):
self.details[KEY_QUERY_SUMMARY] = value
def generate_summary(self):
result = f'{self.query_summary} '
if self.query_error is not None:
result += f'ERROR: {self.query_error}'
return result
elif self.query_results is not None:
result += f'({len(self.query_results["events"])} results)'
return result
else:
return self.query_summary + " (no results or error??)"
class QRadarAPIAnalyzer(AnalysisModule):
def verify_environment(self):
self.verify_config_exists('question')
self.verify_config_exists('summary')
self.verify_config_exists('aql_path')
self.verify_path_exists(abs_path(self.config['aql_path']))
@property
def generated_analysis_type(self):
return QRadarAPIAnalysis
def process_qradar_event(self, analysis, event, event_time):
"""Called for each event processed by the module. Can be overridden by subclasses."""
pass
def process_qradar_field_mapping(self, analysis, event, event_time, observable, event_field):
"""Called each time an observable is created from the observable-field mapping.
Can be overridden by subclasses."""
pass
def filter_observable_value(self, event_field, observable_type, observable_value):
"""Called for each observable value added to analysis.
Returns the observable value to add to the analysis.
By default, the observable_value is returned as-is."""
return observable_value
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# load the AQL query for this instance
with open(abs_path(self.config['aql_path']), 'r') as fp:
self.aql_query = fp.read()
        # each query can specify its own range
if 'relative_duration_before' in self.config:
self.relative_duration_before = create_timedelta(self.config['relative_duration_before'])
else:
self.relative_duration_before = create_timedelta(saq.CONFIG['qradar']['relative_duration_before'])
if 'relative_duration_after' in self.config:
self.relative_duration_after = create_timedelta(self.config['relative_duration_after'])
else:
self.relative_duration_after = create_timedelta(saq.CONFIG['qradar']['relative_duration_after'])
# load the observable mapping for this query
# NOTE that the keys (event field names) are case sensitive
self.observable_mapping = {} # key = event field name, value = observable_type
for key in self.config.keys():
if key.startswith('map_'):
event_field, observable_type = [_.strip() for _ in self.config[key].split('=', 2)]
if observable_type not in VALID_OBSERVABLE_TYPES:
logging.error(f"invalid observable type specified for observable mapping "
f"{key} in {self}: {observable_type}")
continue
self.observable_mapping[event_field] = observable_type
# the configuration can specify what field should be used as the event time
        # by default this is disabled, in which case the observables are non-temporal
self.time_event_field = self.config.get('time_event_field', None)
# the format of the time can also be specified in strptime syntax
# the special value TIMESTAMP indicates a unix timestamp (this is the default)
# the special value TIMESTAMP_MILLISECONDS indicates a unix timestamp in milliseconds
self.time_event_field_format = self.config.get('time_event_field_format', 'TIMESTAMP')
def execute_analysis(self, observable):
analysis = self.create_analysis(observable)
analysis.question = self.config['question']
analysis.query_summary = self.config['summary']
client = QRadarAPIClient(saq.CONFIG['qradar']['url'],
saq.CONFIG['qradar']['token'])
# interpolate the observable value as needed
target_query = self.aql_query.replace('<O_TYPE>', observable.type)\
                                          .replace('<O_VALUE>', observable.value) # TODO: properly escape the value
# figure out the start and stop time
source_event_time = self.root.event_time_datetime
if observable.time is not None:
source_event_time = observable.time
start_time = source_event_time - self.relative_duration_before
stop_time = source_event_time + self.relative_duration_after
start_time_str = start_time.strftime('%Y-%m-%d %H:%M %z')
stop_time_str = stop_time.strftime('%Y-%m-%d %H:%M %z')
target_query = target_query.replace('<O_START>', start_time_str)\
.replace('<O_STOP>', stop_time_str)
analysis.query = target_query
try:
analysis.query_results = client.execute_aql_query(target_query,
continue_check_callback=lambda x: not self.engine.shutdown)
except Exception as e:
analysis.query_error = str(e)
return True
# map results to observables
for event in analysis.query_results['events']:
            observable_time = None
            event_time = None
            #
            # the time of the event, when present, is in the deviceTimeFormatted field (see above)
            # e.g. 2019-10-29 19:50:38.592 -0400
            if 'deviceTimeFormatted' in event:
                event_time = datetime.datetime.strptime(event['deviceTimeFormatted'], '%Y-%m-%d %H:%M:%S.%f %z')
                event_time = event_time.astimezone(pytz.UTC)
                observable_time = event_time  # attach the parsed event time to extracted observables
self.process_qradar_event(analysis, event, event_time)
for event_field in event.keys():
if event[event_field] is None:
continue
# do we have this field mapped?
if event_field in self.observable_mapping:
observable = analysis.add_observable(self.observable_mapping[event_field],
self.filter_observable_value(event_field,
self.observable_mapping[event_field],
event[event_field]),
o_time=observable_time)
self.process_qradar_field_mapping(analysis, event, event_time, observable, event_field)
return True
|
import random
from itertools import cycle
from scrapy import signals
from scrapy.exceptions import NotConfigured
class RotateUserAgentMiddleware(object):
def __init__(self, user_agents: list, min_usage: int, max_usage: int):
'''Creates a new instance of RotateUserAgentMiddleware
Keyword arguments:
user_agents -- List of user-agents
min_usage -- Minimum user-agent usage
max_usage -- Maximum user-agent usage
'''
self.items_scraped = 0
self.min_usage = min_usage
self.max_usage = max_usage
self.limit_usage = random.randint(self.min_usage, self.max_usage)
self.user_agents = cycle(user_agents)
self.user_agent = next(self.user_agents)
@classmethod
def from_crawler(cls, crawler):
if not crawler.settings.getbool('ROTATE_USER_AGENT_ENABLED', False):
raise NotConfigured()
user_agents = crawler.settings.get('USER_AGENTS', None)
min_usage = crawler.settings.getint('MIN_USER_AGENT_USAGE', 1)
max_usage = crawler.settings.getint('MAX_USER_AGENT_USAGE', 100)
if user_agents is None or min_usage < 0 or max_usage < 0:
raise NotConfigured()
return cls(user_agents, min_usage, max_usage)
def process_request(self, request, spider):
if self.items_scraped >= self.limit_usage:
self.items_scraped = 0
self.limit_usage = random.randint(self.min_usage, self.max_usage)
self.user_agent = next(self.user_agents)
request.headers['user-agent'] = self.user_agent
self.items_scraped += 1
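# A minimal sketch of the Scrapy settings this middleware expects (the values and the
# 'myproject.middlewares' module path are illustrative assumptions, not from this project):
#
#   ROTATE_USER_AGENT_ENABLED = True
#   USER_AGENTS = [
#       'Mozilla/5.0 (Windows NT 10.0; Win64; x64) ...',
#       'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) ...',
#   ]
#   MIN_USER_AGENT_USAGE = 10
#   MAX_USER_AGENT_USAGE = 100
#   DOWNLOADER_MIDDLEWARES = {
#       'myproject.middlewares.RotateUserAgentMiddleware': 543,
#   }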
|
#!/usr/bin/env python
class Edge:
"""Edge class, to contain a directed edge of a tree or directed graph.
attributes parent and child: index of parent and child node in the graph.
"""
def __init__ (self, parent, child, length=None):
"""create a new Edge object, linking nodes
with indices parent and child."""
self.parent = parent
self.child = child
self.length = length
def __str__(self):
res = "edge from " + str(self.parent) + " to " + str(self.child)
return res
class Tree:
""" Tree, described by its list of edges."""
def __init__(self, edgelist):
"""create a new Tree object from a list of existing Edges"""
self.edge = edgelist
if edgelist:
self.update_node2edge()
def __str__(self):
res = "parent -> child:"
for e in self.edge:
res += "\n" + str(e.parent) + " " + str(e.child)
return res
def add_edge(self, ed):
"""add an edge to the tree"""
self.edge.append(ed)
self.update_node2edge()
def new_edge(self, parent, child):
"""add to the tree a new edge from parent to child (node indices)"""
self.add_edge( Edge(parent,child) )
def update_node2edge(self):
"""dictionary child node index -> edge for fast access to edges.
also add/update root attribute."""
self.node2edge = {e.child : e for e in self.edge}
childrenset = set(self.node2edge.keys())
rootset = set(e.parent for e in self.edge).difference(childrenset)
if len(rootset) > 1:
raise Warning("there should be a single root: " + str(rootset))
if len(rootset) == 0:
raise Exception("there should be at least one root!")
self.root = rootset.pop()
def get_path2root(self, i):
"""takes the index i of a node and returns the list of nodes
from i to the root, in this order.
This function is written with a loop.
An alternative option would have been a recursive function:
that would call itself on the parent of i (unless i is the root)"""
res = []
nextnode = i
while True:
res.append(nextnode) # add node to the list
if nextnode == self.root:
break
nextnode = self.node2edge[nextnode].parent # grab the parent to get closer to root
return res
def get_dist2root(self, i):
"""take the index i of a node and return the number of edges
between node i and the root"""
path = self.get_path2root(i)
return len(path)-1
def get_nodedist(self, i, j):
"""takes 2 nodes and returns the distance between them:
number of edges from node i to node j"""
if i==j:
return 0
pathi = self.get_path2root(i) # last node in this list: root
pathj = self.get_path2root(j)
while pathi and pathj:
anci = pathi[-1] # anc=ancestor, last one
ancj = pathj[-1]
if anci == ancj:
pathi.pop()
pathj.pop()
else:
break
return len(pathi)+len(pathj)
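if __name__ == '__main__':
    # Small usage sketch (illustrative): build the tree 0 -> 1, 0 -> 2, 2 -> 3 and query it.
    tree = Tree([Edge(0, 1), Edge(0, 2)])
    tree.new_edge(2, 3)
    print(tree)                      # parent -> child listing
    print(tree.root)                 # 0
    print(tree.get_path2root(3))     # [3, 2, 0]
    print(tree.get_dist2root(3))     # 2
    print(tree.get_nodedist(1, 3))   # 3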
|
"""
Created on 4th July, 2018 from mapclientplugins.meshgeneratorstep.
"""
import string
from opencmiss.utils.zinc import createFiniteElementField
from opencmiss.zinc.field import Field
from opencmiss.zinc.glyph import Glyph
import opencmiss.zinc.scenecoordinatesystem as Scenecoordinatesystem
from opencmiss.zinc.graphics import Graphics
from opencmiss.zinc.node import Node
from scaffoldmaker.scaffoldmaker import Scaffoldmaker
from scaffoldmaker.utils.zinc_utils import *
import numpy as np
import time
from mapclientplugins.meshgeneratorstep.model.blackfynnMesh import Blackfynn_2d_plate
from mapclientplugins.meshgeneratorstep.model.meshalignmentmodel import MeshAlignmentModel
STRING_FLOAT_FORMAT = '{:.8g}'
class EcgGraphics(object):
"""
ECG Graphics is used as a home for creating and modifying displays to visualise ECG data on the model
"""
def __init__(self):
self.initialised = False
self.number_of_points_on_grid_side = 8
self.node_corner_list = [0]*4
self.node_corner_points = [[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0]]
self.settingsLoaded = False
self.plane_normal = [0, 1, 0]
self.node_coordinate_list = []
self._child_region = None
pass
def getSettings(self):
if self.node_corner_list[0] != 0:
return self.node_corner_points
def setSettings(self, settings):
self.node_corner_points = settings
self.settingsLoaded = True
def setRegion(self, region):
self._region = region
self._scene = self._region.getScene()
self._child_region = self._region.createChild('ecg_plane')
self.numberInModel = 0
def initialiseSpectrum(self, data):
maximum = -1000000
minimum = 1000000
for key in data['cache']:
array_max = max(data['cache'][key])
array_min = min(data['cache'][key])
maximum = max(array_max, maximum)
minimum = min(array_min, minimum)
specMod = self._scene.getSpectrummodule()
spectrum = specMod.findSpectrumByName('eegColourSpectrum')
spectrum_component = spectrum.getFirstSpectrumcomponent()
spectrum_component.setRangeMaximum(maximum)
spectrum_component.setRangeMinimum(minimum)
def updateEEGnodeColours(self, values):
fm = self._region.getFieldmodule()
fm.beginChange()
cache = fm.createFieldcache()
colour = fm.findFieldByName('colour')
colour = colour.castFiniteElement()
nodeset = fm.findNodesetByName('nodes')
for i in range(self.eegSize):
node = nodeset.findNodeByIdentifier(self.numberInModel + 1 + i)
cache.setNode(node)
colour.setNodeParameters(cache, -1, Node.VALUE_LABEL_VALUE, 1, values[(i % (len(values)-1))])
fm.endChange()
def updatePlateColours(self, values):
self.plateMesh.updatePlateColours(values)
def initialiseTimeSequences(self, data):
fm = self._region.getFieldmodule()
cache = fm.createFieldcache()
colour = fm.findFieldByName('colour')
def clearAll(self):
fm = self._region.getFieldmodule()
nodes = fm.findNodesetByFieldDomainType(Field.DOMAIN_TYPE_NODES)
for i in range(self.eegSize):
node = nodes.findNodeByIdentifier(self.numberInModel+i+1)
nodes.destroyNode(node)
def generateGridPoints(self, point1, point2, number_on_side):
# generateGridPoints generates a rectangular grid along a plane (x, y or z)
# using two points defined on the plane and the number of points per side of the grid
grid_size_x = abs(point1[0] - point2[0])
grid_size_y = abs(point1[1] - point2[1])
#scale sides so they have same number of points
step_size_x = grid_size_x/number_on_side
step_size_y = grid_size_y/number_on_side
eeg_coord = []
for i in range(number_on_side):
for j in range(number_on_side):
transformed_x = point1[0] + i * step_size_x
eeg_coord.append([transformed_x, .65, point1[1] + j * step_size_y])
eeg_coord2 = []
for i in range(len(eeg_coord)):
eeg_coord2.append(np.cross(eeg_coord[i], self.plane_normal))
return eeg_coord2
def generateGridPoints4(self, number_on_side):
# We generate our grid points from 4 corner points by assigning each corner a weighting
# based on how far away we are from it
# (1 being exactly on the point, 0 being in a region the point does not affect).
p1 = self.node_corner_points[0]
p2 = self.node_corner_points[1]
p3 = self.node_corner_points[2]
p4 = self.node_corner_points[3]
ns = number_on_side
ns1 = number_on_side - 1
plane_normal_offset = .4 # For offsetting the solver to solve from outside the mesh -> on it
grid_coord = []
for i in range(number_on_side):
for j in range(number_on_side):
# Create our weightings (since we are setting points in a ccwise fashion our diagonal is w3)
w1 = i*j/(ns1**2)
w2 = (j/ns1) * (ns1 - i)/ns1
w4 = (i/ns1) * (ns1 - j)/ns1 # The 'bottom left' point, p4
w3 = ((ns1-i)*(ns1-j))/(ns1**2) # The diagonal point, p3
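# (For reference: w1 + w2 + w3 + w4 = (i*j + j*(ns1-i) + i*(ns1-j) + (ns1-i)*(ns1-j)) / ns1**2 = 1,
# so each grid point is a convex combination of the four corner points.)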
# Use our weightings to find coordinates of our new point
x = p4[0] * w1 + p3[0] * w2 + p2[0] * w3 + p1[0] * w4
y = p4[1] * w1 + p3[1] * w2 + p2[1] * w3 + p1[1] * w4
z = p4[2] * w1 + p3[2] * w2 + p2[2] * w3 + p1[2] * w4
grid_coord.append([x, y, z])
plane_norm = np.array(self.plane_normal)
eeg_coord2 = []
for i in range(len(grid_coord)):
#projected_point = np.cross(grid_coord[i], plane_norm)
shifted_point = grid_coord[i] + plane_norm*plane_normal_offset
eeg_coord2.append(shifted_point.tolist())
return eeg_coord2
def moveNode(self, nodeKey, cache, tol=.01, max_iterations=20):
# moveNode iteratively projects the given node along the plane normal until it converges onto the mesh surface
# Re-acquire OpenCMISS-Zinc variables
fm = self._region.getFieldmodule()
coordinates = fm.findFieldByName('coordinates')
coordinates = coordinates.castFiniteElement()
# Create templates
nodes = fm.findNodesetByFieldDomainType(Field.DOMAIN_TYPE_NODES)
nodetemplate = nodes.createNodetemplate()
nodetemplate.defineField(coordinates)
nodetemplate.setValueNumberOfVersions(coordinates, -1, Node.VALUE_LABEL_VALUE, 1)
# Create our first new node for the search
plane_normal_offset = .1
old_node = nodes.findNodeByIdentifier(nodeKey)
cache.setNode(old_node)
[result, old_coords] = coordinates.evaluateReal(cache, 3)
plane_norm = np.array(self.plane_normal)
shifted_point = old_coords + plane_norm * plane_normal_offset
coordinates.setNodeParameters(cache, -1, Node.VALUE_LABEL_VALUE, 1, shifted_point.tolist())
# Create our mesh search
mesh = fm.findMeshByName('mesh3d')
mesh_location = fm.createFieldStoredMeshLocation(mesh)
found_mesh_location = fm.createFieldFindMeshLocation(coordinates, coordinates, mesh)
found_mesh_location.setSearchMode(found_mesh_location.SEARCH_MODE_NEAREST)
it = 1
old_coords = shifted_point
test_coords = [10, 10, 10]
new_coords = shifted_point
start3 = time.perf_counter()
while abs(np.linalg.norm(np.dot(test_coords, plane_norm) - np.dot(new_coords, plane_norm))) > tol:
# ^^ test if x and y changes are within tolerance
end3 = time.perf_counter()
# Find nearest mesh location
start = time.perf_counter()
[el, coords] = found_mesh_location.evaluateMeshLocation(cache, 3)
end = time.perf_counter()
cache.setMeshLocation(el, coords)
[result, mesh_coords] = coordinates.evaluateReal(cache, 3)
# Update our search location
start2 = time.perf_counter()
new_coords = old_coords + np.dot(mesh_coords - old_coords, plane_norm)*plane_norm
cache.setNode(old_node)
coordinates.setNodeParameters(cache, -1, Node.VALUE_LABEL_VALUE, 1, new_coords.tolist())
end2 = time.perf_counter()
test_coords = old_coords
old_coords = new_coords
# Break in case we can not converge
it += 1
if it > max_iterations:
print(f'Could not converge on node {nodeKey}')
break
print(f'Find mesh took: {end-start}')
print(f'Update search took: {end2-start2}')
print(f'Stop evaluation took: {end3-start3}')
start3 = time.perf_counter()
self.node_coordinate_list.append(new_coords)
print(f'Node {nodeKey} was solved in {it-1} iterations' )
def nudgeNode(self, nodeKey, eegCoords):
# nudgeNode moves an existing grid node to the supplied coordinates and re-projects it onto the mesh
tol = .01
max_iterations = 10
# Re-acquire OpenCMISS-Zinc variables
fm = self._region.getFieldmodule()
coordinates = fm.findFieldByName('coordinates')
coordinates = coordinates.castFiniteElement()
cache = fm.createFieldcache()
# Create templates
nodes = fm.findNodesetByFieldDomainType(Field.DOMAIN_TYPE_NODES)
nodetemplate = nodes.createNodetemplate()
nodetemplate.defineField(coordinates)
nodetemplate.setValueNumberOfVersions(coordinates, -1, Node.VALUE_LABEL_VALUE, 1)
# Create our new node for the search
plane_normal_offset = .15
old_node = nodes.findNodeByIdentifier(nodeKey)
cache.setNode(old_node)
# Update our nodes location
coordinates.setNodeParameters(cache, -1, Node.VALUE_LABEL_VALUE, 1, eegCoords)
[result, old_coords] = coordinates.evaluateReal(cache, 3)
plane_norm = np.array(self.plane_normal)
shifted_point = old_coords + plane_norm * plane_normal_offset
coordinates.setNodeParameters(cache, -1, Node.VALUE_LABEL_VALUE, 1, shifted_point.tolist())
# Create our mesh search
mesh = fm.findMeshByName('mesh3d')
mesh_location = fm.createFieldStoredMeshLocation(mesh)
found_mesh_location = fm.createFieldFindMeshLocation(coordinates, coordinates, mesh)
found_mesh_location.setSearchMode(found_mesh_location.SEARCH_MODE_NEAREST)
it = 1
old_coords = shifted_point
test_coords = [10, 10, 10]
new_coords = shifted_point
while abs(np.linalg.norm(np.dot(test_coords, plane_norm) - np.dot(new_coords, plane_norm))) > tol:
# ^^ test if x and y changes are within tolerance
# Find nearest mesh location
[element, local_coords] = found_mesh_location.evaluateMeshLocation(cache, 3)
cache.setMeshLocation(element, local_coords)
[result, mesh_coords] = coordinates.evaluateReal(cache, 3)
# Update our search location
new_coords = old_coords + np.dot(mesh_coords - old_coords, plane_norm) * plane_norm
cache.setNode(old_node)
coordinates.setNodeParameters(cache, -1, Node.VALUE_LABEL_VALUE, 1, new_coords.tolist())
test_coords = old_coords
old_coords = new_coords
# Break in case we can not converge
it += 1
if it > max_iterations:
print(f'Could not converge on node {nodeKey}')
break
self.node_coordinate_list.append(new_coords)
print(f'Node {nodeKey} was solved in {it-1} iterations')
def updateGrid(self, new_point_id, new_point):
index = self.node_corner_list.index(new_point_id)
self.node_corner_points[index] = new_point
eeg_coord = self.generateGridPoints4(8)
self._scene.beginChange()
self.node_coordinate_list = []
for i in range(len(eeg_coord)):
self.nudgeNode(i + self.numberInModel + 1, eeg_coord[i])
self._scene.endChange()
def createEEGPointsWithNormal(self, region, eeg_group, eeg_coord, i, cache):
# createEEGPoints creates subgroups of points that use the 'colour' field to change colour
# Re-acquire OpenCMISS-Zinc variables
fm = region.getFieldmodule()
coordinates = fm.findFieldByName('coordinates')
coordinates = coordinates.castFiniteElement()
colour = fm.findFieldByName('colour')
colour = colour.castFiniteElement()
# Create templates
nodes = fm.findNodesetByFieldDomainType(Field.DOMAIN_TYPE_NODES)
nodetemplate = nodes.createNodetemplate()
nodetemplate.defineField(coordinates)
nodetemplate.setValueNumberOfVersions(coordinates, -1, Node.VALUE_LABEL_VALUE, 1)
nodetemplate.defineField(colour)
nodetemplate.setValueNumberOfVersions(colour, -1, Node.VALUE_LABEL_VALUE, 1)
eegNode = nodes.createNode(self.numberInModel + i + 1, nodetemplate)
cache.setNode(eegNode)
coordinates.setNodeParameters(cache, -1, Node.VALUE_LABEL_VALUE, 1, eeg_coord[i])
# use our solver to find the node's location
self.moveNode(self.numberInModel + i + 1, cache)
# Create the final node with our search coordinates
eeg_group.addNode(eegNode)
def createGraphics(self, new_points=False, point1=[0, 1], point2=[0, 0], point3=[1, 1], point4=[.8, 0], number_on_side=8):
# createGraphics creates our EEG points and assigns them a spectrum for future animation.
fm = self._region.getFieldmodule()
scene = self._region.getScene()
scene.beginChange()
coordinates = fm.findFieldByName('coordinates')
coordinates = coordinates.castFiniteElement()
cache = fm.createFieldcache()
#save points
if new_points:
self.node_corner_points[0] = point1
self.node_corner_points[1] = point2
self.node_corner_points[2] = point3
self.node_corner_points[3] = point4
# Add EEG nodes
eeg_coord = self.generateGridPoints4(number_on_side)
self.eegSize = len(eeg_coord)
# Add Spectrum
spcmod = scene.getSpectrummodule()
spec = spcmod.getDefaultSpectrum()
spec.setName('eegColourSpectrum')
# Initialise all subgroup parameters
self.ndsg = [] # (node set group)
self.pointattrList = []
self.spectrumList = []
self.nodeColours = []
self.node_coordinate_list = []
finite_element_field = []
nodes = fm.findNodesetByFieldDomainType(Field.DOMAIN_TYPE_NODES)
self.numberInModel = nodes.getSize()
# Create all EEG subgroups
colour = fm.createFieldFiniteElement(1)
colour.setName('colour')
colour.setManaged(True)
# Create new graphics for our subgroup
nodeColours = self._scene.createGraphicsPoints()
nodeColours.setFieldDomainType(Field.DOMAIN_TYPE_NODES)
nodeColours.setCoordinateField(coordinates)
# create new subgroup containing our node
fng = fm.createFieldNodeGroup(fm.findNodesetByName('nodes'))
ndsg = fng.getNodesetGroup()
ndsg.removeAllNodes()
# Create templates
nodes = fm.findNodesetByFieldDomainType(Field.DOMAIN_TYPE_NODES)
nodetemplate = nodes.createNodetemplate()
nodetemplate.defineField(coordinates)
nodetemplate.setValueNumberOfVersions(coordinates, -1, Node.VALUE_LABEL_VALUE, 1)
nodetemplate.defineField(colour)
nodetemplate.setValueNumberOfVersions(colour, -1, Node.VALUE_LABEL_VALUE, 1)
# Assign values for the new EEG subset
for i in range(len(eeg_coord)):
eegNode = nodes.createNode(self.numberInModel + i + 1, nodetemplate)
cache.setNode(eegNode)
coordinates.setNodeParameters(cache, -1, Node.VALUE_LABEL_VALUE, 1, eeg_coord[i])
# use our solver to find the node's location
self.moveNode(self.numberInModel + i + 1, cache)
# Create the final node with our search coordinates
ndsg.addNode(eegNode)
self.ndsg = ndsg
nodeColours.setSubgroupField(fng)
# Set attributes for our new node
nodeColours.setSpectrum(spec)
nodeColours.setDataField(colour)
pointattr = nodeColours.getGraphicspointattributes()
# pointattr.setGlyphShapeType(Glyph.SHAPE_TYPE_SPHERE)
# pointattr.setBaseSize([.05, .05, .05])
# Add a colour bar for the spectrum
check = nodes.findNodeByIdentifier(1000)
if not check.isValid():
screen_coords = fm.createFieldFiniteElement(2)
spectrum_template = nodes.createNodetemplate()
spectrum_template.defineField(screen_coords)
spectrum_node = nodes.createNode(1000, spectrum_template)
cache.setNode(spectrum_node)
screen_coords.setNodeParameters(cache, -1, Node.VALUE_LABEL_VALUE, 1, [-.95, -.78])
fng = fm.createFieldNodeGroup(nodes)
spectrum_group = fng.getNodesetGroup()
spectrum_group.addNode(spectrum_node)
spectrum_graphics = scene.createGraphicsPoints()
spectrum_graphics.setScenecoordinatesystem(Scenecoordinatesystem.SCENECOORDINATESYSTEM_NORMALISED_WINDOW_FIT_BOTTOM)
spectrum_graphics.setFieldDomainType(Field.DOMAIN_TYPE_NODES)
spectrum_graphics.setCoordinateField(screen_coords)
spectrum_graphics.setSubgroupField(fng)
spectrum_graphics.setSpectrum(spec)
spectrum_point_attr = spectrum_graphics.getGraphicspointattributes()
gm = self._scene.getGlyphmodule()
colour_bar = gm.createGlyphColourBar(spec)
colour_bar.setLabelDivisions(6)
spectrum_point_attr.setGlyph(colour_bar)
spectrum_point_attr.setBaseSize([.3, .4,])
scene.endChange()
# Create node corner list (used later to check which nodes are on the corners)
base_node = self.numberInModel
self.node_corner_list[0] = base_node + 1
self.node_corner_list[2] = base_node + self.number_of_points_on_grid_side
self.node_corner_list[1] = base_node + 1 + self.number_of_points_on_grid_side * (self.number_of_points_on_grid_side - 1)
self.node_corner_list[3] = base_node + self.number_of_points_on_grid_side ** 2
#del self.pointattrList[-1]
#self.generateMesh()
def deleteAll(self):
fm = self._region.getFieldmodule()
nodes = fm.findNodesetByFieldDomainType(Field.DOMAIN_TYPE_NODES)
for i in range(self.eegSize):
node = nodes.findNodeByIdentifier(i + self.numberInModel + 1)
nodes.destroyNode(node)
def generateMesh(self):
plateMesh = Blackfynn_2d_plate(self._region, self.node_coordinate_list)
plateMesh.drawMesh(self._region, self.node_coordinate_list) |
from datetime import datetime
import time
import matplotlib.pyplot as plt
import random
import io
import threading
def filter24hours(data):
now = datetime.timestamp(datetime.now())
return sorted([d for d in data if (now - d["timestamp"]) < 86400], key=lambda x: x["timestamp"])
def filter30days(data):
now = datetime.timestamp(datetime.now())
return sorted([d for d in data if (now - d["timestamp"]) < (86400 * 30)], key=lambda x: x["timestamp"])
def messageStats(data):
pos = [d for d in data if d["score"] > 0.5]
neg = [d for d in data if d["score"] <= 0.5]
avg = sum([d["score"] for d in data]) / len(data)
top5 = [d["message"] for d in sorted(data, key=lambda x: -x["score"])[:min(5, len(data))]]
return {
"num_pos": len(pos),
"num_neg": len(neg),
"avg_score": avg,
"top_5": top5
}
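# Expected shape of `data`, inferred from the accessors above (values are illustrative):
#   [{"timestamp": 1690000000.0, "score": 0.8, "message": "Had a great day"}, ...]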
def graph24hours(data, stats):
X = [d["timestamp"] for d in data[4:]]
Y = [sum([d["score"] for d in data[:i]])/i for i in range(5, len(data)+1)]
"""
totalScore = 0
numberPoints = 0
for message in data:
totalScore += message["score"]
numberPoints += 1
average = totalScore/numberPoints
Y.append(average)
"""
plt.plot(X, Y, color="skyblue", linewidth=4, alpha=0.3)
plt.xticks([])
plt.xlabel("Your Day")
plt.ylabel("Average Mood Score")
def graphRatio(data, stats):
labels = ["Positive Thoughts", "Negative Thoughts"]
colors = ["lemonchiffon", "lightcyan"]
explode = (0.05, 0)
plt.pie([stats["num_pos"], stats["num_neg"]], colors=colors, explode=explode, labels=labels, autopct="%1.0f%%")
def graph30days(data, stats):
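# NOTE: the histogram below is currently fed randomly generated placeholder values;
# the supplied `data` and `stats` are not yet used to compute real per-day averages.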
data = [random.random() for i in range(30)]
plt.hist(data, [0, 0.2, 0.4, 0.6, 0.8, 1.0], rwidth=0.8, color="lemonchiffon")
plt.xticks([])
plt.xlabel("Mood Score Distribution")
plt.ylabel("Number of Days")
graph_mutex = threading.Lock()
def generateGraph(graph, data, stats, small=True):
graph_mutex.acquire()
if small:
fig = plt.figure(figsize=(10, 10))
else:
fig = plt.figure(figsize=(20, 10))
graph(data, stats)
buf = io.BytesIO()
fig.savefig(buf, format="png")
buf.seek(0)
plt.close(fig)
graph_mutex.release()
return buf
|
import unittest
import asm.cms.htmlpage
class HTMLPageTests(unittest.TestCase):
def test_constructor(self):
htmlpage = asm.cms.htmlpage.HTMLPage()
self.assertEqual('', htmlpage.content)
|
import numpy as np
from source.env.systems import ai
from source.env.lib.enums import Material
class Stat:
def __init__(self, val, maxVal):
self._val = val
self._max = maxVal
def increment(self, amt=1):
self._val = min(self.max, self.val + amt)
def decrement(self, amt=1):
self._val = max(0, self.val - amt)
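# Usage sketch (illustrative): values are clamped to the range [0, max], e.g.
#   s = Stat(5, 10); s.increment(20)  ->  s.val == 10; s.decrement(4)  ->  s.val == 6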
def center(self):
return (self.val - self.max / 2.0) / self.max
@property
def val(self):
return self._val
@property
def max(self):
return self._max
def packet(self):
return {'val': self.val, 'max': self.max}
class Player:
public = set(
'pos lastPos R C food water health entID annID name colorInd color timeAlive kill attackMap damage freeze immune'.split())
def __init__(self, entID, color, config):
self._config = config
self._annID, self._color = color
self._colorInd = self._annID
self._R, self._C = config.R, config.C
self._pos = config.SPAWN()
self._lastPos = self.pos
self._food = Stat(config.FOOD, config.FOOD)
self._water = Stat(config.WATER, config.WATER)
self._health = Stat(config.HEALTH, config.HEALTH)
self._entID = entID
self._name = 'Alice' if self._colorInd == 0 else 'Bob' # alternative: 'Neural_' + str(self._entID)
self._timeAlive = 0
self._damage = None
self._freeze = 0
self._immune = True
self._kill = False
self._index = 1
self._immuneTicks = 1
self.move = None
self.attack = None
self.shareWater, self.shareFood = 0, 0
self._attackMap = np.zeros((7, 7, 3)).tolist()
self.moveDec = 0
self.attackDec = 0
self.shareWaterDec = 0
self.shareFoodDec = 0
def __getattribute__(self, name):
if name in Player.public:
return getattr(self, '_' + name)
return super().__getattribute__(name)
def __setattr__(self, name, value):
if name in Player.public:
raise AttributeError('Property \"' + name + '\" is read only: agents cannot modify their server-side data')
return super().__setattr__(name, value)
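# Together, __getattribute__ and __setattr__ expose each name listed in Player.public as a
# read-only alias of its '_'-prefixed attribute; internal methods mutate the private copies directly.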
def forage(self, world):
r, c = self._pos
isForest = type(world.env.tiles[r, c].mat) in [Material.FOREST.value]
if isForest and world.env.harvest(r, c):
self.food.increment(6 // len(world.env.tiles[r, c].ents))
isWater = Material.WATER.value in ai.adjacentMats(world.env, self._pos)
if isWater:
self.water.increment(6)
def lavaKill(self, world):
r, c = self._pos
if type(world.env.tiles[r, c].mat) == Material.LAVA.value:
self._kill = True
return self._kill
def updateStats(self):
if (self._food.val > self._food.max // 2 and
self._water.val > self._water.max // 2):
self._health.increment()
self._water.decrement()
self._food.decrement()
if self._food.val <= 0:
self._health.decrement()
if self._water.val <= 0:
self._health.decrement()
def updateCounts(self, world):
r, c = self._pos
world.env.tiles[r, c].counts[self._colorInd] += 1
def step(self, world):
if not self.alive: return
self._freeze = max(0, self._freeze - 1)
self.updateCounts(world)
if self.lavaKill(world): return
self.forage(world)
self.updateStats()
self._damage = None
self._timeAlive += 1
if self._timeAlive > self._config.HORIZON:
self._kill = True
self.updateImmune()
def act(self, world, actions, arguments):
self.mapAttack()
self._lastPos = self._pos
for action, args in zip(actions, arguments):
action.call(world, self, *args)
@property
def alive(self):
return self._health.val > 0
def getLifetime(self):
return self._timeAlive
def updateImmune(self):
if self._timeAlive >= self._immuneTicks:
self._immune = False
# Note: does not stack damage, but still applies to health
def applyDamage(self, damage):
if self.immune:
return
self._damage = damage
self._health.decrement(damage)
def mapAttack(self):
if self.attack is not None:
attack = self.attack
name = attack.action.__name__
if name == 'Melee':
attackInd = 0
elif name == 'Range':
attackInd = 1
elif name == 'Mage':
attackInd = 2
rt, ct = attack.args.pos
rs, cs = self._pos
dr = rt - rs
dc = ct - cs
if abs(dr) <= 3 and abs(dc) <= 3:
self._attackMap[3 + dr][3 + dc][attackInd] += 1
def giveResources(self, ent, n_water=0, n_food=0):
n_water = min(n_water, self._water._val, ent.water.max - ent.water.val)
n_food = min(n_food, self._food._val, ent.food.max - ent.food.val)
self._water.decrement(n_water)
ent.water.increment(n_water)
self._food.decrement(n_food)
ent.food.increment(n_food)
|
# Project Euler Problem 0008
# Largest product in a series
# The four adjacent digits in the 1000-digit number that have the greatest product are 9 × 9 × 8 × 9 = 5832.
# 73167176531330624919225119674426574742355349194934
# 96983520312774506326239578318016984801869478851843
# 85861560789112949495459501737958331952853208805511
# 12540698747158523863050715693290963295227443043557
# 66896648950445244523161731856403098711121722383113
# 62229893423380308135336276614282806444486645238749
# 30358907296290491560440772390713810515859307960866
# 70172427121883998797908792274921901699720888093776
# 65727333001053367881220235421809751254540594752243
# 52584907711670556013604839586446706324415722155397
# 53697817977846174064955149290862569321978468622482
# 83972241375657056057490261407972968652414535100474
# 82166370484403199890008895243450658541227588666881
# 16427171479924442928230863465674813919123162824586
# 17866458359124566529476545682848912883142607690042
# 24219022671055626321111109370544217506941658960408
# 07198403850962455444362981230987879927244284909188
# 84580156166097919133875499200524063689912560717606
# 05886116467109405077541002256983155200055935729725
# 71636269561882670428252483600823257530420752963450
# Find the thirteen adjacent digits in the 1000-digit number that have the greatest product. What is the value of this product?
# NOTES:
# - split the string on "0"
# - collect all remaining strings into a list
# - isolate strings that have length greater than or equal to 13
# - cycle through the 13 adjacent digits and keep the maximum product
# - https://docs.python.org/3/library/stdtypes.html#str.split
# - https://www.geeksforgeeks.org/filter-in-python/
# Answer:
"""
def f_max_product(digits):
    # multiply together the individual digits of the string `digits`
    product = 1
    for d in digits:
        product *= int(d)
    return product
# initialize variables
string_of_digits = '7316717653133062491922511967442657474235534919493496983520312774506326239578318016984801869478851843858615607891129494954595017379583319528532088055111254069874715852386305071569329096329522744304355766896648950445244523161731856403098711121722383113622298934233803081353362766142828064444866452387493035890729629049156044077239071381051585930796086670172427121883998797908792274921901699720888093776657273330010533678812202354218097512545405947522435258490771167055601360483958644670632441572215539753697817977846174064955149290862569321978468622482839722413756570560574902614079729686524145351004748216637048440319989000889524345065854122758866688116427171479924442928230863465674813919123162824586178664583591245665294765456828489128831426076900422421902267105562632111110937054421750694165896040807198403850962455444362981230987879927244284909188845801561660979191338754992005240636899125607176060588611646710940507754100225698315520005593572972571636269561882670428252483600823257530420752963450'
num_adjacent_digits = 13
# print(list(filter(lambda x: len(x) >= 13, string_of_digits.split('0'))))
# brute force: slide a window of num_adjacent_digits over the string and keep the maximum product
max_product = 0
for i in range(len(string_of_digits) - num_adjacent_digits + 1):
    max_product = max(max_product, f_max_product(string_of_digits[i:i + num_adjacent_digits]))
print(max_product)
# more efficient for larger strings
""" |
# This script just ensures that all the JSON files can be parsed correctly
from __future__ import print_function
import os
import glob
import json
MONTAGELIB = os.path.join('..', '..', 'MontageLib')
for json_file in glob.glob(os.path.join(MONTAGELIB, '*', '*.json')):
print("Validating {0}...".format(json_file))
with open(json_file, 'r') as fjson:
data = json.load(fjson)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the Redis storage file reader."""
import unittest
import fakeredis
import redis
from plaso.containers import sessions
from plaso.containers import tasks
from plaso.storage.redis import reader
from tests.storage import test_lib
class RedisStorageReaderTest(test_lib.StorageTestCase):
"""Tests for the Redis storage file reader."""
_REDIS_URL = 'redis://127.0.0.1/0'
def _CreateRedisClient(self):
"""Creates a Redis client for testing.
This method will attempt to use a Redis server listening on localhost and
fallback to a fake Redis client if no server is available or the connection
timed out.
Returns:
Redis: a Redis client.
"""
try:
redis_client = redis.from_url(self._REDIS_URL, socket_timeout=60)
redis_client.ping()
except redis.exceptions.ConnectionError:
redis_client = fakeredis.FakeStrictRedis()
return redis_client
def testInitialization(self):
"""Tests the __init__ function."""
test_redis_client = self._CreateRedisClient()
session = sessions.Session()
task = tasks.Task(session_identifier=session.identifier)
test_reader = reader.RedisStorageReader(
session.identifier, task.identifier, redis_client=test_redis_client)
self.assertIsNotNone(test_reader)
if __name__ == '__main__':
unittest.main()
|
from diot import Diot
from bioprocs.utils import funcargs
from bioprocs.utils.tsvio2 import TsvWriter, TsvRecord
from gff import Gff
infile = {{i.infile | quote}}
outfile = {{o.outfile | quote}}
bedcols = {{args.bedcols | repr}}
keepattrs = {{args.keepattrs | repr}}
outhead = {{args.outhead | repr}}
bedcols.NAME = bedcols.get('NAME', 'lambda attrs: \
attrs["id"] if "id" in attrs else \
attrs["name"] if "name" in attrs else \
attrs["CHR"] + ":" + attrs["START"] + "-" + attrs["END"]')
# convert strings to functions
for key in bedcols:
bedcols[key] = eval(bedcols[key])
writer = TsvWriter(outfile)
writer.cnames = ['CHR', 'START', 'END', 'NAME', 'SCORE', 'STRAND']
writer.cnames.extend([field for field in bedcols if field != 'NAME'])
if keepattrs:
writer.cnames.append('ATTRIBUTES')
bedcols.ATTRIBUTES = lambda attrs: ';'.join(
['{0}={1}'.format(key, val) for key, val in attrs.items()
if key not in writer.cnames])
if outhead:
writer.writeHead(lambda cns: ('' if outhead is True else outhead) + '\t'.join(cns))
gff = Gff(infile)
for record in gff:
r = TsvRecord()
r.CHR = record['seqid']
r.START = record['start']
r.END = record['end']
r.SCORE = record['score']
r.STRAND = record['strand']
attrs = record['attributes']
attrs.update(dict(
CHR = r.CHR,
START = r.START,
END = r.END,
SCORE = r.SCORE,
STRAND = r.STRAND
))
for key in bedcols:
r[key] = bedcols[key](attrs)
writer.write(r)
writer.close()
|
from flask_wtf import FlaskForm as Form
from wtforms import StringField, TextAreaField, BooleanField, SelectField,\
SubmitField, FileField, SelectMultipleField, widgets
from wtforms.validators import Required, Length, Email, Regexp
from wtforms import ValidationError
from flask_pagedown.fields import PageDownField
from ..models import Role, User
import flask_admin as admin
from wtforms.widgets import TextArea
from flask_admin.contrib.sqla import ModelView
class NameForm(Form):
name = StringField('What is your name?', validators=[Required()])
submit = SubmitField('Submit')
class EditProfileForm(Form):
name = StringField('Real name', validators=[Length(0, 64)])
location = StringField('Location', validators=[Length(0, 64)])
about_me = TextAreaField('About me')
submit = SubmitField('Submit')
class EditProfileAdminForm(Form):
email = StringField('Email', validators=[Required(), Length(1, 64),
Email()])
username = StringField('Username', validators=[
Required(), Length(1, 64), Regexp('^[A-Za-z][A-Za-z0-9_.]*$', 0,
'Usernames must have only letters, '
'numbers, dots or underscores')])
confirmed = BooleanField('Confirmed')
role = SelectField('Role', coerce=int)
name = StringField('Real name', validators=[Length(0, 64)])
location = StringField('Location', validators=[Length(0, 64)])
about_me = TextAreaField('About me')
submit = SubmitField('Submit')
def __init__(self, user, *args, **kwargs):
super(EditProfileAdminForm, self).__init__(*args, **kwargs)
self.role.choices = [(role.id, role.name)
for role in Role.query.order_by(Role.name).all()]
self.user = user
def validate_email(self, field):
if field.data != self.user.email and \
User.query.filter_by(email=field.data).first():
raise ValidationError('Email already registered.')
def validate_username(self, field):
if field.data != self.user.username and \
User.query.filter_by(username=field.data).first():
raise ValidationError('Username already in use.')
class PostForm(Form):
#body = PageDownField("Post your update", validators=[Required()])
body = TextAreaField("Post your update", validators=[Required()])
#category = StringField("Category", validators=[Required()])
category = SelectField("Category", coerce=int)
photo = FileField('Photo')
submit = SubmitField('Submit')
class MultiCheckboxField(SelectMultipleField):
widget = widgets.ListWidget(prefix_label=False)
option_widget = widgets.CheckboxInput()
class EditPostForm(Form):
body = TextAreaField("Edit your update", validators=[Required()])
category = SelectField("Category", coerce=int)
attached = MultiCheckboxField("Attachments", coerce=int)
photo = FileField('Photo')
submit = SubmitField('Submit')
class CKTextAreaWidget(TextArea):
def __call__(self, field, **kwargs):
kwargs.setdefault('class_', 'ckeditor')
return super(CKTextAreaWidget, self).__call__(field, **kwargs)
class CKTextAreaField(TextAreaField):
widget = CKTextAreaWidget()
class TestAdmin(ModelView):
form_overrides = dict(text=CKTextAreaField)
|
'''analyze WORKING/samples-train.csv
INVOCATION: python samples-train-analysis.py ARGS
INPUT FILES:
WORKING/samples-train.csv
OUTPUT FILES:
WORKING/ME/0log.txt log file containing what is printed
WORKING/ME/transactions.csv with columns apn | date | sequence | actual_price
'''
import argparse
import collections
import math
import multiprocessing
import numpy as np
import os
import pandas as pd
import pdb
import random
import sys
import Bunch
import dirutility
import Logger
import Month
import Path
import Timer
def make_control(argv):
print 'argv', argv
parser = argparse.ArgumentParser()
parser.add_argument('invocation')
parser.add_argument('--test', action='store_true', help='if present, truncated input and enable test code')
parser.add_argument('--trace', action='store_true', help='if present, call pdb.set_trace() early in run')
arg = parser.parse_args(argv) # ignore invocation name
arg.me = arg.invocation.split('.')[0]
if arg.trace:
pdb.set_trace()
random_seed = 123
random.seed(random_seed)
dir_working = Path.Path().dir_working()
path_out_dir = dirutility.assure_exists(dir_working + arg.me + ('-test' if arg.test else '') + '/')
return Bunch.Bunch(
arg=arg,
path_in_samples=dir_working + 'samples-train.csv',
path_out_log=path_out_dir + '0log.txt',
path_out_csv=path_out_dir + 'transactions.csv',
random_seed=random_seed,
test=arg.test,
timer=Timer.Timer(),
)
def make_index(apn, date, sequence_number):
return '%d-%d-%d' % (apn, date, sequence_number)
APN_Date = collections.namedtuple('APN_Date', 'apn date')
ColumnName = collections.namedtuple('ColumnName', 'apn date actual_price')
def column_names():
'return names of columns in the input csv'
return ColumnName(
apn='APN UNFORMATTED_deed',
date='SALE DATE_deed',
actual_price='SALE AMOUNT_deed',
)
def create_new_row(apn, date, sequence_number, row, column):
'return DataFrame with one row'
date = int(date)
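# decompose a YYYYMMDD integer into its parts, e.g. 20170315 -> year 2017, month 3, day 15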
date_year = int(date / 10000)
date_month = int((date - date_year * 10000) / 100)
date_day = int(date - date_year * 10000 - date_month * 100)
assert date == date_year * 10000 + date_month * 100 + date_day, date
new_df = pd.DataFrame(
data={
'apn': int(apn),
'date': date,
'year': date_year,
'month': date_month,
'day': date_day,
'sequence_number': sequence_number,
'actual_price': row[column.actual_price],
},
index=[make_index(apn, date, sequence_number)],
)
return new_df
class DuplicateFinder(object):
def __init__(self, df, column):
self.df = df
self.column = column
def find_duplicates_method(self, apn_date):
mask_apn = self.df[self.column.apn] == apn_date.apn
mask_date = self.df[self.column.date] == apn_date.date
mask = mask_apn & mask_date
df_apn_date = self.df[mask]
sequence_number = 0
result_duplicates = set()
result_df = None
for i, row in df_apn_date.iterrows():
if sequence_number > 0:
result_duplicates.add(apn_date)
new_df = create_new_row(apn_date.apn, apn_date.date, sequence_number, row, self.column)
result_df = new_df if result_df is None else result_df.append(new_df, verify_integrity=True)
sequence_number += 1
return result_df, result_duplicates
Info = collections.namedtuple('Info', 'apn_date, df, column')
MappedItem = collections.namedtuple('MappedItem', 'transactions duplicates')
def make_transactions_mapper(info):
'return (df of transactions, set of duplicates)'
verbose = False
if verbose:
print 'make_transactions_mapper begin', info.apn_date, 'pid', os.getpid()
apn_date, df, column = info.apn_date, info.df, info.column
mask_apn = df[column.apn] == apn_date.apn
mask_date = df[column.date] == apn_date.date
mask = mask_apn & mask_date
df_apn_date = df[mask]
sequence_number = 0
result_duplicates = set()
result_df = pd.DataFrame()
for label, row in df_apn_date.iterrows():
if verbose:
print 'make_transactions_mapper iter row label', info.apn_date, label
if sequence_number > 0:
result_duplicates.add(apn_date)
new_df = create_new_row(apn_date.apn, apn_date.date, sequence_number, row, column)
result_df = result_df.append(new_df, ignore_index=True, verify_integrity=True)
# result_df = new_df if result_df is None else result_df.append(new_df, verify_integrity=True)
sequence_number += 1
if verbose:
print 'make_transactions_mapper end', len(result_df), len(result_duplicates)
return MappedItem(
transactions=result_df,
duplicates=result_duplicates,
)
def make_transactions_reducer(mapped_items, sofar_transactions=pd.DataFrame(), sofar_duplicates=set()):
'reduce list of results from mapper to final result'
for mapped_item in mapped_items:
transactions, duplicates = mapped_item.transactions, mapped_item.duplicates
sofar_transactions = sofar_transactions.append(transactions, verify_integrity=True, ignore_index=True)
if len(duplicates) > 0:
    sofar_duplicates.update(duplicates)  # set.update() mutates in place and returns None
return sofar_transactions, sofar_duplicates
def make_transactions_parallel(df, test, in_parallel):
'return (df of transaction with unique APNs and IDs, set(APN_Date) of duplicates)'
# use 4 cores
def make_infos():
'return list of Info'
column = column_names()
result = []
for apn in set(df[column.apn]):
df_apn = df[df[column.apn] == apn]
for date in set(df_apn[column.date]):
result.append(Info(
apn_date=(APN_Date(apn, date)),
df=df,
column=column,
))
return result
all_infos = make_infos()
print 'len(all_infos)', len(all_infos)
infos = all_infos[:100] if test else all_infos
print 'len(infos)', len(infos)
if in_parallel:
n_processes = 1
print 'n_processes', n_processes
p = multiprocessing.Pool(n_processes)
mapped = p.map(make_transactions_mapper, infos)
all_transactions, all_duplicates = make_transactions_reducer(
mapped,
sofar_transactions=pd.DataFrame(),
sofar_duplicates=set(),
)
else:
# conserve memory by processing each info one by one
verbose = False
all_transactions = pd.DataFrame()
all_duplicates = set()
for info in infos:
if verbose:
print all_transactions
print all_duplicates
mapped_item = make_transactions_mapper(info)
if verbose:
print mapped_item.transactions
print mapped_item.duplicates
print len(mapped_item)
all_transactions, all_duplicates = make_transactions_reducer(
[mapped_item],
sofar_transactions=all_transactions,
sofar_duplicates=all_duplicates,
)
assert len(all_transactions) > 0
assert all_duplicates is not None
return all_transactions, all_duplicates
def make_transactionsOLD(df, test):
'return (df of transaction IDs and prices, set(APN_Date) of duplicates)'
column = column_names()
result_df = None
result_set = set()
for apn in set(df[column.apn]):
if test and len(result_set) > 10:
break
df_apn = df[df[column.apn] == apn]
for date in set(df_apn[column.date]):
duplicates_df, duplicates_set = make_transactions_mapper(APN_Date(apn, date), df, column)
result_df = duplicates_df if result_df is None else result_df.append(duplicates_df, verify_integrity=True, ignore_index=True)
result_set.append(duplicates_set)
return result_df, result_set
def make_how_different(df, duplicates):
'return tuple (dict[column] = set((value0, value1)), matched_counter) of mismatched fields'
def isnan(x):
if isinstance(x, float):
return math.isnan(x)
if isinstance(x, np.float64):
return np.isnan(x)
return False
def find_mismatched_values(ordered_columns, matches):
    'return None or (column, value0, value1) of mismatched fields in the first 2 records'
# TODO: can compare all records until a mismatch is found
match0 = matches.iloc[0]
match1 = matches.iloc[1]
for column in ordered_columns:
value0 = match0[column]
value1 = match1[column]
# print value0, value1, type(value0), type(value1)
if isnan(value0) and isnan(value1):
# NaN stands for Missing in pandas.DataFrame
continue # pretend that two NaN values are equal to each other
if value0 != value1:
print column, value0, value1
return column, value0, value1
return None # should not happen
def make_ordered_columns(column, df):
'return list of column names to examine'
all_but_price = [
column_name
for column_name in df.columns
if column_name not in (
column.actual_price,
'Unnamed: 0',
'Unnamed: 0.1',
)
]
ordered_columns = [column.actual_price]
ordered_columns.extend(all_but_price)
return ordered_columns
column = column_names()
ordered_columns = make_ordered_columns(column, df)
result = collections.defaultdict(list)
matched_counter = collections.Counter()
for duplicate in duplicates:
mask_apn = df[column.apn] == duplicate.apn
mask_date = df[column.date] == duplicate.date
mask = mask_apn & mask_date
matches = df[mask]
matched_counter[len(matches)] += 1
if len(matches) > 1:
maybe_mismatched_values = find_mismatched_values(ordered_columns, matches)
if maybe_mismatched_values is None:
print ' all fields in first 2 records were equal'
pdb.set_trace()
else:
column_name, value0, value1 = maybe_mismatched_values
result[column_name].append((value0, value1))
else:
print matches
print duplicate
print len(matches)
print 'no mismatched fields'
pdb.set_trace()
return result, matched_counter
def do_work(control):
df = pd.read_csv(
control.path_in_samples,
low_memory=False,
nrows=1000 if control.test and False else None,
)
print 'column names in input file', control.path_in_samples
for i, column_name in enumerate(df.columns):
print i, column_name
control.timer.lap('printed column names')
transactions_df2, duplicates2 = make_transactions_parallel(df, control.test, False)
print 'not parallel lens', len(transactions_df2), len(duplicates2)
control.timer.lap('make transactions without parallel')
transactions_df, duplicates = make_transactions_parallel(df, control.test, True)
print 'parallel lens', len(transactions_df), len(duplicates)
control.timer.lap('make transactions in parallel')
print 'number of duplicate apn|date values', len(duplicates)
print 'number of training samples', len(df)
print 'number of unique apn-date-sequence_numbers', len(transactions_df)
transactions_df.to_csv(control.path_out_csv)
how_different, matched_counter = make_how_different(df, duplicates)
print 'first field difference in first 2 records of duplicate apn|date transactions'
for column, values in how_different.iteritems():
print column
for value in values:
print ' ', value
print
print 'number of matches in duplicate records'
for num_matched, num_times in matched_counter.iteritems():
print '%d records had identical APN and sale dates %d times' % (num_matched, num_times)
return None
def main(argv):
control = make_control(argv)
sys.stdout = Logger.Logger(logfile_path=control.path_out_log) # now print statements also write to the log file
print control
lap = control.timer.lap
do_work(control)
lap('work completed')
if control.test:
print 'DISCARD OUTPUT: test'
print control
print 'done'
return
if __name__ == '__main__':
main(sys.argv)
if False:
np
pdb
pd
|
class Move:
def __init__(self, value):
self._value = value
@property
def value(self):
return self._value
def is_valid(self):
return 1 <= self._value <= 9
def get_row(self):
if self._value in (1, 2, 3):
return 0
elif self._value in (4, 5, 6):
return 1
else:
return 2
def get_column(self):
if self._value in (1, 4, 7):
return 0
elif self._value in (2, 5, 8):
return 1
else:
return 2
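# Usage sketch (illustrative): cells are numbered 1-9 left-to-right, top-to-bottom, so
#   Move(6).get_row() == 1, Move(6).get_column() == 2, and Move(10).is_valid() is False.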
|
from environments.base_environment import BaseEnvironment
import numpy as np
def _pos(i, j):
return 7 * i + j
rows = [
[_pos(line, col_start+k) for k in range(4)]
for line in range(6)
for col_start in range(4)
]
columns = [
[_pos(line_start+k, col) for k in range(4)]
for line_start in range(3)
for col in range(7)
]
diagonals_1 = [
[_pos(line_start+k, col_start+k) for k in range(4)]
for line_start in range(3)
for col_start in range(4)
]
diagonals_2 = [
[_pos(line_start+k, col_start-k) for k in range(4)]
for line_start in range(3)
for col_start in range(3, 7)
]
class Environment(BaseEnvironment):
lines = rows + columns + diagonals_1 + diagonals_2
def __init__(self):
# Encode the board as 42 positions with 0 (empty), 1 (first player) or -1 (second player).
# The position at line `i` and column `j` will be at `7*i+j`.
# The 43-th position is the current player (-1 or 1)
self.state = np.zeros((43,))
def reset(self):
self.state[:42] = 0
self.state[42] = 1
return self.state, list(range(7))
def step(self, action):
assert self.state[_pos(0, action)] == 0, 'Invalid action'
# Put the piece on the board
for i in reversed(range(6)):
if self.state[_pos(i, action)] == 0:
pos = _pos(i, action)
self.state[pos] = self.state[42]
break
# Check for win
for pos_s in self.lines:
if pos in pos_s:
values = self.state[pos_s]
if np.all(values == 1) or np.all(values == -1):
return self.state, 1, True, []
# Check for draw
if np.all(self.state != 0):
return self.state, 0, True, []
# update list of possible actions
self.state[42] = -self.state[42]
return self.state, 0, False, np.nonzero(self.state[[_pos(0, k) for k in range(7)]] == 0)[0]
def to_jsonable(self):
"""
Return a representation of the current state that can be encoded as JSON.
This will be used later to visually display the game state at each step
"""
return self.state.tolist()
@staticmethod
def html_head():
"""
Return HTML string to inject at the page's <head>
"""
return '<link rel="stylesheet" href="/static/environment-connect-four/style.css">'
@staticmethod
def jsonable_to_html(jsonable):
"""
Return a HTML representation of the game at the given state
"""
# Detect winning line
state = np.asarray(jsonable)
win_cells = []
for pos_s in Environment.lines:
values = state[pos_s]
if np.all(values == 1) or np.all(values == -1):
win_cells = pos_s
break
# Build board
lines_html = [
'<tr>' + ''.join(
f'<td class="connect-four-{int(state[_pos(line, col)])} \
{"connect-four-win-line" if _pos(line, col) in win_cells else ""}"></td>'
for col in range(7)
) + '</tr>'
for line in range(6)
]
table_html = '\n'.join(lines_html)
return f'''
<table class="connect-four-board {"connect-four-board-win" if len(win_cells) else ""}">
{table_html}
</table>
'''
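# Usage sketch (illustrative): drop one piece for the first player in column 3.
#   env = Environment()
#   state, actions = env.reset()               # actions == [0, 1, 2, 3, 4, 5, 6]
#   state, reward, done, actions = env.step(3)
#   # the piece lands at the bottom of column 3: state[_pos(5, 3)] == 1.0
#   # and the turn passes to the second player:  state[42] == -1.0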
|
import sys, time
import logging
proxy = "sal146-us"
domain = ".netnut.io"
port = "33128"
USR = "netnut username"
PSWD = "netnut password"
global LOGF
global INFOF
TIME = time.strftime("%H%M%S",time.localtime())
LOGF = "log.txt"
INFOF = TIME+"-Info.txt"
class LOGGING:
LogFile = LOGF
InfoFile = INFOF
def infoLog(self,name):
"""
Store IP:PORT:USER:PASS
"""
formatter = logging.Formatter(fmt='%(message)s')
handler = logging.FileHandler(self.InfoFile,mode='w')
handler.setFormatter(formatter)
logger = logging.getLogger(name)
logger.setLevel(logging.DEBUG)
logger.addHandler(handler)
return logger
def loggingLog(self,name):
"""
Logging
"""
formatter = logging.Formatter(fmt='%(asctime)s %(levelname)-8s %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
handler = logging.FileHandler(self.LogFile,mode='w')
handler.setFormatter(formatter)
logger = logging.getLogger(name)
logger.setLevel(logging.DEBUG)
logger.addHandler(handler)
return logger
def nnApiInit():
global INFO
global LOGGER
INFO = LOGGING().infoLog('Info File')
LOGGER = LOGGING().loggingLog('Logging File')
LOGGER.info("Creating Netnut Proxies ...")
print("Start:")
def nnApiCreate(number, username, password):
global INFO
global LOGGER
for i in range(0,int(number)):
result = proxy + str(i+1) + domain + ":" + port + ":" + str(username) + ":" + str(password)
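# e.g. for i == 0 this yields "sal146-us1.netnut.io:33128:<username>:<password>"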
LOGGER.info("Created proxy:" + result)
INFO.info(result)
LOGGER.info("Successful creation :)")
print("NN Done.")
time.sleep(10)
return 1
def nnApiReturnFileName():
global INFO
global LOGGER
""" Return names of Log File and Info File """
return LOGF, INFOF
if __name__ == "__main__":
print("NetNut")
nnApiInit()
nnApiCreate(10, USR, PSWD) |
#!/usr/bin/env python
#########################################################################################
#
# Perform mathematical operations on images
#
# ---------------------------------------------------------------------------------------
# Copyright (c) 2015 Polytechnique Montreal <www.neuro.polymtl.ca>
# Authors: Julien Cohen-Adad, Sara Dupont
#
# About the license: see the file LICENSE.TXT
#########################################################################################
import os
import sys
import pickle
import gzip
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import spinalcordtoolbox.math as sct_math
from spinalcordtoolbox.image import Image
from spinalcordtoolbox.utils.shell import SCTArgumentParser, Metavar, list_type, display_viewer_syntax
from spinalcordtoolbox.utils.sys import init_sct, printv, set_global_loglevel
from spinalcordtoolbox.utils.fs import extract_fname
def get_parser():
parser = SCTArgumentParser(
description='Perform mathematical operations on images. Some inputs can be either a number or a 4d image or '
'several 3d images separated with ","'
)
mandatory = parser.add_argument_group("MANDATORY ARGUMENTS")
mandatory.add_argument(
"-i",
metavar=Metavar.file,
help="Input file. Example: data.nii.gz",
required=True)
mandatory.add_argument(
"-o",
metavar=Metavar.file,
help='Output file. Example: data_mean.nii.gz',
required=True)
optional = parser.add_argument_group("OPTIONAL ARGUMENTS")
optional.add_argument(
"-h",
"--help",
action="help",
help="Show this help message and exit")
basic = parser.add_argument_group('BASIC OPERATIONS')
basic.add_argument(
"-add",
metavar='',
nargs="+",
help='Add following input. Can be a number or multiple images (separated with space).',
required=False)
basic.add_argument(
"-sub",
metavar='',
nargs="+",
help='Subtract following input. Can be a number or an image.',
required=False)
basic.add_argument(
"-mul",
metavar='',
nargs="+",
help='Multiply by following input. Can be a number or multiple images (separated with space).',
required=False)
basic.add_argument(
"-div",
metavar='',
nargs="+",
help='Divide by following input. Can be a number or an image.',
required=False)
basic.add_argument(
'-mean',
help='Average data across dimension.',
required=False,
choices=('x', 'y', 'z', 't'))
basic.add_argument(
'-rms',
help='Compute root-mean-squared across dimension.',
required=False,
choices=('x', 'y', 'z', 't'))
basic.add_argument(
'-std',
help='Compute STD across dimension.',
required=False,
choices=('x', 'y', 'z', 't'))
basic.add_argument(
"-bin",
type=float,
metavar=Metavar.float,
help='Binarize image using specified threshold. Example: 0.5',
required=False)
thresholding = parser.add_argument_group("THRESHOLDING METHODS")
thresholding.add_argument(
'-otsu',
type=int,
metavar=Metavar.int,
help='Threshold image using Otsu algorithm (from skimage). Specify the number of bins (e.g. 16, 64, 128)',
required=False)
thresholding.add_argument(
"-adap",
metavar=Metavar.list,
type=list_type(',', int),
help="R|Threshold image using Adaptive algorithm (from skimage). Provide 2 values separated by ',' that "
"correspond to the parameters below. For example, '-adap 7,0' corresponds to a block size of 7 and an "
"offset of 0.\n"
" - Block size: Odd size of pixel neighborhood which is used to calculate the threshold value. \n"
" - Offset: Constant subtracted from weighted mean of neighborhood to calculate the local threshold "
"value. Suggested offset is 0.",
required=False)
thresholding.add_argument(
"-otsu-median",
metavar=Metavar.list,
type=list_type(',', int),
help="R|Threshold image using Median Otsu algorithm (from dipy). Provide 2 values separated by ',' that "
"correspond to the parameters below. For example, '-otsu-median 3,5' corresponds to a filter size of 3 "
"repeated over 5 iterations.\n"
" - Size: Radius (in voxels) of the applied median filter.\n"
" - Iterations: Number of passes of the median filter.",
required=False)
thresholding.add_argument(
'-percent',
type=int,
help="Threshold image using percentile of its histogram.",
metavar=Metavar.int,
required=False)
thresholding.add_argument(
"-thr",
type=float,
help='Use following number to threshold image (zero below number).',
metavar=Metavar.float,
required=False)
mathematical = parser.add_argument_group("MATHEMATICAL MORPHOLOGY")
mathematical.add_argument(
'-dilate',
type=int,
metavar=Metavar.int,
help="Dilate binary or greyscale image with specified size. If shape={'square', 'cube'}: size corresponds to the length of "
"an edge (size=1 has no effect). If shape={'disk', 'ball'}: size corresponds to the radius, not including "
"the center element (size=0 has no effect).",
required=False)
mathematical.add_argument(
'-erode',
type=int,
metavar=Metavar.int,
help="Erode binary or greyscale image with specified size. If shape={'square', 'cube'}: size corresponds to the length of "
"an edge (size=1 has no effect). If shape={'disk', 'ball'}: size corresponds to the radius, not including "
"the center element (size=0 has no effect).",
required=False)
mathematical.add_argument(
'-shape',
help="R|Shape of the structuring element for the mathematical morphology operation. Default: ball.\n"
"If a 2D shape {'disk', 'square'} is selected, -dim must be specified.",
required=False,
choices=('square', 'cube', 'disk', 'ball'),
default='ball')
mathematical.add_argument(
'-dim',
type=int,
help="Dimension of the array which 2D structural element will be orthogonal to. For example, if you wish to "
"apply a 2D disk kernel in the X-Y plane, leaving Z unaffected, parameters will be: shape=disk, dim=2.",
required=False,
choices=(0, 1, 2))
filtering = parser.add_argument_group("FILTERING METHODS")
filtering.add_argument(
"-smooth",
metavar=Metavar.list,
type=list_type(',', float),
help='Gaussian smoothing filtering. Supply values for standard deviations in mm. If a single value is provided, '
'it will be applied to each axis of the image. If multiple values are provided, there must be one value '
'per image axis. (Examples: "-smooth 2.0,3.0,2.0" (3D image), "-smooth 2.0" (any-D image)).',
required=False)
filtering.add_argument(
'-laplacian',
metavar=Metavar.list,
type=list_type(',', float),
help='Laplacian filtering. Supply values for standard deviations in mm. If a single value is provided, it will '
'be applied to each axis of the image. If multiple values are provided, there must be one value per '
'image axis. (Examples: "-laplacian 2.0,3.0,2.0" (3D image), "-laplacian 2.0" (any-D image)).',
required=False)
filtering.add_argument(
'-denoise',
help='R|Non-local means adaptive denoising from P. Coupe et al. as implemented in dipy. Separate the arguments with ",". Example: p=1,b=3\n'
' p: (patch radius) similar patches in the non-local means are searched for locally, inside a cube of side 2*p+1 centered at each voxel of interest. Default: p=1\n'
' b: (block radius) the size of the block to be used (2*b+1) in the blockwise non-local means implementation. Default: b=5. '
'Note: the block radius must be smaller than the smallest image dimension (the default is lowered automatically for small images).\n'
'To use default parameters, write -denoise 1',
required=False)
similarity = parser.add_argument_group("SIMILARITY METRIC")
similarity.add_argument(
'-mi',
metavar=Metavar.file,
help='Compute the mutual information (MI) between both input files (-i and -mi) as in: '
'http://scikit-learn.org/stable/modules/generated/sklearn.metrics.mutual_info_score.html',
required=False)
similarity.add_argument(
'-minorm',
metavar=Metavar.file,
help='Compute the normalized mutual information (MI) between both input files (-i and -mi) as in: '
'http://scikit-learn.org/stable/modules/generated/sklearn.metrics.normalized_mutual_info_score.html',
required=False)
similarity.add_argument(
'-corr',
metavar=Metavar.file,
        help='Compute the cross correlation (CC) between both input files (-i and -corr).',
required=False)
misc = parser.add_argument_group("MISC")
misc.add_argument(
'-symmetrize',
type=int,
help='Symmetrize data along the specified dimension.',
required=False,
choices=(0, 1, 2))
misc.add_argument(
'-type',
required=False,
help='Output type.',
choices=('uint8', 'int16', 'int32', 'float32', 'complex64', 'float64', 'int8', 'uint16', 'uint32', 'int64',
'uint64'))
optional.add_argument(
'-v',
metavar=Metavar.int,
type=int,
choices=[0, 1, 2],
default=1,
# Values [0, 1, 2] map to logging levels [WARNING, INFO, DEBUG], but are also used as "if verbose == #" in API
help="Verbosity. 0: Display only errors/warnings, 1: Errors/warnings + info messages, 2: Debug mode")
return parser
# MAIN
# ==========================================================================================
def main(argv=None):
"""
Main function
:param argv:
:return:
"""
parser = get_parser()
arguments = parser.parse_args(argv)
verbose = arguments.v
set_global_loglevel(verbose=verbose)
dim_list = ['x', 'y', 'z', 't']
fname_in = arguments.i
fname_out = arguments.o
output_type = arguments.type
# Open file(s)
im = Image(fname_in)
data = im.data # 3d or 4d numpy array
dim = im.dim
# run command
if arguments.otsu is not None:
param = arguments.otsu
data_out = sct_math.otsu(data, param)
elif arguments.adap is not None:
param = arguments.adap
data_out = sct_math.adap(data, param[0], param[1])
elif arguments.otsu_median is not None:
param = arguments.otsu_median
data_out = sct_math.otsu_median(data, param[0], param[1])
elif arguments.thr is not None:
param = arguments.thr
data_out = sct_math.threshold(data, param)
elif arguments.percent is not None:
param = arguments.percent
data_out = sct_math.perc(data, param)
elif arguments.bin is not None:
bin_thr = arguments.bin
data_out = sct_math.binarize(data, bin_thr=bin_thr)
elif arguments.add is not None:
data2 = get_data_or_scalar(arguments.add, data)
data_concat = sct_math.concatenate_along_4th_dimension(data, data2)
data_out = np.sum(data_concat, axis=3)
elif arguments.sub is not None:
data2 = get_data_or_scalar(arguments.sub, data)
data_out = data - data2
elif arguments.laplacian is not None:
sigmas = arguments.laplacian
        if len(sigmas) == 1:
            sigmas = [sigmas[0] for i in range(len(data.shape))]
        elif len(sigmas) != len(data.shape):
            printv(parser.error('ERROR: -laplacian requires either a single value or one value per image dimension'))
# adjust sigma based on voxel size
sigmas = [sigmas[i] / dim[i + 4] for i in range(3)]
# smooth data
data_out = sct_math.laplacian(data, sigmas)
elif arguments.mul is not None:
data2 = get_data_or_scalar(arguments.mul, data)
data_concat = sct_math.concatenate_along_4th_dimension(data, data2)
data_out = np.prod(data_concat, axis=3)
elif arguments.div is not None:
data2 = get_data_or_scalar(arguments.div, data)
data_out = np.divide(data, data2)
elif arguments.mean is not None:
dim = dim_list.index(arguments.mean)
if dim + 1 > len(np.shape(data)): # in case input volume is 3d and dim=t
data = data[..., np.newaxis]
data_out = np.mean(data, dim)
elif arguments.rms is not None:
dim = dim_list.index(arguments.rms)
if dim + 1 > len(np.shape(data)): # in case input volume is 3d and dim=t
data = data[..., np.newaxis]
data_out = np.sqrt(np.mean(np.square(data.astype(float)), dim))
elif arguments.std is not None:
dim = dim_list.index(arguments.std)
if dim + 1 > len(np.shape(data)): # in case input volume is 3d and dim=t
data = data[..., np.newaxis]
data_out = np.std(data, dim, ddof=1)
elif arguments.smooth is not None:
sigmas = arguments.smooth
if len(sigmas) == 1:
sigmas = [sigmas[0] for i in range(len(data.shape))]
elif len(sigmas) != len(data.shape):
            printv(parser.error('ERROR: -smooth requires either a single value or one value per image dimension'))
# adjust sigma based on voxel size
sigmas = [sigmas[i] / dim[i + 4] for i in range(3)]
# smooth data
data_out = sct_math.smooth(data, sigmas)
elif arguments.dilate is not None:
if arguments.shape in ['disk', 'square'] and arguments.dim is None:
printv(parser.error('ERROR: -dim is required for -dilate with 2D morphological kernel'))
data_out = sct_math.dilate(data, size=arguments.dilate, shape=arguments.shape, dim=arguments.dim)
elif arguments.erode is not None:
if arguments.shape in ['disk', 'square'] and arguments.dim is None:
printv(parser.error('ERROR: -dim is required for -erode with 2D morphological kernel'))
data_out = sct_math.erode(data, size=arguments.erode, shape=arguments.shape, dim=arguments.dim)
elif arguments.denoise is not None:
# parse denoising arguments
p, b = 1, 5 # default arguments
list_denoise = (arguments.denoise).split(",")
for i in list_denoise:
if 'p' in i:
p = int(i.split('=')[1])
if 'b' in i:
b = int(i.split('=')[1])
data_out = sct_math.denoise_nlmeans(data, patch_radius=p, block_radius=b)
elif arguments.symmetrize is not None:
        data_out = (data + np.flip(data, axis=arguments.symmetrize)) / 2.0
elif arguments.mi is not None:
# input 1 = from flag -i --> im
# input 2 = from flag -mi
im_2 = Image(arguments.mi)
compute_similarity(im, im_2, fname_out, metric='mi', metric_full='Mutual information', verbose=verbose)
data_out = None
elif arguments.minorm is not None:
im_2 = Image(arguments.minorm)
compute_similarity(im, im_2, fname_out, metric='minorm', metric_full='Normalized Mutual information', verbose=verbose)
data_out = None
elif arguments.corr is not None:
# input 1 = from flag -i --> im
        # input 2 = from flag -corr
im_2 = Image(arguments.corr)
compute_similarity(im, im_2, fname_out, metric='corr', metric_full='Pearson correlation coefficient', verbose=verbose)
data_out = None
# if no flag is set
else:
data_out = None
printv(parser.error('ERROR: you need to specify an operation to do on the input image'))
if data_out is not None:
# Write output
nii_out = Image(fname_in) # use header of input file
nii_out.data = data_out
nii_out.save(fname_out, dtype=output_type)
# TODO: case of multiple outputs
# assert len(data_out) == n_out
# if n_in == n_out:
# for im_in, d_out, fn_out in zip(nii, data_out, fname_out):
# im_in.data = d_out
# im_in.absolutepath = fn_out
# if arguments.w is not None:
# im_in.hdr.set_intent('vector', (), '')
# im_in.save()
# elif n_out == 1:
# nii[0].data = data_out[0]
# nii[0].absolutepath = fname_out[0]
# if arguments.w is not None:
# nii[0].hdr.set_intent('vector', (), '')
# nii[0].save()
# elif n_out > n_in:
# for dat_out, name_out in zip(data_out, fname_out):
# im_out = nii[0].copy()
# im_out.data = dat_out
# im_out.absolutepath = name_out
# if arguments.w is not None:
# im_out.hdr.set_intent('vector', (), '')
# im_out.save()
# else:
# printv(parser.usage.generate(error='ERROR: not the correct numbers of inputs and outputs'))
# display message
if data_out is not None:
display_viewer_syntax([fname_out], verbose=verbose)
else:
printv('\nDone! File created: ' + fname_out, verbose, 'info')
def get_data(list_fname):
"""
Get data from list of file names
:param list_fname:
:return: 3D or 4D numpy array.
"""
try:
nii = [Image(f_in) for f_in in list_fname]
except Exception as e:
printv(str(e), 1, 'error') # file does not exist, exit program
data0 = nii[0].data
data = nii[0].data
    # check that all images have the same shape
    for i in range(1, len(nii)):
        if not np.shape(nii[i].data) == np.shape(data0):
            printv('\nWARNING: shape(' + list_fname[i] + ')=' + str(np.shape(nii[i].data)) + ' incompatible with shape(' + list_fname[0] + ')=' + str(np.shape(data0)), 1, 'warning')
            printv('\nERROR: All input images must have the same dimensions.', 1, 'error')
else:
data = sct_math.concatenate_along_4th_dimension(data, nii[i].data)
return data
def get_data_or_scalar(argument, data_in):
"""
Get data from list of file names (scenario 1) or scalar (scenario 2)
    :param argument: list of file names or scalar
:param data_in: if argument is scalar, use data to get np.shape
:return: 3d or 4d numpy array
"""
# try to convert argument in float
try:
# build data2 with same shape as data
data_out = data_in[:, :, :] * 0 + float(argument[0])
# if conversion fails, it should be a string (i.e. file name)
except ValueError:
data_out = get_data(argument)
return data_out
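# Example of the two branches above (illustrative): with argument=['5'], the try
# branch builds an array of 5.0 with the same 3d shape as data_in; with
# argument=['im2.nii.gz'], float() raises ValueError and the file is loaded via
# get_data() instead.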
def compute_similarity(img1: Image, img2: Image, fname_out: str, metric: str, metric_full: str, verbose):
"""
Sanitize input and compute similarity metric between two images data.
"""
if img1.data.size != img2.data.size:
raise ValueError(f"Input images don't have the same size! \nPlease use \"sct_register_multimodal -i im1.nii.gz -d im2.nii.gz -identity 1\" to put the input images in the same space")
res, data1_1d, data2_1d = sct_math.compute_similarity(img1.data, img2.data, metric=metric)
if verbose > 1:
matplotlib.use('Agg')
plt.plot(data1_1d, 'b')
plt.plot(data2_1d, 'r')
plt.title('Similarity: ' + metric_full + ' = ' + str(res))
plt.savefig('fig_similarity.png')
path_out, filename_out, ext_out = extract_fname(fname_out)
if ext_out not in ['.txt', '.pkl', '.pklz', '.pickle']:
raise ValueError(f"The output file should a text file or a pickle file. Received extension: {ext_out}")
if ext_out == '.txt':
with open(fname_out, 'w') as f:
f.write(metric_full + ': \n' + str(res))
elif ext_out == '.pklz':
pickle.dump(res, gzip.open(fname_out, 'wb'), protocol=2)
else:
        pickle.dump(res, open(fname_out, 'wb'), protocol=2)
if __name__ == "__main__":
init_sct()
main(sys.argv[1:])
|
from bidict import bidict
from editor.utils.common_functions import (
get_lowest_byte_value,
round_down,
)
from editor.attributes.player.player_attribute import (
PlayerAttribute,
PlayerAttributeTypes,
)
from editor.attributes.player.player_attribute_option import (
PlayerAttributeOption,
)
from editor.attributes.player.player_attribute_bracelet_type import (
PlayerAttributeBraceletType,
)
from editor.attributes.player.player_attribute_bracelet_color import (
PlayerAttributeBraceletColor,
)
from editor.attributes.player.player_attribute_undershorts import (
PlayerAttributeUndershorts,
)
class PlayerAttributeBraceletTypeColorUndershorts(PlayerAttribute):
@classmethod
def att_class_name(cls):
return "Bracelet Type + Color/Undershorts"
@classmethod
def att_class_type(cls):
return PlayerAttributeTypes.Accessories
@classmethod
def att_class_hex_info(cls):
return None
@classmethod
def att_class_array_pos(cls):
return 99
@classmethod
def att_class_array_opts(cls):
return None
@classmethod
def att_class_array_opts_bracelet_type(cls):
options_by_value = bidict(
{
0: PlayerAttributeOption.OPT_N,
1: PlayerAttributeOption.OPT_L,
2: PlayerAttributeOption.OPT_R,
}
)
return options_by_value
@property
def array_opts_bracelet_type(self):
return self.att_class_array_opts_bracelet_type()
@classmethod
def att_class_array_opts_bracelet_color(cls):
options_by_value = bidict(
{
0: PlayerAttributeOption.OPT_WHITE,
4: PlayerAttributeOption.OPT_BLACK,
8: PlayerAttributeOption.OPT_RED,
12: PlayerAttributeOption.OPT_BLUE,
16: PlayerAttributeOption.OPT_YELLOW,
20: PlayerAttributeOption.OPT_GREEN,
24: PlayerAttributeOption.OPT_PURPLE,
28: PlayerAttributeOption.OPT_CYAN,
}
)
return options_by_value
@property
def array_opts_bracelet_color(self):
return self.att_class_array_opts_bracelet_color()
@classmethod
def att_class_array_opts_undershorts(cls):
options_by_value = bidict(
{
0: PlayerAttributeOption.OPT_N,
128: PlayerAttributeOption.OPT_Y,
}
)
return options_by_value
@property
def array_opts_undershorts(self):
return self.att_class_array_opts_undershorts()
@classmethod
def att_class_hidden(cls):
return True
def get_raw_value(self):
"""
Get byte value currently set in player's byte array
"""
of_data = self.player.option_file.data
value = of_data[self.player.address + self.array_pos]
return value
def get_value(self):
value = self.get_raw_value()
return value
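    # Byte layout note (inferred from the option tables above, not from any
    # official spec): a single byte appears to pack all three settings.
    #   bits 0-1: bracelet type (0=N, 1=L, 2=R)
    #   bits 2-4: bracelet color (0, 4, ..., 28, i.e. multiples of 4)
    #   bit 7:    undershorts flag (0=N, 128=Y)
    # The getters below unpack these fields with get_lowest_byte_value()/round_down(),
    # and get_value_from_label() repacks them by simple addition.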
def get_bracelet_type_label(self):
value = self.get_value()
value = get_lowest_byte_value(value, 4)
return self.array_opts_bracelet_type[value]
def get_bracelet_color_label(self):
value = self.get_value()
value = get_lowest_byte_value(value, 32)
value = round_down(value, 4)
return self.array_opts_bracelet_color[value]
def get_undershorts_label(self):
value = self.get_value()
value = round_down(value, 128)
return self.array_opts_undershorts[value]
def get_label(self):
bracelet_type_label = self.get_bracelet_type_label()
bracelet_color_label = self.get_bracelet_color_label()
undershorts_label = self.get_undershorts_label()
return (bracelet_type_label, bracelet_color_label, undershorts_label)
def set_value(self, value):
of_data = self.player.option_file.data
of_data[self.player.address + self.array_pos] = value
return True
def get_value_from_label(self, label):
first_opt_value = self.array_opts_bracelet_type.inverse[label[0]]
second_opt_value = self.array_opts_bracelet_color.inverse[label[1]]
third_opt_value = self.array_opts_undershorts.inverse[label[2]]
return first_opt_value + second_opt_value + third_opt_value
def set_value_from_label(self, label):
value = self.get_value_from_label(label)
self.set_value(value)
return True
def create_child_attributes(self):
"""
Create Bracelet Type, Color and Undershorts attributes
and link to this attribute
"""
self.bracelet_type = PlayerAttributeBraceletType(
self.player, parent=self
)
        self.bracelet_color = PlayerAttributeBraceletColor(
self.player, parent=self
)
self.undershorts = PlayerAttributeUndershorts(self.player, parent=self)
|
# Copyright (c) OpenMMLab. All rights reserved.
import time
import uuid
import warnings
from typing import Dict, List, Optional
import numpy as np
class Message():
"""Message base class.
    All message classes should inherit from this class. The basic use of a
    Message instance is to carry a piece of text (self.msg) and a dict that
    stores structured data (self.data), e.g. the frame image, model
    predictions, etc.
    A message may also hold route information, which is composed of
    information about all nodes the message has passed through.
Parameters:
msg (str): The text message.
data (dict, optional): The structured data.
"""
def __init__(self, msg: str = '', data: Optional[Dict] = None):
self.msg = msg
self.data = data if data else {}
self.route_info = []
self.timestamp = time.time()
self.id = uuid.uuid4()
def update_route_info(self,
node=None,
node_name: Optional[str] = None,
node_type: Optional[str] = None,
info: Optional[Dict] = None):
"""Append new node information to the route information.
Args:
node (Node, optional): An instance of Node that provides basic
information like the node name and type. Default: None.
node_name (str, optional): The node name. If node is given,
node_name will be ignored. Default: None.
node_type (str, optional): The class name of the node. If node
is given, node_type will be ignored. Default: None.
info (dict, optional): The node information, which is usually
given by node.get_node_info(). Default: None.
"""
if node is not None:
if node_name is not None or node_type is not None:
warnings.warn(
'`node_name` and `node_type` will be overridden if node'
'is provided.')
node_name = node.name
node_type = node.__class__.__name__
node_info = {'node': node_name, 'node_type': node_type, 'info': info}
self.route_info.append(node_info)
def set_route_info(self, route_info: List):
"""Directly set the entire route information.
Args:
route_info (list): route information to set to the message.
"""
self.route_info = route_info
def merge_route_info(self, route_info: List):
"""Merge the given route information into the original one of the
message. This is used for combining route information from multiple
messages. The node information in the route will be reordered according
to their timestamps.
Args:
route_info (list): route information to merge.
"""
self.route_info += route_info
self.route_info.sort(key=lambda x: x.get('timestamp', np.inf))
def get_route_info(self) -> List:
return self.route_info.copy()
class VideoEndingMessage(Message):
"""A special message to indicate the input video is ending."""
class FrameMessage(Message):
"""The message to store information of a video frame.
A FrameMessage instance usually holds following data in self.data:
- image (array): The frame image
- detection_results (list): A list to hold detection results of
multiple detectors. Each element is a tuple (tag, result)
- pose_results (list): A list to hold pose estimation results of
        multiple pose estimators. Each element is a tuple (tag, result)
"""
def __init__(self, img):
super().__init__(data=dict(image=img))
def get_image(self):
"""Get the frame image.
Returns:
array: The frame image.
"""
return self.data.get('image', None)
def set_image(self, img):
"""Set the frame image to the message."""
self.data['image'] = img
def add_detection_result(self, result, tag: str = None):
"""Add the detection result from one model into the message's
detection_results.
Args:
tag (str, optional): Give a tag to the result, which can be used
to retrieve specific results.
"""
if 'detection_results' not in self.data:
self.data['detection_results'] = []
self.data['detection_results'].append((tag, result))
def get_detection_results(self, tag: str = None):
"""Get detection results of the message.
Args:
tag (str, optional): If given, only the results with the tag
will be retrieved. Otherwise all results will be retrieved.
Default: None.
Returns:
list[dict]: The retrieved detection results
"""
if 'detection_results' not in self.data:
return None
if tag is None:
results = [res for _, res in self.data['detection_results']]
else:
results = [
res for _tag, res in self.data['detection_results']
if _tag == tag
]
return results
def add_pose_result(self, result, tag=None):
"""Add the pose estimation result from one model into the message's
pose_results.
Args:
tag (str, optional): Give a tag to the result, which can be used
to retrieve specific results.
"""
if 'pose_results' not in self.data:
self.data['pose_results'] = []
self.data['pose_results'].append((tag, result))
def get_pose_results(self, tag=None):
"""Get pose estimation results of the message.
Args:
tag (str, optional): If given, only the results with the tag
will be retrieved. Otherwise all results will be retrieved.
Default: None.
Returns:
list[dict]: The retrieved pose results
"""
if 'pose_results' not in self.data:
return None
if tag is None:
results = [res for _, res in self.data['pose_results']]
else:
results = [
res for _tag, res in self.data['pose_results'] if _tag == tag
]
return results
def get_full_results(self):
"""Get all model predictions of the message.
        See set_full_results() for reference.
Returns:
dict: All model predictions, including:
- detection_results
- pose_results
"""
result_keys = ['detection_results', 'pose_results']
results = {k: self.data[k] for k in result_keys}
return results
def set_full_results(self, results):
"""Set full model results directly.
Args:
results (dict): All model predictions including:
- detection_results (list): see also add_detection_results()
- pose_results (list): see also add_pose_results()
"""
self.data.update(results)
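# A minimal usage sketch (not part of the original module): it only exercises the
# public methods defined above, with a zero-filled array standing in for a real
# frame image. Run this file directly to print the stored results.
if __name__ == '__main__':
    frame = FrameMessage(np.zeros((4, 4, 3), dtype=np.uint8))
    frame.add_detection_result({'bbox': [0, 0, 2, 2]}, tag='toy_detector')
    frame.add_pose_result({'keypoints': []}, tag='toy_pose')
    frame.update_route_info(node_name='toy_node', node_type='ToyNode')
    print(frame.get_detection_results(tag='toy_detector'))
    print(frame.get_pose_results())
    print(frame.get_route_info())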
|
# -*- coding: utf-8 -*-
"""
Write a program that asks for the user's name in the console and, once the user has entered it, prints
the string ¡Hola <nombre>! to the screen, where <nombre> is the name the user entered.
"""
nombre = input("Introduce tu nombre: ")
print("¡Hola " + nombre + "!") |
from user_page.views.MyPage import *
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('helios_auth', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Institution',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=250)),
('short_name', models.CharField(max_length=100, blank=True)),
('main_phone', models.CharField(max_length=25)),
('sec_phone', models.CharField(max_length=25, blank=True)),
('address', models.TextField()),
('idp_address', models.URLField(unique=True)),
('upload_voters', models.BooleanField(default=False)),
],
),
migrations.CreateModel(
name='InstitutionUserProfile',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('email', models.EmailField(max_length=254)),
('expires_at', models.DateTimeField(default=None, null=True, blank=True)),
('active', models.BooleanField(default=False)),
('django_user', models.ForeignKey(to=settings.AUTH_USER_MODEL, unique=True)),
('helios_user', models.ForeignKey(default=None, blank=True, to='helios_auth.User', null=True)),
('institution', models.ForeignKey(to='heliosinstitution.Institution')),
],
options={
'permissions': (('delegate_institution_mngt', 'Can delegate institution management tasks'), ('revoke_institution_mngt', 'Can revoke institution management tasks'), ('delegate_election_mngt', 'Can delegate election management tasks'), ('revoke_election_mngt', 'Can revoke election management tasks')),
},
),
]
|
# import queue
from typing import List
class Solution:
"""
https://leetcode-cn.com/problems/zui-xiao-tiao-yue-ci-shu/submissions/
    Dynamic programming:
    1. First fix a known boundary value, e.g. the start or the end.
    2. Then loop over the states in a fixed order, forward or backward.
    3. Each state update may trigger two kinds of changes: updates to already-known DP entries and updates to unknown ones.
    4. The simplest case only updates the unknown entries.
    5. The tricky part of this problem is updating already-known states together with pruning, which is very elegant.
    Approach:
    1. dp[n] is the number of jumps needed to leave the array starting from position n.
    2. Transition: dp[n] = dp[n+jump[n]] + 1, i.e. jump from the known side onto an undetermined position and update it.
    3. After an update, already-known DP entries may improve: by default we only jump from left to right, so we must also
       consider first jumping from the right back to n and then jumping right again.
    4. The three pruning conditions of the inner loop:
       i < len(dp): stay inside the array
       i < n + jump[n]: beyond that point, routing through n cannot help
       dp[i] > dp[n]: if dp[i] <= dp[n], positions from i onward gain nothing by first jumping left to n
"""
def minJump(self, jump: List[int]) -> int:
max_value = int(1e6)+1
dp = [max_value]*len(jump)
dp[0] = 0
end = len(jump)-1
dp[end] = 1
for n in range(len(jump)-1, -1, -1):
if n+jump[n]>=len(dp):
dp[n]=1
else:
dp[n] = dp[n+jump[n]] + 1
i = n+1
while i<len(dp) and dp[i]>dp[n] and i < n+jump[n]:
dp[i] = min([dp[i], dp[n]+1])
i+=1
return dp[0]
if __name__ == '__main__':
s = Solution()
ans = s.minJump([2, 5, 1, 1, 1, 1])
print(ans)
ans = s.minJump([1])
print(ans)
ans = s.minJump([1,2])
print(ans)
ans = s.minJump([1,2,1,1,1,1,1])
print(ans)
ans = s.minJump([1,3,1,1,1,1,1])
print(ans)
ans = s.minJump([2,3,6,7,4,3,10,4,4,9,3,4,6,6,4,8,8,4,1,3,5,7,5,1,5,4,9,3,6,5,5,4,4,10,1,7,10,1,9,5,8,5,5,9,3,10,10,4,1,1,4,5,9,10,2,2,5,7,7,5,5,10,8,6,1,6,10,3,6,1,5,5,10,7,8,10,6,8,2,2,9,9,10,10,7,6,1,8,6,8,7,3,4,1,7,8,7,10,1,4])
print(ans)
ans = s.minJump([24,100,2,25,27,70,78,90,66,67,25,78,21,65,62,71,77,61,83,80,62,84,24,37,56,55,91,13,17,6,3,40,10,51,3,88,18,25,92,40,2,41,8,43,43,35,70,82,71,42,37,37,92,15,87,53,46,36,78,51,93,81,2,3,79,73,83,32,74,97,55,87,39,98,82,73,38,68,43,30,5,55,25,74,96,19,86,76,23,24,69,14,82,50,74,2,8,91,36,22,59,100,34,61,4,20,36,13,39,49,5,65,21,96,10,78,30,72,9,87,81,43,52,69,83,72,16,77,52,99,37,14,30,95,28,31,94,13,34,71,42,87,53,80,21,59,45,56,51,26,38,67,43,14,10,44,49,96,55,48,74,97,20,53,50,92,44,10,99,55,12,72,6,88,51,51,40,8,90,89,61,90,12,88,50,37,33,10,25,87,99,27,8,81,64,14,100,20,64,26,38,78,11,42,3,36,24,13,11,16,50,32,3,73,95,3,34,13,96,59,10,56,62,69,36,34,61,65,38,95,81,2,54,96,5,23,51,70,83,54,29,8,100,89,36,83,46,24,23,87,85,97,41,84,4,70,81,75,35,16,9,93,38,98,20,43,50,56,72,59,17,21,21,90,32,62,82,61,54,100,84,85,88,96,64,96,34,25,69,62,13,10,48,21,75,75,72,50,28,38,58,8,35,5,72,24,76,31,86,35,27,73,51,71,38,20,2,50,30,93,29,57,63,44,19,65,100,99,13,72,52,87,99,73,55,44,48,71,30,58,25,35,22,8,11,27,38,98,37,49,38,50,54,36,31,47,100,1,62,81,13,43,36,26,92,25,47,49,89,6,78,92,61,77,92,65,94,39,3,47,66,62,87,77,79,99,72,41,92,92,38,67,17,92,6,42,97,50,49,13,54,41,99,49,95,11,68,52,65,1,38,26,20,52,57,87,65,98,73,13,40,34,13,9,97,44,56,36,95,82,32,77,4,99,62,22,2,99,71,32,63,95,50,90,78,60,45,54,32,87,34,82,38,83,56,67,60,80,16,91,90,3,47,19,73,71,15,15,40,100,34,36,22,20,93,60,64,41,69,81,35,3,50,26,9,32,52,76,77,12,83,55,58,31,5,92,50,7,43,12,35,11,33,25,92,6,70,1,60,46,49,74,10,19,99,18,2,3,64,56,64,72,38,59,83,24,97,2,59,52,35,90,58,41,24,73,24,2,44,99,100,72,15,94,65,59,68,49,27,33,47,52,71,19,96,22,48,12,85,37,71,38,35,3,66,46,32,10,16,17,36,18,87,62,91,90,29,21,3,32,71,51,57,37,36,1,33,39,26,1,44,50,41,44,10,88,72,90,52,15,97,86,71,30,47,16,91,99,33,99,33,52,27,23,67,62,85,43,93,60,50,98,16,39,99,57,54,53,26,25,56,48,15,65,92,34,57,96,99,76,39,99,93,76,64,32,4,93,33,51,13,16,63,28,84,61,97,8,9,71,89,19,62,84,34,9,90,20,9,18,95,9,78,80,65,81,85,92,29,66,56,43,81,44,95,9,44,85,21,29,19,80,1,52,82,20,79,14,98,3,41,13,11,36,4,40,3,12,13,73,82,80,85,81,14,9,25,82,73,72,49,33,55,96,99,58,92,33,8,89,94,55,88,89,29,53,74,67,9,88,90,44,93,7,95,43,25,32,94,75,33,13,29,9,86,50,92,57,36,27,33,2,49,28,35,82,77,61,62,11,49,8,12,78,66,6,100,5,74,10,46,43,1,95,89,3,81,66,61,100,53,25,95,86,5,23,77,35,24,25,9,96,33,69,43,60,76,10,66,49,12,98,54,97,27,25,26,9,50,72,59,83,4,97,88,8,15,38,73,57,52,32,75,32,30,33,12,6,94,93,25,44,85,19,28,45,68,35,40,30,47,87,61,50,32,37,54,76,79,70,21,65,93,54,89,55,53,24,97,17,42,24,59,41,5,38,66,69,71,27,17,18,21,57,7,44,35,86,60,58,6,1,98,44,96,68,87,43,41,63,40,30,42,52,74,32,80,26,75,44,99,34,3,15,57,73,82,71,70,11,50,69,62,93,50,71,15,24,93,90,70,57,51,25,56,45,82,10,80,62,75,8,32,15,2,85,87,54,89,7,32,79,56,64,80,28,61,37,9,78,28,38,3,12,8,19,70,30,83,47,77,13,74,47,16,73,17,58,64,97,98,80,44,89,43,45,78,20,80,64,100,76,75,31,86,87,93,19,16,54])
print(ans)
'''
# correct
[1, 2, 3, 4, 5, 6] [0, 1, 1000001, 1000001, 1000001, 1000001, 1000001]
[1, 2, 3, 4, 5, 6] [0, 1, 3, 2, 1000001, 1000001, 1000001]
[1, 2, 3, 4, 5, 6] [0, 1, 3, 2, 1000001, 1000001, 1000001]
[1, 2, 3, 4, 5, 6] [0, 1, 3, 2, 3, 1000001, 1000001]
[1, 2, 3, 4, 5, 6] [0, 1, 3, 2, 3, 4, 1000001]
[1, 2, 3, 4, 5, 6] [0, 1, 3, 2, 3, 4, 5]
6
[3, 4, 5, 6] [0, 1, 2, 1000001, 1000001, 1000001, 1000001]
[] [0, 1, 2, 2, 3, 3, 3]
[] [0, 1, 2, 2, 3, 3, 3]
[] [0, 1, 2, 2, 3, 3, 3]
[] [0, 1, 2, 2, 3, 3, 3]
[] [0, 1, 2, 2, 3, 3, 3]
4
'''
|
# Generated by Django 2.2.5 on 2019-10-09 02:52
from dateutil import tz
from django.db import migrations, models
from django.utils import timezone
def migrate_data_forward(Series):
tzinfo = tz.gettz("America/New_York")
for series in Series.objects.all().iterator():
series.datecounted = series.datetimecounted.astimezone(tzinfo).date()
series.save()
def migrate_data_forward_animalcount(apps, schema_editor):
Series = apps.get_model("zoo_checks", "AnimalCount")
migrate_data_forward(Series)
def migrate_data_forward_groupcount(apps, schema_editor):
Series = apps.get_model("zoo_checks", "GroupCount")
migrate_data_forward(Series)
def migrate_data_forward_speciescount(apps, schema_editor):
Series = apps.get_model("zoo_checks", "SpeciesCount")
migrate_data_forward(Series)
def migrate_data_backward(apps, schema_editor):
pass # code for reverting migration, if any
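# The operations below follow the usual three-step pattern for introducing a
# non-null column on a populated table: add the field as nullable, backfill it
# from datetimecounted via RunPython, then alter it to carry a default.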
class Migration(migrations.Migration):
dependencies = [("zoo_checks", "0026_auto_20191008_2248")]
operations = [
migrations.AddField(
model_name="animalcount",
name="datecounted",
field=models.DateField(null=True),
),
migrations.RunPython(migrate_data_forward_animalcount, migrate_data_backward),
migrations.AlterField(
model_name="animalcount",
name="datecounted",
field=models.DateField(default=timezone.localdate),
),
migrations.AddField(
model_name="groupcount",
name="datecounted",
field=models.DateField(null=True),
),
migrations.RunPython(migrate_data_forward_groupcount, migrate_data_backward),
migrations.AlterField(
model_name="groupcount",
name="datecounted",
field=models.DateField(default=timezone.localdate),
),
migrations.AddField(
model_name="speciescount",
name="datecounted",
field=models.DateField(null=True),
),
migrations.RunPython(migrate_data_forward_speciescount, migrate_data_backward),
migrations.AlterField(
model_name="speciescount",
name="datecounted",
field=models.DateField(default=timezone.localdate),
),
]
|
import math
import numpy as np
from torch.utils.data import Sampler
from pidepipe.dl.utils.experiment import set_global_seed
def get_num_after_point(x: float) -> int:
balance_int = str(x)
if not '.' in balance_int:
return 0
return len(balance_int) - balance_int.index('.') - 1
def gcd(arr: [int]) -> int:
result = arr[0]
for i in arr[1:]:
result = math.gcd(result, i)
return result
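# Worked example (illustration only) of how BalanceSampler below converts a float
# balance into an integer ratio: for balance = [0.25, 0.75], get_num_after_point
# returns [2, 2], so the values are scaled by 10**2 to [25, 75]; gcd([25, 75]) is 25,
# giving min_balance_int = [1, 3], i.e. the smallest draw respecting the balance is
# 1 + 3 = 4 samples per round.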
class BalanceSampler(Sampler):
def __init__(
self,
labels: [int],
balance: [float],
shuffle=True,
seed: int = None
):
labels = np.array(labels)
balance = np.array(balance)
assert np.sum(balance) == 1, 'Sum of balances should be equal to 1'
samples_per_class = np.array([
np.sum(class_idx == labels) for class_idx in np.unique(labels)
])
assert balance.shape == samples_per_class.shape, f'Number of balances ({balance.shape[0]}) should be equal to number of classes ({samples_per_class.shape[0]})'
# Calculate min number of samples for balance.
num_after_point_vec = np.vectorize(get_num_after_point)(balance)
num_after_point = np.max(num_after_point_vec)
balance_int = balance * 10**num_after_point
balance_int = balance_int.astype(np.int64)
min_balance_int = balance_int // gcd(balance_int)
# Calculate max number of samples for balance.
count = 0
while (samples_per_class - min_balance_int >= 0).all():
samples_per_class -= min_balance_int
count += 1
self.samples_counts = count * min_balance_int
self.len = np.sum(self.samples_counts)
self.labels_idxs = [
np.arange(labels.size)[labels == label].tolist() for label in np.unique(labels)
]
self.shuffle = shuffle
self.seed = seed
def __iter__(self):
if self.seed is not None:
np.random.seed(self.seed)
indices = []
for label_idxs, samples_count in zip(self.labels_idxs, self.samples_counts):
replace = samples_count > len(label_idxs)
idxs = np.random.choice(
label_idxs,
size=samples_count,
replace=replace
)
indices.extend(idxs.tolist())
assert (len(indices) == self.len)
if self.shuffle:
np.random.shuffle(indices)
return iter(indices)
def __len__(self):
return self.len
if __name__ == "__main__":
set_global_seed(9)
a = [0] * 1
b = [1] * 2
c = [2] * 2000
d = [3] * 7
sampler = BalanceSampler(
labels=a + b + c + d,
balance=[0.1, 0.2, 0, 0.7]
)
for i in sampler:
print(i)
# idxs = next(iter(sampler))
# print(idxs)
|
import requests
import time
import hashlib
import json
class Github():
def __init__(self, config, avoid_rate_limiting=True):
self.config = config
self.avoid_rate_limiting = avoid_rate_limiting
def __query_github(self, query):
url = self.config.get('github', 'url') + query
r = requests.get(
url, headers={'Authorization': 'token ' + self.config.get('github', 'token')})
if r.status_code != 200:
print('Error while fetching data')
return r.json()
def get_user_name(self):
json_data = self.__query_github('/user')
return json_data['name']
def get_all_commits_raw(self):
json_data = []
page = 0
while (True):
query = '/repos/' + \
self.config.get('github', 'repository') + \
'/commits?per_page=50&page=' + str(page)
new_data = self.__query_github(query)
if not new_data:
break
json_data += new_data
page += 1
print('get_all_commits... got page ' + str(page))
if self.avoid_rate_limiting:
time.sleep(1)
return json_data
def get_all_commits_annotated(self):
timestamp = time.ctime()
raw_commits = self.get_all_commits_raw()
annotated_commits = []
for commit in raw_commits:
annotated_commits.append(self.__annotate_commit(commit, timestamp))
return annotated_commits
def __annotate_commit(self, commit_json, timestamp):
data_hash = hashlib.sha224(json.dumps(
commit_json, sort_keys=True).encode('utf-8')).hexdigest()
commit_json['data_hash'] = data_hash
commit_json['last_updated_at'] = timestamp
return commit_json
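# Minimal usage sketch (illustrative only; the section and key names are taken
# from the calls above, the values are placeholders):
#
#   import configparser
#   config = configparser.ConfigParser()
#   config['github'] = {
#       'url': 'https://api.github.com',
#       'token': '<personal-access-token>',
#       'repository': '<owner>/<repo>',
#   }
#   gh = Github(config)
#   print(gh.get_user_name())
#   commits = gh.get_all_commits_annotated()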
|
#!/usr/bin/env python3
# coding = utf8
import unittest as ut
from mykit.core._control import tags_mapping, parse_to_tagdict, check_valid_map
class test_control_map(ut.TestCase):
_mapDict = {
"abc": {"l":"abc", "C": "Abc", "U": "ABC"},
"def": {"l":"def", "C": "Def", "U": "DEF"},
"ghi": {"l":"ghi", "C": "Ghi", "U": "GHI"},
}
def test_tags_mapping(self):
_mappedTag = tags_mapping(self._mapDict, "l", "C", "abc", "def", "ghi")
self.assertTupleEqual(("Abc","Def","Ghi"), _mappedTag)
_mappedTag = tags_mapping(self._mapDict, "C", "l", "abc", "def", "ghi")
self.assertTupleEqual((None,)*3, _mappedTag)
_mappedTag = tags_mapping(self._mapDict, "C", "l", "Abc", "Def", "Ghi")
self.assertTupleEqual(("abc","def","ghi"), _mappedTag)
_mappedTag = tags_mapping(self._mapDict, "l", "U", "abc", "def", "ghi")
self.assertTupleEqual(("ABC","DEF","GHI"), _mappedTag)
_mappedTag = tags_mapping(self._mapDict, "U", "l", "abc", "def", "ghi")
self.assertTupleEqual((None,)*3, _mappedTag)
def test_check_valid_map(self):
self.assertTrue(check_valid_map({"a":{}, "b":{}}))
self.assertTrue(self._mapDict, "l")
def test_parse_to_tagdict(self):
tags = {}
parse_to_tagdict(tags, self._mapDict, "l", abc=1)
self.assertDictEqual({"abc":1}, tags)
parse_to_tagdict(tags, self._mapDict, "C", Abc=3)
self.assertDictEqual({"abc":3}, tags)
# No change when parsing an unknown tag
parse_to_tagdict(tags, self._mapDict, "l", noexist=1)
self.assertDictEqual({"abc":3}, tags)
# No change when parsing a tag that does not correspond to its correct progName
parse_to_tagdict(tags, self._mapDict, "l", Abc=2)
self.assertDictEqual({"abc":3}, tags)
parse_to_tagdict(tags, self._mapDict, "U", ABC=2)
self.assertDictEqual({"abc":2}, tags)
# parse multiple tags
parse_to_tagdict(tags, self._mapDict, "U", DEF=1, GHI=3)
self.assertDictEqual({"abc":2, "def":1, "ghi":3}, tags)
def test_extract_from_tagdict(self):
pass
if __name__ == '__main__':
    ut.main()
|
from brian2 import *
# ###########################################
# Defining network model parameters
# ###########################################
simtime = 0.5*second # Simulation time
number = { 'CA3':100, 'I':10, 'CA1':1 }
epsilon = { 'CA3_CA1':0.1,'CA3_I':1.0,'I_CA1':1.0 } # Sparseness of synaptic connections
tau_ampa = 5.0*ms # Glutamatergic synaptic time constant
tau_gaba = 10.0*ms # GABAergic synaptic time constant
# ###########################################
# Neuron model
# ###########################################
gl = 10.0*nsiemens # Leak conductance
el = -60*mV # Resting potential
er = -80*mV # Inhibitory reversal potential
vt = -50.*mV # Spiking threshold
memc = 200.0*pfarad # Membrane capacitance
eqs_neurons='''
dv/dt= (-gl*(v-el) - (g_ampa*v + g_gaba*(v-er)))/memc : volt (unless refractory)
dg_ampa/dt = -g_ampa/tau_ampa : siemens
dg_gaba/dt = -g_gaba/tau_gaba : siemens '''
# ###########################################
# Interneuron specific
# ###########################################
delta = 10.*ms
# ###########################################
# Initialize neuron group
# ###########################################
CA3 = SpikeGeneratorGroup(number['CA3'], arange(number['CA3']), 100*ones(number['CA3'])*ms)
I = NeuronGroup(number['I'], model=eqs_neurons, threshold='v > vt', reset='v=el', refractory=1*ms, method='euler')
CA1 = NeuronGroup(number['CA1'], model=eqs_neurons, threshold='v > vt', reset='v=el', refractory=5*ms, method='euler')
# ###########################################
# Connecting the network
# ###########################################
CA3_CA1 = Synapses(CA3, CA1, on_pre='g_ampa += 0.3*nS')
CA3_CA1.connect(p=epsilon['CA3_CA1'])
CA3_I = Synapses(CA3, I, on_pre='g_ampa += 0.3*nS')
CA3_I.connect(p=epsilon['CA3_I'])
I_CA1 = Synapses(I, CA1, on_pre='g_gaba += 0.3*nS',delay=delta)
I_CA1.connect(p=epsilon['I_CA1'])
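# Net effect of the wiring above: CA3 drives both CA1 and the interneurons I
# through AMPA synapses, while I inhibits CA1 through GABA synapses delayed by
# delta (10 ms), so CA1 receives delayed feed-forward inhibition.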
# ###########################################
# Setting up monitors
# ###########################################
sm = SpikeMonitor(CA3)
sm_i = SpikeMonitor(I)
trace = StateMonitor(CA1, 'v', record=True)
# ###########################################
# Run without plasticity
# ###########################################
run(simtime)
# ###########################################
# Make plots
# ###########################################
i, t = sm.it
subplot(111)
plot(t/ms, i, 'k-', ms=0.25)
title("Before")
xlabel("time (ms)")
yticks([])
xlim(0*1e3, 2*1e3)
show()
i, t = sm_i.it
subplot(111)
plot(t/ms, i, 'r-', ms=0.2)
title("Before")
xlabel("time (ms)")
yticks([])
xlim(0*1e3, 2*1e3)
show()
v = trace.v[0].T
t = trace.t
subplot(111)
plot(t/ms, v, 'k', ms=0.1)
ylim((-0.065,-0.058) )
xlabel("time (ms)")
title("Voltage")
show()
|
'''Tests for the Artist class
Note: these tests are not exhaustive and could always be improved. Since the
Spotify REST api is mocked, if it's functionality ever changes these tests may
become obsolete.
Last updated: May 25, 2020
'''
# These 2 statements are fine to include in your test file
#pylint: disable=missing-class-docstring
#pylint: disable=missing-function-docstring
# These are here so the template in particular passes pylint; don't copy them
#pylint: disable=no-name-in-module
#pylint: disable=no-member
#pylint: disable=import-error
#pylint: disable=redundant-unittest-assert
# Standard library imports
import unittest
from unittest.mock import patch
# Local imports
from tests.help_lib import get_dummy_data
import spotifython.constants as const
import spotifython.utils as utils
ARTIST_ID = 'deadbeef'
TOKEN = 'feebdaed'
class TestArtist(unittest.TestCase):
# This function is called before every test_* function. Anything that is
# needed by every test_* function should be stored in 'self' here.
def setUp(self):
# Note: since we're mocking Spotify and never actually using the token,
# we can put any string here for the token.
self.session = Session(TOKEN)
# Mock the sp._request method so that we never actually reach Spotify
self.patcher = patch.object(utils, 'request', autospec=True)
# Add cleanup to unmock sp._request. Cleanup always called after trying
# to execute a test, even if the test or setUp errors out / fails.
self.addCleanup(self.patcher.stop)
# Create the actual mock object
self.request_mock = self.patcher.start()
# This function is called after every test_* function. Use it to clean up
# any resources initialized by the setUp function. Only include it if you
# actually need to clean up resources.
def tearDown(self):
pass
# Test __str__, __repr__
@unittest.skip('Not yet implemented')
def test_str_overloads(self):
artist = get_dummy_data(const.ARTISTS, limit=1, to_obj=True)[0]
self.assertTrue(isinstance(artist.__str__(), str))
self.assertTrue(isinstance(artist.__repr__(), str))
# Test __eq__, __ne__, __hash__
def test_equality_overloads(self):
artists = get_dummy_data(const.ARTISTS, limit=2, to_obj=True)
self.assertTrue(artists[0] != artists[1])
self.assertTrue(artists[0] == artists[0])
self.assertTrue(artists[1] == artists[1])
# Test genres(), href(), spotify_id(), name(), popularity(), uri() when
# their corresponding fields are present
def test_field_accessors(self):
artists = get_dummy_data(const.ARTISTS, limit=2, to_obj=True)
artist_0, artist_1 = artists[0], artists[1]
        self.assertTrue(all(
            all(isinstance(genre, str) for genre in x.genres())
            for x in [artist_0, artist_1]
        ))
self.assertTrue(all(
isinstance(x.href(), str) for x in [artist_0, artist_1])
)
self.assertTrue(all(isinstance(x.spotify_id(), str) \
for x in [artist_0, artist_1]))
self.assertTrue(all(isinstance(x.name(), str) \
for x in [artist_0, artist_1]))
self.assertTrue(all(isinstance(x.popularity(), int) \
for x in [artist_0, artist_1]))
self.assertTrue(all(isinstance(x.uri(), str) \
for x in [artist_0, artist_1]))
# Test _update_fields()
def test_update_fields(self):
self.request_mock.return_value = (
get_dummy_data(const.ARTISTS, limit=1)[0],
200
)
expected_artist = get_dummy_data(
const.ARTISTS,
limit=1,
to_obj=True
)[0]
artist = Artist(
session=self.session,
info={
'id': expected_artist.spotify_id()
}
)
# Check state before updating the fields
self.assertTrue(artist == expected_artist)
# pylint: disable=protected-access
self.assertEqual(artist._raw.__len__(), 1)
# Check state after updating the fields
artist._update_fields()
self.assertTrue(artist == expected_artist)
# pylint: disable=protected-access
self.assertEqual(artist._raw.__len__(), expected_artist._raw.__len__())
# Test albums()
def test_albums_with_limit(self):
search_limit = 100
expected_albums_json = get_dummy_data(
const.ALBUMS,
limit=search_limit,
)
expected_albums = get_dummy_data(
const.ALBUMS,
limit=search_limit,
to_obj=True
)
self.request_mock.side_effect = [
(
{
'href': 'href_uri',
'items': expected_albums_json[:50],
'limit': 50,
'next': 'next_here',
'offset': 0,
'previous': 'previous_uri',
'total': 100,
},
200
),
(
{
'href': 'href_uri',
'items': expected_albums_json[50:100],
'limit': 50,
'next': 'next_here',
'offset': 50,
'previous': 'previous_uri',
'total': 100,
},
200
),
(
{
'href': 'href_uri',
'items': [],
'limit': 50,
'next': None,
'offset': 100,
'previous': 'previous_uri',
'total': 100,
},
200
)
]
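        # Three canned responses emulate Spotify-style pagination: two full pages
        # of 50 albums each, then an empty page signalling that albums() should
        # stop requesting.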
artist = get_dummy_data(const.ARTISTS, limit=1, to_obj=True)[0]
albums = artist.albums(search_limit=search_limit)
self.assertEqual(albums, expected_albums)
def test_albums_with_no_limit(self):
search_limit = 100
expected_albums_json = get_dummy_data(
const.ALBUMS,
limit=search_limit,
)
expected_albums = get_dummy_data(
const.ALBUMS,
limit=search_limit,
to_obj=True
)
self.request_mock.side_effect = [
(
{
'href': 'href_uri',
'items': expected_albums_json[:50],
'limit': 50,
'next': 'next_here',
'offset': 0,
'previous': 'previous_uri',
'total': 100,
},
200
),
(
{
'href': 'href_uri',
'items': expected_albums_json[50:100],
'limit': 50,
'next': 'next_here',
'offset': 50,
'previous': 'previous_uri',
'total': 100,
},
200
),
(
{
'href': 'href_uri',
'items': [],
'limit': 50,
'next': None,
'offset': 100,
'previous': 'previous_uri',
'total': 100,
},
200
)
]
artist = get_dummy_data(const.ARTISTS, limit=1, to_obj=True)[0]
albums = artist.albums()
self.assertEqual(albums, expected_albums)
# Test top_tracks()
def test_top_tracks(self):
self.request_mock.return_value = (
{
'tracks': get_dummy_data(const.TRACKS, limit=10)
},
200
)
expected_tracks = get_dummy_data(const.TRACKS, limit=10, to_obj=True)
artist = get_dummy_data(const.ARTISTS, limit=1, to_obj=True)[0]
tracks = artist.top_tracks()
self.assertEqual(tracks, expected_tracks)
# Test related_artists()
def test_related_artists(self):
self.request_mock.return_value = (
{
'artists': get_dummy_data(const.ARTISTS, limit=20)
},
200
)
expected_artists = get_dummy_data(const.ARTISTS, limit=20, to_obj=True)
artist = get_dummy_data(const.ARTISTS, limit=1, to_obj=True)[0]
related_artists = artist.related_artists()
self.assertEqual(related_artists, expected_artists)
# This allows the tests to be executed
if __name__ == '__main__':
unittest.main()
#pylint: disable=wrong-import-position
#pylint: disable=wrong-import-order
from spotifython.artist import Artist
from spotifython.session import Session
|
import timm
import torch
import torch.nn as nn
from nnAudio import Spectrogram
from scalers import standard_scaler
class GeM(nn.Module):
"""
Code modified from the 2d code in
https://amaarora.github.io/2020/08/30/gempool.html
"""
def __init__(self, kernel_size=8, p=3, eps=1e-6):
super(GeM, self).__init__()
self.p = nn.Parameter(torch.ones(1) * p)
self.kernel_size = kernel_size
self.eps = eps
def forward(self, x):
return self.gem(x, p=self.p, eps=self.eps)
def gem(self, x, p=3, eps=1e-6):
with torch.cuda.amp.autocast(enabled=False): # to avoid NaN issue for fp16
return nn.functional.avg_pool1d(
x.clamp(min=eps).pow(p), self.kernel_size
).pow(1.0 / p)
def __repr__(self):
return (
self.__class__.__name__
+ "("
+ "p="
+ "{:.4f}".format(self.p.data.tolist()[0])
+ ", "
+ "eps="
+ str(self.eps)
+ ")"
)
class ResBlockGeM(nn.Module):
def __init__(
self,
in_channels,
out_channels,
kernel_size=3,
downsample=1,
act=nn.SiLU(inplace=True),
):
super().__init__()
self.act = act
if downsample != 1 or in_channels != out_channels:
self.residual_function = nn.Sequential(
nn.Conv1d(
in_channels,
out_channels,
kernel_size=kernel_size,
padding=kernel_size // 2,
bias=False,
),
nn.BatchNorm1d(out_channels),
act,
nn.Conv1d(
out_channels,
out_channels,
kernel_size=kernel_size,
padding=kernel_size // 2,
bias=False,
),
nn.BatchNorm1d(out_channels),
GeM(kernel_size=downsample), # downsampling
)
self.shortcut = nn.Sequential(
nn.Conv1d(
in_channels,
out_channels,
kernel_size=kernel_size,
padding=kernel_size // 2,
bias=False,
),
nn.BatchNorm1d(out_channels),
GeM(kernel_size=downsample), # downsampling
) # skip layers in residual_function, can try simple MaxPool1d
else:
self.residual_function = nn.Sequential(
nn.Conv1d(
in_channels,
out_channels,
kernel_size=kernel_size,
padding=kernel_size // 2,
bias=False,
),
nn.BatchNorm1d(out_channels),
act,
nn.Conv1d(
out_channels,
out_channels,
kernel_size=kernel_size,
padding=kernel_size // 2,
bias=False,
),
nn.BatchNorm1d(out_channels),
)
self.shortcut = nn.Sequential()
def forward(self, x):
return self.act(self.residual_function(x) + self.shortcut(x))
class AdaptiveConcatPool1d(nn.Module):
"Layer that concats `AdaptiveAvgPool1d` and `AdaptiveMaxPool1d`"
def __init__(self, size=None):
super().__init__()
self.size = size or 1
self.ap = nn.AdaptiveAvgPool1d(self.size)
self.mp = nn.AdaptiveMaxPool1d(self.size)
def forward(self, x):
return torch.cat([self.mp(x), self.ap(x)], 1)
class Extractor(nn.Sequential):
def __init__(
self, in_c=8, out_c=8, kernel_size=64, maxpool=8, act=nn.SiLU(inplace=True)
):
super().__init__(
nn.Conv1d(in_c, out_c, kernel_size=kernel_size, padding=kernel_size // 2),
nn.BatchNorm1d(out_c),
act,
nn.Conv1d(out_c, out_c, kernel_size=kernel_size, padding=kernel_size // 2),
# nn.MaxPool1d(kernel_size=maxpool),
GeM(kernel_size=maxpool),
)
class ModelIafossV2(nn.Module):
def __init__(self, n=8, nh=256, act=nn.SiLU(inplace=True), ps=0.5):
super().__init__()
self.ex = nn.ModuleList(
[
nn.Sequential(
Extractor(1, n, 127, maxpool=2, act=act),
ResBlockGeM(n, n, kernel_size=31, downsample=4, act=act),
ResBlockGeM(n, n, kernel_size=31, act=act),
),
nn.Sequential(
Extractor(1, n, 127, maxpool=2, act=act),
ResBlockGeM(n, n, kernel_size=31, downsample=4, act=act),
ResBlockGeM(n, n, kernel_size=31, act=act),
),
]
)
self.conv1 = nn.ModuleList(
[
nn.Sequential(
ResBlockGeM(
1 * n, 1 * n, kernel_size=31, downsample=4, act=act
), # 512
ResBlockGeM(1 * n, 1 * n, kernel_size=31, act=act),
),
nn.Sequential(
ResBlockGeM(
1 * n, 1 * n, kernel_size=31, downsample=4, act=act
), # 512
ResBlockGeM(1 * n, 1 * n, kernel_size=31, act=act),
),
nn.Sequential(
ResBlockGeM(
3 * n, 3 * n, kernel_size=31, downsample=4, act=act
), # 512
ResBlockGeM(3 * n, 3 * n, kernel_size=31, act=act),
), # 128
]
)
self.conv2 = nn.Sequential(
ResBlockGeM(6 * n, 4 * n, kernel_size=15, downsample=4, act=act),
ResBlockGeM(4 * n, 4 * n, kernel_size=15, act=act), # 128
ResBlockGeM(4 * n, 8 * n, kernel_size=7, downsample=4, act=act), # 32
ResBlockGeM(8 * n, 8 * n, kernel_size=7, act=act), # 8
)
self.head = nn.Sequential(
AdaptiveConcatPool1d(),
nn.Flatten(),
nn.Linear(n * 8 * 2, nh),
nn.BatchNorm1d(nh),
nn.Dropout(ps),
act,
nn.Linear(nh, nh),
nn.BatchNorm1d(nh),
nn.Dropout(ps),
act,
nn.Linear(nh, 1),
)
def forward(self, x):
x0 = [
self.ex[0](x[:, 0].unsqueeze(1)),
self.ex[0](x[:, 1].unsqueeze(1)),
self.ex[1](x[:, 2].unsqueeze(1)),
]
x1 = [
self.conv1[0](x0[0]),
self.conv1[0](x0[1]),
self.conv1[1](x0[2]),
self.conv1[2](torch.cat([x0[0], x0[1], x0[2]], 1)),
]
x2 = torch.cat(x1, 1)
return self.head(self.conv2(x2))
class Combined1D2D(nn.Module):
def __init__(self, model1d, encoder="resnet18", emb_1d=128):
super().__init__()
self.model1d = model1d
        # Replace last linear layer to return an embedding of size emb_1d
head = list(self.model1d.head.children())
new_linear = nn.Linear(head[-1].in_features, emb_1d)
self.model1d.head = nn.Sequential(*head[:-1] + [new_linear])
self.model2d = timm.create_model(
encoder,
pretrained=True,
num_classes=0, # 0 = feature extraction
in_chans=4,
)
# Find the embedding size of model2d
o = self.model2d(torch.randn(2, 4, 224, 224))
emb_2d = o.shape[-1]
self.head = nn.Sequential(
nn.Linear(emb_1d + emb_2d, 128),
nn.ReLU(),
nn.Linear(128, 1),
)
self.spec_transform = Spectrogram.CQT1992v2(
sr=2048,
fmin=20,
fmax=1000,
hop_length=8, # img width = sig_length / hop_length
window="flattop",
# Oversampling freq axis
bins_per_octave=48,
filter_scale=0.25,
)
def frequency_encoding(self, x):
device = x.device
bs, fbins, t = x.shape[0], x.shape[2], x.shape[3]
freq_encoding = 2 * torch.arange(fbins, device=device) / fbins - 1 # -1 to +1
freq_encoding = torch.stack([freq_encoding] * t, -1).unsqueeze(0)
freq_encoding = torch.stack([freq_encoding] * bs)
return torch.cat([x, freq_encoding], 1)
def prepare_image(self, x):
bs = x.shape[0]
x_reshaped = x.reshape(-1, 4096)
spec = self.spec_transform(x_reshaped)
spec = spec.reshape(bs, 3, spec.shape[1], spec.shape[2])
spec = standard_scaler(spec)
spec = self.frequency_encoding(spec)
return spec
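    # Shape walk-through of prepare_image (as written above): x (bs, 3, 4096) is
    # flattened to (bs*3, 4096), passed through the CQT to get (bs*3, F, T),
    # reshaped back to (bs, 3, F, T), standard-scaled, and then
    # frequency_encoding appends one channel of per-bin positions in [-1, 1],
    # yielding (bs, 4, F, T) to match in_chans=4 of the timm encoder.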
def forward(self, x):
out_1d = self.model1d(x)
out_2d = self.model2d(self.prepare_image(x))
embedding = torch.cat([out_1d, out_2d], -1)
return self.head(embedding)
if __name__ == "__main__":
x = torch.randn(size=(32, 3, 4096))
model_1d = ModelIafossV2() # Load from checkpoint etc.
model = Combined1D2D(model_1d, "resnet18")
out = model(x)
print(out.shape)
|
import os
import dotenv
dotenv.load_dotenv()
APP_PORT = int(os.environ.get('APP_PORT', '8000'))
DEV = bool(int(os.environ.get('DEV')))
TESTNET = bool(int(os.environ.get('TESTNET')))
LOCAL = bool(int(os.environ.get('LOCAL')))
GOOGLE_CLIENT_KEY_FILENAME = 'gclient-keys.json'
MSCAN_APIKEY = os.environ.get('MSCAN_APIKEY')
BIP2PHONE_API_KEY = os.environ.get('BIP2PHONE_API_KEY')
GIFTERY_TEST_API = bool(int(os.environ.get('GIFTERY_TEST_API')))
DEV_GIFTERY_API_ID = os.environ.get('DEV_GIFTERY_API_ID')
DEV_GIFTERY_API_SECRET = os.environ.get('DEV_GIFTERY_API_SECRET')
GIFTERY_API_ID = os.environ.get('GIFTERY_API_ID')
GIFTERY_API_SECRET = os.environ.get('GIFTERY_API_SECRET')
GRATZ_API_KEY = os.environ.get('GRATZ_API_KEY')
UNU_API_KEY = os.environ.get('UNU_API_KEY')
BIP_WALLET = os.environ.get('BIP_WALLET')
SMTP_HOST = 'smtp-mail.outlook.com'
SMTP_PORT = 587
EMAIL_SENDER = "[email protected]"
EMAIL_PASS = os.environ.get('EMAIL_PASS')
GRATZ_OWNER_EMAIL = '[email protected]'
DEV_EMAIL = '[email protected]'
ADMIN_PASS = os.environ.get('ADMIN_PASS')
DB_NAME = os.environ.get('{}DB_NAME'.format('DEV_' if DEV else ''))
DB_USER = os.environ.get('{}DB_USER'.format('DEV_' if DEV else ''))
if LOCAL:
DB_USER = 'ivankotelnikov'
LOCAL_URL = 'http://127.0.0.1:8000'
REMOTE_URL = 'https://{}push.money'.format('dev.' if DEV else '')
YYY_PUSH_URL = 'https://{}yyy.cash/push/'.format('dev.' if DEV else '')
YOUTUBE_APIKEY = os.environ.get('YOUTUBE_APIKEY')
LOG_LEVEL = os.getenv('LOG_LEVEL', 'DEBUG')
NODE_API = os.getenv('NODE_API', 'funfasy')
FUNFASY_PROJECT_ID = os.getenv('FUNFASY_PROJECT_ID', '')
FUNFASY_PROJECT_SECRET = os.getenv('FUNFASY_PROJECT_SECRET', '')
class FlaskConfig:
LOCAL = LOCAL
DATABASE = {
'name': DB_NAME,
'engine': 'peewee.PostgresqlDatabase',
'user': DB_USER
}
FLASK_ADMIN_SWATCH = 'cyborg'
BASE_URL = LOCAL_URL if LOCAL else REMOTE_URL
UPLOADED_IMAGES_DEST = 'content/user_images'
UPLOADED_IMAGES_URL = BASE_URL + '/api/upload/'
SECRET_KEY = os.environ.get('APP_SECRET_KEY')
REMEMBER_COOKIE_NAME = 'keep'
LOGIN_DISABLED = DEV
SECURITY_DEFAULT_REMEMBER_ME = True
SECURITY_CONFIRMABLE = True
SECURITY_TRACKABLE = True
SECURITY_PASSWORD_HASH = "pbkdf2_sha512"
SECURITY_PASSWORD_SALT = os.environ.get('SECURITY_PASSWORD_SALT')
SECURITY_LOGIN_URL = '/auth/login/'
SECURITY_LOGOUT_URL = '/auth/logout/'
# SECURITY_POST_LOGIN_VIEW = '/auth/login/email'
# SECURITY_POST_LOGOUT_VIEW = '/admin/'
SOCIAL_AUTH_PIPELINE = (
'social_core.pipeline.social_auth.social_details',
'social_core.pipeline.social_auth.social_uid',
'social_core.pipeline.social_auth.auth_allowed',
'social_core.pipeline.social_auth.social_user',
'social_core.pipeline.user.get_username',
# Send a validation email to the user to verify its email address.
# 'social_core.pipeline.mail.mail_validation',
# Associates the current social details with another user account with
# a similar email address.
'social_core.pipeline.social_auth.associate_by_email',
'social_core.pipeline.user.create_user',
'social_core.pipeline.social_auth.associate_user',
'social_core.pipeline.social_auth.load_extra_data',
'social_core.pipeline.user.user_details'
)
SOCIAL_AUTH_REDIRECT_IS_HTTPS = not LOCAL
SOCIAL_AUTH_LOGIN_REDIRECT_URL = 'https://yyy.cash'
SOCIAL_AUTH_AUTHENTICATION_BACKENDS = (
'social_core.backends.google.GoogleOAuth2',
'social_core.backends.telegram.TelegramAuth',
'social_core.backends.email.EmailAuth'
)
SOCIAL_AUTH_USER_MODEL = 'api.models.User'
SOCIAL_AUTH_STORAGE = 'social_flask_peewee.models.FlaskStorage'
SOCIAL_AUTH_FIELDS_STORED_IN_SESSION = ['keep']
SOCIAL_AUTH_TELEGRAM_BOT_TOKEN = os.environ.get('TG_TOKEN')
SOCIAL_AUTH_TELEGRAM_LOGIN_REDIRECT_URL = '/' if DEV else 'https://yyy.cash'
SOCIAL_AUTH_GOOGLE_OAUTH2_KEY = os.environ.get('GOOGLE_OAUTH_KEY')
SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET = os.environ.get('GOOGLE_OAUTH_SECRET')
SOCIAL_AUTH_GOOGLE_OAUTH2_LOGIN_REDIRECT_URL = '/' if DEV else 'https://yyy.cash'
SOCIAL_AUTH_EMAIL_FORM_URL = '/auth/login/email' if DEV else 'https://yyy.cash/'
SOCIAL_AUTH_EMAIL_FORM_HTML = 'dev/login.html'
SOCIAL_AUTH_EMAIL_LOGIN_REDIRECT_URL = '/' if DEV else 'https://yyy.cash'
|
"""
Shim for NumPy's suppress_warnings
"""
try:
from numpy.testing import suppress_warnings
except ImportError:
# The following two classes are copied from python 2.6 warnings module
# (context manager)
class WarningMessage(object):
"""
Holds the result of a single showwarning() call.
Deprecated in 1.8.0
Notes
-----
`WarningMessage` is copied from the Python 2.6 warnings module,
so it can be used in NumPy with older Python versions.
"""
_WARNING_DETAILS = ("message", "category", "filename", "lineno",
"file", "line")
def __init__(self, message, category, filename, lineno, file=None,
line=None):
local_values = locals()
for attr in self._WARNING_DETAILS:
setattr(self, attr, local_values[attr])
if category:
self._category_name = category.__name__
else:
self._category_name = None
def __str__(self):
return ("{message : %r, category : %r, "
"filename : %r, lineno : %s, "
"line : %r}" % (self.message, self._category_name,
self.filename, self.lineno, self.line))
import re
import warnings
from functools import wraps
class suppress_warnings(object):
"""
Context manager and decorator doing much the same as
``warnings.catch_warnings``.
However, it also provides a filter mechanism to work around
http://bugs.python.org/issue4180.
This bug causes Python before 3.4 to not reliably show warnings again
after they have been ignored once (even within catch_warnings). It
means that no "ignore" filter can be used easily, since following
tests might need to see the warning. Additionally it allows easier
specificity for testing warnings and can be nested.
Parameters
----------
forwarding_rule : str, optional
One of "always", "once", "module", or "location". Analogous to
the usual warnings module filter mode, it is useful to reduce
noise mostly on the outmost level. Unsuppressed and unrecorded
warnings will be forwarded based on this rule. Defaults to
"always". "location" is equivalent to the warnings "default", match
by exact location the warning warning originated from.
Notes
-----
Filters added inside the context manager will be discarded again
when leaving it. Upon entering all filters defined outside a
context will be applied automatically.
When a recording filter is added, matching warnings are stored in the
``log`` attribute as well as in the list returned by ``record``.
If filters are added and the ``module`` keyword is given, the
warning registry of this module will additionally be cleared when
applying it, entering the context, or exiting it. This could cause
warnings to appear a second time after leaving the context if they
were configured to be printed once (default) and were already
printed before the context was entered.
Nesting this context manager will work as expected when the
forwarding rule is "always" (default). Unfiltered and unrecorded
warnings will be passed out and be matched by the outer level.
On the outmost level they will be printed (or caught by another
warnings context). The forwarding rule argument can modify this
behaviour.
Like ``catch_warnings`` this context manager is not threadsafe.
Examples
--------
>>> with suppress_warnings() as sup:
... sup.filter(DeprecationWarning, "Some text")
... sup.filter(module=np.ma.core)
... log = sup.record(FutureWarning, "Does this occur?")
... command_giving_warnings()
... # The FutureWarning was given once, the filtered warnings were
... # ignored. All other warnings abide outside settings (may be
... # printed/error)
... assert_(len(log) == 1)
... assert_(len(sup.log) == 1) # also stored in log attribute
Or as a decorator:
>>> sup = suppress_warnings()
        >>> sup.filter(module=np.ma.core)  # module must match exactly
        >>> @sup
        ... def some_function():
... # do something which causes a warning in np.ma.core
... pass
"""
def __init__(self, forwarding_rule="always"):
self._entered = False
            # Suppressions are either instance-level or defined inside one `with` block:
self._suppressions = []
if forwarding_rule not in {"always", "module", "once", "location"}:
raise ValueError("unsupported forwarding rule.")
self._forwarding_rule = forwarding_rule
def _clear_registries(self):
if hasattr(warnings, "_filters_mutated"):
# clearing the registry should not be necessary on new pythons,
# instead the filters should be mutated.
warnings._filters_mutated()
return
# Simply clear the registry, this should normally be harmless,
# note that on new pythons it would be invalidated anyway.
for module in self._tmp_modules:
if hasattr(module, "__warningregistry__"):
module.__warningregistry__.clear()
def _filter(self, category=Warning, message="", module=None,
record=False):
if record:
record = [] # The log where to store warnings
else:
record = None
if self._entered:
if module is None:
warnings.filterwarnings(
"always", category=category, message=message)
else:
module_regex = module.__name__.replace('.', r'\.') + '$'
warnings.filterwarnings(
"always", category=category, message=message,
module=module_regex)
self._tmp_modules.add(module)
self._clear_registries()
self._tmp_suppressions.append(
(category, message, re.compile(message, re.I), module,
record))
else:
self._suppressions.append(
(category, message, re.compile(message, re.I), module,
record))
return record
def filter(self, category=Warning, message="", module=None):
"""
Add a new suppressing filter or apply it if the state is entered.
Parameters
----------
category : class, optional
Warning class to filter
message : string, optional
Regular expression matching the warning message.
module : module, optional
Module to filter for. Note that the module (and its file)
must match exactly and cannot be a submodule. This may make
it unreliable for external modules.
Notes
-----
When added within a context, filters are only added inside
the context and will be forgotten when the context is exited.
"""
self._filter(category=category, message=message, module=module,
record=False)
def record(self, category=Warning, message="", module=None):
"""
Append a new recording filter or apply it if the state is entered.
All warnings matching will be appended to the ``log`` attribute.
Parameters
----------
category : class, optional
Warning class to filter
message : string, optional
Regular expression matching the warning message.
module : module, optional
Module to filter for. Note that the module (and its file)
must match exactly and cannot be a submodule. This may make
it unreliable for external modules.
Returns
-------
log : list
A list which will be filled with all matched warnings.
Notes
-----
When added within a context, filters are only added inside
the context and will be forgotten when the context is exited.
"""
return self._filter(category=category, message=message,
module=module, record=True)
def __enter__(self):
if self._entered:
raise RuntimeError("cannot enter suppress_warnings twice.")
self._orig_show = warnings.showwarning
if hasattr(warnings, "_showwarnmsg"):
self._orig_showmsg = warnings._showwarnmsg
self._filters = warnings.filters
warnings.filters = self._filters[:]
self._entered = True
self._tmp_suppressions = []
self._tmp_modules = set()
self._forwarded = set()
self.log = [] # reset global log (no need to keep same list)
for cat, mess, _, mod, log in self._suppressions:
if log is not None:
del log[:] # clear the log
if mod is None:
warnings.filterwarnings(
"always", category=cat, message=mess)
else:
module_regex = mod.__name__.replace('.', r'\.') + '$'
warnings.filterwarnings(
"always", category=cat, message=mess,
module=module_regex)
self._tmp_modules.add(mod)
warnings.showwarning = self._showwarning
if hasattr(warnings, "_showwarnmsg"):
warnings._showwarnmsg = self._showwarnmsg
self._clear_registries()
return self
def __exit__(self, *exc_info):
warnings.showwarning = self._orig_show
if hasattr(warnings, "_showwarnmsg"):
warnings._showwarnmsg = self._orig_showmsg
warnings.filters = self._filters
self._clear_registries()
self._entered = False
del self._orig_show
del self._filters
def _showwarnmsg(self, msg):
self._showwarning(msg.message, msg.category, msg.filename,
msg.lineno, msg.file, msg.line, use_warnmsg=msg)
def _showwarning(self, message, category, filename, lineno,
*args, **kwargs):
use_warnmsg = kwargs.pop("use_warnmsg", None)
for cat, _, pattern, mod, rec in (
self._suppressions + self._tmp_suppressions)[::-1]:
if (issubclass(category, cat) and
pattern.match(message.args[0]) is not None):
if mod is None:
# Message and category match, recorded or ignored
if rec is not None:
msg = WarningMessage(message, category, filename,
lineno, **kwargs)
self.log.append(msg)
rec.append(msg)
return
# Use startswith, because warnings strips the c or o from
# .pyc/.pyo files.
elif mod.__file__.startswith(filename):
# The message and module (filename) match
if rec is not None:
msg = WarningMessage(message, category, filename,
lineno, **kwargs)
self.log.append(msg)
rec.append(msg)
return
# There is no filter in place, so pass to the outside handler
# unless we should only pass it once
if self._forwarding_rule == "always":
if use_warnmsg is None:
self._orig_show(message, category, filename, lineno,
*args, **kwargs)
else:
self._orig_showmsg(use_warnmsg)
return
if self._forwarding_rule == "once":
signature = (message.args, category)
elif self._forwarding_rule == "module":
signature = (message.args, category, filename)
elif self._forwarding_rule == "location":
signature = (message.args, category, filename, lineno)
if signature in self._forwarded:
return
self._forwarded.add(signature)
if use_warnmsg is None:
self._orig_show(message, category, filename, lineno, *args,
**kwargs)
else:
self._orig_showmsg(use_warnmsg)
def __call__(self, func):
"""
Function decorator to apply certain suppressions to a whole
function.
"""
@wraps(func)
def new_func(*args, **kwargs):
with self:
return func(*args, **kwargs)
return new_func
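# A minimal usage sketch (not part of the shim): whichever suppress_warnings
# ended up in scope above can record and suppress a matching warning.
# The warning text below is illustrative only.
if __name__ == "__main__":
    import warnings as _warnings
    with suppress_warnings() as sup:
        _recorded = sup.record(UserWarning, "illustrative")
        _warnings.warn("illustrative warning", UserWarning)
    assert len(_recorded) == 1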
|
"""
A simple coroutine in a module that imports the tornado package.
"""
import tornado
import tornado.gen
@tornado.gen.coroutine
def call_api():
response = yield fetch()
    if response.status == 204:
        # No content: return without a payload.
        raise tornado.gen.Return
    if response.status != 200:
        raise BadStatusError()
    raise tornado.gen.Return(response.data)
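# Hedged usage sketch (not part of the original module): `fetch` and
# BadStatusError are referenced above but never defined here, so the
# stand-ins below are hypothetical. They adapt tornado's AsyncHTTPClient
# response to the `.status` / `.data` attributes the coroutine expects.
if __name__ == "__main__":
    import collections
    import tornado.httpclient
    import tornado.ioloop
    Response = collections.namedtuple("Response", ["status", "data"])
    class BadStatusError(Exception):
        pass
    @tornado.gen.coroutine
    def fetch(url="http://example.com/api"):  # hypothetical endpoint
        client = tornado.httpclient.AsyncHTTPClient()
        # raise_error=False so non-200 codes are handled by call_api itself.
        http_response = yield client.fetch(url, raise_error=False)
        raise tornado.gen.Return(Response(http_response.code, http_response.body))
    print(tornado.ioloop.IOLoop.current().run_sync(call_api))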
|
"""General Gaussian filters based on approximating intractable quantities with numerical
quadrature.
Examples include the unscented Kalman filter / RTS smoother which is
based on a third degree fully symmetric rule.
"""
import typing
import numpy as np
import probnum.statespace as pnss
import probnum.type as pntype
from probnum import randvars
from .unscentedtransform import UnscentedTransform
class UKFComponent:
"""Interface for unscented Kalman filtering components."""
def __init__(
self,
non_linear_model,
spread: typing.Optional[pntype.FloatArgType] = 1e-4,
priorpar: typing.Optional[pntype.FloatArgType] = 2.0,
special_scale: typing.Optional[pntype.FloatArgType] = 0.0,
) -> None:
self.non_linear_model = non_linear_model
self.ut = UnscentedTransform(
non_linear_model.input_dim, spread, priorpar, special_scale
)
# Determine the linearization.
# Will be constructed later.
self.sigma_points = None
def assemble_sigma_points(self, at_this_rv: randvars.Normal) -> np.ndarray:
"""Assemble the sigma-points."""
return self.ut.sigma_points(at_this_rv)
class ContinuousUKFComponent(UKFComponent, pnss.SDE):
"""Continuous-time unscented Kalman filter transition.
Parameters
----------
non_linear_model
Non-linear continuous-time model (:class:`SDE`) that is approximated with the UKF.
mde_atol
Absolute tolerance passed to the solver of the moment differential equations (MDEs). Optional. Default is 1e-6.
mde_rtol
Relative tolerance passed to the solver of the moment differential equations (MDEs). Optional. Default is 1e-6.
mde_solver
Method that is chosen in `scipy.integrate.solve_ivp`. Any string that is compatible with ``solve_ivp(..., method=mde_solve,...)`` works here.
Usual candidates are ``[RK45, LSODA, Radau, BDF, RK23, DOP853]``. Optional. Default is LSODA.
"""
def __init__(
self,
non_linear_model,
spread: typing.Optional[pntype.FloatArgType] = 1e-4,
priorpar: typing.Optional[pntype.FloatArgType] = 2.0,
special_scale: typing.Optional[pntype.FloatArgType] = 0.0,
mde_atol: typing.Optional[pntype.FloatArgType] = 1e-6,
mde_rtol: typing.Optional[pntype.FloatArgType] = 1e-6,
mde_solver: typing.Optional[str] = "LSODA",
) -> None:
UKFComponent.__init__(
self,
non_linear_model,
spread=spread,
priorpar=priorpar,
special_scale=special_scale,
)
pnss.SDE.__init__(
self,
non_linear_model.dimension,
non_linear_model.driftfun,
non_linear_model.dispmatfun,
non_linear_model.jacobfun,
)
self.mde_atol = mde_atol
self.mde_rtol = mde_rtol
self.mde_solver = mde_solver
raise NotImplementedError(
"Implementation of the continuous UKF is incomplete. It cannot be used."
)
def forward_realization(
self,
realization,
t,
dt=None,
compute_gain=False,
_diffusion=1.0,
_linearise_at=None,
) -> typing.Tuple[randvars.Normal, typing.Dict]:
return self._forward_realization_as_rv(
realization,
t=t,
dt=dt,
compute_gain=compute_gain,
_diffusion=_diffusion,
_linearise_at=_linearise_at,
)
def forward_rv(
self, rv, t, dt=None, compute_gain=False, _diffusion=1.0, _linearise_at=None
) -> typing.Tuple[randvars.Normal, typing.Dict]:
raise NotImplementedError("TODO") # Issue #234
def backward_realization(
self,
realization_obtained,
rv,
rv_forwarded=None,
gain=None,
t=None,
dt=None,
_diffusion=1.0,
_linearise_at=None,
):
return self._backward_realization_as_rv(
realization_obtained,
rv=rv,
rv_forwarded=rv_forwarded,
gain=gain,
t=t,
dt=dt,
_diffusion=_diffusion,
_linearise_at=_linearise_at,
)
def backward_rv(
self,
rv_obtained,
rv,
rv_forwarded=None,
gain=None,
t=None,
dt=None,
_diffusion=1.0,
_linearise_at=None,
):
raise NotImplementedError("Not available (yet).")
class DiscreteUKFComponent(UKFComponent, pnss.DiscreteGaussian):
"""Discrete unscented Kalman filter transition."""
def __init__(
self,
non_linear_model,
spread: typing.Optional[pntype.FloatArgType] = 1e-4,
priorpar: typing.Optional[pntype.FloatArgType] = 2.0,
special_scale: typing.Optional[pntype.FloatArgType] = 0.0,
) -> None:
UKFComponent.__init__(
self,
non_linear_model,
spread=spread,
priorpar=priorpar,
special_scale=special_scale,
)
pnss.DiscreteGaussian.__init__(
self,
non_linear_model.input_dim,
non_linear_model.output_dim,
non_linear_model.state_trans_fun,
non_linear_model.proc_noise_cov_mat_fun,
non_linear_model.jacob_state_trans_fun,
non_linear_model.proc_noise_cov_cholesky_fun,
)
def forward_rv(
self, rv, t, compute_gain=False, _diffusion=1.0, _linearise_at=None, **kwargs
) -> typing.Tuple[randvars.Normal, typing.Dict]:
compute_sigmapts_at = _linearise_at if _linearise_at is not None else rv
self.sigma_points = self.assemble_sigma_points(at_this_rv=compute_sigmapts_at)
proppts = self.ut.propagate(
t, self.sigma_points, self.non_linear_model.state_trans_fun
)
meascov = _diffusion * self.non_linear_model.proc_noise_cov_mat_fun(t)
mean, cov, crosscov = self.ut.estimate_statistics(
proppts, self.sigma_points, meascov, rv.mean
)
info = {"crosscov": crosscov}
if compute_gain:
gain = crosscov @ np.linalg.inv(cov)
info["gain"] = gain
return randvars.Normal(mean, cov), info
def forward_realization(
self, realization, t, _diffusion=1.0, _linearise_at=None, **kwargs
):
return self._forward_realization_via_forward_rv(
realization,
t=t,
compute_gain=False,
_diffusion=_diffusion,
_linearise_at=_linearise_at,
)
def backward_rv(
self,
rv_obtained,
rv,
rv_forwarded=None,
gain=None,
t=None,
_diffusion=1.0,
_linearise_at=None,
**kwargs
):
# this method is inherited from DiscreteGaussian.
return self._backward_rv_classic(
rv_obtained,
rv,
rv_forwarded,
gain=gain,
t=t,
_diffusion=_diffusion,
_linearise_at=None,
)
def backward_realization(
self,
realization_obtained,
rv,
rv_forwarded=None,
gain=None,
t=None,
_diffusion=1.0,
_linearise_at=None,
**kwargs
):
# this method is inherited from DiscreteGaussian.
return self._backward_realization_via_backward_rv(
realization_obtained,
rv,
rv_forwarded,
gain=gain,
t=t,
_diffusion=_diffusion,
_linearise_at=_linearise_at,
)
@property
def dimension(self) -> int:
return self.ut.dimension
@classmethod
def from_ode(
cls,
ode,
prior,
evlvar=0.0,
):
spatialdim = prior.spatialdim
h0 = prior.proj2coord(coord=0)
h1 = prior.proj2coord(coord=1)
def dyna(t, x):
return h1 @ x - ode.rhs(t, h0 @ x)
def diff(t):
return evlvar * np.eye(spatialdim)
def diff_cholesky(t):
return np.sqrt(evlvar) * np.eye(spatialdim)
disc_model = pnss.DiscreteGaussian(
input_dim=prior.dimension,
output_dim=prior.spatialdim,
state_trans_fun=dyna,
proc_noise_cov_mat_fun=diff,
proc_noise_cov_cholesky_fun=diff_cholesky,
)
return cls(disc_model)
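# Hedged usage sketch (not part of the module): builds a toy non-linear
# discrete-time model with the same keyword arguments used by `from_ode`
# above and pushes a Gaussian through DiscreteUKFComponent.forward_rv.
# Dimensions and functions are illustrative only; the exact probnum API
# is assumed to match the imports at the top of this file.
if __name__ == "__main__":
    _dim = 2
    def _toy_dynamics(t, x):
        return np.sin(x)
    def _toy_proc_noise(t):
        return 0.1 * np.eye(_dim)
    def _toy_proc_noise_cholesky(t):
        return np.sqrt(0.1) * np.eye(_dim)
    _toy_model = pnss.DiscreteGaussian(
        input_dim=_dim,
        output_dim=_dim,
        state_trans_fun=_toy_dynamics,
        proc_noise_cov_mat_fun=_toy_proc_noise,
        proc_noise_cov_cholesky_fun=_toy_proc_noise_cholesky,
    )
    _ukf = DiscreteUKFComponent(_toy_model)
    _rv = randvars.Normal(np.zeros(_dim), np.eye(_dim))
    _propagated, _info = _ukf.forward_rv(_rv, t=0.0, compute_gain=True)
    print(_propagated.mean.shape, _info["gain"].shape)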
|
import scipy.io
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from torch.autograd import Variable
import utils.utils
from utils.tof import *
import importlib
class AmplitudeMask(nn.Module):
def __init__(self, args, device):
super(AmplitudeMask, self).__init__()
if args.init.lower() == "zeros":
# All Zeroes
mask = torch.cat([torch.ones((args.views_x*args.views_y, 1, args.patch_height, args.patch_width), device=device),
torch.zeros((args.views_x*args.views_y, 1, args.patch_height, args.patch_width), device=device)], dim=1)
elif args.init.lower() == "ones":
# All Ones
            mask = torch.cat([torch.zeros((args.views_x*args.views_y, 1, args.patch_height, args.patch_width), device=device),
                              torch.ones((args.views_x*args.views_y, 1, args.patch_height, args.patch_width), device=device)], dim=1)
elif args.init.lower() == "uniform":
# Gaussian Random Mask
mask = torch.empty(args.views_x*args.views_y, 2, args.patch_height, args.patch_width, device=device).uniform_(0,1)
elif args.init.lower() == "bernoulli":
# Bernoulli Random Mask
mask = torch.empty(args.views_x*args.views_y, 1, args.patch_height, args.patch_width, device=device).uniform_(0,1)
mask = torch.bernoulli(mask)
mask = torch.cat([1 - mask, mask], dim=1)
elif args.init.lower() == "custom":
# Design your own
load = torch.tensor([[0.,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0],
[0,0,0,0,1,0,0,0,0],
[0,0,1,0,1,0,1,0,0],
[0,0,1,0,1,0,1,0,0],
[0,0,1,0,1,0,1,0,0],
[0,0,0,0,1,0,0,0,0],
[0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0]])
load = load[:,:,None,None]
m = torch.ones(9,9,args.patch_height,args.patch_width)*load
m = m.reshape(81,args.patch_height,args.patch_width)
mask = torch.zeros(81,2,args.patch_height,args.patch_width, device=device)
mask[:,0,:,:] = 10 - m*10
mask[:,1,:,:] = m*10
elif "barcode" in args.init.lower() and "/" not in args.init:
mask = torch.zeros(81,2,512,512, device=device)
load = torch.from_numpy(np.load("utils/barcode_masks/{0}.npy".format(args.init.lower()))).to(device).reshape(81,512,512)
mask[:,0,:,:][torch.where(load <= 0)] = 10
mask[:,1,:,:][torch.where(load > 0)] = 10
mask = mask[:,:,:args.patch_height,:args.patch_width]
elif "gaussian_circles" in args.init.lower() and "/" not in args.init:
init = args.init.lower().replace("gaussian_circles","")
if "," in init:
mean, sigma = [float(el) for el in init.split(",")]
else:
mean, sigma = 1.5, 1
shape = (args.views_y, args.views_x, args.patch_height, args.patch_width)
mask = utils.utils.gkern_mask(mean, sigma, shape)
mask = utils.utils.un_combine_masks(mask, shape)[:,None,:,:]*10 # scale for softmax
mask = torch.cat([10 - mask, mask], dim=1).float().to(device)
elif "/" in args.init: # load path
mask = torch.load(args.init, map_location=device)
else:
raise Exception("Not implemented.")
self.softmax_weight = 1 # softmax temperature
self.softmax = nn.Softmax(dim=2)
self.mask = nn.Parameter(data=mask, requires_grad=(args.mask_start_epoch == 0))
self.mask = self.mask.to(device)
assert args.img_height % (args.patch_height - args.pad_y*2) == 0
assert args.img_width % (args.patch_width - args.pad_x*2) == 0
self.y_repeat = args.img_height//(args.patch_height - args.pad_y*2)
self.x_repeat = args.img_width//(args.patch_width - args.pad_x*2)
        self.pad_y = args.pad_y
        self.pad_x = args.pad_x
def get_mask_internal(self, patch=True):
if patch:
mask = self.mask
else:
if self.pad_x > 0 or self.pad_y > 0:
mask = self.mask[:,:,self.pad_y:-self.pad_y,self.pad_x:-self.pad_x]
else:
mask = self.mask
mask = mask.repeat(1,1,self.y_repeat, self.x_repeat)
mask = utils.utils.combine_masks(mask[:,1,:,:])[None,None,:,:] # [1,1,9H,9W]
return mask
    def get_mask(self):
        if self.pad_x > 0 or self.pad_y > 0:
            mask = self.mask[:,:,self.pad_y:-self.pad_y,self.pad_x:-self.pad_x]
        else:
            mask = self.mask
        mask = mask.repeat(1,1,self.y_repeat, self.x_repeat)
        # Softmax over the OFF/ON channel (dim=1 for the [C, 2, H, W] mask here);
        # nn.Softmax(dim=2) only matches the unsqueezed [1, C, 2, H, W] layout
        # used in forward(). Keep the 'ON' plane.
        return F.softmax(self.softmax_weight * mask, dim=1)[:,1,:,:]
def forward(self, amplitudes, patch=False):
if patch:
mask = self.mask.unsqueeze(0)
else:
if self.pad_x > 0 or self.pad_y > 0:
mask = self.mask[:,:,self.pad_y:-self.pad_y,self.pad_x:-self.pad_x]
else:
mask = self.mask
mask = mask.repeat(1,1,self.y_repeat, self.x_repeat).unsqueeze(0) # [1, C, 2, H, W]
mask = self.softmax(self.softmax_weight * mask) # threshold 0-1
mask = mask[:,:,1,:,:] # select 'ON' mask, [B*num_patches, C, H, W]
mask = mask.unsqueeze(1) # [B*num_patches, 1, C, H, W]
return mask * amplitudes
class MaskToFNet(nn.Module):
def __init__(self, args, device):
super(MaskToFNet, self).__init__()
self.views_x, self.views_y = args.views_x, args.views_y
self.img_height, self.img_width = args.img_height, args.img_width
self.amplitude_mask = AmplitudeMask(args, device)
if args.use_net:
HourglassRefinement = importlib.import_module('nets.refinement.{0}'.format(args.refinement)).HourglassRefinement
self.refinement = HourglassRefinement()
def forward(self, lightfield, depth, args, parameters, patch=False):
B, C, H, W = lightfield.shape
phi_list = []
# main loop
for f in args.f_list:
amplitudes = sim_quad(depth, f, args.T, args.g, lightfield)
amplitudes = self.amplitude_mask(amplitudes, patch)
amplitudes = amplitudes.mean(dim=2, dtype=torch.float32) # [B*num_patch, 4, patch_height, patch_width]
# white gaussian noise
noise_scale = torch.zeros(amplitudes.shape[0], device=amplitudes.device).uniform_(0.75,1.25)[:,None,None,None] # [B*num_patch, 1,1,1]
noise = torch.normal(std=args.AWGN_sigma, mean=0, size=amplitudes.shape,
device=amplitudes.device, dtype=torch.float32)
if patch:
noise = noise * torch.sqrt(noise_scale) # random scale for training
amplitudes += noise
phi_est, _, _ = decode_quad(amplitudes, args.T, args.mT)
phi_list.append(phi_est.squeeze(1))
if len(args.f_list) == 1:
depth_recon = phase2depth(phi_list[0], args.f_list[0]) # [B, H, W]
else: # phase unwrapping
depth_recon = unwrap_ranking(phi_list, args.f_list, min_depth=0, max_depth=6000)
depth_recon = depth_recon.unsqueeze(1) # [B, 1, H, W]
if args.use_net:
mask = self.amplitude_mask.get_mask_internal(patch=patch)
depth_recon = self.refinement(depth_recon, mask)
return depth_recon # [B, 1, H, W]
def process_amplitudes(self, amplitudes, args, patch=False): #phi_est [B, 4, H, W]
phi_est, _, _ = decode_quad(amplitudes, args.T, args.mT)
phi_est = phi_est.squeeze(1)
depth_recon = phase2depth(phi_est, args.f_list[0]) # [B, H, W]
depth_recon = depth_recon.unsqueeze(1) # [B, 1, H, W]
if args.use_net:
mask = self.amplitude_mask.get_mask_internal(patch=patch)
depth_recon = self.refinement(depth_recon, mask)
return depth_recon # [B, 1, H, W]
def process_depth(self, depth, patch=False):
mask = self.amplitude_mask.get_mask_internal(patch=patch)
depth_recon = self.refinement(depth, mask)
return depth_recon # [B, 1, H, W]
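# Hedged instantiation sketch (not in the original file): the argparse
# namespace below is hypothetical and only covers the fields AmplitudeMask
# reads; values are chosen so the patch/image divisibility asserts hold.
if __name__ == "__main__":
    import argparse
    _sketch_args = argparse.Namespace(
        init="bernoulli", views_x=3, views_y=3,
        patch_height=32, patch_width=32,
        img_height=64, img_width=64,
        pad_x=0, pad_y=0, mask_start_epoch=0,
    )
    _device = torch.device("cpu")
    _amp_mask = AmplitudeMask(_sketch_args, _device)
    # One soft 0-1 mask per view, tiled up to the full image size.
    print(_amp_mask.get_mask().shape)  # expected torch.Size([9, 64, 64])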
|
import numpy as np
import pandas as pd
def createAdjMatrixFile(fileName):
    """Build the Euclidean distance (adjacency) matrix for a TSP instance from its xy file and save it."""
    dirName = "datasetTSP/%s/%s_xy.txt" % (fileName, fileName)
    data = pd.read_csv(dirName, header=None, delimiter=r"\s+").to_numpy()
newMatrix = np.zeros((len(data),len(data)))
for i in range(len(data)):
for j in range(len(data)):
newMatrix[i][j] = np.linalg.norm(data[i]- data[j])
saveDir = "datasetTSP/%s/%s_d.txt" % (fileName, fileName)
np.savetxt(saveDir, newMatrix, delimiter=' ')
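# Hypothetical usage sketch (not in the original script): assumes that
# datasetTSP/<name>/<name>_xy.txt exists with one whitespace-separated
# x/y coordinate pair per line; "example_instance" is a placeholder name.
if __name__ == "__main__":
    createAdjMatrixFile("example_instance")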
|